mirror of https://github.com/kevinveenbirkenbach/docker-volume-backup.git
synced 2025-12-27 11:06:35 +00:00
refactor: migrate to src/ package + add DinD-based E2E runner with debug artifacts
- Replace legacy standalone scripts with a proper src-layout Python package (baudolo backup/restore/configure entrypoints via pyproject.toml)
- Remove old scripts/files (backup-docker-to-local.py, recover-docker-from-local.sh, databases.csv.tpl, Todo.md)
- Add Dockerfile to build the project image for local/E2E usage
- Update Makefile: build image and run E2E via external runner script
- Add scripts/test-e2e.sh (flow sketched below):
  - start DinD + dedicated network
  - recreate DinD data volume (and shared /tmp volume)
  - pre-pull helper images (alpine-rsync, alpine)
  - load local baudolo:local image into DinD
  - run unittest E2E suite inside DinD and abort on first failure
  - on failure: dump host+DinD diagnostics and archive shared /tmp into artifacts/
- Add artifacts/ debug outputs produced by failing E2E runs (logs, events, tmp archive)

https://chatgpt.com/share/694ec23f-0794-800f-9a59-8365bc80f435
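For orientation, here is a minimal Python sketch of the runner flow the bullets above describe. It is not scripts/test-e2e.sh itself: the network, container, and volume names, the docker:dind tag, and the exact unittest invocation are assumptions for illustration.

# Minimal sketch of the E2E flow described above -- not the actual
# scripts/test-e2e.sh. NETWORK, DIND_NAME, the volume names, the
# docker:dind tag, and the unittest invocation are assumptions.
import subprocess
import sys

NETWORK = "baudolo-e2e"     # assumed name of the dedicated network
DIND_NAME = "baudolo-dind"  # assumed name of the DinD container


def sh(*cmd: str, check: bool = True) -> subprocess.CompletedProcess:
    return subprocess.run(cmd, check=check, text=True)


def main() -> int:
    # Dedicated network + freshly recreated DinD data volume and shared /tmp volume
    sh("docker", "network", "create", NETWORK, check=False)
    for vol in ("baudolo-dind-data", "baudolo-shared-tmp"):
        sh("docker", "volume", "rm", "-f", vol, check=False)
        sh("docker", "volume", "create", vol)

    # Start Docker-in-Docker
    sh("docker", "run", "-d", "--privileged", "--name", DIND_NAME,
       "--network", NETWORK,
       "-v", "baudolo-dind-data:/var/lib/docker",
       "-v", "baudolo-shared-tmp:/tmp",
       "docker:dind")

    # Wait for the inner daemon to come up (the real script likely polls similarly)
    sh("sh", "-lc", f"until docker exec {DIND_NAME} docker info >/dev/null 2>&1; do sleep 1; done")

    # Pre-pull helper images inside DinD
    for img in ("ghcr.io/kevinveenbirkenbach/alpine-rsync", "alpine:3.20"):
        sh("docker", "exec", DIND_NAME, "docker", "pull", img)

    # Load the locally built image into DinD
    sh("sh", "-lc", f"docker save baudolo:local | docker exec -i {DIND_NAME} docker load")

    # Run the unittest E2E suite inside DinD, aborting on the first failure (-f)
    result = sh("docker", "exec", DIND_NAME, "docker", "run", "--rm",
                "-v", "/var/run/docker.sock:/var/run/docker.sock",
                "-v", "/tmp:/tmp",
                "baudolo:local",
                "python", "-m", "unittest", "discover", "-f", "-s", "tests/e2e", "-t", ".",
                check=False)

    if result.returncode != 0:
        # On failure: dump diagnostics and archive the shared /tmp into artifacts/
        sh("docker", "logs", DIND_NAME, check=False)
        sh("sh", "-lc",
           f"mkdir -p artifacts && docker exec {DIND_NAME} tar -C /tmp -cf - . > artifacts/tmp.tar",
           check=False)
    return result.returncode


if __name__ == "__main__":
    sys.exit(main())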
0  tests/e2e/__init__.py  Normal file
176  tests/e2e/helpers.py  Normal file
@@ -0,0 +1,176 @@
# tests/e2e/helpers.py
from __future__ import annotations

import shutil
import subprocess
import time
import uuid
from pathlib import Path


def run(cmd: list[str], *, capture: bool = True, check: bool = True, cwd: str | None = None) -> subprocess.CompletedProcess:
    return subprocess.run(
        cmd,
        check=check,
        cwd=cwd,
        text=True,
        capture_output=capture,
    )


def sh(cmd: str, *, capture: bool = True, check: bool = True) -> subprocess.CompletedProcess:
    return run(["sh", "-lc", cmd], capture=capture, check=check)


def unique(prefix: str) -> str:
    return f"{prefix}-{uuid.uuid4().hex[:10]}"


def require_docker() -> None:
    run(["docker", "version"], capture=True, check=True)


def machine_hash() -> str:
    out = sh("sha256sum /etc/machine-id | awk '{print $1}'").stdout.strip()
    if len(out) < 16:
        raise RuntimeError("Could not determine machine hash from /etc/machine-id")
    return out


def wait_for_log(container: str, pattern: str, timeout_s: int = 60) -> None:
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        p = run(["docker", "logs", container], capture=True, check=False)
        if pattern in (p.stdout or ""):
            return
        time.sleep(1)
    raise TimeoutError(f"Timed out waiting for log pattern '{pattern}' in {container}")


def wait_for_postgres(container: str, *, user: str = "postgres", timeout_s: int = 90) -> None:
    """
    Docker-outside-of-Docker friendly readiness check: probe from inside the DB container.
    """
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        p = run(
            ["docker", "exec", container, "sh", "-lc", f"pg_isready -U {user} -h localhost"],
            capture=True,
            check=False,
        )
        if p.returncode == 0:
            return
        time.sleep(1)
    raise TimeoutError(f"Timed out waiting for Postgres readiness in container {container}")


def wait_for_mariadb(container: str, *, root_password: str, timeout_s: int = 90) -> None:
    """
    Docker-outside-of-Docker friendly readiness check: probe from inside the DB container.
    """
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        # mariadb-admin is present in the official mariadb image
        p = run(
            ["docker", "exec", container, "sh", "-lc", f"mariadb-admin -uroot -p{root_password} ping -h localhost"],
            capture=True,
            check=False,
        )
        if p.returncode == 0:
            return
        time.sleep(1)
    raise TimeoutError(f"Timed out waiting for MariaDB readiness in container {container}")


def backup_run(
    *,
    backups_dir: str,
    repo_name: str,
    compose_dir: str,
    databases_csv: str,
    database_containers: list[str],
    images_no_stop_required: list[str],
    images_no_backup_required: list[str] | None = None,
    dump_only: bool = False,
) -> None:
    cmd = [
        "baudolo",
        "--compose-dir", compose_dir,
        "--docker-compose-hard-restart-required", "mailu",
        "--repo-name", repo_name,
        "--databases-csv", databases_csv,
        "--backups-dir", backups_dir,
        "--database-containers", *database_containers,
        "--images-no-stop-required", *images_no_stop_required,
    ]
    if images_no_backup_required:
        cmd += ["--images-no-backup-required", *images_no_backup_required]
    if dump_only:
        cmd += ["--dump-only"]

    try:
        run(cmd, capture=True, check=True)
    except subprocess.CalledProcessError as e:
        # Print captured output so failing E2E tests are "live" / debuggable in CI logs
        print(">>> baudolo failed (exit code:", e.returncode, ")")
        if e.stdout:
            print(">>> baudolo STDOUT:\n" + e.stdout)
        if e.stderr:
            print(">>> baudolo STDERR:\n" + e.stderr)
        raise


def latest_version_dir(backups_dir: str, repo_name: str) -> tuple[str, str]:
    """
    Returns (hash, version) for the latest backup.
    """
    h = machine_hash()
    root = Path(backups_dir) / h / repo_name
    if not root.is_dir():
        raise FileNotFoundError(str(root))

    versions = sorted([p.name for p in root.iterdir() if p.is_dir()])
    if not versions:
        raise RuntimeError(f"No versions found under {root}")
    return h, versions[-1]


def backup_path(backups_dir: str, repo_name: str, version: str, volume: str) -> Path:
    h = machine_hash()
    return Path(backups_dir) / h / repo_name / version / volume


def create_minimal_compose_dir(base: str) -> str:
    """
    baudolo requires --compose-dir. Create an empty dir with one non-compose subdir.
    """
    p = Path(base) / "compose-root"
    p.mkdir(parents=True, exist_ok=True)
    (p / "noop").mkdir(parents=True, exist_ok=True)
    return str(p)


def write_databases_csv(path: str, rows: list[tuple[str, str, str, str]]) -> None:
    """
    rows: (instance, database, username, password)
    database may be empty ('') to trigger pg_dumpall-style behavior; these tests
    always pass an explicit database name.
    """
    Path(path).parent.mkdir(parents=True, exist_ok=True)
    with open(path, "w", encoding="utf-8") as f:
        f.write("instance;database;username;password\n")
        for inst, db, user, pw in rows:
            f.write(f"{inst};{db};{user};{pw}\n")


def cleanup_docker(*, containers: list[str], volumes: list[str]) -> None:
    for c in containers:
        run(["docker", "rm", "-f", c], capture=True, check=False)
    for v in volumes:
        run(["docker", "volume", "rm", "-f", v], capture=True, check=False)


def ensure_empty_dir(path: str) -> None:
    p = Path(path)
    if p.exists():
        shutil.rmtree(p)
    p.mkdir(parents=True, exist_ok=True)
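Composed together, these helpers give every test below the same shape. A condensed, hypothetical example of that composition (all names are made up; "dummy-db" mirrors the placeholder the file-based tests use):

# Hypothetical composition of the helpers above; names are illustrative.
from tests.e2e.helpers import (
    backup_run, backup_path, create_minimal_compose_dir,
    ensure_empty_dir, latest_version_dir, run, unique, write_databases_csv,
)

prefix = unique("demo")
backups_dir = f"/tmp/{prefix}/Backups"
ensure_empty_dir(backups_dir)
compose_dir = create_minimal_compose_dir(f"/tmp/{prefix}")

csv = f"/tmp/{prefix}/databases.csv"
write_databases_csv(csv, [])  # no databases in this demo

run(["docker", "volume", "create", f"{prefix}-vol"])
backup_run(
    backups_dir=backups_dir,
    repo_name=prefix,
    compose_dir=compose_dir,
    databases_csv=csv,
    database_containers=["dummy-db"],
    images_no_stop_required=["alpine"],
)
h, version = latest_version_dir(backups_dir, prefix)
print(backup_path(backups_dir, prefix, version, f"{prefix}-vol"))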
84  tests/e2e/test_e2e_files_full.py  Normal file
@@ -0,0 +1,84 @@
import unittest
from pathlib import Path

from .helpers import (
    backup_run,
    backup_path,
    cleanup_docker,
    create_minimal_compose_dir,
    ensure_empty_dir,
    latest_version_dir,
    require_docker,
    unique,
    write_databases_csv,
    run,
)


class TestE2EFilesFull(unittest.TestCase):
    @classmethod
    def setUpClass(cls) -> None:
        require_docker()
        cls.prefix = unique("baudolo-e2e-files-full")
        cls.backups_dir = f"/tmp/{cls.prefix}/Backups"
        ensure_empty_dir(cls.backups_dir)

        cls.compose_dir = create_minimal_compose_dir(f"/tmp/{cls.prefix}")
        cls.repo_name = cls.prefix

        cls.volume_src = f"{cls.prefix}-vol-src"
        cls.volume_dst = f"{cls.prefix}-vol-dst"
        cls.containers = []
        cls.volumes = [cls.volume_src, cls.volume_dst]

        # create source volume with a file
        run(["docker", "volume", "create", cls.volume_src])
        run([
            "docker", "run", "--rm",
            "-v", f"{cls.volume_src}:/data",
            "alpine:3.20",
            "sh", "-lc", "mkdir -p /data && echo 'hello' > /data/hello.txt",
        ])

        # databases.csv (unused, but required by CLI)
        cls.databases_csv = f"/tmp/{cls.prefix}/databases.csv"
        write_databases_csv(cls.databases_csv, [])

        # Run backup (files should be copied)
        backup_run(
            backups_dir=cls.backups_dir,
            repo_name=cls.repo_name,
            compose_dir=cls.compose_dir,
            databases_csv=cls.databases_csv,
            database_containers=["dummy-db"],
            images_no_stop_required=["alpine", "postgres", "mariadb", "mysql"],
        )

        cls.hash, cls.version = latest_version_dir(cls.backups_dir, cls.repo_name)

    @classmethod
    def tearDownClass(cls) -> None:
        cleanup_docker(containers=cls.containers, volumes=cls.volumes)

    def test_files_backup_exists(self) -> None:
        p = backup_path(self.backups_dir, self.repo_name, self.version, self.volume_src) / "files" / "hello.txt"
        self.assertTrue(p.is_file(), f"Expected backed up file at: {p}")

    def test_restore_files_into_new_volume(self) -> None:
        # restore files into dst volume
        run([
            "baudolo-restore", "files",
            self.volume_dst, self.hash, self.version,
            "--backups-dir", self.backups_dir,
            "--repo-name", self.repo_name,
            "--rsync-image", "ghcr.io/kevinveenbirkenbach/alpine-rsync",
        ])

        # verify restored file exists in dst volume
        p = run([
            "docker", "run", "--rm",
            "-v", f"{self.volume_dst}:/data",
            "alpine:3.20",
            "sh", "-lc", "cat /data/hello.txt",
        ])
        self.assertEqual((p.stdout or "").strip(), "hello")
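For orientation, the on-disk layout these assertions walk looks roughly like the following, reconstructed from machine_hash, latest_version_dir, and backup_path above; the exact <version> string is whatever baudolo generates and is not specified here:

/tmp/<prefix>/Backups/
└── <sha256 of /etc/machine-id>/
    └── <repo_name>/
        └── <version>/
            └── <volume>/
                ├── files/hello.txt        (rsync'd volume contents; absent with --dump-only)
                └── sql/<db>.backup.sql    (database dumps, in the DB tests below)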
72  tests/e2e/test_e2e_files_no_copy.py  Normal file
@@ -0,0 +1,72 @@
import unittest

from .helpers import (
    backup_run,
    backup_path,
    cleanup_docker,
    create_minimal_compose_dir,
    ensure_empty_dir,
    latest_version_dir,
    require_docker,
    unique,
    write_databases_csv,
    run,
)


class TestE2EFilesNoCopy(unittest.TestCase):
    @classmethod
    def setUpClass(cls) -> None:
        require_docker()
        cls.prefix = unique("baudolo-e2e-files-nocopy")
        cls.backups_dir = f"/tmp/{cls.prefix}/Backups"
        ensure_empty_dir(cls.backups_dir)

        cls.compose_dir = create_minimal_compose_dir(f"/tmp/{cls.prefix}")
        cls.repo_name = cls.prefix

        cls.volume_src = f"{cls.prefix}-vol-src"
        cls.volume_dst = f"{cls.prefix}-vol-dst"
        cls.containers = []
        cls.volumes = [cls.volume_src, cls.volume_dst]

        run(["docker", "volume", "create", cls.volume_src])
        run([
            "docker", "run", "--rm",
            "-v", f"{cls.volume_src}:/data",
            "alpine:3.20",
            "sh", "-lc", "echo 'hello' > /data/hello.txt",
        ])

        cls.databases_csv = f"/tmp/{cls.prefix}/databases.csv"
        write_databases_csv(cls.databases_csv, [])

        # dump-only => NO file rsync backups
        backup_run(
            backups_dir=cls.backups_dir,
            repo_name=cls.repo_name,
            compose_dir=cls.compose_dir,
            databases_csv=cls.databases_csv,
            database_containers=["dummy-db"],
            images_no_stop_required=["alpine", "postgres", "mariadb", "mysql"],
            dump_only=True,
        )

        cls.hash, cls.version = latest_version_dir(cls.backups_dir, cls.repo_name)

    @classmethod
    def tearDownClass(cls) -> None:
        cleanup_docker(containers=cls.containers, volumes=cls.volumes)

    def test_files_backup_not_present(self) -> None:
        p = backup_path(self.backups_dir, self.repo_name, self.version, self.volume_src) / "files"
        self.assertFalse(p.exists(), f"Did not expect files backup dir at: {p}")

    def test_restore_files_fails_expected(self) -> None:
        p = run([
            "baudolo-restore", "files",
            self.volume_dst, self.hash, self.version,
            "--backups-dir", self.backups_dir,
            "--repo-name", self.repo_name,
        ], check=False)
        self.assertEqual(p.returncode, 2, f"Expected exit code 2, got {p.returncode}\nSTDOUT={p.stdout}\nSTDERR={p.stderr}")
105  tests/e2e/test_e2e_mariadb_full.py  Normal file
@@ -0,0 +1,105 @@
# tests/e2e/test_e2e_mariadb_full.py
import unittest

from .helpers import (
    backup_run,
    backup_path,
    cleanup_docker,
    create_minimal_compose_dir,
    ensure_empty_dir,
    latest_version_dir,
    require_docker,
    unique,
    write_databases_csv,
    run,
    wait_for_mariadb,
)


class TestE2EMariaDBFull(unittest.TestCase):
    @classmethod
    def setUpClass(cls) -> None:
        require_docker()
        cls.prefix = unique("baudolo-e2e-mariadb-full")
        cls.backups_dir = f"/tmp/{cls.prefix}/Backups"
        ensure_empty_dir(cls.backups_dir)
        cls.compose_dir = create_minimal_compose_dir(f"/tmp/{cls.prefix}")
        cls.repo_name = cls.prefix

        cls.db_container = f"{cls.prefix}-mariadb"
        cls.db_volume = f"{cls.prefix}-mariadb-vol"
        cls.containers = [cls.db_container]
        cls.volumes = [cls.db_volume]

        run(["docker", "volume", "create", cls.db_volume])

        # Start MariaDB (no host port publishing needed; we will exec into the container)
        run([
            "docker", "run", "-d",
            "--name", cls.db_container,
            "-e", "MARIADB_ROOT_PASSWORD=rootpw",
            "-v", f"{cls.db_volume}:/var/lib/mysql",
            "mariadb:11",
        ])
        wait_for_mariadb(cls.db_container, root_password="rootpw", timeout_s=90)

        # Create DB + data
        run([
            "docker", "exec", cls.db_container,
            "sh", "-lc",
            "mariadb -uroot -prootpw -e \"CREATE DATABASE appdb; "
            "CREATE TABLE appdb.t (id INT PRIMARY KEY, v VARCHAR(50)); "
            "INSERT INTO appdb.t VALUES (1,'ok');\"",
        ])

        cls.databases_csv = f"/tmp/{cls.prefix}/databases.csv"
        instance = cls.db_container
        write_databases_csv(cls.databases_csv, [(instance, "appdb", "root", "rootpw")])

        # Backup with files + dump
        backup_run(
            backups_dir=cls.backups_dir,
            repo_name=cls.repo_name,
            compose_dir=cls.compose_dir,
            databases_csv=cls.databases_csv,
            database_containers=[cls.db_container],
            images_no_stop_required=["mariadb", "mysql", "alpine", "postgres"],
        )

        cls.hash, cls.version = latest_version_dir(cls.backups_dir, cls.repo_name)

        # Wipe DB
        run([
            "docker", "exec", cls.db_container,
            "sh", "-lc",
            "mariadb -uroot -prootpw -e \"DROP DATABASE appdb;\"",
        ])

        # Restore DB
        run([
            "baudolo-restore", "mariadb",
            cls.db_volume, cls.hash, cls.version,
            "--backups-dir", cls.backups_dir,
            "--repo-name", cls.repo_name,
            "--container", cls.db_container,
            "--db-name", "appdb",
            "--db-user", "root",
            "--db-password", "rootpw",
            "--empty",
        ])

    @classmethod
    def tearDownClass(cls) -> None:
        cleanup_docker(containers=cls.containers, volumes=cls.volumes)

    def test_dump_file_exists(self) -> None:
        p = backup_path(self.backups_dir, self.repo_name, self.version, self.db_volume) / "sql" / "appdb.backup.sql"
        self.assertTrue(p.is_file(), f"Expected dump file at: {p}")

    def test_data_restored(self) -> None:
        p = run([
            "docker", "exec", self.db_container,
            "sh", "-lc",
            "mariadb -uroot -prootpw -N -e \"SELECT v FROM appdb.t WHERE id=1;\"",
        ])
        self.assertEqual((p.stdout or "").strip(), "ok")
102  tests/e2e/test_e2e_mariadb_no_copy.py  Normal file
@@ -0,0 +1,102 @@
# tests/e2e/test_e2e_mariadb_no_copy.py
import unittest

from .helpers import (
    backup_run,
    backup_path,
    cleanup_docker,
    create_minimal_compose_dir,
    ensure_empty_dir,
    latest_version_dir,
    require_docker,
    unique,
    write_databases_csv,
    run,
    wait_for_mariadb,
)


class TestE2EMariaDBNoCopy(unittest.TestCase):
    @classmethod
    def setUpClass(cls) -> None:
        require_docker()
        cls.prefix = unique("baudolo-e2e-mariadb-nocopy")
        cls.backups_dir = f"/tmp/{cls.prefix}/Backups"
        ensure_empty_dir(cls.backups_dir)
        cls.compose_dir = create_minimal_compose_dir(f"/tmp/{cls.prefix}")
        cls.repo_name = cls.prefix

        cls.db_container = f"{cls.prefix}-mariadb"
        cls.db_volume = f"{cls.prefix}-mariadb-vol"
        cls.containers = [cls.db_container]
        cls.volumes = [cls.db_volume]

        run(["docker", "volume", "create", cls.db_volume])
        run([
            "docker", "run", "-d",
            "--name", cls.db_container,
            "-e", "MARIADB_ROOT_PASSWORD=rootpw",
            "-v", f"{cls.db_volume}:/var/lib/mysql",
            "mariadb:11",
        ])
        wait_for_mariadb(cls.db_container, root_password="rootpw", timeout_s=90)

        run([
            "docker", "exec", cls.db_container,
            "sh", "-lc",
            "mariadb -uroot -prootpw -e \"CREATE DATABASE appdb; "
            "CREATE TABLE appdb.t (id INT PRIMARY KEY, v VARCHAR(50)); "
            "INSERT INTO appdb.t VALUES (1,'ok');\"",
        ])

        cls.databases_csv = f"/tmp/{cls.prefix}/databases.csv"
        write_databases_csv(cls.databases_csv, [(cls.db_container, "appdb", "root", "rootpw")])

        # dump-only => no files
        backup_run(
            backups_dir=cls.backups_dir,
            repo_name=cls.repo_name,
            compose_dir=cls.compose_dir,
            databases_csv=cls.databases_csv,
            database_containers=[cls.db_container],
            images_no_stop_required=["mariadb", "mysql", "alpine", "postgres"],
            dump_only=True,
        )

        cls.hash, cls.version = latest_version_dir(cls.backups_dir, cls.repo_name)

        # Wipe DB
        run([
            "docker", "exec", cls.db_container,
            "sh", "-lc",
            "mariadb -uroot -prootpw -e \"DROP DATABASE appdb;\"",
        ])

        # Restore DB
        run([
            "baudolo-restore", "mariadb",
            cls.db_volume, cls.hash, cls.version,
            "--backups-dir", cls.backups_dir,
            "--repo-name", cls.repo_name,
            "--container", cls.db_container,
            "--db-name", "appdb",
            "--db-user", "root",
            "--db-password", "rootpw",
            "--empty",
        ])

    @classmethod
    def tearDownClass(cls) -> None:
        cleanup_docker(containers=cls.containers, volumes=cls.volumes)

    def test_files_backup_not_present(self) -> None:
        p = backup_path(self.backups_dir, self.repo_name, self.version, self.db_volume) / "files"
        self.assertFalse(p.exists(), f"Did not expect files backup dir at: {p}")

    def test_data_restored(self) -> None:
        p = run([
            "docker", "exec", self.db_container,
            "sh", "-lc",
            "mariadb -uroot -prootpw -N -e \"SELECT v FROM appdb.t WHERE id=1;\"",
        ])
        self.assertEqual((p.stdout or "").strip(), "ok")
102  tests/e2e/test_e2e_postgres_full.py  Normal file
@@ -0,0 +1,102 @@
# tests/e2e/test_e2e_postgres_full.py
import unittest

from .helpers import (
    backup_run,
    backup_path,
    cleanup_docker,
    create_minimal_compose_dir,
    ensure_empty_dir,
    latest_version_dir,
    require_docker,
    unique,
    write_databases_csv,
    run,
    wait_for_postgres,
)


class TestE2EPostgresFull(unittest.TestCase):
    @classmethod
    def setUpClass(cls) -> None:
        require_docker()
        cls.prefix = unique("baudolo-e2e-postgres-full")
        cls.backups_dir = f"/tmp/{cls.prefix}/Backups"
        ensure_empty_dir(cls.backups_dir)
        cls.compose_dir = create_minimal_compose_dir(f"/tmp/{cls.prefix}")
        cls.repo_name = cls.prefix

        cls.pg_container = f"{cls.prefix}-pg"
        cls.pg_volume = f"{cls.prefix}-pg-vol"
        cls.containers = [cls.pg_container]
        cls.volumes = [cls.pg_volume]

        run(["docker", "volume", "create", cls.pg_volume])

        run([
            "docker", "run", "-d",
            "--name", cls.pg_container,
            "-e", "POSTGRES_PASSWORD=pgpw",
            "-e", "POSTGRES_DB=appdb",
            "-e", "POSTGRES_USER=postgres",
            "-v", f"{cls.pg_volume}:/var/lib/postgresql/data",
            "postgres:16",
        ])
        wait_for_postgres(cls.pg_container, user="postgres", timeout_s=90)

        # Create a table + data
        run([
            "docker", "exec", cls.pg_container,
            "sh", "-lc",
            "psql -U postgres -d appdb -c \"CREATE TABLE t (id int primary key, v text); INSERT INTO t VALUES (1,'ok');\"",
        ])

        cls.databases_csv = f"/tmp/{cls.prefix}/databases.csv"
        write_databases_csv(cls.databases_csv, [(cls.pg_container, "appdb", "postgres", "pgpw")])

        backup_run(
            backups_dir=cls.backups_dir,
            repo_name=cls.repo_name,
            compose_dir=cls.compose_dir,
            databases_csv=cls.databases_csv,
            database_containers=[cls.pg_container],
            images_no_stop_required=["postgres", "mariadb", "mysql", "alpine"],
        )

        cls.hash, cls.version = latest_version_dir(cls.backups_dir, cls.repo_name)

        # Wipe schema
        run([
            "docker", "exec", cls.pg_container,
            "sh", "-lc",
            "psql -U postgres -d appdb -c \"DROP TABLE t;\"",
        ])

        # Restore
        run([
            "baudolo-restore", "postgres",
            cls.pg_volume, cls.hash, cls.version,
            "--backups-dir", cls.backups_dir,
            "--repo-name", cls.repo_name,
            "--container", cls.pg_container,
            "--db-name", "appdb",
            "--db-user", "postgres",
            "--db-password", "pgpw",
            "--empty",
        ])

    @classmethod
    def tearDownClass(cls) -> None:
        cleanup_docker(containers=cls.containers, volumes=cls.volumes)

    def test_dump_file_exists(self) -> None:
        p = backup_path(self.backups_dir, self.repo_name, self.version, self.pg_volume) / "sql" / "appdb.backup.sql"
        self.assertTrue(p.is_file(), f"Expected dump file at: {p}")

    def test_data_restored(self) -> None:
        p = run([
            "docker", "exec", self.pg_container,
            "sh", "-lc",
            "psql -U postgres -d appdb -t -c \"SELECT v FROM t WHERE id=1;\"",
        ])
        self.assertEqual((p.stdout or "").strip(), "ok")
99  tests/e2e/test_e2e_postgres_no_copy.py  Normal file
@@ -0,0 +1,99 @@
# tests/e2e/test_e2e_postgres_no_copy.py
import unittest

from .helpers import (
    backup_run,
    backup_path,
    cleanup_docker,
    create_minimal_compose_dir,
    ensure_empty_dir,
    latest_version_dir,
    require_docker,
    unique,
    write_databases_csv,
    run,
    wait_for_postgres,
)


class TestE2EPostgresNoCopy(unittest.TestCase):
    @classmethod
    def setUpClass(cls) -> None:
        require_docker()
        cls.prefix = unique("baudolo-e2e-postgres-nocopy")
        cls.backups_dir = f"/tmp/{cls.prefix}/Backups"
        ensure_empty_dir(cls.backups_dir)
        cls.compose_dir = create_minimal_compose_dir(f"/tmp/{cls.prefix}")
        cls.repo_name = cls.prefix

        cls.pg_container = f"{cls.prefix}-pg"
        cls.pg_volume = f"{cls.prefix}-pg-vol"
        cls.containers = [cls.pg_container]
        cls.volumes = [cls.pg_volume]

        run(["docker", "volume", "create", cls.pg_volume])
        run([
            "docker", "run", "-d",
            "--name", cls.pg_container,
            "-e", "POSTGRES_PASSWORD=pgpw",
            "-e", "POSTGRES_DB=appdb",
            "-e", "POSTGRES_USER=postgres",
            "-v", f"{cls.pg_volume}:/var/lib/postgresql/data",
            "postgres:16",
        ])
        wait_for_postgres(cls.pg_container, user="postgres", timeout_s=90)

        run([
            "docker", "exec", cls.pg_container,
            "sh", "-lc",
            "psql -U postgres -d appdb -c \"CREATE TABLE t (id int primary key, v text); INSERT INTO t VALUES (1,'ok');\"",
        ])

        cls.databases_csv = f"/tmp/{cls.prefix}/databases.csv"
        write_databases_csv(cls.databases_csv, [(cls.pg_container, "appdb", "postgres", "pgpw")])

        backup_run(
            backups_dir=cls.backups_dir,
            repo_name=cls.repo_name,
            compose_dir=cls.compose_dir,
            databases_csv=cls.databases_csv,
            database_containers=[cls.pg_container],
            images_no_stop_required=["postgres", "mariadb", "mysql", "alpine"],
            dump_only=True,
        )

        cls.hash, cls.version = latest_version_dir(cls.backups_dir, cls.repo_name)

        run([
            "docker", "exec", cls.pg_container,
            "sh", "-lc",
            "psql -U postgres -d appdb -c \"DROP TABLE t;\"",
        ])

        run([
            "baudolo-restore", "postgres",
            cls.pg_volume, cls.hash, cls.version,
            "--backups-dir", cls.backups_dir,
            "--repo-name", cls.repo_name,
            "--container", cls.pg_container,
            "--db-name", "appdb",
            "--db-user", "postgres",
            "--db-password", "pgpw",
            "--empty",
        ])

    @classmethod
    def tearDownClass(cls) -> None:
        cleanup_docker(containers=cls.containers, volumes=cls.volumes)

    def test_files_backup_not_present(self) -> None:
        p = backup_path(self.backups_dir, self.repo_name, self.version, self.pg_volume) / "files"
        self.assertFalse(p.exists(), f"Did not expect files backup dir at: {p}")

    def test_data_restored(self) -> None:
        p = run([
            "docker", "exec", self.pg_container,
            "sh", "-lc",
            "psql -U postgres -d appdb -t -c \"SELECT v FROM t WHERE id=1;\"",
        ])
        self.assertEqual((p.stdout or "").strip(), "ok")
tests/unit/test_backup.py
@@ -1,64 +1,36 @@
 # tests/unit/test_backup.py
 
 import unittest
 from unittest.mock import patch
-import importlib.util
-import sys
-import os
-import pathlib
 
-# Prevent actual directory creation in backup script import
-dummy_mkdir = lambda self, *args, **kwargs: None
-original_mkdir = pathlib.Path.mkdir
-pathlib.Path.mkdir = dummy_mkdir
+from baudolo.backup.app import requires_stop
 
-# Create a virtual databases.csv in the project root for the module import
-test_dir = os.path.dirname(__file__)
-project_root = os.path.abspath(os.path.join(test_dir, '../../'))
-sys.path.insert(0, project_root)
-db_csv_path = os.path.join(project_root, 'databases.csv')
-with open(db_csv_path, 'w') as f:
-    f.write('instance;database;username;password\n')
 
-# Dynamically load the hyphenated script as module 'backup'
-script_path = os.path.join(project_root, 'backup-docker-to-local.py')
-spec = importlib.util.spec_from_file_location('backup', script_path)
-backup = importlib.util.module_from_spec(spec)
-sys.modules['backup'] = backup
-spec.loader.exec_module(backup)
+class TestRequiresStop(unittest.TestCase):
+    @patch("baudolo.backup.app.get_image_info")
+    def test_requires_stop_false_when_all_images_are_whitelisted(self, mock_get_image_info):
+        # All containers use images containing allowed substrings
+        mock_get_image_info.side_effect = [
+            "repo/mastodon:v4",
+            "repo/wordpress:latest",
+        ]
+        containers = ["c1", "c2"]
+        whitelist = ["mastodon", "wordpress"]
+        self.assertFalse(requires_stop(containers, whitelist))
 
-# Restore original mkdir
-pathlib.Path.mkdir = original_mkdir
+    @patch("baudolo.backup.app.get_image_info")
+    def test_requires_stop_true_when_any_image_is_not_whitelisted(self, mock_get_image_info):
+        mock_get_image_info.side_effect = [
+            "repo/mastodon:v4",
+            "repo/nginx:latest",
+        ]
+        containers = ["c1", "c2"]
+        whitelist = ["mastodon", "wordpress"]
+        self.assertTrue(requires_stop(containers, whitelist))
 
-class TestIsImageWhitelisted(unittest.TestCase):
-    @patch('backup.get_image_info')
-    def test_returns_true_when_image_matches(self, mock_get_image_info):
-        # Simulate a container image containing 'mastodon'
-        mock_get_image_info.return_value = ['repo/mastodon:v4']
-        images = ['mastodon', 'wordpress']
-        self.assertTrue(
-            backup.is_image_whitelisted('any_container', images),
-            "Should return True when at least one image substring matches"
-        )
+    @patch("baudolo.backup.app.get_image_info")
+    def test_requires_stop_true_when_whitelist_empty(self, mock_get_image_info):
+        mock_get_image_info.return_value = "repo/anything:latest"
+        self.assertTrue(requires_stop(["c1"], []))
 
-    @patch('backup.get_image_info')
-    def test_returns_false_when_no_image_matches(self, mock_get_image_info):
-        # Simulate a container image without matching substrings
-        mock_get_image_info.return_value = ['repo/nginx:latest']
-        images = ['mastodon', 'wordpress']
-        self.assertFalse(
-            backup.is_image_whitelisted('any_container', images),
-            "Should return False when no image substring matches"
-        )
-
-    @patch('backup.get_image_info')
-    def test_returns_false_with_empty_image_list(self, mock_get_image_info):
-        # Even if get_image_info returns something, an empty list yields False
-        mock_get_image_info.return_value = ['repo/element:1.0']
-        self.assertFalse(
-            backup.is_image_whitelisted('any_container', []),
-            "Should return False when the images list is empty"
-        )
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
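The three TestRequiresStop cases pin down the contract: requires_stop(containers, whitelist) is False only when every container's image matches some whitelist substring, and an empty whitelist always forces a stop. A minimal implementation consistent with those tests (illustrative only; the real baudolo.backup.app.requires_stop may differ in details):

def get_image_info(container: str) -> str:
    # Stub for illustration; the real helper inspects the container's image
    # (e.g. via `docker inspect`).
    raise NotImplementedError


def requires_stop(containers: list[str], whitelist: list[str]) -> bool:
    # A container may keep running only if its image name contains at least
    # one whitelisted substring; otherwise the backup must stop it first.
    # With an empty whitelist, any() is always False, so this returns True.
    for container in containers:
        image = get_image_info(container)  # e.g. "repo/mastodon:v4"
        if not any(allowed in image for allowed in whitelist):
            return True
    return False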