Mirror of https://github.com/kevinveenbirkenbach/docker-volume-backup.git (synced 2025-12-29 03:42:08 +00:00)
fix(backup): log missing db config instead of raising
- Use module logger in backup/db.py
- Skip db dump when no databases.csv entry is present
- Apply black/formatting cleanup across backup/restore/tests

https://chatgpt.com/share/69519d45-b0dc-800f-acb6-6fb8504e9b46
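The backup/db.py change itself is not part of the hunks below (they only touch the e2e test helpers and tests), so here is a minimal, hypothetical sketch of the behaviour the message describes. The CSV delimiter, column names, and function names are assumptions for illustration, not the repository's actual code.

# Hypothetical sketch only -- backup/db.py is not shown in this diff; the CSV
# delimiter and column names below are assumptions.
import csv
import logging
from typing import Optional

logger = logging.getLogger(__name__)  # module logger, as the commit message describes


def find_db_entry(databases_csv: str, container: str) -> Optional[dict]:
    """Return the databases.csv row for `container`, or None if there is none."""
    with open(databases_csv, newline="") as fh:
        for row in csv.DictReader(fh, delimiter=";"):  # delimiter is an assumption
            if row.get("instance") == container:       # column name is an assumption
                return row
    return None


def dump_database(container: str, databases_csv: str) -> None:
    entry = find_db_entry(databases_csv, container)
    if entry is None:
        # New behaviour: log the missing configuration and skip the dump
        # instead of raising, so the rest of the backup can continue.
        logger.warning(
            "No databases.csv entry for container %s; skipping database dump.",
            container,
        )
        return
    # ... perform the actual dump with the credentials from `entry` here ...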
@@ -34,7 +34,9 @@ def run(
         raise


-def sh(cmd: str, *, capture: bool = True, check: bool = True) -> subprocess.CompletedProcess:
+def sh(
+    cmd: str, *, capture: bool = True, check: bool = True
+) -> subprocess.CompletedProcess:
     return run(["sh", "-lc", cmd], capture=capture, check=check)

@@ -63,24 +65,37 @@ def wait_for_log(container: str, pattern: str, timeout_s: int = 60) -> None:
     raise TimeoutError(f"Timed out waiting for log pattern '{pattern}' in {container}")


-def wait_for_postgres(container: str, *, user: str = "postgres", timeout_s: int = 90) -> None:
+def wait_for_postgres(
+    container: str, *, user: str = "postgres", timeout_s: int = 90
+) -> None:
     """
     Docker-outside-of-Docker friendly readiness: check from inside the DB container.
     """
     deadline = time.time() + timeout_s
     while time.time() < deadline:
         p = run(
-            ["docker", "exec", container, "sh", "-lc", f"pg_isready -U {user} -h localhost"],
+            [
+                "docker",
+                "exec",
+                container,
+                "sh",
+                "-lc",
+                f"pg_isready -U {user} -h localhost",
+            ],
             capture=True,
             check=False,
         )
         if p.returncode == 0:
             return
         time.sleep(1)
-    raise TimeoutError(f"Timed out waiting for Postgres readiness in container {container}")
+    raise TimeoutError(
+        f"Timed out waiting for Postgres readiness in container {container}"
+    )


-def wait_for_mariadb(container: str, *, root_password: str, timeout_s: int = 90) -> None:
+def wait_for_mariadb(
+    container: str, *, root_password: str, timeout_s: int = 90
+) -> None:
     """
     Liveness probe for MariaDB.

@@ -92,17 +107,28 @@ def wait_for_mariadb(container: str, *, root_password: str, timeout_s: int = 90)
     deadline = time.time() + timeout_s
     while time.time() < deadline:
         p = run(
-            ["docker", "exec", container, "sh", "-lc", "mariadb -uroot --protocol=socket -e \"SELECT 1;\""],
+            [
+                "docker",
+                "exec",
+                container,
+                "sh",
+                "-lc",
+                'mariadb -uroot --protocol=socket -e "SELECT 1;"',
+            ],
             capture=True,
             check=False,
         )
         if p.returncode == 0:
             return
         time.sleep(1)
-    raise TimeoutError(f"Timed out waiting for MariaDB readiness in container {container}")
+    raise TimeoutError(
+        f"Timed out waiting for MariaDB readiness in container {container}"
+    )


-def wait_for_mariadb_sql(container: str, *, user: str, password: str, timeout_s: int = 90) -> None:
+def wait_for_mariadb_sql(
+    container: str, *, user: str, password: str, timeout_s: int = 90
+) -> None:
     """
     SQL login readiness for the *dedicated test user* over TCP.

@@ -118,7 +144,7 @@ def wait_for_mariadb_sql(container: str, *, user: str, password: str, timeout_s:
                 container,
                 "sh",
                 "-lc",
-                f"mariadb -h 127.0.0.1 -u{user} -p{password} -e \"SELECT 1;\"",
+                f'mariadb -h 127.0.0.1 -u{user} -p{password} -e "SELECT 1;"',
             ],
             capture=True,
             check=False,
@@ -126,7 +152,9 @@ def wait_for_mariadb_sql(container: str, *, user: str, password: str, timeout_s:
         if p.returncode == 0:
             return
         time.sleep(1)
-    raise TimeoutError(f"Timed out waiting for MariaDB SQL login readiness in container {container}")
+    raise TimeoutError(
+        f"Timed out waiting for MariaDB SQL login readiness in container {container}"
+    )


 def backup_run(
@@ -142,13 +170,20 @@ def backup_run(
 ) -> None:
     cmd = [
         "baudolo",
-        "--compose-dir", compose_dir,
-        "--docker-compose-hard-restart-required", "mailu",
-        "--repo-name", repo_name,
-        "--databases-csv", databases_csv,
-        "--backups-dir", backups_dir,
-        "--database-containers", *database_containers,
-        "--images-no-stop-required", *images_no_stop_required,
+        "--compose-dir",
+        compose_dir,
+        "--docker-compose-hard-restart-required",
+        "mailu",
+        "--repo-name",
+        repo_name,
+        "--databases-csv",
+        databases_csv,
+        "--backups-dir",
+        backups_dir,
+        "--database-containers",
+        *database_containers,
+        "--images-no-stop-required",
+        *images_no_stop_required,
     ]
     if images_no_backup_required:
         cmd += ["--images-no-backup-required", *images_no_backup_required]

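For orientation, backup_run above simply shells out to the baudolo CLI with the flag list built in cmd. A hypothetical invocation could look like the sketch below; the keyword names are inferred from the helper body and every concrete value is a placeholder.

# Illustrative call of the backup_run() helper defined above; all values are
# placeholders and the keyword names are inferred from the cmd list, not verified.
backup_run(
    backups_dir="/tmp/e2e/backups",
    compose_dir="/tmp/e2e/compose",
    repo_name="e2e-repo",
    databases_csv="/tmp/e2e/databases.csv",
    database_containers=["e2e-mariadb"],
    images_no_stop_required=["postgres", "mariadb"],
    images_no_backup_required=["redis"],  # optional: only appended when non-empty
)
# Per the cmd list this expands to roughly:
#   baudolo --compose-dir /tmp/e2e/compose \
#           --docker-compose-hard-restart-required mailu \
#           --repo-name e2e-repo --databases-csv /tmp/e2e/databases.csv \
#           --backups-dir /tmp/e2e/backups --database-containers e2e-mariadb \
#           --images-no-stop-required postgres mariadb \
#           --images-no-backup-required redis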
@@ -1,5 +1,4 @@
 import unittest
 from pathlib import Path

 from .helpers import (
     backup_run,
@@ -33,12 +32,19 @@ class TestE2EFilesFull(unittest.TestCase):

         # create source volume with a file
         run(["docker", "volume", "create", cls.volume_src])
-        run([
-            "docker", "run", "--rm",
-            "-v", f"{cls.volume_src}:/data",
-            "alpine:3.20",
-            "sh", "-lc", "mkdir -p /data && echo 'hello' > /data/hello.txt",
-        ])
+        run(
+            [
+                "docker",
+                "run",
+                "--rm",
+                "-v",
+                f"{cls.volume_src}:/data",
+                "alpine:3.20",
+                "sh",
+                "-lc",
+                "mkdir -p /data && echo 'hello' > /data/hello.txt",
+            ]
+        )

         # databases.csv (unused, but required by CLI)
         cls.databases_csv = f"/tmp/{cls.prefix}/databases.csv"
@@ -75,20 +81,36 @@ class TestE2EFilesFull(unittest.TestCase):

     def test_restore_files_into_new_volume(self) -> None:
         # restore files from volume_src backup into volume_dst
-        run([
-            "baudolo-restore", "files",
-            self.volume_dst, self.hash, self.version,
-            "--backups-dir", self.backups_dir,
-            "--repo-name", self.repo_name,
-            "--source-volume", self.volume_src,
-            "--rsync-image", "ghcr.io/kevinveenbirkenbach/alpine-rsync",
-        ])
+        run(
+            [
+                "baudolo-restore",
+                "files",
+                self.volume_dst,
+                self.hash,
+                self.version,
+                "--backups-dir",
+                self.backups_dir,
+                "--repo-name",
+                self.repo_name,
+                "--source-volume",
+                self.volume_src,
+                "--rsync-image",
+                "ghcr.io/kevinveenbirkenbach/alpine-rsync",
+            ]
+        )

         # verify restored file exists in dst volume
-        p = run([
-            "docker", "run", "--rm",
-            "-v", f"{self.volume_dst}:/data",
-            "alpine:3.20",
-            "sh", "-lc", "cat /data/hello.txt",
-        ])
+        p = run(
+            [
+                "docker",
+                "run",
+                "--rm",
+                "-v",
+                f"{self.volume_dst}:/data",
+                "alpine:3.20",
+                "sh",
+                "-lc",
+                "cat /data/hello.txt",
+            ]
+        )
         self.assertEqual((p.stdout or "").strip(), "hello")

@@ -31,12 +31,19 @@ class TestE2EFilesNoCopy(unittest.TestCase):
         cls.volumes = [cls.volume_src, cls.volume_dst]

         run(["docker", "volume", "create", cls.volume_src])
-        run([
-            "docker", "run", "--rm",
-            "-v", f"{cls.volume_src}:/data",
-            "alpine:3.20",
-            "sh", "-lc", "echo 'hello' > /data/hello.txt",
-        ])
+        run(
+            [
+                "docker",
+                "run",
+                "--rm",
+                "-v",
+                f"{cls.volume_src}:/data",
+                "alpine:3.20",
+                "sh",
+                "-lc",
+                "echo 'hello' > /data/hello.txt",
+            ]
+        )

         cls.databases_csv = f"/tmp/{cls.prefix}/databases.csv"
         write_databases_csv(cls.databases_csv, [])
@@ -59,14 +66,29 @@ class TestE2EFilesNoCopy(unittest.TestCase):
         cleanup_docker(containers=cls.containers, volumes=cls.volumes)

     def test_files_backup_not_present(self) -> None:
-        p = backup_path(self.backups_dir, self.repo_name, self.version, self.volume_src) / "files"
+        p = (
+            backup_path(self.backups_dir, self.repo_name, self.version, self.volume_src)
+            / "files"
+        )
         self.assertFalse(p.exists(), f"Did not expect files backup dir at: {p}")

     def test_restore_files_fails_expected(self) -> None:
-        p = run([
-            "baudolo-restore", "files",
-            self.volume_dst, self.hash, self.version,
-            "--backups-dir", self.backups_dir,
-            "--repo-name", self.repo_name,
-        ], check=False)
-        self.assertEqual(p.returncode, 2, f"Expected exitcode 2, got {p.returncode}\nSTDOUT={p.stdout}\nSTDERR={p.stderr}")
+        p = run(
+            [
+                "baudolo-restore",
+                "files",
+                self.volume_dst,
+                self.hash,
+                self.version,
+                "--backups-dir",
+                self.backups_dir,
+                "--repo-name",
+                self.repo_name,
+            ],
+            check=False,
+        )
+        self.assertEqual(
+            p.returncode,
+            2,
+            f"Expected exitcode 2, got {p.returncode}\nSTDOUT={p.stdout}\nSTDERR={p.stderr}",
+        )

@@ -62,8 +62,12 @@ class TestE2EMariaDBFull(unittest.TestCase):
         )

         # Liveness + actual SQL login readiness (TCP)
-        wait_for_mariadb(cls.db_container, root_password=cls.root_password, timeout_s=90)
-        wait_for_mariadb_sql(cls.db_container, user=cls.db_user, password=cls.db_password, timeout_s=90)
+        wait_for_mariadb(
+            cls.db_container, root_password=cls.root_password, timeout_s=90
+        )
+        wait_for_mariadb_sql(
+            cls.db_container, user=cls.db_user, password=cls.db_password, timeout_s=90
+        )

         # Create table + data via the dedicated user (TCP)
         run(
@@ -74,14 +78,17 @@ class TestE2EMariaDBFull(unittest.TestCase):
                 "sh",
                 "-lc",
                 f"mariadb -h 127.0.0.1 -u{cls.db_user} -p{cls.db_password} "
-                f"-e \"CREATE TABLE {cls.db_name}.t (id INT PRIMARY KEY, v VARCHAR(50)); "
+                f'-e "CREATE TABLE {cls.db_name}.t (id INT PRIMARY KEY, v VARCHAR(50)); '
                 f"INSERT INTO {cls.db_name}.t VALUES (1,'ok');\"",
             ]
         )

         cls.databases_csv = f"/tmp/{cls.prefix}/databases.csv"
         # IMPORTANT: baudolo backup expects credentials for the DB dump.
-        write_databases_csv(cls.databases_csv, [(cls.db_container, cls.db_name, cls.db_user, cls.db_password)])
+        write_databases_csv(
+            cls.databases_csv,
+            [(cls.db_container, cls.db_name, cls.db_user, cls.db_password)],
+        )

         # Backup with file+dump
         backup_run(
@@ -104,7 +111,7 @@ class TestE2EMariaDBFull(unittest.TestCase):
                 "sh",
                 "-lc",
                 f"mariadb -h 127.0.0.1 -u{cls.db_user} -p{cls.db_password} "
-                f"-e \"DROP TABLE {cls.db_name}.t;\"",
+                f'-e "DROP TABLE {cls.db_name}.t;"',
             ]
         )

@@ -137,7 +144,11 @@ class TestE2EMariaDBFull(unittest.TestCase):
         cleanup_docker(containers=cls.containers, volumes=cls.volumes)

     def test_dump_file_exists(self) -> None:
-        p = backup_path(self.backups_dir, self.repo_name, self.version, self.db_volume) / "sql" / f"{self.db_name}.backup.sql"
+        p = (
+            backup_path(self.backups_dir, self.repo_name, self.version, self.db_volume)
+            / "sql"
+            / f"{self.db_name}.backup.sql"
+        )
         self.assertTrue(p.is_file(), f"Expected dump file at: {p}")

     def test_data_restored(self) -> None:
@@ -149,7 +160,7 @@ class TestE2EMariaDBFull(unittest.TestCase):
                 "sh",
                 "-lc",
                 f"mariadb -h 127.0.0.1 -u{self.db_user} -p{self.db_password} "
-                f"-N -e \"SELECT v FROM {self.db_name}.t WHERE id=1;\"",
+                f'-N -e "SELECT v FROM {self.db_name}.t WHERE id=1;"',
             ]
         )
         self.assertEqual((p.stdout or "").strip(), "ok")

@@ -60,8 +60,12 @@ class TestE2EMariaDBNoCopy(unittest.TestCase):
             ]
         )

-        wait_for_mariadb(cls.db_container, root_password=cls.root_password, timeout_s=90)
-        wait_for_mariadb_sql(cls.db_container, user=cls.db_user, password=cls.db_password, timeout_s=90)
+        wait_for_mariadb(
+            cls.db_container, root_password=cls.root_password, timeout_s=90
+        )
+        wait_for_mariadb_sql(
+            cls.db_container, user=cls.db_user, password=cls.db_password, timeout_s=90
+        )

         # Create table + data (TCP)
         run(
@@ -72,13 +76,16 @@ class TestE2EMariaDBNoCopy(unittest.TestCase):
                 "sh",
                 "-lc",
                 f"mariadb -h 127.0.0.1 -u{cls.db_user} -p{cls.db_password} "
-                f"-e \"CREATE TABLE {cls.db_name}.t (id INT PRIMARY KEY, v VARCHAR(50)); "
+                f'-e "CREATE TABLE {cls.db_name}.t (id INT PRIMARY KEY, v VARCHAR(50)); '
                 f"INSERT INTO {cls.db_name}.t VALUES (1,'ok');\"",
             ]
         )

         cls.databases_csv = f"/tmp/{cls.prefix}/databases.csv"
-        write_databases_csv(cls.databases_csv, [(cls.db_container, cls.db_name, cls.db_user, cls.db_password)])
+        write_databases_csv(
+            cls.databases_csv,
+            [(cls.db_container, cls.db_name, cls.db_user, cls.db_password)],
+        )

         # dump-only => no files
         backup_run(
@@ -102,7 +109,7 @@ class TestE2EMariaDBNoCopy(unittest.TestCase):
                 "sh",
                 "-lc",
                 f"mariadb -h 127.0.0.1 -u{cls.db_user} -p{cls.db_password} "
-                f"-e \"DROP TABLE {cls.db_name}.t;\"",
+                f'-e "DROP TABLE {cls.db_name}.t;"',
             ]
         )

@@ -135,7 +142,10 @@ class TestE2EMariaDBNoCopy(unittest.TestCase):
         cleanup_docker(containers=cls.containers, volumes=cls.volumes)

     def test_files_backup_not_present(self) -> None:
-        p = backup_path(self.backups_dir, self.repo_name, self.version, self.db_volume) / "files"
+        p = (
+            backup_path(self.backups_dir, self.repo_name, self.version, self.db_volume)
+            / "files"
+        )
         self.assertFalse(p.exists(), f"Did not expect files backup dir at: {p}")

     def test_data_restored(self) -> None:
@@ -147,7 +157,7 @@ class TestE2EMariaDBNoCopy(unittest.TestCase):
                 "sh",
                 "-lc",
                 f"mariadb -h 127.0.0.1 -u{self.db_user} -p{self.db_password} "
-                f"-N -e \"SELECT v FROM {self.db_name}.t WHERE id=1;\"",
+                f'-N -e "SELECT v FROM {self.db_name}.t WHERE id=1;"',
             ]
         )
         self.assertEqual((p.stdout or "").strip(), "ok")

@@ -33,26 +33,42 @@ class TestE2EPostgresFull(unittest.TestCase):

         run(["docker", "volume", "create", cls.pg_volume])

-        run([
-            "docker", "run", "-d",
-            "--name", cls.pg_container,
-            "-e", "POSTGRES_PASSWORD=pgpw",
-            "-e", "POSTGRES_DB=appdb",
-            "-e", "POSTGRES_USER=postgres",
-            "-v", f"{cls.pg_volume}:/var/lib/postgresql/data",
-            "postgres:16",
-        ])
+        run(
+            [
+                "docker",
+                "run",
+                "-d",
+                "--name",
+                cls.pg_container,
+                "-e",
+                "POSTGRES_PASSWORD=pgpw",
+                "-e",
+                "POSTGRES_DB=appdb",
+                "-e",
+                "POSTGRES_USER=postgres",
+                "-v",
+                f"{cls.pg_volume}:/var/lib/postgresql/data",
+                "postgres:16",
+            ]
+        )
         wait_for_postgres(cls.pg_container, user="postgres", timeout_s=90)

         # Create a table + data
-        run([
-            "docker", "exec", cls.pg_container,
-            "sh", "-lc",
-            "psql -U postgres -d appdb -c \"CREATE TABLE t (id int primary key, v text); INSERT INTO t VALUES (1,'ok');\"",
-        ])
+        run(
+            [
+                "docker",
+                "exec",
+                cls.pg_container,
+                "sh",
+                "-lc",
+                "psql -U postgres -d appdb -c \"CREATE TABLE t (id int primary key, v text); INSERT INTO t VALUES (1,'ok');\"",
+            ]
+        )

         cls.databases_csv = f"/tmp/{cls.prefix}/databases.csv"
-        write_databases_csv(cls.databases_csv, [(cls.pg_container, "appdb", "postgres", "pgpw")])
+        write_databases_csv(
+            cls.databases_csv, [(cls.pg_container, "appdb", "postgres", "pgpw")]
+        )

         backup_run(
             backups_dir=cls.backups_dir,
@@ -66,37 +82,62 @@ class TestE2EPostgresFull(unittest.TestCase):
         cls.hash, cls.version = latest_version_dir(cls.backups_dir, cls.repo_name)

         # Wipe schema
-        run([
-            "docker", "exec", cls.pg_container,
-            "sh", "-lc",
-            "psql -U postgres -d appdb -c \"DROP TABLE t;\"",
-        ])
+        run(
+            [
+                "docker",
+                "exec",
+                cls.pg_container,
+                "sh",
+                "-lc",
+                'psql -U postgres -d appdb -c "DROP TABLE t;"',
+            ]
+        )

         # Restore
-        run([
-            "baudolo-restore", "postgres",
-            cls.pg_volume, cls.hash, cls.version,
-            "--backups-dir", cls.backups_dir,
-            "--repo-name", cls.repo_name,
-            "--container", cls.pg_container,
-            "--db-name", "appdb",
-            "--db-user", "postgres",
-            "--db-password", "pgpw",
-            "--empty",
-        ])
+        run(
+            [
+                "baudolo-restore",
+                "postgres",
+                cls.pg_volume,
+                cls.hash,
+                cls.version,
+                "--backups-dir",
+                cls.backups_dir,
+                "--repo-name",
+                cls.repo_name,
+                "--container",
+                cls.pg_container,
+                "--db-name",
+                "appdb",
+                "--db-user",
+                "postgres",
+                "--db-password",
+                "pgpw",
+                "--empty",
+            ]
+        )

     @classmethod
     def tearDownClass(cls) -> None:
         cleanup_docker(containers=cls.containers, volumes=cls.volumes)

     def test_dump_file_exists(self) -> None:
-        p = backup_path(self.backups_dir, self.repo_name, self.version, self.pg_volume) / "sql" / "appdb.backup.sql"
+        p = (
+            backup_path(self.backups_dir, self.repo_name, self.version, self.pg_volume)
+            / "sql"
+            / "appdb.backup.sql"
+        )
         self.assertTrue(p.is_file(), f"Expected dump file at: {p}")

     def test_data_restored(self) -> None:
-        p = run([
-            "docker", "exec", self.pg_container,
-            "sh", "-lc",
-            "psql -U postgres -d appdb -t -c \"SELECT v FROM t WHERE id=1;\"",
-        ])
+        p = run(
+            [
+                "docker",
+                "exec",
+                self.pg_container,
+                "sh",
+                "-lc",
+                'psql -U postgres -d appdb -t -c "SELECT v FROM t WHERE id=1;"',
+            ]
+        )
         self.assertEqual((p.stdout or "").strip(), "ok")

@@ -32,25 +32,41 @@ class TestE2EPostgresNoCopy(unittest.TestCase):
         cls.volumes = [cls.pg_volume]

         run(["docker", "volume", "create", cls.pg_volume])
-        run([
-            "docker", "run", "-d",
-            "--name", cls.pg_container,
-            "-e", "POSTGRES_PASSWORD=pgpw",
-            "-e", "POSTGRES_DB=appdb",
-            "-e", "POSTGRES_USER=postgres",
-            "-v", f"{cls.pg_volume}:/var/lib/postgresql/data",
-            "postgres:16",
-        ])
+        run(
+            [
+                "docker",
+                "run",
+                "-d",
+                "--name",
+                cls.pg_container,
+                "-e",
+                "POSTGRES_PASSWORD=pgpw",
+                "-e",
+                "POSTGRES_DB=appdb",
+                "-e",
+                "POSTGRES_USER=postgres",
+                "-v",
+                f"{cls.pg_volume}:/var/lib/postgresql/data",
+                "postgres:16",
+            ]
+        )
        wait_for_postgres(cls.pg_container, user="postgres", timeout_s=90)

-        run([
-            "docker", "exec", cls.pg_container,
-            "sh", "-lc",
-            "psql -U postgres -d appdb -c \"CREATE TABLE t (id int primary key, v text); INSERT INTO t VALUES (1,'ok');\"",
-        ])
+        run(
+            [
+                "docker",
+                "exec",
+                cls.pg_container,
+                "sh",
+                "-lc",
+                "psql -U postgres -d appdb -c \"CREATE TABLE t (id int primary key, v text); INSERT INTO t VALUES (1,'ok');\"",
+            ]
+        )

         cls.databases_csv = f"/tmp/{cls.prefix}/databases.csv"
-        write_databases_csv(cls.databases_csv, [(cls.pg_container, "appdb", "postgres", "pgpw")])
+        write_databases_csv(
+            cls.databases_csv, [(cls.pg_container, "appdb", "postgres", "pgpw")]
+        )

         backup_run(
             backups_dir=cls.backups_dir,
@@ -64,36 +80,60 @@ class TestE2EPostgresNoCopy(unittest.TestCase):

         cls.hash, cls.version = latest_version_dir(cls.backups_dir, cls.repo_name)

-        run([
-            "docker", "exec", cls.pg_container,
-            "sh", "-lc",
-            "psql -U postgres -d appdb -c \"DROP TABLE t;\"",
-        ])
+        run(
+            [
+                "docker",
+                "exec",
+                cls.pg_container,
+                "sh",
+                "-lc",
+                'psql -U postgres -d appdb -c "DROP TABLE t;"',
+            ]
+        )

-        run([
-            "baudolo-restore", "postgres",
-            cls.pg_volume, cls.hash, cls.version,
-            "--backups-dir", cls.backups_dir,
-            "--repo-name", cls.repo_name,
-            "--container", cls.pg_container,
-            "--db-name", "appdb",
-            "--db-user", "postgres",
-            "--db-password", "pgpw",
-            "--empty",
-        ])
+        run(
+            [
+                "baudolo-restore",
+                "postgres",
+                cls.pg_volume,
+                cls.hash,
+                cls.version,
+                "--backups-dir",
+                cls.backups_dir,
+                "--repo-name",
+                cls.repo_name,
+                "--container",
+                cls.pg_container,
+                "--db-name",
+                "appdb",
+                "--db-user",
+                "postgres",
+                "--db-password",
+                "pgpw",
+                "--empty",
+            ]
+        )

     @classmethod
     def tearDownClass(cls) -> None:
         cleanup_docker(containers=cls.containers, volumes=cls.volumes)

     def test_files_backup_not_present(self) -> None:
-        p = backup_path(self.backups_dir, self.repo_name, self.version, self.pg_volume) / "files"
+        p = (
+            backup_path(self.backups_dir, self.repo_name, self.version, self.pg_volume)
+            / "files"
+        )
         self.assertFalse(p.exists(), f"Did not expect files backup dir at: {p}")

     def test_data_restored(self) -> None:
-        p = run([
-            "docker", "exec", self.pg_container,
-            "sh", "-lc",
-            "psql -U postgres -d appdb -t -c \"SELECT v FROM t WHERE id=1;\"",
-        ])
+        p = run(
+            [
+                "docker",
+                "exec",
+                self.pg_container,
+                "sh",
+                "-lc",
+                'psql -U postgres -d appdb -t -c "SELECT v FROM t WHERE id=1;"',
+            ]
+        )
         self.assertEqual((p.stdout or "").strip(), "ok")
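Taken together, the reformatted tests all follow one end-to-end pattern: create a volume and database container, wait for readiness, write databases.csv, run the backup, then restore and assert. A condensed sketch of that flow, reusing the helpers the tests import; all concrete container/volume names and paths are placeholders.

# Condensed sketch of the e2e flow exercised by the tests above.
# Names and paths are placeholders; the helpers are the ones these tests
# import via `from .helpers import (...)`.
from .helpers import (
    backup_run,
    latest_version_dir,
    run,
    wait_for_postgres,
    write_databases_csv,
)

run(["docker", "volume", "create", "e2e-pg-data"])
run(
    [
        "docker", "run", "-d", "--name", "e2e-pg",
        "-e", "POSTGRES_PASSWORD=pgpw", "-e", "POSTGRES_DB=appdb",
        "-e", "POSTGRES_USER=postgres",
        "-v", "e2e-pg-data:/var/lib/postgresql/data", "postgres:16",
    ]
)
wait_for_postgres("e2e-pg", user="postgres", timeout_s=90)

write_databases_csv("/tmp/e2e/databases.csv", [("e2e-pg", "appdb", "postgres", "pgpw")])
backup_run(
    backups_dir="/tmp/e2e/backups",
    compose_dir="/tmp/e2e/compose",  # keyword names beyond backups_dir are inferred
    repo_name="e2e-repo",
    databases_csv="/tmp/e2e/databases.csv",
    database_containers=["e2e-pg"],
    images_no_stop_required=["postgres"],
)

# Restore the dump back into the container and let the tests assert on the data.
hash_, version = latest_version_dir("/tmp/e2e/backups", "e2e-repo")
run(
    [
        "baudolo-restore", "postgres", "e2e-pg-data", hash_, version,
        "--backups-dir", "/tmp/e2e/backups", "--repo-name", "e2e-repo",
        "--container", "e2e-pg", "--db-name", "appdb",
        "--db-user", "postgres", "--db-password", "pgpw", "--empty",
    ]
)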