Mirror of https://github.com/kevinveenbirkenbach/docker-volume-backup.git (synced 2025-12-29 03:42:08 +00:00)

Compare commits (5 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 0222f7f109 | |
| | 6adafe6b1f | |
| | 88b35ee923 | |
| | 71f79929be | |
| | 0fb8efba4f | |
.gitignore (vendored, 3 lines changed)

@@ -1,2 +1,3 @@
 __pycache__
 artifacts/
+*.egg-info
@@ -1,3 +1,10 @@
+## [1.1.0] - 2025-12-28
+
+* **Backup:** Log a warning and skip database dumps when no databases.csv entry is present, instead of raising an exception; introduce module-level logging and apply formatting cleanups across backup/restore code and tests.
+* **CLI:** Switch to an FHS-compliant default backup directory (/var/lib/backup) and use a stable default repository name instead of dynamic detection.
+* **Maintenance:** Update mirror configuration and ignore generated .egg-info files.
+
+
 ## [1.0.0] - 2025-12-27
 
 * Official Release 🥳
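The Backup bullet above corresponds to the database-dump change further down in this compare view. Below is a minimal, self-contained sketch of the new warn-and-skip behaviour, assuming a pandas DataFrame with the databases.csv column names used in this repository; the helper name lookup_instance is illustrative and not part of the codebase.

    from __future__ import annotations

    import logging

    import pandas as pd

    log = logging.getLogger(__name__)


    def lookup_instance(databases_df: pd.DataFrame, instance_name: str):
        """Return credential rows for an instance, or None after logging a warning."""
        entries = databases_df.loc[databases_df["instance"] == instance_name]
        if entries.empty:
            # 1.0.0 raised an exception here; 1.1.0 warns and lets the caller skip the dump.
            log.warning("No entry found for instance '%s'", instance_name)
            return None
        return entries


    if __name__ == "__main__":
        logging.basicConfig(level=logging.WARNING)
        empty_df = pd.DataFrame(columns=["instance", "database", "username", "password"])
        print(lookup_instance(empty_df, "missing-instance"))  # logs a warning, prints None
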
MIRRORS (2 lines changed)

@@ -1,4 +1,4 @@
 git@github.com:kevinveenbirkenbach/backup-docker-to-local.git
 ssh://git@git.veen.world:2201/kevinveenbirkenbach/backup-docker-to-local.git
 ssh://git@code.infinito.nexus:2201/kevinveenbirkenbach/backup-docker-to-local.git
-https://pypi.org/project/baudolo/
+https://pypi.org/project/backup-docker-to-local/
dist/backup_docker_to_local-1.0.0-py3-none-any.whl (vendored, new binary file; contents not shown)
dist/backup_docker_to_local-1.0.0.tar.gz (vendored, new binary file; contents not shown)
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "backup-docker-to-local"
-version = "1.0.0"
+version = "1.1.0"
 description = "Backup Docker volumes to local with rsync and optional DB dumps."
 readme = "README.md"
 requires-python = ">=3.9"
@@ -51,7 +51,9 @@ def is_image_ignored(container: str, images_no_backup_required: list[str]) -> bo
     return any(pat in img for pat in images_no_backup_required)
 
 
-def volume_is_fully_ignored(containers: list[str], images_no_backup_required: list[str]) -> bool:
+def volume_is_fully_ignored(
+    containers: list[str], images_no_backup_required: list[str]
+) -> bool:
     """
     Skip file backup only if all containers linked to the volume are ignored.
     """

@@ -178,6 +180,8 @@ def main() -> int:
     print("Finished volume backups.", flush=True)
 
     print("Handling Docker Compose services...", flush=True)
-    handle_docker_compose_services(args.compose_dir, args.docker_compose_hard_restart_required)
+    handle_docker_compose_services(
+        args.compose_dir, args.docker_compose_hard_restart_required
+    )
 
     return 0
@@ -2,22 +2,6 @@ from __future__ import annotations
 
 import argparse
 import os
-from pathlib import Path
-
-
-def _default_repo_name() -> str:
-    """
-    Derive the repository name from the folder that contains `src/`.
-
-    Expected layout:
-      <repo-root>/src/baudolo/backup/cli.py
-
-      => parents[0]=backup, [1]=baudolo, [2]=src, [3]=repo-root
-    """
-    try:
-        return Path(__file__).resolve().parents[3].name
-    except Exception:
-        return "backup-docker-to-local"
 
 
 def parse_args() -> argparse.Namespace:

@@ -41,7 +25,7 @@ def parse_args() -> argparse.Namespace:
 
     p.add_argument(
         "--repo-name",
-        default=_default_repo_name(),
+        default="backup-docker-to-local",
         help="Backup repo folder name under <backups-dir>/<machine-id>/ (default: git repo folder name)",
     )
     p.add_argument(

@@ -51,8 +35,8 @@
     )
     p.add_argument(
         "--backups-dir",
-        default="/Backups",
-        help="Backup root directory (default: /Backups)",
+        default="/var/lib/backup/",
+        help="Backup root directory (default: /var/lib/backup/)",
     )
 
     p.add_argument(
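The parse_args() hunks above only change defaults: the dynamic repo-name detection is removed and the backup root moves to an FHS path. A minimal sketch of how the 1.1.0 defaults resolve when no flags are passed; build_parser is an illustrative name and the real parser defines many more options.

    import argparse


    def build_parser() -> argparse.ArgumentParser:
        p = argparse.ArgumentParser(prog="baudolo")
        # New 1.1.0 defaults taken from the diff above.
        p.add_argument("--repo-name", default="backup-docker-to-local")
        p.add_argument("--backups-dir", default="/var/lib/backup/")
        return p


    if __name__ == "__main__":
        args = build_parser().parse_args([])
        # Backups land under <backups-dir>/<machine-id>/<repo-name>/ per the --repo-name help text.
        print(args.backups_dir, args.repo_name)  # /var/lib/backup/ backup-docker-to-local
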
@@ -10,7 +10,9 @@ def hard_restart_docker_services(dir_path: str) -> None:
     subprocess.run(["docker-compose", "up", "-d"], cwd=dir_path, check=True)
 
 
-def handle_docker_compose_services(parent_directory: str, hard_restart_required: list[str]) -> None:
+def handle_docker_compose_services(
+    parent_directory: str, hard_restart_required: list[str]
+) -> None:
     for entry in os.scandir(parent_directory):
         if not entry.is_dir():
             continue
@@ -5,9 +5,12 @@ import pathlib
 import re
 
 import pandas
+import logging
 
 from .shell import BackupException, execute_shell_command
 
+log = logging.getLogger(__name__)
+
 
 def get_instance(container: str, database_containers: list[str]) -> str:
     if container in database_containers:

@@ -15,7 +18,9 @@ def get_instance(container: str, database_containers: list[str]) -> str:
     return re.split(r"(_|-)(database|db|postgres)", container)[0]
 
 
-def fallback_pg_dumpall(container: str, username: str, password: str, out_file: str) -> None:
+def fallback_pg_dumpall(
+    container: str, username: str, password: str, out_file: str
+) -> None:
     cmd = (
         f"PGPASSWORD={password} docker exec -i {container} "
         f"pg_dumpall -U {username} -h localhost > {out_file}"

@@ -34,7 +39,8 @@ def backup_database(
     instance_name = get_instance(container, database_containers)
     entries = databases_df.loc[databases_df["instance"] == instance_name]
     if entries.empty:
-        raise BackupException(f"No entry found for instance '{instance_name}'")
+        log.warning("No entry found for instance '%s'", instance_name)
+        return
 
     out_dir = os.path.join(volume_dir, "sql")
     pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)

@@ -68,6 +74,9 @@ def backup_database(
             execute_shell_command(cmd)
         except BackupException as e:
             print(f"pg_dump failed: {e}", flush=True)
-            print(f"Falling back to pg_dumpall for instance '{instance_name}'", flush=True)
+            print(
+                f"Falling back to pg_dumpall for instance '{instance_name}'",
+                flush=True,
+            )
             fallback_pg_dumpall(container, user, password, cluster_file)
             continue
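For orientation, a self-contained sketch of the dump-with-fallback flow shown in the last two hunks: try a per-database dump, and on failure fall back to pg_dumpall the same way fallback_pg_dumpall() does. Only the fallback command line appears in the diff; the per-database pg_dump command and the wrapper names run_shell and dump_database are assumptions for illustration.

    from __future__ import annotations

    import subprocess


    class BackupException(Exception):
        """Raised when a shell command used for a dump exits non-zero."""


    def run_shell(cmd: str) -> None:
        # check=True turns a failing dump command into an exception we can catch below.
        try:
            subprocess.run(cmd, shell=True, check=True)
        except subprocess.CalledProcessError as exc:
            raise BackupException(str(exc)) from exc


    def dump_database(
        container: str, db: str, user: str, password: str, out_file: str, cluster_file: str
    ) -> None:
        # Assumed per-database command; the diff only shows the fallback below.
        per_db = (
            f"PGPASSWORD={password} docker exec -i {container} "
            f"pg_dump -U {user} -d {db} -h localhost > {out_file}"
        )
        try:
            run_shell(per_db)
        except BackupException as e:
            print(f"pg_dump failed: {e}", flush=True)
            print(f"Falling back to pg_dumpall for instance '{container}'", flush=True)
            # Same fallback command as fallback_pg_dumpall() in the hunk above.
            run_shell(
                f"PGPASSWORD={password} docker exec -i {container} "
                f"pg_dumpall -U {user} -h localhost > {cluster_file}"
            )
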
@@ -37,7 +37,9 @@ def change_containers_status(containers: list[str], status: str) -> None:
 def docker_volume_exists(volume: str) -> bool:
     # Avoid throwing exceptions for exists checks.
     try:
-        execute_shell_command(f"docker volume inspect {volume} >/dev/null 2>&1 && echo OK")
+        execute_shell_command(
+            f"docker volume inspect {volume} >/dev/null 2>&1 && echo OK"
+        )
         return True
     except Exception:
         return False
@@ -13,7 +13,9 @@ def get_storage_path(volume_name: str) -> str:
     return f"{path}/"
 
 
-def get_last_backup_dir(versions_dir: str, volume_name: str, current_backup_dir: str) -> str | None:
+def get_last_backup_dir(
+    versions_dir: str, volume_name: str, current_backup_dir: str
+) -> str | None:
     versions = sorted(os.listdir(versions_dir), reverse=True)
     for version in versions:
         candidate = os.path.join(versions_dir, version, volume_name, "files", "")

@@ -37,6 +39,8 @@ def backup_volume(versions_dir: str, volume_name: str, volume_dir: str) -> None:
         execute_shell_command(cmd)
     except BackupException as e:
         if "file has vanished" in str(e):
-            print("Warning: Some files vanished before transfer. Continuing.", flush=True)
+            print(
+                "Warning: Some files vanished before transfer. Continuing.", flush=True
+            )
         else:
             raise
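The "file has vanished" branch above tolerates files disappearing from a live volume while rsync copies it. A small sketch of the same tolerance when invoking rsync directly: the repository matches on rsync's error text, while this sketch checks rsync's documented exit code 24 (partial transfer due to vanished source files); the paths are placeholders.

    from __future__ import annotations

    import subprocess
    import sys


    def rsync_tolerating_vanished(src: str, dst: str) -> None:
        # rsync exits with 24 when source files vanished during the transfer.
        result = subprocess.run(["rsync", "-a", "--delete", src, dst])
        if result.returncode == 24:
            print("Warning: Some files vanished before transfer. Continuing.", flush=True)
        elif result.returncode != 0:
            sys.exit(result.returncode)


    if __name__ == "__main__":
        rsync_tolerating_vanished(
            "/var/lib/docker/volumes/example/_data/",  # placeholder source
            "/var/lib/backup/example/files/",          # placeholder destination
        )
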
@@ -1 +1 @@
-__all__ = ["main"]
+__all__ = ["main"]
@@ -66,7 +66,9 @@ def main(argv: list[str] | None = None) -> int:
     # ------------------------------------------------------------------
     # mariadb
     # ------------------------------------------------------------------
-    p_mdb = sub.add_parser("mariadb", help="Restore a single MariaDB/MySQL-compatible dump")
+    p_mdb = sub.add_parser(
+        "mariadb", help="Restore a single MariaDB/MySQL-compatible dump"
+    )
     _add_common_backup_args(p_mdb)
     p_mdb.add_argument("--container", required=True)
     p_mdb.add_argument("--db-name", required=True)
@@ -1 +1 @@
-"""Database restore handlers (Postgres, MariaDB/MySQL)."""
+"""Database restore handlers (Postgres, MariaDB/MySQL)."""
@@ -23,7 +23,9 @@ exit 42
             raise RuntimeError("empty client detection output")
         return out
     except Exception as e:
-        print("ERROR: neither 'mariadb' nor 'mysql' found in container.", file=sys.stderr)
+        print(
+            "ERROR: neither 'mariadb' nor 'mysql' found in container.", file=sys.stderr
+        )
         raise e
 
 

@@ -47,7 +49,14 @@ def restore_mariadb_sql(
     # MariaDB 11 images may not contain the mysql binary at all.
     docker_exec(
         container,
-        [client, "-u", user, f"--password={password}", "-e", "SET FOREIGN_KEY_CHECKS=0;"],
+        [
+            client,
+            "-u",
+            user,
+            f"--password={password}",
+            "-e",
+            "SET FOREIGN_KEY_CHECKS=0;",
+        ],
     )
 
     result = docker_exec(

@@ -80,10 +89,19 @@ def restore_mariadb_sql(
 
     docker_exec(
         container,
-        [client, "-u", user, f"--password={password}", "-e", "SET FOREIGN_KEY_CHECKS=1;"],
+        [
+            client,
+            "-u",
+            user,
+            f"--password={password}",
+            "-e",
+            "SET FOREIGN_KEY_CHECKS=1;",
+        ],
     )
 
     with open(sql_path, "rb") as f:
-        docker_exec(container, [client, "-u", user, f"--password={password}", db_name], stdin=f)
+        docker_exec(
+            container, [client, "-u", user, f"--password={password}", db_name], stdin=f
+        )
 
     print(f"MariaDB/MySQL restore complete for db '{db_name}'.")
|||||||
@@ -6,7 +6,9 @@ import sys
|
|||||||
from .run import run, docker_volume_exists
|
from .run import run, docker_volume_exists
|
||||||
|
|
||||||
|
|
||||||
def restore_volume_files(volume_name: str, backup_files_dir: str, *, rsync_image: str) -> int:
|
def restore_volume_files(
|
||||||
|
volume_name: str, backup_files_dir: str, *, rsync_image: str
|
||||||
|
) -> int:
|
||||||
if not os.path.isdir(backup_files_dir):
|
if not os.path.isdir(backup_files_dir):
|
||||||
print(f"ERROR: backup files dir not found: {backup_files_dir}", file=sys.stderr)
|
print(f"ERROR: backup files dir not found: {backup_files_dir}", file=sys.stderr)
|
||||||
return 2
|
return 2
|
||||||
|
|||||||
@@ -2,21 +2,24 @@ import pandas as pd
 import argparse
 import os
 
+
 def check_and_add_entry(file_path, instance, database, username, password):
     # Check if the file exists and is not empty
     if os.path.exists(file_path) and os.path.getsize(file_path) > 0:
         # Read the existing CSV file with header
-        df = pd.read_csv(file_path, sep=';')
+        df = pd.read_csv(file_path, sep=";")
     else:
         # Create a new DataFrame with columns if file does not exist
-        df = pd.DataFrame(columns=['instance', 'database', 'username', 'password'])
+        df = pd.DataFrame(columns=["instance", "database", "username", "password"])
 
     # Check if the entry exists and remove it
     mask = (
-        (df['instance'] == instance) &
-        ((df['database'] == database) |
-         (((df['database'].isna()) | (df['database'] == '')) & (database == ''))) &
-        (df['username'] == username)
+        (df["instance"] == instance)
+        & (
+            (df["database"] == database)
+            | (((df["database"].isna()) | (df["database"] == "")) & (database == ""))
+        )
+        & (df["username"] == username)
     )
 
     if not df[mask].empty:

@@ -26,25 +29,40 @@ def check_and_add_entry(file_path, instance, database, username, password):
     print("Adding new entry.")
 
     # Create a new DataFrame for the new entry
-    new_entry = pd.DataFrame([{'instance': instance, 'database': database, 'username': username, 'password': password}])
+    new_entry = pd.DataFrame(
+        [
+            {
+                "instance": instance,
+                "database": database,
+                "username": username,
+                "password": password,
+            }
+        ]
+    )
 
     # Add (or replace) the entry using concat
     df = pd.concat([df, new_entry], ignore_index=True)
 
     # Save the updated CSV file
-    df.to_csv(file_path, sep=';', index=False)
+    df.to_csv(file_path, sep=";", index=False)
 
 
 def main():
-    parser = argparse.ArgumentParser(description="Check and replace (or add) a database entry in a CSV file.")
+    parser = argparse.ArgumentParser(
+        description="Check and replace (or add) a database entry in a CSV file."
+    )
     parser.add_argument("file_path", help="Path to the CSV file")
     parser.add_argument("instance", help="Database instance")
     parser.add_argument("database", help="Database name")
     parser.add_argument("username", help="Username")
-    parser.add_argument("password", nargs='?', default="", help="Password (optional)")
+    parser.add_argument("password", nargs="?", default="", help="Password (optional)")
 
     args = parser.parse_args()
 
-    check_and_add_entry(args.file_path, args.instance, args.database, args.username, args.password)
+    check_and_add_entry(
+        args.file_path, args.instance, args.database, args.username, args.password
+    )
 
 
 if __name__ == "__main__":
     main()
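A short usage sketch for the seeding script above: an entry with the same instance, database, and username is removed and re-added, so re-running the seed replaces credentials instead of duplicating rows. The script filename and the example values are assumptions for illustration; the positional argument order follows the argparse definition in the hunk.

    import subprocess
    import sys
    from pathlib import Path

    csv_path = Path("/tmp/databases.csv")
    seeder = "database_entry_seeder.py"  # placeholder: the script's real path is not shown on this page


    def seed(*args: str) -> None:
        subprocess.run([sys.executable, seeder, str(csv_path), *args], check=True)


    # First run creates the CSV with a header row and one entry.
    seed("central-mariadb", "nextcloud", "nc_user", "secret")
    # Re-running with the same instance/database/username replaces the row (password updated).
    seed("central-mariadb", "nextcloud", "nc_user", "rotated-secret")

    print(csv_path.read_text())
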
@@ -34,7 +34,9 @@ def run(
         raise
 
 
-def sh(cmd: str, *, capture: bool = True, check: bool = True) -> subprocess.CompletedProcess:
+def sh(
+    cmd: str, *, capture: bool = True, check: bool = True
+) -> subprocess.CompletedProcess:
     return run(["sh", "-lc", cmd], capture=capture, check=check)
 
 

@@ -63,24 +65,37 @@ def wait_for_log(container: str, pattern: str, timeout_s: int = 60) -> None:
     raise TimeoutError(f"Timed out waiting for log pattern '{pattern}' in {container}")
 
 
-def wait_for_postgres(container: str, *, user: str = "postgres", timeout_s: int = 90) -> None:
+def wait_for_postgres(
+    container: str, *, user: str = "postgres", timeout_s: int = 90
+) -> None:
     """
     Docker-outside-of-Docker friendly readiness: check from inside the DB container.
     """
     deadline = time.time() + timeout_s
     while time.time() < deadline:
         p = run(
-            ["docker", "exec", container, "sh", "-lc", f"pg_isready -U {user} -h localhost"],
+            [
+                "docker",
+                "exec",
+                container,
+                "sh",
+                "-lc",
+                f"pg_isready -U {user} -h localhost",
+            ],
             capture=True,
             check=False,
         )
         if p.returncode == 0:
             return
         time.sleep(1)
-    raise TimeoutError(f"Timed out waiting for Postgres readiness in container {container}")
+    raise TimeoutError(
+        f"Timed out waiting for Postgres readiness in container {container}"
+    )
 
 
-def wait_for_mariadb(container: str, *, root_password: str, timeout_s: int = 90) -> None:
+def wait_for_mariadb(
+    container: str, *, root_password: str, timeout_s: int = 90
+) -> None:
     """
     Liveness probe for MariaDB.
 

@@ -92,17 +107,28 @@ def wait_for_mariadb(container: str, *, root_password: str, timeout_s: int = 90)
     deadline = time.time() + timeout_s
     while time.time() < deadline:
         p = run(
-            ["docker", "exec", container, "sh", "-lc", "mariadb -uroot --protocol=socket -e \"SELECT 1;\""],
+            [
+                "docker",
+                "exec",
+                container,
+                "sh",
+                "-lc",
+                'mariadb -uroot --protocol=socket -e "SELECT 1;"',
+            ],
             capture=True,
             check=False,
         )
         if p.returncode == 0:
             return
         time.sleep(1)
-    raise TimeoutError(f"Timed out waiting for MariaDB readiness in container {container}")
+    raise TimeoutError(
+        f"Timed out waiting for MariaDB readiness in container {container}"
+    )
 
 
-def wait_for_mariadb_sql(container: str, *, user: str, password: str, timeout_s: int = 90) -> None:
+def wait_for_mariadb_sql(
+    container: str, *, user: str, password: str, timeout_s: int = 90
+) -> None:
     """
     SQL login readiness for the *dedicated test user* over TCP.
 

@@ -118,7 +144,7 @@ def wait_for_mariadb_sql(container: str, *, user: str, password: str, timeout_s:
                 container,
                 "sh",
                 "-lc",
-                f"mariadb -h 127.0.0.1 -u{user} -p{password} -e \"SELECT 1;\"",
+                f'mariadb -h 127.0.0.1 -u{user} -p{password} -e "SELECT 1;"',
             ],
             capture=True,
             check=False,

@@ -126,7 +152,9 @@ def wait_for_mariadb_sql(container: str, *, user: str, password: str, timeout_s:
         if p.returncode == 0:
             return
         time.sleep(1)
-    raise TimeoutError(f"Timed out waiting for MariaDB SQL login readiness in container {container}")
+    raise TimeoutError(
+        f"Timed out waiting for MariaDB SQL login readiness in container {container}"
+    )
 
 
 def backup_run(

@@ -142,13 +170,20 @@ def backup_run(
 ) -> None:
     cmd = [
         "baudolo",
-        "--compose-dir", compose_dir,
-        "--docker-compose-hard-restart-required", "mailu",
-        "--repo-name", repo_name,
-        "--databases-csv", databases_csv,
-        "--backups-dir", backups_dir,
-        "--database-containers", *database_containers,
-        "--images-no-stop-required", *images_no_stop_required,
+        "--compose-dir",
+        compose_dir,
+        "--docker-compose-hard-restart-required",
+        "mailu",
+        "--repo-name",
+        repo_name,
+        "--databases-csv",
+        databases_csv,
+        "--backups-dir",
+        backups_dir,
+        "--database-containers",
+        *database_containers,
+        "--images-no-stop-required",
+        *images_no_stop_required,
     ]
     if images_no_backup_required:
         cmd += ["--images-no-backup-required", *images_no_backup_required]
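All of the wait_for_* helpers above share one deadline-polling shape: run a cheap probe inside the container, return on success, sleep and retry until the timeout. A generic version of that pattern for reference; wait_until and its arguments are illustrative names, not part of the test helpers.

    from __future__ import annotations

    import time
    from typing import Callable


    def wait_until(
        probe: Callable[[], bool],
        *,
        timeout_s: int = 90,
        interval_s: float = 1.0,
        what: str = "condition",
    ) -> None:
        """Poll `probe` until it returns True or the deadline passes."""
        deadline = time.time() + timeout_s
        while time.time() < deadline:
            if probe():
                return
            time.sleep(interval_s)
        raise TimeoutError(f"Timed out waiting for {what}")


    if __name__ == "__main__":
        # Example: readiness is reached after a few seconds.
        start = time.time()
        wait_until(lambda: time.time() - start > 3, timeout_s=10, what="demo readiness")
        print("ready")
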
@@ -1,5 +1,4 @@
 import unittest
-from pathlib import Path
 
 from .helpers import (
     backup_run,

@@ -33,12 +32,19 @@ class TestE2EFilesFull(unittest.TestCase):
 
         # create source volume with a file
         run(["docker", "volume", "create", cls.volume_src])
-        run([
-            "docker", "run", "--rm",
-            "-v", f"{cls.volume_src}:/data",
-            "alpine:3.20",
-            "sh", "-lc", "mkdir -p /data && echo 'hello' > /data/hello.txt",
-        ])
+        run(
+            [
+                "docker",
+                "run",
+                "--rm",
+                "-v",
+                f"{cls.volume_src}:/data",
+                "alpine:3.20",
+                "sh",
+                "-lc",
+                "mkdir -p /data && echo 'hello' > /data/hello.txt",
+            ]
+        )
 
         # databases.csv (unused, but required by CLI)
         cls.databases_csv = f"/tmp/{cls.prefix}/databases.csv"

@@ -75,20 +81,36 @@ class TestE2EFilesFull(unittest.TestCase):
 
     def test_restore_files_into_new_volume(self) -> None:
         # restore files from volume_src backup into volume_dst
-        run([
-            "baudolo-restore", "files",
-            self.volume_dst, self.hash, self.version,
-            "--backups-dir", self.backups_dir,
-            "--repo-name", self.repo_name,
-            "--source-volume", self.volume_src,
-            "--rsync-image", "ghcr.io/kevinveenbirkenbach/alpine-rsync",
-        ])
+        run(
+            [
+                "baudolo-restore",
+                "files",
+                self.volume_dst,
+                self.hash,
+                self.version,
+                "--backups-dir",
+                self.backups_dir,
+                "--repo-name",
+                self.repo_name,
+                "--source-volume",
+                self.volume_src,
+                "--rsync-image",
+                "ghcr.io/kevinveenbirkenbach/alpine-rsync",
+            ]
+        )
 
         # verify restored file exists in dst volume
-        p = run([
-            "docker", "run", "--rm",
-            "-v", f"{self.volume_dst}:/data",
-            "alpine:3.20",
-            "sh", "-lc", "cat /data/hello.txt",
-        ])
+        p = run(
+            [
+                "docker",
+                "run",
+                "--rm",
+                "-v",
+                f"{self.volume_dst}:/data",
+                "alpine:3.20",
+                "sh",
+                "-lc",
+                "cat /data/hello.txt",
+            ]
+        )
         self.assertEqual((p.stdout or "").strip(), "hello")
@@ -31,12 +31,19 @@ class TestE2EFilesNoCopy(unittest.TestCase):
         cls.volumes = [cls.volume_src, cls.volume_dst]
 
         run(["docker", "volume", "create", cls.volume_src])
-        run([
-            "docker", "run", "--rm",
-            "-v", f"{cls.volume_src}:/data",
-            "alpine:3.20",
-            "sh", "-lc", "echo 'hello' > /data/hello.txt",
-        ])
+        run(
+            [
+                "docker",
+                "run",
+                "--rm",
+                "-v",
+                f"{cls.volume_src}:/data",
+                "alpine:3.20",
+                "sh",
+                "-lc",
+                "echo 'hello' > /data/hello.txt",
+            ]
+        )
 
         cls.databases_csv = f"/tmp/{cls.prefix}/databases.csv"
         write_databases_csv(cls.databases_csv, [])

@@ -59,14 +66,29 @@ class TestE2EFilesNoCopy(unittest.TestCase):
         cleanup_docker(containers=cls.containers, volumes=cls.volumes)
 
     def test_files_backup_not_present(self) -> None:
-        p = backup_path(self.backups_dir, self.repo_name, self.version, self.volume_src) / "files"
+        p = (
+            backup_path(self.backups_dir, self.repo_name, self.version, self.volume_src)
+            / "files"
+        )
         self.assertFalse(p.exists(), f"Did not expect files backup dir at: {p}")
 
     def test_restore_files_fails_expected(self) -> None:
-        p = run([
-            "baudolo-restore", "files",
-            self.volume_dst, self.hash, self.version,
-            "--backups-dir", self.backups_dir,
-            "--repo-name", self.repo_name,
-        ], check=False)
-        self.assertEqual(p.returncode, 2, f"Expected exitcode 2, got {p.returncode}\nSTDOUT={p.stdout}\nSTDERR={p.stderr}")
+        p = run(
+            [
+                "baudolo-restore",
+                "files",
+                self.volume_dst,
+                self.hash,
+                self.version,
+                "--backups-dir",
+                self.backups_dir,
+                "--repo-name",
+                self.repo_name,
+            ],
+            check=False,
+        )
+        self.assertEqual(
+            p.returncode,
+            2,
+            f"Expected exitcode 2, got {p.returncode}\nSTDOUT={p.stdout}\nSTDERR={p.stderr}",
+        )
@@ -62,8 +62,12 @@ class TestE2EMariaDBFull(unittest.TestCase):
         )
 
         # Liveness + actual SQL login readiness (TCP)
-        wait_for_mariadb(cls.db_container, root_password=cls.root_password, timeout_s=90)
-        wait_for_mariadb_sql(cls.db_container, user=cls.db_user, password=cls.db_password, timeout_s=90)
+        wait_for_mariadb(
+            cls.db_container, root_password=cls.root_password, timeout_s=90
+        )
+        wait_for_mariadb_sql(
+            cls.db_container, user=cls.db_user, password=cls.db_password, timeout_s=90
+        )
 
         # Create table + data via the dedicated user (TCP)
         run(

@@ -74,14 +78,17 @@ class TestE2EMariaDBFull(unittest.TestCase):
                 "sh",
                 "-lc",
                 f"mariadb -h 127.0.0.1 -u{cls.db_user} -p{cls.db_password} "
-                f"-e \"CREATE TABLE {cls.db_name}.t (id INT PRIMARY KEY, v VARCHAR(50)); "
+                f'-e "CREATE TABLE {cls.db_name}.t (id INT PRIMARY KEY, v VARCHAR(50)); '
                 f"INSERT INTO {cls.db_name}.t VALUES (1,'ok');\"",
             ]
         )
 
         cls.databases_csv = f"/tmp/{cls.prefix}/databases.csv"
         # IMPORTANT: baudolo backup expects credentials for the DB dump.
-        write_databases_csv(cls.databases_csv, [(cls.db_container, cls.db_name, cls.db_user, cls.db_password)])
+        write_databases_csv(
+            cls.databases_csv,
+            [(cls.db_container, cls.db_name, cls.db_user, cls.db_password)],
+        )
 
         # Backup with file+dump
         backup_run(

@@ -104,7 +111,7 @@ class TestE2EMariaDBFull(unittest.TestCase):
                 "sh",
                 "-lc",
                 f"mariadb -h 127.0.0.1 -u{cls.db_user} -p{cls.db_password} "
-                f"-e \"DROP TABLE {cls.db_name}.t;\"",
+                f'-e "DROP TABLE {cls.db_name}.t;"',
             ]
         )
 

@@ -137,7 +144,11 @@ class TestE2EMariaDBFull(unittest.TestCase):
         cleanup_docker(containers=cls.containers, volumes=cls.volumes)
 
     def test_dump_file_exists(self) -> None:
-        p = backup_path(self.backups_dir, self.repo_name, self.version, self.db_volume) / "sql" / f"{self.db_name}.backup.sql"
+        p = (
+            backup_path(self.backups_dir, self.repo_name, self.version, self.db_volume)
+            / "sql"
+            / f"{self.db_name}.backup.sql"
+        )
         self.assertTrue(p.is_file(), f"Expected dump file at: {p}")
 
     def test_data_restored(self) -> None:

@@ -149,7 +160,7 @@ class TestE2EMariaDBFull(unittest.TestCase):
                 "sh",
                 "-lc",
                 f"mariadb -h 127.0.0.1 -u{self.db_user} -p{self.db_password} "
-                f"-N -e \"SELECT v FROM {self.db_name}.t WHERE id=1;\"",
+                f'-N -e "SELECT v FROM {self.db_name}.t WHERE id=1;"',
             ]
         )
         self.assertEqual((p.stdout or "").strip(), "ok")
@@ -60,8 +60,12 @@ class TestE2EMariaDBNoCopy(unittest.TestCase):
             ]
         )
 
-        wait_for_mariadb(cls.db_container, root_password=cls.root_password, timeout_s=90)
-        wait_for_mariadb_sql(cls.db_container, user=cls.db_user, password=cls.db_password, timeout_s=90)
+        wait_for_mariadb(
+            cls.db_container, root_password=cls.root_password, timeout_s=90
+        )
+        wait_for_mariadb_sql(
+            cls.db_container, user=cls.db_user, password=cls.db_password, timeout_s=90
+        )
 
         # Create table + data (TCP)
         run(

@@ -72,13 +76,16 @@ class TestE2EMariaDBNoCopy(unittest.TestCase):
                 "sh",
                 "-lc",
                 f"mariadb -h 127.0.0.1 -u{cls.db_user} -p{cls.db_password} "
-                f"-e \"CREATE TABLE {cls.db_name}.t (id INT PRIMARY KEY, v VARCHAR(50)); "
+                f'-e "CREATE TABLE {cls.db_name}.t (id INT PRIMARY KEY, v VARCHAR(50)); '
                 f"INSERT INTO {cls.db_name}.t VALUES (1,'ok');\"",
             ]
         )
 
         cls.databases_csv = f"/tmp/{cls.prefix}/databases.csv"
-        write_databases_csv(cls.databases_csv, [(cls.db_container, cls.db_name, cls.db_user, cls.db_password)])
+        write_databases_csv(
+            cls.databases_csv,
+            [(cls.db_container, cls.db_name, cls.db_user, cls.db_password)],
+        )
 
         # dump-only => no files
         backup_run(

@@ -102,7 +109,7 @@ class TestE2EMariaDBNoCopy(unittest.TestCase):
                 "sh",
                 "-lc",
                 f"mariadb -h 127.0.0.1 -u{cls.db_user} -p{cls.db_password} "
-                f"-e \"DROP TABLE {cls.db_name}.t;\"",
+                f'-e "DROP TABLE {cls.db_name}.t;"',
             ]
         )
 

@@ -135,7 +142,10 @@ class TestE2EMariaDBNoCopy(unittest.TestCase):
         cleanup_docker(containers=cls.containers, volumes=cls.volumes)
 
     def test_files_backup_not_present(self) -> None:
-        p = backup_path(self.backups_dir, self.repo_name, self.version, self.db_volume) / "files"
+        p = (
+            backup_path(self.backups_dir, self.repo_name, self.version, self.db_volume)
+            / "files"
+        )
         self.assertFalse(p.exists(), f"Did not expect files backup dir at: {p}")
 
     def test_data_restored(self) -> None:

@@ -147,7 +157,7 @@ class TestE2EMariaDBNoCopy(unittest.TestCase):
                 "sh",
                 "-lc",
                 f"mariadb -h 127.0.0.1 -u{self.db_user} -p{self.db_password} "
-                f"-N -e \"SELECT v FROM {self.db_name}.t WHERE id=1;\"",
+                f'-N -e "SELECT v FROM {self.db_name}.t WHERE id=1;"',
             ]
        )
         self.assertEqual((p.stdout or "").strip(), "ok")
@@ -33,26 +33,42 @@ class TestE2EPostgresFull(unittest.TestCase):
 
         run(["docker", "volume", "create", cls.pg_volume])
 
-        run([
-            "docker", "run", "-d",
-            "--name", cls.pg_container,
-            "-e", "POSTGRES_PASSWORD=pgpw",
-            "-e", "POSTGRES_DB=appdb",
-            "-e", "POSTGRES_USER=postgres",
-            "-v", f"{cls.pg_volume}:/var/lib/postgresql/data",
-            "postgres:16",
-        ])
+        run(
+            [
+                "docker",
+                "run",
+                "-d",
+                "--name",
+                cls.pg_container,
+                "-e",
+                "POSTGRES_PASSWORD=pgpw",
+                "-e",
+                "POSTGRES_DB=appdb",
+                "-e",
+                "POSTGRES_USER=postgres",
+                "-v",
+                f"{cls.pg_volume}:/var/lib/postgresql/data",
+                "postgres:16",
+            ]
+        )
         wait_for_postgres(cls.pg_container, user="postgres", timeout_s=90)
 
         # Create a table + data
-        run([
-            "docker", "exec", cls.pg_container,
-            "sh", "-lc",
-            "psql -U postgres -d appdb -c \"CREATE TABLE t (id int primary key, v text); INSERT INTO t VALUES (1,'ok');\"",
-        ])
+        run(
+            [
+                "docker",
+                "exec",
+                cls.pg_container,
+                "sh",
+                "-lc",
+                "psql -U postgres -d appdb -c \"CREATE TABLE t (id int primary key, v text); INSERT INTO t VALUES (1,'ok');\"",
+            ]
+        )
 
         cls.databases_csv = f"/tmp/{cls.prefix}/databases.csv"
-        write_databases_csv(cls.databases_csv, [(cls.pg_container, "appdb", "postgres", "pgpw")])
+        write_databases_csv(
+            cls.databases_csv, [(cls.pg_container, "appdb", "postgres", "pgpw")]
+        )
 
         backup_run(
             backups_dir=cls.backups_dir,

@@ -66,37 +82,62 @@ class TestE2EPostgresFull(unittest.TestCase):
         cls.hash, cls.version = latest_version_dir(cls.backups_dir, cls.repo_name)
 
         # Wipe schema
-        run([
-            "docker", "exec", cls.pg_container,
-            "sh", "-lc",
-            "psql -U postgres -d appdb -c \"DROP TABLE t;\"",
-        ])
+        run(
+            [
+                "docker",
+                "exec",
+                cls.pg_container,
+                "sh",
+                "-lc",
+                'psql -U postgres -d appdb -c "DROP TABLE t;"',
+            ]
+        )
 
         # Restore
-        run([
-            "baudolo-restore", "postgres",
-            cls.pg_volume, cls.hash, cls.version,
-            "--backups-dir", cls.backups_dir,
-            "--repo-name", cls.repo_name,
-            "--container", cls.pg_container,
-            "--db-name", "appdb",
-            "--db-user", "postgres",
-            "--db-password", "pgpw",
-            "--empty",
-        ])
+        run(
+            [
+                "baudolo-restore",
+                "postgres",
+                cls.pg_volume,
+                cls.hash,
+                cls.version,
+                "--backups-dir",
+                cls.backups_dir,
+                "--repo-name",
+                cls.repo_name,
+                "--container",
+                cls.pg_container,
+                "--db-name",
+                "appdb",
+                "--db-user",
+                "postgres",
+                "--db-password",
+                "pgpw",
+                "--empty",
+            ]
+        )
 
     @classmethod
     def tearDownClass(cls) -> None:
         cleanup_docker(containers=cls.containers, volumes=cls.volumes)
 
     def test_dump_file_exists(self) -> None:
-        p = backup_path(self.backups_dir, self.repo_name, self.version, self.pg_volume) / "sql" / "appdb.backup.sql"
+        p = (
+            backup_path(self.backups_dir, self.repo_name, self.version, self.pg_volume)
+            / "sql"
+            / "appdb.backup.sql"
+        )
         self.assertTrue(p.is_file(), f"Expected dump file at: {p}")
 
     def test_data_restored(self) -> None:
-        p = run([
-            "docker", "exec", self.pg_container,
-            "sh", "-lc",
-            "psql -U postgres -d appdb -t -c \"SELECT v FROM t WHERE id=1;\"",
-        ])
+        p = run(
+            [
+                "docker",
+                "exec",
+                self.pg_container,
+                "sh",
+                "-lc",
+                'psql -U postgres -d appdb -t -c "SELECT v FROM t WHERE id=1;"',
+            ]
+        )
         self.assertEqual((p.stdout or "").strip(), "ok")
@@ -32,25 +32,41 @@ class TestE2EPostgresNoCopy(unittest.TestCase):
         cls.volumes = [cls.pg_volume]
 
         run(["docker", "volume", "create", cls.pg_volume])
-        run([
-            "docker", "run", "-d",
-            "--name", cls.pg_container,
-            "-e", "POSTGRES_PASSWORD=pgpw",
-            "-e", "POSTGRES_DB=appdb",
-            "-e", "POSTGRES_USER=postgres",
-            "-v", f"{cls.pg_volume}:/var/lib/postgresql/data",
-            "postgres:16",
-        ])
+        run(
+            [
+                "docker",
+                "run",
+                "-d",
+                "--name",
+                cls.pg_container,
+                "-e",
+                "POSTGRES_PASSWORD=pgpw",
+                "-e",
+                "POSTGRES_DB=appdb",
+                "-e",
+                "POSTGRES_USER=postgres",
+                "-v",
+                f"{cls.pg_volume}:/var/lib/postgresql/data",
+                "postgres:16",
+            ]
+        )
         wait_for_postgres(cls.pg_container, user="postgres", timeout_s=90)
 
-        run([
-            "docker", "exec", cls.pg_container,
-            "sh", "-lc",
-            "psql -U postgres -d appdb -c \"CREATE TABLE t (id int primary key, v text); INSERT INTO t VALUES (1,'ok');\"",
-        ])
+        run(
+            [
+                "docker",
+                "exec",
+                cls.pg_container,
+                "sh",
+                "-lc",
+                "psql -U postgres -d appdb -c \"CREATE TABLE t (id int primary key, v text); INSERT INTO t VALUES (1,'ok');\"",
+            ]
+        )
 
         cls.databases_csv = f"/tmp/{cls.prefix}/databases.csv"
-        write_databases_csv(cls.databases_csv, [(cls.pg_container, "appdb", "postgres", "pgpw")])
+        write_databases_csv(
+            cls.databases_csv, [(cls.pg_container, "appdb", "postgres", "pgpw")]
+        )
 
         backup_run(
             backups_dir=cls.backups_dir,

@@ -64,36 +80,60 @@ class TestE2EPostgresNoCopy(unittest.TestCase):
 
         cls.hash, cls.version = latest_version_dir(cls.backups_dir, cls.repo_name)
 
-        run([
-            "docker", "exec", cls.pg_container,
-            "sh", "-lc",
-            "psql -U postgres -d appdb -c \"DROP TABLE t;\"",
-        ])
+        run(
+            [
+                "docker",
+                "exec",
+                cls.pg_container,
+                "sh",
+                "-lc",
+                'psql -U postgres -d appdb -c "DROP TABLE t;"',
+            ]
+        )
 
-        run([
-            "baudolo-restore", "postgres",
-            cls.pg_volume, cls.hash, cls.version,
-            "--backups-dir", cls.backups_dir,
-            "--repo-name", cls.repo_name,
-            "--container", cls.pg_container,
-            "--db-name", "appdb",
-            "--db-user", "postgres",
-            "--db-password", "pgpw",
-            "--empty",
-        ])
+        run(
+            [
+                "baudolo-restore",
+                "postgres",
+                cls.pg_volume,
+                cls.hash,
+                cls.version,
+                "--backups-dir",
+                cls.backups_dir,
+                "--repo-name",
+                cls.repo_name,
+                "--container",
+                cls.pg_container,
+                "--db-name",
+                "appdb",
+                "--db-user",
+                "postgres",
+                "--db-password",
+                "pgpw",
+                "--empty",
+            ]
+        )
 
     @classmethod
     def tearDownClass(cls) -> None:
         cleanup_docker(containers=cls.containers, volumes=cls.volumes)
 
     def test_files_backup_not_present(self) -> None:
-        p = backup_path(self.backups_dir, self.repo_name, self.version, self.pg_volume) / "files"
+        p = (
+            backup_path(self.backups_dir, self.repo_name, self.version, self.pg_volume)
+            / "files"
+        )
         self.assertFalse(p.exists(), f"Did not expect files backup dir at: {p}")
 
     def test_data_restored(self) -> None:
-        p = run([
-            "docker", "exec", self.pg_container,
-            "sh", "-lc",
-            "psql -U postgres -d appdb -t -c \"SELECT v FROM t WHERE id=1;\"",
-        ])
+        p = run(
+            [
+                "docker",
+                "exec",
+                self.pg_container,
+                "sh",
+                "-lc",
+                'psql -U postgres -d appdb -t -c "SELECT v FROM t WHERE id=1;"',
+            ]
+        )
         self.assertEqual((p.stdout or "").strip(), "ok")
@@ -6,7 +6,9 @@ import unittest
 from pathlib import Path
 
 
-def run_seed(csv_path: Path, instance: str, database: str, username: str, password: str = "") -> subprocess.CompletedProcess:
+def run_seed(
+    csv_path: Path, instance: str, database: str, username: str, password: str = ""
+) -> subprocess.CompletedProcess:
     # Run the real CLI module (integration-style).
     return subprocess.run(
         [
@@ -6,7 +6,9 @@ from baudolo.backup.app import requires_stop
 
 class TestRequiresStop(unittest.TestCase):
     @patch("baudolo.backup.app.get_image_info")
-    def test_requires_stop_false_when_all_images_are_whitelisted(self, mock_get_image_info):
+    def test_requires_stop_false_when_all_images_are_whitelisted(
+        self, mock_get_image_info
+    ):
         # All containers use images containing allowed substrings
         mock_get_image_info.side_effect = [
             "repo/mastodon:v4",

@@ -17,7 +19,9 @@ class TestRequiresStop(unittest.TestCase):
         self.assertFalse(requires_stop(containers, whitelist))
 
     @patch("baudolo.backup.app.get_image_info")
-    def test_requires_stop_true_when_any_image_is_not_whitelisted(self, mock_get_image_info):
+    def test_requires_stop_true_when_any_image_is_not_whitelisted(
+        self, mock_get_image_info
+    ):
         mock_get_image_info.side_effect = [
             "repo/mastodon:v4",
             "repo/nginx:latest",