Mirror of https://github.com/kevinveenbirkenbach/computer-playbook.git
Synced 2025-11-02 19:28:10 +00:00

Compare commits: 486729d57d...cce33373ba (25 commits)
| SHA1 |
|---|
| cce33373ba |
| fcc9dc71ef |
| 1b42ca46e8 |
| ce8958cc01 |
| 7e5990aa16 |
| 60ef36456a |
| 3a8b9cc958 |
| a1a956585c |
| 1a1f185265 |
| 57ca6adaec |
| a0c2245bbd |
| 206b3eadbc |
| feee3fd71f |
| 39e745049b |
| db034553a3 |
| f7e661bcca |
| d5f1ae0288 |
| 3c3083481e |
| 7cfe97ab50 |
| a552ea175d |
| dc16b7d21c |
| 54797aa65b |
| a6e42bff9b |
| 58cf63c040 |
| 682ea6d7f2 |
@@ -1,3 +0,0 @@
-# Todo
-- Test this script. It's just a draft. Checkout https://chatgpt.com/c/681d9e2b-7b28-800f-aef8-4f1427e9021d
-- Solve bugs in show_vault_variables.py
cli/deploy.py (254 changed lines)
@@ -5,6 +5,9 @@ import subprocess
 import os
 import datetime
 import sys
+import re
+from typing import Optional, Dict, Any, List


 def run_ansible_playbook(
     inventory,
@@ -13,21 +16,19 @@ def run_ansible_playbook(
     allowed_applications=None,
     password_file=None,
     verbose=0,
     skip_tests=False,
     skip_validation=False,
     skip_build=False,
     cleanup=False,
     logs=False
 ):
     start_time = datetime.datetime.now()
     print(f"\n▶️ Script started at: {start_time.isoformat()}\n")

-    if cleanup:
+    # Cleanup is now handled via MODE_CLEANUP
+    if modes.get("MODE_CLEANUP", False):
         cleanup_command = ["make", "clean-keep-logs"] if logs else ["make", "clean"]
-        print("\n🧹 Cleaning up project (" + " ".join(cleanup_command) +")...\n")
+        print("\n🧹 Cleaning up project (" + " ".join(cleanup_command) + ")...\n")
         subprocess.run(cleanup_command, check=True)
     else:
-        print("\n⚠️ Skipping build as requested.\n")
+        print("\n⚠️ Skipping cleanup as requested.\n")

     if not skip_build:
         print("\n🛠️ Building project (make messy-build)...\n")
@@ -38,27 +39,26 @@ def run_ansible_playbook(
     script_dir = os.path.dirname(os.path.realpath(__file__))
     playbook = os.path.join(os.path.dirname(script_dir), "playbook.yml")

-    # Inventory validation step
-    if not skip_validation:
+    # Inventory validation is controlled via MODE_ASSERT
+    if modes.get("MODE_ASSERT", None) is False:
+        print("\n⚠️ Skipping inventory validation as requested.\n")
+    elif "MODE_ASSERT" not in modes or modes["MODE_ASSERT"] is True:
         print("\n🔍 Validating inventory before deployment...\n")
         try:
             subprocess.run(
-                [sys.executable,
-                 os.path.join(script_dir, "validate/inventory.py"),
-                 os.path.dirname(inventory)
+                [
+                    sys.executable,
+                    os.path.join(script_dir, "validate", "inventory.py"),
+                    os.path.dirname(inventory),
+                ],
-                check=True
+                check=True,
             )
         except subprocess.CalledProcessError:
-            print(
-                "\n❌ Inventory validation failed. Deployment aborted.\n",
-                file=sys.stderr
-            )
+            print("\n❌ Inventory validation failed. Deployment aborted.\n", file=sys.stderr)
             sys.exit(1)
     else:
         print("\n⚠️ Skipping inventory validation as requested.\n")

-    if not skip_tests:
+    # Tests are controlled via MODE_TEST
+    if modes.get("MODE_TEST", False):
         print("\n🧪 Running tests (make messy-test)...\n")
         subprocess.run(["make", "messy-test"], check=True)

@@ -93,25 +93,136 @@ def run_ansible_playbook(
     duration = end_time - start_time
     print(f"⏱️ Total execution time: {duration}\n")


 def validate_application_ids(inventory, app_ids):
     """
     Abort the script if any application IDs are invalid, with detailed reasons.
     """
     from module_utils.valid_deploy_id import ValidDeployId

     validator = ValidDeployId()
     invalid = validator.validate(inventory, app_ids)
     if invalid:
         print("\n❌ Detected invalid application_id(s):\n")
         for app_id, status in invalid.items():
             reasons = []
-            if not status['in_roles']:
+            if not status["in_roles"]:
                 reasons.append("not defined in roles (infinito)")
-            if not status['in_inventory']:
+            if not status["in_inventory"]:
                 reasons.append("not found in inventory file")
             print(f"  - {app_id}: " + ", ".join(reasons))
         sys.exit(1)


+MODE_LINE_RE = re.compile(
+    r"""^\s*(?P<key>[A-Z0-9_]+)\s*:\s*(?P<value>.+?)\s*(?:#\s*(?P<cmt>.*))?\s*$"""
+)
+
+
+def _parse_bool_literal(text: str) -> Optional[bool]:
+    t = text.strip().lower()
+    if t in ("true", "yes", "on"):
+        return True
+    if t in ("false", "no", "off"):
+        return False
+    return None
+
+
+def load_modes_from_yaml(modes_yaml_path: str) -> List[Dict[str, Any]]:
+    """
+    Parse group_vars/all/01_modes.yml line-by-line to recover:
+      - name (e.g., MODE_TEST)
+      - default (True/False/None if templated/unknown)
+      - help (from trailing # comment, if present)
+    """
+    modes = []
+    if not os.path.exists(modes_yaml_path):
+        raise FileNotFoundError(f"Modes file not found: {modes_yaml_path}")
+
+    with open(modes_yaml_path, "r", encoding="utf-8") as fh:
+        for line in fh:
+            line = line.rstrip()
+            if not line or line.lstrip().startswith("#"):
+                continue
+            m = MODE_LINE_RE.match(line)
+            if not m:
+                continue
+            key = m.group("key")
+            val = m.group("value").strip()
+            cmt = (m.group("cmt") or "").strip()
+
+            if not key.startswith("MODE_"):
+                continue
+
+            default_bool = _parse_bool_literal(val)
+            modes.append(
+                {
+                    "name": key,
+                    "default": default_bool,
+                    "help": cmt or f"Toggle {key}",
+                }
+            )
+    return modes
+
+
+def add_dynamic_mode_args(
+    parser: argparse.ArgumentParser, modes_meta: List[Dict[str, Any]]
+) -> Dict[str, Dict[str, Any]]:
+    """
+    Add argparse options based on modes metadata.
+    Returns a dict mapping mode name -> { 'dest': <argparse_dest>, 'default': <bool/None>, 'kind': 'bool_true'|'bool_false'|'explicit' }.
+    """
+    spec: Dict[str, Dict[str, Any]] = {}
+    for m in modes_meta:
+        name = m["name"]
+        default = m["default"]
+        desc = m["help"]
+        short = name.replace("MODE_", "").lower()
+
+        if default is True:
+            opt = f"--skip-{short}"
+            dest = f"skip_{short}"
+            help_txt = desc or f"Skip/disable {short} (default: enabled)"
+            parser.add_argument(opt, action="store_true", help=help_txt, dest=dest)
+            spec[name] = {"dest": dest, "default": True, "kind": "bool_true"}
+        elif default is False:
+            opt = f"--{short}"
+            dest = short
+            help_txt = desc or f"Enable {short} (default: disabled)"
+            parser.add_argument(opt, action="store_true", help=help_txt, dest=dest)
+            spec[name] = {"dest": dest, "default": False, "kind": "bool_false"}
+        else:
+            opt = f"--{short}"
+            dest = short
+            help_txt = desc or f"Set {short} explicitly (true/false). If omitted, keep inventory default."
+            parser.add_argument(opt, choices=["true", "false"], help=help_txt, dest=dest)
+            spec[name] = {"dest": dest, "default": None, "kind": "explicit"}
+
+    return spec
+
+
+def build_modes_from_args(
+    spec: Dict[str, Dict[str, Any]], args_namespace: argparse.Namespace
+) -> Dict[str, Any]:
+    """
+    Using the argparse results and the spec, compute the `modes` dict to pass to Ansible.
+    """
+    modes: Dict[str, Any] = {}
+    for mode_name, info in spec.items():
+        dest = info["dest"]
+        kind = info["kind"]
+        val = getattr(args_namespace, dest, None)
+
+        if kind == "bool_true":
+            modes[mode_name] = False if val else True
+        elif kind == "bool_false":
+            modes[mode_name] = True if val else False
+        else:
+            if val is not None:
+                modes[mode_name] = True if val == "true" else False
+    return modes
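Taken together, `load_modes_from_yaml`, `add_dynamic_mode_args`, and `build_modes_from_args` turn each `MODE_*` entry into a CLI flag whose shape depends on its default. A minimal sketch of the resulting surface, using mode names from this commit's `group_vars/all/01_modes.yml` (plain argparse; the spec/kind bookkeeping of the real code is omitted):

```python
import argparse

parser = argparse.ArgumentParser()
# MODE_UPDATE: true  -> default-on, so an opt-out flag is generated
parser.add_argument("--skip-update", action="store_true", dest="skip_update")
# MODE_TEST: false   -> default-off, so an opt-in flag is generated
parser.add_argument("--test", action="store_true", dest="test")
# MODE_CLEANUP: "{{ MODE_DEBUG | bool }}" -> templated default, so an explicit
# true/false choice is generated; omitting it keeps the inventory default
parser.add_argument("--cleanup", choices=["true", "false"], dest="cleanup")

args = parser.parse_args(["--skip-update", "--cleanup", "true"])
print(args.skip_update, args.test, args.cleanup)  # True False true
```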

 def main():
     parser = argparse.ArgumentParser(
         description="Run the central Ansible deployment script to manage infrastructure, updates, and tests."
@@ -119,88 +230,68 @@ def main():

     parser.add_argument(
         "inventory",
-        help="Path to the inventory file (INI or YAML) containing hosts and variables."
+        help="Path to the inventory file (INI or YAML) containing hosts and variables.",
     )
     parser.add_argument(
-        "-l", "--limit",
-        help="Restrict execution to a specific host or host group from the inventory."
+        "-l",
+        "--limit",
+        help="Restrict execution to a specific host or host group from the inventory.",
     )
     parser.add_argument(
-        "-T", "--host-type",
+        "-T",
+        "--host-type",
         choices=["server", "desktop"],
         default="server",
-        help="Specify whether the target is a server or a personal computer. Affects role selection and variables."
+        help="Specify whether the target is a server or a personal computer. Affects role selection and variables.",
     )
     parser.add_argument(
-        "-r", "--reset", action="store_true",
-        help="Reset all Infinito.Nexus files and configurations, and run the entire playbook (not just individual roles)."
+        "-p",
+        "--password-file",
+        help="Path to the file containing the Vault password. If not provided, prompts for the password interactively.",
     )
     parser.add_argument(
-        "-t", "--test", action="store_true",
-        help="Run test routines instead of production tasks. Useful for local testing and CI pipelines."
+        "-B",
+        "--skip-build",
+        action="store_true",
+        help="Skip running 'make build' before deployment.",
     )
-    parser.add_argument(
-        "-u", "--update", action="store_true",
-        help="Enable the update procedure to bring software and roles up to date."
-    )
-    parser.add_argument(
-        "-b", "--backup", action="store_true",
-        help="Perform a full backup of critical data and configurations before the update process."
-    )
-    parser.add_argument(
-        "-c", "--cleanup", action="store_true",
-        help="Clean up unused files and outdated configurations after all tasks are complete. Also cleans up the repository before the deployment procedure."
-    )
-    parser.add_argument(
-        "-d", "--debug", action="store_true",
-        help="Enable detailed debug output for Ansible and this script."
-    )
-    parser.add_argument(
-        "-p", "--password-file",
-        help="Path to the file containing the Vault password. If not provided, prompts for the password interactively."
-    )
-    parser.add_argument(
-        "-s", "--skip-tests", action="store_true",
-        help="Skip running 'make test' even if tests are normally enabled."
-    )
-    parser.add_argument(
-        "-V", "--skip-validation", action="store_true",
-        help="Skip inventory validation before deployment."
-    )
-    parser.add_argument(
-        "-B", "--skip-build", action="store_true",
-        help="Skip running 'make build' before deployment."
-    )
     parser.add_argument(
-        "-i", "--id",
+        "-i",
+        "--id",
         nargs="+",
         default=[],
         dest="id",
-        help="List of application_id's for partial deploy. If not set, all application IDs defined in the inventory will be executed."
+        help="List of application_id's for partial deploy. If not set, all application IDs defined in the inventory will be executed.",
     )
     parser.add_argument(
-        "-v", "--verbose", action="count", default=0,
-        help="Increase verbosity level. Multiple -v flags increase detail (e.g., -vvv for maximum log output)."
+        "-v",
+        "--verbose",
+        action="count",
+        default=0,
+        help="Increase verbosity level. Multiple -v flags increase detail (e.g., -vvv for maximum log output).",
     )
     parser.add_argument(
-        "--logs", action="store_true",
-        help="Keep the CLI logs during cleanup command"
+        "--logs",
+        action="store_true",
+        help="Keep the CLI logs during cleanup command",
     )

+    # ---- Dynamically add mode flags from group_vars/all/01_modes.yml ----
+    script_dir = os.path.dirname(os.path.realpath(__file__))
+    repo_root = os.path.dirname(script_dir)
+    modes_yaml_path = os.path.join(repo_root, "group_vars", "all", "01_modes.yml")
+    modes_meta = load_modes_from_yaml(modes_yaml_path)
+    modes_spec = add_dynamic_mode_args(parser, modes_meta)

     args = parser.parse_args()
     validate_application_ids(args.inventory, args.id)

-    modes = {
-        "MODE_RESET": args.reset,
-        "MODE_TEST": args.test,
-        "MODE_UPDATE": args.update,
-        "MODE_BACKUP": args.backup,
-        "MODE_CLEANUP": args.cleanup,
-        "MODE_LOGS": args.logs,
-        "MODE_DEBUG": args.debug,
-        "MODE_ASSERT": not args.skip_validation,
-        "host_type": args.host_type
-    }
+    # Build modes from dynamic args
+    modes = build_modes_from_args(modes_spec, args)

+    # Additional non-dynamic flags
+    modes["MODE_LOGS"] = args.logs
+    modes["host_type"] = args.host_type

     run_ansible_playbook(
         inventory=args.inventory,
@@ -209,11 +300,8 @@ def main():
         allowed_applications=args.id,
         password_file=args.password_file,
         verbose=args.verbose,
-        skip_tests=args.skip_tests,
-        skip_validation=args.skip_validation,
         skip_build=args.skip_build,
-        cleanup=args.cleanup,
-        logs=args.logs
+        logs=args.logs,
     )
group_vars/all/01_modes.yml

@@ -1,10 +1,9 @@
 # Mode

 # The following modes can be combined with each other
 MODE_TEST: false    # Executes test routines instead of productive routines
 MODE_UPDATE: true   # Executes updates
 MODE_DEBUG: false   # This enables debugging in ansible and in the apps, You SHOULD NOT enable this on production servers
 MODE_RESET: false   # Cleans up all Infinito.Nexus files. It's necessary to run to whole playbook and not particial roles when using this function.
-MODE_BACKUP: "{{ MODE_UPDATE }}"  # Activates the backup before the update procedure
-MODE_CLEANUP: "{{ MODE_DEBUG }}"  # Cleanup unused files and configurations
-MODE_ASSERT: "{{ MODE_DEBUG }}"   # Executes validation tasks during the run.
+MODE_CLEANUP: "{{ MODE_DEBUG | bool }}"  # Cleanup unused files and configurations
+MODE_ASSERT: "{{ MODE_DEBUG | bool }}"   # Executes validation tasks during the run.
@@ -1,3 +0,0 @@
-# Todos
-- Use at all applications the ansible role name as application_id
-- Implement filter_plugins/get_infinito_path.py
@@ -21,7 +21,9 @@
     system_service_tpl_exec_start: "{{ system_service_script_exec }} %I"
     system_service_tpl_on_failure: ""  # No on failure needed, because it's anyhow the default on failure procedure

-- block:
+- name: Assert '{{ system_service_id }}'
+  block:

     - name: Escape instance name for systemctl call
       ansible.builtin.command:
         argv:
@@ -30,8 +32,8 @@
       register: escaped_name
       changed_when: false

-    - name: Start sys-ctl-alm-compose instance
+    - name: Start '{{ system_service_id }}' instance
       ansible.builtin.systemd:
-        name: "{{ ('sys-ctl-alm-compose@') | get_service_name(SOFTWARE_NAME, False) ~ escaped_name.stdout ~ '.service' }}"
+        name: "{{ system_service_id | get_service_name(SOFTWARE_NAME, False) ~ escaped_name.stdout }}.service"
         state: started
   when: MODE_ASSERT | bool
@@ -23,4 +23,6 @@
 system_service_tpl_exec_start_pre: '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(" ") }} --ignore {{ SYS_SERVICE_BACKUP_DOCKER_2_LOC }} --timeout "{{ SYS_TIMEOUT_BACKUP_SERVICES }}"'
 system_service_tpl_exec_start: "/bin/sh -c '{{ BKP_DOCKER_2_LOC_EXEC }}'"
 system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }} {{ SYS_SERVICE_CLEANUP_BACKUPS_FAILED }}"
-system_service_tpl_exec_start_post: "/usr/bin/systemctl start {{ SYS_SERVICE_CLEANUP_BACKUPS }}"
+# system_service_tpl_exec_start_post: "/usr/bin/systemctl start {{ SYS_SERVICE_CLEANUP_BACKUPS }}" # Not possible to use it because it's a deathlock. Keep this line for documentation purposes

+- include_tasks: utils/run_once.yml
@@ -1,6 +1,5 @@
 - block:
     - include_tasks: 01_core.yml
-    - include_tasks: utils/run_once.yml
   when:
     - run_once_sys_ctl_bkp_docker_2_loc is not defined
@@ -19,4 +19,8 @@
     system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
     system_service_tpl_exec_start: "{{ system_service_script_exec }} --backups-folder-path {{ BACKUPS_FOLDER_PATH }} --maximum-backup-size-percent {{SIZE_PERCENT_MAXIMUM_BACKUP}}"
+    system_service_tpl_exec_start_pre: '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(" ") }} --ignore {{ SYS_SERVICE_GROUP_CLEANUP | join(" ") }} --timeout "{{ SYS_TIMEOUT_BACKUP_SERVICES }}"'
     system_service_copy_files: true

+- include_tasks: utils/run_once.yml
+  vars:
+    flush_handlers: true
@@ -1,6 +1,3 @@
 - block:
     - include_tasks: 01_core.yml
-    - include_tasks: utils/run_once.yml
-      vars:
-        flush_handlers: true
   when: run_once_sys_ctl_cln_bkps is not defined
@@ -19,5 +19,7 @@
     system_service_on_calendar: "{{ SYS_SCHEDULE_CLEANUP_FAILED_BACKUPS }}"
     system_service_copy_files: false
     system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
-    system_service_tpl_exec_start: '/bin/sh -c "{{ CLEANUP_FAILED_BACKUPS_PKG }} --all --dirval-cmd dirval --workers {{ CLEANUP_FAILED_BACKUPS_WORKERS }} --yes"'
+    system_service_tpl_exec_start: '/bin/sh -c "{{ CLEANUP_FAILED_BACKUPS_PKG }} --all --workers {{ CLEANUP_FAILED_BACKUPS_WORKERS }} --yes"'
     system_service_tpl_exec_start_pre: '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(" ") }} --ignore {{ SYS_SERVICE_GROUP_CLEANUP| join(" ") }} --timeout "{{ SYS_TIMEOUT_CLEANUP_SERVICES }}"'

+- include_tasks: utils/run_once.yml
@@ -1,4 +1,3 @@
 - block:
     - include_tasks: 01_core.yml
-    - include_tasks: utils/run_once.yml
   when: run_once_sys_ctl_cln_faild_bkps is not defined
@@ -13,8 +13,11 @@
 - include_role:
     name: sys-service
   vars:
+    system_service_suppress_flush: true  # The healthcheck will just work after all routines passed
     system_service_on_calendar: "{{ SYS_SCHEDULE_HEALTH_CSP_CRAWLER }}"
     system_service_timer_enabled: true
     system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
     system_service_tpl_timeout_start_sec: "{{ CURRENT_PLAY_DOMAINS_ALL | timeout_start_sec_for_domains }}"
     system_service_tpl_exec_start: "{{ system_service_script_exec }} --nginx-config-dir={{ NGINX.DIRECTORIES.HTTP.SERVERS }}"

+- include_tasks: utils/run_once.yml
@@ -1,4 +1,3 @@
 - block:
     - include_tasks: 01_core.yml
-    - include_tasks: utils/run_once.yml
   when: run_once_sys_ctl_hlth_csp is not defined
@@ -8,4 +8,6 @@
   vars:
     system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
     system_service_on_calendar: "{{ SYS_SCHEDULE_HEALTH_MSMTP }}"
     system_service_timer_enabled: true

+- include_tasks: utils/run_once.yml
@@ -1,5 +1,3 @@
 - block:
     - include_tasks: 01_core.yml
-    - include_tasks: utils/run_once.yml
   when: run_once_sys_ctl_hlth_msmtp is not defined
@@ -24,3 +24,6 @@
       {{ system_service_script_exec }}
       --web-protocol {{ WEB_PROTOCOL }}
       --expectations '{{ applications | web_health_expectations(www_enabled=WWW_REDIRECT_ENABLED, group_names=group_names) | to_json }}'
+    system_service_suppress_flush: true  # The healthcheck will just work after all routines passed

+- include_tasks: utils/run_once.yml
@@ -1,6 +1,5 @@
 - block:
     - include_tasks: 01_core.yml
-    - include_tasks: utils/run_once.yml
   when: run_once_sys_ctl_hlth_webserver is not defined
@@ -13,3 +13,4 @@
     system_service_tpl_exec_start_post: "/usr/bin/systemctl start {{ SYS_SERVICE_CLEANUP_ANONYMOUS_VOLUMES }}"
     system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"

+- include_tasks: utils/run_once.yml
@@ -1,4 +1,3 @@
 - block:
     - include_tasks: 01_core.yml
-    - include_tasks: utils/run_once.yml
   when: run_once_sys_ctl_rpr_docker_hard is not defined
@@ -10,3 +10,5 @@
     system_service_tpl_exec_start_pre: "/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(' ') }} --ignore {{ SYS_SERVICE_GROUP_CLEANUP| join(' ') }} {{ SYS_SERVICE_REPAIR_DOCKER_SOFT }} --timeout '{{ SYS_TIMEOUT_DOCKER_RPR_SOFT }}'"
     system_service_tpl_exec_start: >
       /bin/sh -c '{{ system_service_script_exec }} --manipulation-string "{{ SYS_SERVICE_GROUP_MANIPULATION | join(" ") }}" {{ PATH_DOCKER_COMPOSE_INSTANCES }}'

+- include_tasks: utils/run_once.yml
@@ -1,4 +1,3 @@
 - block:
     - include_tasks: 01_core.yml
-    - include_tasks: utils/run_once.yml
   when: run_once_sys_ctl_rpr_docker_soft is not defined
@@ -38,9 +38,7 @@
   ansible.builtin.systemd:
     daemon_reload: true
   become: true
-  listen:
-    - reload system daemon
-    - refresh systemctl service
+  listen: reload system daemon
   async: "{{ ASYNC_TIME if ASYNC_ENABLED | bool else omit }}"
   poll: "{{ ASYNC_POLL if ASYNC_ENABLED | bool else omit }}"
roles/sys-dns-wildcards/README.md (new file, 18 lines)
# sys-dns-wildcards

Create Cloudflare DNS **wildcard** A/AAAA records (`*.parent`) for **parent hosts** (hosts that have children) **and** always for the **apex** (SLD.TLD).

Examples:
- c.wiki.example.com -> parent: wiki.example.com -> creates: `*.wiki.example.com`
- a.b.example.com -> parent: b.example.com -> creates: `*.b.example.com`
- example.com (apex) -> also creates: `*.example.com`

## Inputs
- parent_dns_domains (list[str], optional): FQDNs to evaluate. If empty, the role flattens CURRENT_PLAY_DOMAINS_ALL.
- PRIMARY_DOMAIN (apex), defaults_networks.internet.ip4, optional defaults_networks.internet.ip6
- Flags:
  - parent_dns_enabled (bool, default: true)
  - parent_dns_proxied (bool, default: false)

## Usage
- Include the role once after your constructor stage has set CURRENT_PLAY_DOMAINS_ALL.
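The derivation rules above can be checked directly against the role's `wildcard_records` filter. A minimal sketch, loading the plugin from its repo path the same way the unit tests below do (the sample domains and IPs are illustrative; the `ansible` package must be installed because the plugin imports `AnsibleFilterError`):

```python
# Minimal sketch: call the wildcard_records filter outside Ansible.
# Run from the repository root; the path is this role's filter_plugins dir.
import importlib.util

spec = importlib.util.spec_from_file_location(
    "wildcard_dns", "roles/sys-dns-wildcards/filter_plugins/wildcard_dns.py"
)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)

records = mod.wildcard_records(
    {"svc": ["c.wiki.example.com", "example.com"]},  # CURRENT_PLAY_DOMAINS_ALL-like dict
    apex="example.com",
    ip4="203.0.113.10",
)
for r in records:
    print(r["type"], r["name"], r["content"])
# A *      203.0.113.10   (apex wildcard, always created)
# A *.wiki 203.0.113.10   (parent of c.wiki.example.com)
```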
roles/sys-dns-wildcards/defaults/main.yml (new file, 1 line)

parent_dns_proxied: false
roles/sys-dns-wildcards/filter_plugins/wildcard_dns.py (new file, 174 lines)

# roles/sys-dns-wildcards/filter_plugins/wildcard_dns.py
from ansible.errors import AnsibleFilterError
import ipaddress


def _validate(d: str) -> None:
    if not isinstance(d, str) or not d.strip() or d.startswith(".") or d.endswith(".") or ".." in d:
        raise AnsibleFilterError(f"Invalid domain: {d!r}")


def _depth(domain: str, apex: str) -> int:
    dl, al = domain.split("."), apex.split(".")
    if not domain.endswith(apex) or len(dl) <= len(al):
        return 0
    return len(dl) - len(al)


def _parent_of_child(domain: str, apex: str) -> str | None:
    """
    For a child like a.b.example.com return b.example.com; else None (needs depth >= 2).
    """
    if not domain.endswith(apex):
        return None
    parts = domain.split(".")
    apex_len = len(apex.split("."))
    if len(parts) <= apex_len + 1:
        return None
    return ".".join(parts[1:])  # drop exactly the left-most label


def _flatten_domains_any_structure(domains_like) -> list[str]:
    """
    Accept CURRENT_PLAY_DOMAINS*_like structures:
      - dict values: str | list/tuple/set[str] | dict (one level deeper)
    Returns unique, sorted host list.
    """
    hosts: list[str] = []

    def _add_any(x):
        if x is None:
            return
        if isinstance(x, str):
            hosts.append(x)
            return
        if isinstance(x, (list, tuple, set)):
            for i in x:
                if not isinstance(i, str):
                    raise AnsibleFilterError(f"Non-string hostname in list: {i!r}")
                hosts.append(i)
            return
        if isinstance(x, dict):
            for v in x.values():
                _add_any(v)
            return
        raise AnsibleFilterError(f"Unsupported value type: {type(x).__name__}")

    if not isinstance(domains_like, dict):
        raise AnsibleFilterError("Expected a dict for CURRENT_PLAY_DOMAINS_ALL")
    for v in domains_like.values():
        _add_any(v)
    return sorted(set(hosts))


def _parents_from(domains: list[str], apex: str, *, min_child_depth: int) -> list[str]:
    _validate(apex)
    parents = set()
    for d in domains:
        _validate(d)
        if not d.endswith(apex):
            continue
        if _depth(d, apex) >= min_child_depth:
            p = _parent_of_child(d, apex)
            if p:
                parents.add(p)
    return sorted(parents)


def _is_global(ip: str) -> bool:
    try:
        return ipaddress.ip_address(ip).is_global
    except Exception:
        return False


def _build_wildcard_records(
    parents: list[str],
    apex: str,
    *,
    ip4: str,
    ip6: str | None,
    proxied: bool,
    ipv6_enabled: bool,
) -> list[dict]:
    if not isinstance(parents, list):
        raise AnsibleFilterError("parents must be list[str]")
    _validate(apex)
    if not ip4:
        raise AnsibleFilterError("ip4 required")

    records: list[dict] = []

    def _add(name: str, rtype: str, content: str):
        records.append({
            "zone": apex,
            "type": rtype,
            "name": name,  # For apex wildcard, name "*" means "*.apex" in Cloudflare
            "content": content,
            "proxied": bool(proxied),
            "ttl": 1,
        })

    for p in sorted(set(parents)):
        # Create wildcard at apex as well (name="*")
        if p == apex:
            wc = "*"
        else:
            # relative part (drop ".apex")
            rel = p[:-len(apex)-1]
            if not rel:
                # Safety guard; should not happen because p==apex handled above
                wc = "*"
            else:
                wc = f"*.{rel}"
        _add(wc, "A", str(ip4))
        if ipv6_enabled and ip6 and _is_global(str(ip6)):
            _add(wc, "AAAA", str(ip6))
    return records


def wildcard_records(
    current_play_domains_all,  # dict expected when explicit_domains is None
    apex: str,
    ip4: str,
    ip6: str | None = None,
    proxied: bool = False,
    explicit_domains: list[str] | None = None,
    min_child_depth: int = 2,
    ipv6_enabled: bool = True,
) -> list[dict]:
    """
    Build wildcard records:
      - for each parent 'parent.apex' -> create '*.parent' A/AAAA
      - ALWAYS also create '*.apex' (apex wildcard), modeled as name="*"
    Sources:
      - If 'explicit_domains' is provided and non-empty, use it (expects list[str]).
      - Else flatten 'current_play_domains_all' (expects dict).
    """
    # Source domains
    if explicit_domains and len(explicit_domains) > 0:
        if not isinstance(explicit_domains, list) or not all(isinstance(x, str) for x in explicit_domains):
            raise AnsibleFilterError("explicit_domains must be list[str]")
        domains = sorted(set(explicit_domains))
    else:
        domains = _flatten_domains_any_structure(current_play_domains_all)

    # Determine parents and ALWAYS include apex for apex wildcard
    parents = _parents_from(domains, apex, min_child_depth=min_child_depth)
    parents = list(set(parents) | {apex})

    return _build_wildcard_records(
        parents,
        apex,
        ip4=ip4,
        ip6=ip6,
        proxied=proxied,
        ipv6_enabled=ipv6_enabled,
    )


class FilterModule(object):
    def filters(self):
        return {
            "wildcard_records": wildcard_records,
        }
roles/sys-dns-wildcards/meta/main.yml (new file, 7 lines)

galaxy_info:
  author: "Kevin Veen-Birkenbach"
  description: "Create Cloudflare wildcard DNS records (*.parent) for parent hosts; no base or *.apex records."
  license: "Infinito.Nexus NonCommercial License"
  min_ansible_version: "2.12"
  galaxy_tags: [dns, cloudflare, automation]
dependencies: []
roles/sys-dns-wildcards/tasks/01_core.yml (new file, 9 lines)

---
- name: "Apply Cloudflare DNS for parent domains"
  include_role:
    name: sys-dns-cloudflare-records
  vars:
    cloudflare_records: "{{ SYN_DNS_WILDCARD_RECORDS }}"
  when: DNS_PROVIDER == 'cloudflare'

- include_tasks: utils/run_once.yml
roles/sys-dns-wildcards/tasks/main.yml (new file, 3 lines)

- block:
    - include_tasks: 01_core.yml
  when: run_once_sys_dns_wildcards is not defined
roles/sys-dns-wildcards/vars/main.yml (new file, 12 lines)

SYN_DNS_WILDCARD_RECORDS: >-
  {{
    {} |
    wildcard_records(
      PRIMARY_DOMAIN,
      networks.internet.ip4,
      networks.internet.ip6,
      parent_dns_proxied,
      explicit_domains=CURRENT_PLAY_DOMAINS_ALL,
      min_child_depth=2,
      ipv6_enabled=true)
  }}
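One Jinja subtlety in this expression: in `value | f(a, b)`, the piped value becomes the filter's first positional argument, so the leading `{}` fills the `current_play_domains_all` parameter and `PRIMARY_DOMAIN`, the IPs, and `parent_dns_proxied` shift into the next slots (apex, ip4, ip6, proxied). A standalone sketch of that pipe semantics, using a toy filter rather than the repo's:

```python
# Sketch: Jinja pipe semantics. `x | f(a)` calls f(x, a), so in the vars file
# the `{}` is wildcard_records' first positional arg (current_play_domains_all).
from jinja2 import Environment

env = Environment()
env.filters["pair"] = lambda first, second: (first, second)
print(env.from_string("{{ {} | pair('apex') }}").render())  # ({}, 'apex')
```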
@@ -6,5 +6,6 @@
     - sys-svc-webserver
     - sys-svc-cln-domains
     - sys-svc-letsencrypt
+    - sys-svc-dns
   - include_tasks: utils/run_once.yml
   when: run_once_sys_stk_front_pure is not defined
roles/sys-svc-dns/README.md (new file, 26 lines)

# sys-svc-dns

Bootstrap and maintain **DNS prerequisites** for your web stack on Cloudflare.

This role validates credentials and (by default) ensures:
- **A (and optional AAAA) records** on the **apex** (`@`) for all **base SLD domains**
- **Wildcard A/AAAA records** (`*.parent`) for parent hosts via `sys-dns-wildcards`
- *(Optional)* **CAA** records for Let’s Encrypt (kept as a commented block you can re-enable)

Runs **once per play** and is safe to include in stacks that roll out many domains.

---

## What it does

1. **Validate `CLOUDFLARE_API_TOKEN`** is present (early fail if missing).
2. **Ensure apex A/AAAA exist** for every **base SLD** in `SYS_SVC_DNS_BASE_DOMAINS`:
   - Writes `@ A` → `networks.internet.ip4`
   - Writes `@ AAAA` → `networks.internet.ip6` (only if global and present)
3. *(Optional)* **CAA records** for all base SLDs (commented in the tasks; enable if you want CAA managed here).
4. **Ensure wildcard parent DNS exists** (`*.parent` derived from children):
   - Delegates to [`sys-dns-wildcards`](../sys-dns-wildcards/README.md)
   - Creates `A` (and `AAAA` if enabled) wildcard records on the Cloudflare zone, optionally proxied.

> Parent hosts example:
> `c.wiki.example.com` → **parent** `wiki.example.com` (plus `example.com` apex)
roles/sys-svc-dns/meta/main.yml (new file, 26 lines)

galaxy_info:
  author: "Kevin Veen-Birkenbach"
  description: "Cloudflare DNS bootstrap: parent host A/AAAA (and optional CAA) — runs once per play."
  license: "Infinito.Nexus NonCommercial License"
  license_url: "https://s.infinito.nexus/license"
  company: |
    Kevin Veen-Birkenbach
    Consulting & Coaching Solutions
    https://www.veen.world
  min_ansible_version: "2.12"
  platforms:
    - name: Archlinux
      versions: [rolling]
  galaxy_tags:
    - dns
    - cloudflare
    - automation
    - letsencrypt
    - nginx
  repository: "https://s.infinito.nexus/code"
  issue_tracker_url: "https://s.infinito.nexus/issues"
  documentation: "https://docs.infinito.nexus"
  logo:
    class: "fa-solid fa-cloud"
  run_after: []
dependencies: []
@@ -1,11 +1,17 @@
 ---

 - name: "Validate CLOUDFLARE_API_TOKEN"
   fail:
     msg: >
       The variable "CLOUDFLARE_API_TOKEN" must be defined and cannot be empty!
   when: (CLOUDFLARE_API_TOKEN | default('') | trim) == ''

+- name: "Apply apex A/AAAA for base domains"
+  include_tasks: 02_apex.yml
+  loop: "{{ SYS_SVC_DNS_BASE_DOMAINS | list }}"
+  loop_control:
+    loop_var: base_domain
+    label: "{{ base_domain }}"

 - name: "Ensure all CAA records are present"
   community.general.cloudflare_dns:
     api_token: "{{ CLOUDFLARE_API_TOKEN }}"
@@ -17,8 +23,17 @@
     value: "{{ item.1.value }}"
     ttl: 1
     state: present
-  loop: "{{ base_sld_domains | product(caa_entries) | list }}"
+  loop: "{{ SYS_SVC_DNS_BASE_DOMAINS | product(caa_entries) | list }}"
   loop_control:
     label: "{{ item.0 }} → {{ item.1.tag }}"
   async: "{{ ASYNC_TIME if ASYNC_ENABLED | bool else omit }}"
   poll: "{{ ASYNC_POLL if ASYNC_ENABLED | bool else omit }}"

+- name: "Ensure wildcard parent DNS (*.parent) exists"
+  include_role:
+    name: sys-dns-wildcards
+  vars:
+    parent_dns_proxied: false
+  when: run_once_sys_dns_wildcards is not defined

 - include_tasks: utils/run_once.yml
roles/sys-svc-dns/tasks/02_apex.yml (new file, 28 lines)

---
- name: "Ensure A @ for {{ base_domain }}"
  community.general.cloudflare_dns:
    api_token: "{{ CLOUDFLARE_API_TOKEN }}"
    zone: "{{ base_domain }}"
    type: A
    name: "@"
    content: "{{ networks.internet.ip4 }}"
    proxied: false
    ttl: 1
    state: present
  async: "{{ ASYNC_TIME if ASYNC_ENABLED | bool else omit }}"
  poll: "{{ ASYNC_POLL if ASYNC_ENABLED | bool else omit }}"

- name: "Ensure AAAA @ for {{ base_domain }} (if IPv6 is global)"
  community.general.cloudflare_dns:
    api_token: "{{ CLOUDFLARE_API_TOKEN }}"
    zone: "{{ base_domain }}"
    type: AAAA
    name: "@"
    content: "{{ networks.internet.ip6 }}"
    proxied: false
    ttl: 1
    state: present
  when:
    - (networks.internet.ip6 | default('') | trim) != ''
  async: "{{ ASYNC_TIME if ASYNC_ENABLED | bool else omit }}"
  poll: "{{ ASYNC_POLL if ASYNC_ENABLED | bool else omit }}"
roles/sys-svc-dns/tasks/main.yml (new file, 4 lines)

- block:
    - include_tasks: 01_core.yml
      when: DNS_PROVIDER == 'cloudflare'
  when: run_once_sys_svc_dns is not defined
roles/sys-svc-dns/vars/main.yml (new file, 4 lines)

caa_entries:
  - tag: issue
    value: letsencrypt.org
SYS_SVC_DNS_BASE_DOMAINS: '{{ CURRENT_PLAY_DOMAINS_ALL | generate_base_sld_domains }}'
@@ -1,2 +1,2 @@
 # Todos
 - Implement issuewild and iodef -> Not possible yet due to API issues
@@ -1,14 +1,12 @@
|
||||
- name: Include dependency 'sys-ctl-mtn-cert-renew'
|
||||
include_role:
|
||||
name: sys-ctl-mtn-cert-renew
|
||||
when: run_once_sys_ctl_mtn_cert_renew is not defined
|
||||
- name: Include dependency 'sys-ctl-mtn-cert-renew'
|
||||
include_role:
|
||||
name: sys-ctl-mtn-cert-renew
|
||||
when: run_once_sys_ctl_mtn_cert_renew is not defined
|
||||
|
||||
- name: create nginx letsencrypt config file
|
||||
template:
|
||||
src: "letsencrypt.conf.j2"
|
||||
dest: "{{NGINX.DIRECTORIES.HTTP.GLOBAL}}letsencrypt.conf"
|
||||
notify: restart openresty
|
||||
- name: create nginx letsencrypt config file
|
||||
template:
|
||||
src: "letsencrypt.conf.j2"
|
||||
dest: "{{ [ NGINX.DIRECTORIES.HTTP.GLOBAL, 'letsencrypt.conf' ] | path_join }}"
|
||||
notify: restart openresty
|
||||
|
||||
- name: "Set CAA records for all base domains"
|
||||
include_tasks: 01_set-caa-records.yml
|
||||
when: DNS_PROVIDER == 'cloudflare'
|
||||
- include_tasks: utils/run_once.yml
|
||||
@@ -1,4 +1,3 @@
 - block:
     - include_tasks: 01_core.yml
-    - include_tasks: utils/run_once.yml
   when: run_once_sys_svc_letsencrypt is not defined
@@ -1,4 +0,0 @@
-caa_entries:
-  - tag: issue
-    value: letsencrypt.org
-base_sld_domains: '{{ CURRENT_PLAY_DOMAINS_ALL | generate_base_sld_domains }}'
@@ -11,10 +11,6 @@
 - name: "For '{{ application_id }}': load docker, db and proxy"
   include_role:
     name: sys-stk-full-stateful
-  vars:
-    # Forward flag into compose templating
-    cmp_extra_facts:
-      akaunting_setup_enabled: "{{ akaunting_setup_enabled }}"

 - name: "Akaunting | Create first-run marker to disable future setup"
   ansible.builtin.file:
roles/web-app-mediawiki/tasks/03_patch_settings.yml (new file, 65 lines)

# roles/web-app-mediawiki/tasks/03_patch_settings.yml
- name: "MEDIAWIKI | Ensure LocalSettings.php has correct base settings"
  vars:
    _lsp_path: "{{ MEDIAWIKI_HTML_DIR }}/LocalSettings.php"
    _server_url: "{{ MEDIAWIKI_URL | regex_replace('/+$', '') }}"
    # Pre-escape single quotes for safe insertion into PHP single-quoted strings:
    _server_url_sq: "{{ _server_url | replace(\"'\", \"'\\\\''\") }}"
    _db_name_sq: "{{ database_name | replace(\"'\", \"'\\\\''\") }}"
    _db_user_sq: "{{ database_username | replace(\"'\", \"'\\\\''\") }}"
    _db_pass_sq: "{{ database_password | replace(\"'\", \"'\\\\''\") }}"
    _db_host_sq: "{{ (database_host ~ ':' ~ database_port) | replace(\"'\", \"'\\\\''\") }}"
    _lang_sq: "{{ HOST_LL | replace(\"'\", \"'\\\\''\") }}"
  shell: |
    docker exec -u {{ MEDIAWIKI_USER }} {{ MEDIAWIKI_CONTAINER }} bash -lc '
    set -euo pipefail
    LSP="{{ _lsp_path }}"
    SERVER='\''{{ _server_url_sq }}'\''
    DBNAME='\''{{ _db_name_sq }}'\''
    DBUSER='\''{{ _db_user_sq }}'\''
    DBPASS='\''{{ _db_pass_sq }}'\''
    DBHOST='\''{{ _db_host_sq }}'\''
    LANG='\''{{ _lang_sq }}'\''
    [ -f "$LSP" ] || { echo "LocalSettings.php not found, skipping."; exit 0; }

    need=0

    check_line() {
      local key="$1" val="$2"
      grep -Eq "^[[:space:]]*\$${key}[[:space:]]*=[[:space:]]*'\''${val}'\'';" "$LSP" || need=1
    }

    check_line wgServer "$SERVER"
    check_line wgCanonicalServer "$SERVER"
    check_line wgDBname "$DBNAME"
    check_line wgDBuser "$DBUSER"
    check_line wgDBpassword "$DBPASS"
    check_line wgDBserver "$DBHOST"
    check_line wgLanguageCode "$LANG"

    if [ "$need" -eq 1 ]; then
      tmp="$(mktemp)"
      # Remove any existing definitions for these keys
      grep -Ev "^[[:space:]]*\$(wgServer|wgCanonicalServer|wgDBname|wgDBuser|wgDBpassword|wgDBserver|wgLanguageCode)[[:space:]]*=" "$LSP" > "$tmp" || true

      {
        printf "\n\$wgServer = '\''%s'\'';\n" "$SERVER"
        printf "\$wgCanonicalServer = '\''%s'\'';\n" "$SERVER"
        printf "\$wgDBname = '\''%s'\'';\n" "$DBNAME"
        printf "\$wgDBuser = '\''%s'\'';\n" "$DBUSER"
        printf "\$wgDBpassword = '\''%s'\'';\n" "$DBPASS"
        printf "\$wgDBserver = '\''%s'\'';\n" "$DBHOST"
        printf "\$wgLanguageCode = '\''%s'\'';\n" "$LANG"
      } >> "$tmp"

      cat "$tmp" > "$LSP"
      rm -f "$tmp"
      echo CHANGED
    fi
    '
  args:
    executable: /bin/bash
  register: mw_lsp_update
  changed_when: "'CHANGED' in (mw_lsp_update.stdout | default(''))"
  failed_when: mw_lsp_update.rc != 0
  no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"
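The `replace("'", "'\\''")` pre-escaping in the vars above uses the standard shell idiom for embedding a single quote in a single-quoted string: close the quote, emit an escaped quote, reopen. A quick sketch of what that substitution produces (toy value, not a repo variable):

```python
# Sketch: the single-quote pre-escaping used in the vars above.
# "O'Brien" -> "O'\''Brien", which is safe to splice into a
# single-quoted shell string: 'O'\''Brien' expands back to O'Brien.
value = "O'Brien"
escaped = value.replace("'", "'\\''")
print(escaped)              # O'\''Brien
print(f"echo '{escaped}'")  # echo 'O'\''Brien'
```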
@@ -22,11 +22,14 @@
     require_path: "{{ MEDIAWIKI_LOCAL_PATH }}/debug.php"
   when: MODE_DEBUG | bool

+- name: "MEDIAWIKI | Sync LocalSettings.php with Ansible vars"
+  include_tasks: 03_patch_settings.yml

 - name: "Load admin setup procedures for '{{ application_id }}''"
-  include_tasks: 03_admin.yml
+  include_tasks: 04_admin.yml

 - name: "Load extensions procedures for '{{ application_id }}''"
-  include_tasks: "04_extensions.yml"
+  include_tasks: "05_extensions.yml"
   when: MEDIAWIKI_OIDC_ENABLED | bool

 - name: "OIDC | Ensure require_once(oidc.php) present"
@@ -2,15 +2,5 @@

 This Ansible role configures Nginx to perform 301 redirects from one domain to another. It handles SSL certificate retrieval for the source domains and sets up the Nginx configuration to redirect to the specified target domains.

-## Role Variables
-
-- `domain_mappings`: A list of objects with `source` and `target` properties specifying the domains to redirect from and to.
-- `users.administrator.email`: The email used for SSL certificate registration with Let's Encrypt.
-
-## Dependencies
-
-- `sys-stk-front-pure`: A role for setting up HTTPS for Nginx
-- `letsencrypt`: A role for managing SSL certificates with Let's Encrypt
-
 ## Author Information
 This role was created in 2023 by [Kevin Veen-Birkenbach](https://www.veen.world/).
@@ -0,0 +1,7 @@
- name: Include Cloudflare redirect rule to enforce www → apex
  include_tasks: _02_cloudflare_redirect_rule.yml
  vars:
    domain: "{{ item | regex_replace('^www\\.', '') }}"
    www_fqdn: "{{ item }}"
    apex_url: "{{ WEB_PROTOCOL }}://{{ item | regex_replace('^www\\.', '') }}"
  loop: "{{ REDIRECT_WWW_DOMAINS }}"
@@ -10,38 +10,11 @@
   include_role:
     name: web-opt-rdr-domains
   vars:
-    domain_mappings: "{{ REDIRECT_WWW_DOMAINS | map('regex_replace', '^www\\.(.+)$', '{ source: \"www.\\1\", target: \"\\1\" }') | map('from_yaml') | list }}"
+    redirect_domain_mappings: "{{ REDIRECT_WWW_DOMAINS | map('regex_replace', '^www\\.(.+)$', '{ source: \"www.\\1\", target: \"\\1\" }') | map('from_yaml') | list }}"
   when: REDIRECT_WWW_FLAVOR == 'origin'

-- name: Include DNS role to set redirects
-  include_role:
-    name: sys-dns-cloudflare-records
-  vars:
-    cloudflare_records: |
-      {%- set bare = REDIRECT_WWW_DOMAINS | map('regex_replace', '^www\\.(.+)$', '\\1') | list -%}
-      [
-      {%- for d in bare -%}
-        {
-          "type": "A",
-          "zone": "{{ d | to_zone }}",
-          "name": "{{ d }}",
-          "content": "{{ networks.internet.ip4 }}",
-          "proxied": {{ REDIRECT_WWW_FLAVOR == 'edge' }},
-          "ttl": 1
-        }{{ "," if not loop.last else "" }}
-      {%- endfor -%}
-      ]
+- name: Include Cloudflare edge redirect
+  include_tasks: _01_cloudflare_edge_redirect.yml
   when:
     - DNS_PROVIDER == 'cloudflare'
-    - REDIRECT_WWW_FLAVOR == 'origin'

-- name: Include Cloudflare redirect rule to enforce www → apex
-  include_tasks: cloudflare_redirect_rule.yml
-  vars:
-    domain: "{{ item | regex_replace('^www\\.', '') }}"
-    www_fqdn: "{{ item }}"
-    apex_url: "{{ WEB_PROTOCOL }}://{{ item | regex_replace('^www\\.', '') }}"
-  loop: "{{ REDIRECT_WWW_DOMAINS }}"
-  when: REDIRECT_WWW_FLAVOR == 'edge'
+    - REDIRECT_WWW_FLAVOR == 'edge'
@@ -61,6 +61,7 @@
       canonical_domains_map(PRIMARY_DOMAIN) |
       combine(CURRENT_PLAY_DOMAINS, recursive=True)
     }}

+- name: Merge redirect_domain_mappings
+  set_fact:
     # The following mapping is necessary to define the exceptions for domains which are created, but which aren't used
tests/integration/test_vars_usage_in_yaml.py (new file, 173 lines)

import unittest
from pathlib import Path
import re
from typing import Any, Iterable, Set, List
import yaml


class TestVarsPassedAreUsed(unittest.TestCase):
    """
    Integration test:
      - Walk all *.yml/*.yaml and *.j2 files
      - Collect variable names passed via task-level `vars:`
      - Consider a var "used" if it appears in ANY of:
          • Jinja output blocks:    {{ ... var_name ... }}
          • Jinja statement blocks: {% ... var_name ... %}
            (robust against inner '}' / '%' via tempered regex)
          • Ansible expressions in YAML:
              - when: <expr>  (string or list of strings)
              - loop: <expr>
              - with_*: <expr>

    Additional rule:
      - Do NOT count as used if the token is immediately followed by '(' (optionally with whitespace),
        i.e. treat `var_name(` as a function/macro call, not a variable usage.
    """

    REPO_ROOT = Path(__file__).resolve().parents[2]
    YAML_EXTENSIONS = {".yml", ".yaml"}
    JINJA_EXTENSIONS = {".j2"}

    # ---------- File iteration & YAML loading ----------

    def _iter_files(self, extensions: set[str]) -> Iterable[Path]:
        for p in self.REPO_ROOT.rglob("*"):
            if p.is_file() and p.suffix in extensions:
                yield p

    def _load_yaml_documents(self, path: Path) -> List[Any]:
        try:
            with path.open("r", encoding="utf-8") as f:
                return list(yaml.safe_load_all(f)) or []
        except Exception:
            # File may contain heavy templating or anchors; skip structural parse
            return []

    def _walk_mapping(self, node: Any) -> Iterable[dict]:
        if isinstance(node, dict):
            yield node
            for v in node.values():
                yield from self._walk_mapping(v)
        elif isinstance(node, list):
            for item in node:
                yield from self._walk_mapping(item)

    # ---------- Collect vars passed via `vars:` ----------

    def _collect_vars_passed(self) -> Set[str]:
        collected: Set[str] = set()
        for yml in self._iter_files(self.YAML_EXTENSIONS):
            docs = self._load_yaml_documents(yml)
            for doc in docs:
                for mapping in self._walk_mapping(doc):
                    if "vars" in mapping and isinstance(mapping["vars"], dict):
                        for k in mapping["vars"].keys():
                            if isinstance(k, str) and k.strip():
                                collected.add(k.strip())
        return collected

    # ---------- Gather text for Jinja usage scanning ----------

    def _concat_texts(self) -> str:
        parts: List[str] = []
        for f in self._iter_files(self.YAML_EXTENSIONS | self.JINJA_EXTENSIONS):
            try:
                parts.append(f.read_text(encoding="utf-8"))
            except Exception:
                # Non-UTF8 or unreadable — ignore
                pass
        return "\n".join(parts)

    # ---------- Extract Ansible expression strings from YAML ----------

    def _collect_ansible_expressions(self) -> List[str]:
        """
        Return a flat list of strings taken from Ansible expression-bearing fields:
          - when: <str> or when: [<str>, <str>, ...]
          - loop: <str>
          - with_*: <str>
        """
        exprs: List[str] = []
        for yml in self._iter_files(self.YAML_EXTENSIONS):
            docs = self._load_yaml_documents(yml)
            for doc in docs:
                for mapping in self._walk_mapping(doc):
                    for key, val in list(mapping.items()):
                        if key == "when":
                            if isinstance(val, str):
                                exprs.append(val)
                            elif isinstance(val, list):
                                exprs.extend([x for x in val if isinstance(x, str)])
                        elif key == "loop":
                            if isinstance(val, str):
                                exprs.append(val)
                        elif isinstance(key, str) and key.startswith("with_"):
                            if isinstance(val, str):
                                exprs.append(val)
        return exprs

    # ---------- Usage checks ----------

    def _used_in_jinja_blocks(self, var_name: str, text: str) -> bool:
        """
        Detect var usage inside Jinja blocks, excluding function/macro calls like `var_name(...)`.
        We use a tempered regex to avoid stopping at the first '}}'/'%}' and a negative lookahead
        `(?!\\s*\\()` after the token.
        """
        # Word token not followed by '(' → real variable usage
        token = r"\b" + re.escape(var_name) + r"\b(?!\s*\()"

        # Output blocks: {{ ... }}
        pat_output = re.compile(
            r"{{(?:(?!}}).)*" + token + r"(?:(?!}}).)*}}",
            re.DOTALL,
        )
        # Statement blocks: {% ... %}
        pat_stmt = re.compile(
            r"{%(?:(?!%}).)*" + token + r"(?:(?!%}).)*%}",
            re.DOTALL,
        )
        return pat_output.search(text) is not None or pat_stmt.search(text) is not None

    def _used_in_ansible_exprs(self, var_name: str, exprs: List[str]) -> bool:
        """
        Detect var usage in Ansible expressions (when/loop/with_*),
        excluding function/macro calls like `var_name(...)`.
        """
        pat = re.compile(r"\b" + re.escape(var_name) + r"\b(?!\s*\()")
        return any(pat.search(e) for e in exprs)

    # ---------- Test ----------

    def test_vars_passed_are_used_in_yaml_or_jinja(self):
        vars_passed = self._collect_vars_passed()
        self.assertTrue(
            vars_passed,
            "No variables passed via `vars:` were found. "
            "Check the repo root path in this test."
        )

        all_text = self._concat_texts()
        ansible_exprs = self._collect_ansible_expressions()

        unused: List[str] = []
        for var_name in sorted(vars_passed):
            used = (
                self._used_in_jinja_blocks(var_name, all_text)
                or self._used_in_ansible_exprs(var_name, ansible_exprs)
            )
            if not used:
                unused.append(var_name)

        if unused:
            msg = (
                "The following variables are passed via `vars:` but never referenced in:\n"
                "  • Jinja output/statement blocks ({{ ... }} / {% ... %}) OR\n"
                "  • Ansible expressions (when/loop/with_*)\n\n"
                + "\n".join(f"  - {v}" for v in unused)
                + "\n\nNotes:\n"
                "  • Function-like tokens (name followed by '(') are ignored intentionally.\n"
                "  • If a var is only used in Python code or other file types, extend the test accordingly\n"
                "    or remove the var if it's truly unused."
            )
            self.fail(msg)
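The tempered patterns in `_used_in_jinja_blocks` deserve a close look: `(?:(?!}}).)*` consumes characters only while the closing `}}` is not next, which keeps a match confined to a single Jinja block instead of greedily spanning several. A standalone sketch of that behavior, with a hypothetical variable name:

```python
# Sketch: the tempered-regex idea used by _used_in_jinja_blocks.
# `(?:(?!}}).)*` matches any run of characters that never steps over `}}`,
# so the search stays inside one {{ ... }} block.
import re

token = r"\bmy_var\b(?!\s*\()"          # variable use, not a call like my_var(...)
pat = re.compile(r"{{(?:(?!}}).)*" + token + r"(?:(?!}}).)*}}", re.DOTALL)

assert pat.search("{{ my_var | default('x') }}")         # plain usage -> match
assert not pat.search("{{ my_var(1) }}")                 # call -> ignored
assert not pat.search("{{ other }} my_var {{ other }}")  # outside blocks -> ignored
```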
tests/unit/roles/sys-dns-wildcards/__init__.py (new file, empty)

tests/unit/roles/sys-dns-wildcards/filter_plugins/test_wildcard_dns.py (new file, 249 lines)
# tests/unit/roles/sys-dns-wildcards/filter_plugins/test_wildcard_dns.py
import unittest
import importlib.util
from pathlib import Path


def _load_module():
    """
    Load the wildcard_dns filter plugin from:
      roles/sys-dns-wildcards/filter_plugins/wildcard_dns.py
    """
    here = Path(__file__).resolve()
    # Go up to repository root (…/tests/unit/roles/… → 5 levels up)
    repo_root = here.parents[5] if len(here.parents) >= 6 else here.parents[0]

    path = repo_root / "roles" / "sys-dns-wildcards" / "filter_plugins" / "wildcard_dns.py"
    if not path.exists():
        raise FileNotFoundError(f"Could not find {path}")

    spec = importlib.util.spec_from_file_location("wildcard_dns", path)
    mod = importlib.util.module_from_spec(spec)
    assert spec and spec.loader
    spec.loader.exec_module(mod)  # type: ignore[attr-defined]
    return mod


_wildcard_dns = _load_module()


def _get_filter():
    """Return the wildcard_records filter function from the plugin."""
    fm = _wildcard_dns.FilterModule()
    filters = fm.filters()
    if "wildcard_records" not in filters:
        raise AssertionError("wildcard_records filter not found")
    return filters["wildcard_records"]


def _as_set(records):
    """Normalize records for order-independent comparison."""
    return {
        (r.get("type"), r.get("name"), r.get("content"), bool(r.get("proxied")))
        for r in records
    }


class TestWildcardDNS(unittest.TestCase):
    def setUp(self):
        self.wildcard_records = _get_filter()

    def test_only_wildcards_including_apex(self):
        apex = "example.com"
        cpda = {
            "svc-a": ["c.wiki.example.com", "a.b.example.com"],
            "svc-b": {"extra": ["www.a.b.example.com"]},
            "svc-c": "example.com",
        }

        recs = self.wildcard_records(
            current_play_domains_all=cpda,
            apex=apex,
            ip4="203.0.113.10",
            ip6="2606:4700:4700::1111",
            proxied=True,
            explicit_domains=None,
            min_child_depth=2,
            ipv6_enabled=True,
        )

        got = _as_set(recs)
        expected = {
            # apex wildcard always
            ("A", "*", "203.0.113.10", True),
            ("AAAA", "*", "2606:4700:4700::1111", True),

            # derived parents
            ("A", "*.wiki", "203.0.113.10", True),
            ("AAAA", "*.wiki", "2606:4700:4700::1111", True),
            ("A", "*.b", "203.0.113.10", True),
            ("AAAA", "*.b", "2606:4700:4700::1111", True),
            # www.a.b.example.com promotes a.b.example.com as a parent
            ("A", "*.a.b", "203.0.113.10", True),
            ("AAAA", "*.a.b", "2606:4700:4700::1111", True),
        }
        self.assertEqual(got, expected)

    def test_min_child_depth_yields_only_apex(self):
        apex = "example.com"
        cpda = {"svc": ["x.example.com"]}  # depth = 1, below threshold

        recs = self.wildcard_records(
            current_play_domains_all=cpda,
            apex=apex,
            ip4="198.51.100.42",
            ip6="2606:4700:4700::1111",
            proxied=False,
            explicit_domains=None,
            min_child_depth=2,  # requires >= 2 → no parent derived
            ipv6_enabled=True,
        )
        got = _as_set(recs)
        expected = {
            ("A", "*", "198.51.100.42", False),
            ("AAAA", "*", "2606:4700:4700::1111", False),
        }
        self.assertEqual(got, expected)

    def test_ipv6_disabled_and_private_ipv6_filtered(self):
        apex = "example.com"
        cpda = {"svc": ["a.b.example.com"]}

        # IPv6 disabled → only A records (apex + parent)
        recs1 = self.wildcard_records(
            current_play_domains_all=cpda,
            apex=apex,
            ip4="203.0.113.9",
            ip6="2606:4700:4700::1111",
            proxied=False,
            explicit_domains=None,
            min_child_depth=2,
            ipv6_enabled=False,
        )
        self.assertEqual(
            _as_set(recs1),
            {
                ("A", "*", "203.0.113.9", False),
                ("A", "*.b", "203.0.113.9", False),
            },
        )

        # IPv6 enabled but ULA (not global) → skip AAAA (apex + parent)
        recs2 = self.wildcard_records(
            current_play_domains_all=cpda,
            apex=apex,
            ip4="203.0.113.9",
            ip6="fd00::1",
            proxied=False,
            explicit_domains=None,
            min_child_depth=2,
            ipv6_enabled=True,
        )
        self.assertEqual(
            _as_set(recs2),
            {
                ("A", "*", "203.0.113.9", False),
                ("A", "*.b", "203.0.113.9", False),
            },
        )

    def test_proxied_flag_true_is_set(self):
        recs = self.wildcard_records(
            current_play_domains_all={"svc": ["a.b.example.com"]},
            apex="example.com",
            ip4="203.0.113.7",
            ip6=None,
            proxied=True,
            explicit_domains=None,
            min_child_depth=2,
            ipv6_enabled=True,
        )
        self.assertTrue(all(r.get("proxied") is True for r in recs))
        self.assertEqual(
            _as_set(recs),
            {
                ("A", "*", "203.0.113.7", True),
                ("A", "*.b", "203.0.113.7", True),
            },
        )

    def test_explicit_domains_override_source(self):
        cpda = {"svc": ["ignore.me.example.com", "a.b.example.com"]}
        explicit = ["c.wiki.example.com"]

        recs = self.wildcard_records(
            current_play_domains_all=cpda,
            apex="example.com",
            ip4="203.0.113.5",
            ip6="2606:4700:4700::1111",
            proxied=False,
            explicit_domains=explicit,
            min_child_depth=2,
            ipv6_enabled=True,
        )
        self.assertEqual(
            _as_set(recs),
            {
                # apex wildcard always
                ("A", "*", "203.0.113.5", False),
                ("AAAA", "*", "2606:4700:4700::1111", False),

                # derived from explicit domain
                ("A", "*.wiki", "203.0.113.5", False),
                ("AAAA", "*.wiki", "2606:4700:4700::1111", False),
            },
        )

    def test_nested_structures_flattened_correctly(self):
        cpda = {
            "svc1": {
                "primary": ["c.wiki.example.com"],
                "extra": {"alt": ["a.b.example.com"]},
            },
            "svc2": "www.a.b.example.com",
            "svc3": ["x.example.net"],  # wrong apex → ignored
        }

        recs = self.wildcard_records(
            current_play_domains_all=cpda,
            apex="example.com",
            ip4="203.0.113.21",
            ip6="2606:4700:4700::1111",
            proxied=False,
            explicit_domains=None,
            min_child_depth=2,
            ipv6_enabled=True,
        )
        got = _as_set(recs)
        expected = {
            # apex wildcard always
            ("A", "*", "203.0.113.21", False),
            ("AAAA", "*", "2606:4700:4700::1111", False),

            # derived parents
            ("A", "*.wiki", "203.0.113.21", False),
            ("AAAA", "*.wiki", "2606:4700:4700::1111", False),
            ("A", "*.b", "203.0.113.21", False),
            ("AAAA", "*.b", "2606:4700:4700::1111", False),
            # www.a.b.example.com promotes a.b.example.com as a parent
            ("A", "*.a.b", "203.0.113.21", False),
            ("AAAA", "*.a.b", "2606:4700:4700::1111", False),
        }
        self.assertEqual(got, expected)

    def test_error_on_missing_ip4(self):
        with self.assertRaises(Exception):
            self.wildcard_records(
                current_play_domains_all={"svc": ["a.b.example.com"]},
                apex="example.com",
                ip4="",  # must not be empty
                ip6=None,
                proxied=False,
                explicit_domains=None,
                min_child_depth=2,
                ipv6_enabled=True,
            )


if __name__ == "__main__":
    unittest.main()