diff --git a/.dockerignore b/.dockerignore index 2d12d074..5b48fee3 100644 --- a/.dockerignore +++ b/.dockerignore @@ -11,4 +11,4 @@ venv roles/list.json *.pyc *.egg-info -build.git +./build/*.git diff --git a/.github/workflows/lint-ansible.yml b/.github/workflows/lint-ansible.yml new file mode 100644 index 00000000..2653782f --- /dev/null +++ b/.github/workflows/lint-ansible.yml @@ -0,0 +1,19 @@ +name: Ansible Linter + +on: + workflow_call: + +jobs: + test-unit: + runs-on: ubuntu-latest + timeout-minutes: 30 + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Show Docker version + run: docker version + + - name: Run lint-ansible via make + run: make lint-ansible \ No newline at end of file diff --git a/.github/workflows/lint-python.yml b/.github/workflows/lint-python.yml index 8d674295..e7325824 100644 --- a/.github/workflows/lint-python.yml +++ b/.github/workflows/lint-python.yml @@ -1,4 +1,4 @@ -name: Lint Infinito.Nexus Python with ruff +name: Python Linter on: workflow_call: diff --git a/.github/workflows/lint-shell.yml b/.github/workflows/lint-shell.yml index 4af233ce..019d1292 100644 --- a/.github/workflows/lint-shell.yml +++ b/.github/workflows/lint-shell.yml @@ -1,4 +1,4 @@ -name: ShellCheck +name: Shell Linter on: workflow_call: diff --git a/.github/workflows/mark-stable.yml b/.github/workflows/mark-stable.yml index 72743fbb..852c3028 100644 --- a/.github/workflows/mark-stable.yml +++ b/.github/workflows/mark-stable.yml @@ -9,6 +9,8 @@ on: - 'v*' jobs: + lint-ansible: + uses: ./.github/workflows/lint-ansible.yml lint-python: uses: ./.github/workflows/lint-python.yml lint-shell: diff --git a/.github/workflows/test-code-unit.yml b/.github/workflows/test-code-unit.yml index 1e30ac6b..0adceeeb 100644 --- a/.github/workflows/test-code-unit.yml +++ b/.github/workflows/test-code-unit.yml @@ -12,8 +12,5 @@ jobs: - name: Checkout repository uses: actions/checkout@v4 - - name: Show Docker version - run: docker version - - name: Run unit tests via make run: make test-unit diff --git a/Makefile b/Makefile index 50e5b96f..7e875eaf 100644 --- a/Makefile +++ b/Makefile @@ -66,7 +66,7 @@ clean-keep-logs: clean: @echo "Removing ignored git files" - git clean -fdX + sudo git clean -fdX list: @echo "Generating the roles list" @@ -153,15 +153,15 @@ test-lint: build-missing test-unit: build-missing @TEST_TYPE="unit" bash scripts/tests/code.sh -test-integration: setup-clean build-missing +test-integration: build-missing @TEST_TYPE="integration" bash scripts/tests/code.sh # Backwards compatible target (kept) -test-messy: test-lint test-unit test-integration +test-ansible: @echo "πŸ“‘ Checking Ansible syntax…" ansible-playbook -i localhost, -c local $(foreach f,$(wildcard group_vars/all/*.yml),-e @$(f)) playbook.yml --syntax-check -test: setup-clean test-messy +test: test-lint test-unit test-integration test-ansible @echo "βœ… Full test (setup + tests) executed." 
# Debug helper diff --git a/build/lib/cli/__init__.py b/build/lib/cli/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/build/lib/cli/build/__init__.py b/build/lib/cli/build/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/build/lib/cli/build/graph.py b/build/lib/cli/build/graph.py new file mode 100644 index 00000000..904954f5 --- /dev/null +++ b/build/lib/cli/build/graph.py @@ -0,0 +1,352 @@ +#!/usr/bin/env python3 +import os +import argparse +import yaml +import json +import re +from typing import List, Dict, Any, Set + +from module_utils.role_dependency_resolver import RoleDependencyResolver + +# Regex used to ignore Jinja expressions inside include/import statements +JINJA_PATTERN = re.compile(r'{{.*}}') + +# All dependency types the graph builder supports +ALL_DEP_TYPES = [ + "run_after", + "dependencies", + "include_tasks", + "import_tasks", + "include_role", + "import_role", +] + +# Graph directions: outgoing edges ("to") vs incoming edges ("from") +ALL_DIRECTIONS = ["to", "from"] + +# Combined keys: e.g. "include_role_to", "dependencies_from", etc. +ALL_KEYS = [f"{dep}_{direction}" for dep in ALL_DEP_TYPES for direction in ALL_DIRECTIONS] + + +# ------------------------------------------------------------ +# Helpers for locating meta and task files +# ------------------------------------------------------------ + +def find_role_meta(roles_dir: str, role: str) -> str: + """Return path to meta/main.yml of a role or raise FileNotFoundError.""" + path = os.path.join(roles_dir, role, "meta", "main.yml") + if not os.path.isfile(path): + raise FileNotFoundError(f"Metadata not found for role: {role}") + return path + + +def find_role_tasks(roles_dir: str, role: str) -> str: + """Return path to tasks/main.yml of a role or raise FileNotFoundError.""" + path = os.path.join(roles_dir, role, "tasks", "main.yml") + if not os.path.isfile(path): + raise FileNotFoundError(f"Tasks not found for role: {role}") + return path + + +# ------------------------------------------------------------ +# Parsers for meta and tasks +# ------------------------------------------------------------ + +def load_meta(path: str) -> Dict[str, Any]: + """ + Load metadata from meta/main.yml. + Returns a dict with: + - galaxy_info + - run_after + - dependencies + """ + with open(path, "r") as f: + data = yaml.safe_load(f) or {} + + galaxy_info = data.get("galaxy_info", {}) or {} + return { + "galaxy_info": galaxy_info, + "run_after": galaxy_info.get("run_after", []) or [], + "dependencies": data.get("dependencies", []) or [], + } + + +def load_tasks(path: str, dep_type: str) -> List[str]: + """ + Parse include_tasks/import_tasks from tasks/main.yml. + Only accepts simple, non-Jinja names. 
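    For example (illustrative file names), with dep_type="include_tasks" a task list such as

        - include_tasks: 01_core.yml
        - include_tasks: "{{ item }}.yml"
        - import_tasks: reset.yml

    yields ["01_core.yml"]: the Jinja entry is skipped, and the import_tasks entry
    is only collected when dep_type="import_tasks".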
+ """ + with open(path, "r") as f: + data = yaml.safe_load(f) or [] + + roles: List[str] = [] + + for task in data: + if not isinstance(task, dict): + continue + + if dep_type in task: + entry = task[dep_type] + if isinstance(entry, dict): + entry = entry.get("name", "") + if isinstance(entry, str) and entry and not JINJA_PATTERN.search(entry): + roles.append(entry) + + return roles + + +# ------------------------------------------------------------ +# Graph builder using precomputed caches (fast) +# ------------------------------------------------------------ + +def build_single_graph( + start_role: str, + dep_type: str, + direction: str, + roles_dir: str, + max_depth: int, + caches: Dict[str, Any], +) -> Dict[str, Any]: + """ + Build a graph (nodes + links) for one role, one dep_type, one direction. + Uses only precomputed in-memory caches, no filesystem access. + + caches structure: + caches["meta"][role] -> meta information + caches["deps"][dep_type][role] -> outgoing targets + caches["rev"][dep_type][target] -> set of source roles + """ + + nodes: Dict[str, Dict[str, Any]] = {} + links: List[Dict[str, str]] = [] + + meta_cache = caches["meta"] + deps_cache = caches["deps"] + rev_cache = caches["rev"] + + # -------------------------------------------------------- + # Ensure a role exists as a node + # -------------------------------------------------------- + def ensure_node(role: str): + if role in nodes: + return + + # Try retrieving cached meta; fallback: lazy load + meta = meta_cache.get(role) + if meta is None: + try: + meta = load_meta(find_role_meta(roles_dir, role)) + meta_cache[role] = meta + except FileNotFoundError: + meta = {"galaxy_info": {}} + + galaxy_info = meta.get("galaxy_info", {}) or {} + + node = { + "id": role, + **galaxy_info, + "doc_url": f"https://docs.infinito.nexus/roles/{role}/README.html", + "source_url": f"https://github.com/kevinveenbirkenbach/infinito-nexus/tree/master/roles/{role}", + } + nodes[role] = node + + # -------------------------------------------------------- + # Outgoing edges: role -> targets + # -------------------------------------------------------- + def outgoing(role: str) -> List[str]: + return deps_cache.get(dep_type, {}).get(role, []) or [] + + # -------------------------------------------------------- + # Incoming edges: sources -> role + # -------------------------------------------------------- + def incoming(role: str) -> Set[str]: + return rev_cache.get(dep_type, {}).get(role, set()) + + # -------------------------------------------------------- + # DFS traversal + # -------------------------------------------------------- + def traverse(role: str, depth: int, path: Set[str]): + ensure_node(role) + + if max_depth > 0 and depth >= max_depth: + return + + if direction == "to": + for tgt in outgoing(role): + ensure_node(tgt) + links.append({"source": role, "target": tgt, "type": dep_type}) + if tgt not in path: + traverse(tgt, depth + 1, path | {tgt}) + + else: # direction == "from" + for src in incoming(role): + ensure_node(src) + links.append({"source": src, "target": role, "type": dep_type}) + if src not in path: + traverse(src, depth + 1, path | {src}) + + traverse(start_role, 0, {start_role}) + + return {"nodes": list(nodes.values()), "links": links} + + +# ------------------------------------------------------------ +# Build all graph variants for one role +# ------------------------------------------------------------ + +def build_mappings( + start_role: str, + roles_dir: str, + max_depth: int +) -> Dict[str, Any]: + """ + 
Build all 12 graph variants (6 dep types Γ— 2 directions). + Accelerated version: + - One-time scan of all metadata + - One-time scan of all include_role/import_role + - One-time scan of include_tasks/import_tasks + - Build reverse-index tables + - Then generate all graphs purely from memory + """ + + result: Dict[str, Any] = {} + + roles = [ + r for r in os.listdir(roles_dir) + if os.path.isdir(os.path.join(roles_dir, r)) + ] + + # Pre-caches + meta_cache: Dict[str, Dict[str, Any]] = {} + deps_cache: Dict[str, Dict[str, List[str]]] = {dep: {} for dep in ALL_DEP_TYPES} + rev_cache: Dict[str, Dict[str, Set[str]]] = {dep: {} for dep in ALL_DEP_TYPES} + + resolver = RoleDependencyResolver(roles_dir) + + # -------------------------------------------------------- + # Step 1: Preload meta-based deps (run_after, dependencies) + # -------------------------------------------------------- + for role in roles: + try: + meta = load_meta(find_role_meta(roles_dir, role)) + except FileNotFoundError: + continue + + meta_cache[role] = meta + + for dep_key in ["run_after", "dependencies"]: + values = meta.get(dep_key, []) or [] + if isinstance(values, list) and values: + deps_cache[dep_key][role] = values + + for tgt in values: + if isinstance(tgt, str) and tgt.strip(): + rev_cache[dep_key].setdefault(tgt.strip(), set()).add(role) + + # -------------------------------------------------------- + # Step 2: Preload include_role/import_role (resolver) + # -------------------------------------------------------- + for role in roles: + role_path = os.path.join(roles_dir, role) + inc, imp = resolver._scan_tasks(role_path) + + if inc: + inc_list = sorted(inc) + deps_cache["include_role"][role] = inc_list + for tgt in inc_list: + rev_cache["include_role"].setdefault(tgt, set()).add(role) + + if imp: + imp_list = sorted(imp) + deps_cache["import_role"][role] = imp_list + for tgt in imp_list: + rev_cache["import_role"].setdefault(tgt, set()).add(role) + + # -------------------------------------------------------- + # Step 3: Preload include_tasks/import_tasks + # -------------------------------------------------------- + for role in roles: + try: + tasks_path = find_role_tasks(roles_dir, role) + except FileNotFoundError: + continue + + for dep_key in ["include_tasks", "import_tasks"]: + values = load_tasks(tasks_path, dep_key) + if values: + deps_cache[dep_key][role] = values + + for tgt in values: + rev_cache[dep_key].setdefault(tgt, set()).add(role) + + caches = { + "meta": meta_cache, + "deps": deps_cache, + "rev": rev_cache, + } + + # -------------------------------------------------------- + # Step 4: Build all graphs from caches + # -------------------------------------------------------- + for key in ALL_KEYS: + dep_type, direction = key.rsplit("_", 1) + try: + result[key] = build_single_graph( + start_role=start_role, + dep_type=dep_type, + direction=direction, + roles_dir=roles_dir, + max_depth=max_depth, + caches=caches, + ) + except Exception: + result[key] = {"nodes": [], "links": []} + + return result + + +# ------------------------------------------------------------ +# Output helper +# ------------------------------------------------------------ + +def output_graph(graph_data: Any, fmt: str, start: str, key: str): + base = f"{start}_{key}" + if fmt == "console": + print(f"--- {base} ---") + print(yaml.safe_dump(graph_data, sort_keys=False)) + + else: + path = f"{base}.{fmt}" + with open(path, "w") as f: + if fmt == "yaml": + yaml.safe_dump(graph_data, f, sort_keys=False) + else: + json.dump(graph_data, 
f, indent=2) + print(f"Wrote {path}") + + +# ------------------------------------------------------------ +# CLI entrypoint +# ------------------------------------------------------------ + +def main(): + script_dir = os.path.dirname(os.path.abspath(__file__)) + default_roles_dir = os.path.abspath(os.path.join(script_dir, "..", "..", "roles")) + + parser = argparse.ArgumentParser(description="Generate dependency graphs") + parser.add_argument("-r", "--role", required=True, help="Starting role name") + parser.add_argument("-D", "--depth", type=int, default=0, help="Max recursion depth") + parser.add_argument("-o", "--output", choices=["yaml", "json", "console"], default="console") + parser.add_argument("--roles-dir", default=default_roles_dir, help="Roles directory") + + args = parser.parse_args() + + graphs = build_mappings(args.role, args.roles_dir, args.depth) + + for key in ALL_KEYS: + graph_data = graphs.get(key, {"nodes": [], "links": []}) + output_graph(graph_data, args.output, args.role, key) + + +if __name__ == "__main__": + main() diff --git a/build/lib/cli/build/inventory/__init__.py b/build/lib/cli/build/inventory/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/build/lib/cli/build/inventory/full.py b/build/lib/cli/build/inventory/full.py new file mode 100644 index 00000000..fb852367 --- /dev/null +++ b/build/lib/cli/build/inventory/full.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python3 +# cli/build/inventory/full.py + +import argparse +import sys +import os + +try: + from filter_plugins.get_all_invokable_apps import get_all_invokable_apps +except ImportError: + sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) + from filter_plugins.get_all_invokable_apps import get_all_invokable_apps + +import yaml +import json + +def build_group_inventory(apps, host): + """ + Build an Ansible inventory in which each application is a group containing the given host. + """ + groups = {app: {"hosts": [host]} for app in apps} + inventory = { + "all": { + "hosts": [host], + "children": {app: {} for app in apps}, + }, + **groups + } + return inventory + +def build_hostvar_inventory(apps, host): + """ + Alternative: Build an inventory where all invokable apps are set as a host variable (as a list). + """ + return { + "all": { + "hosts": [host], + }, + "_meta": { + "hostvars": { + host: { + "invokable_applications": apps + } + } + } + } + +def main(): + parser = argparse.ArgumentParser( + description='Build a dynamic Ansible inventory for a given host with all invokable applications.' 
+ ) + parser.add_argument( + '--host', + required=True, + help='Hostname to assign to all invokable application groups' + ) + parser.add_argument( + '-f', '--format', + choices=['json', 'yaml'], + default='yaml', + help='Output format (yaml [default], json)' + ) + parser.add_argument( + '--inventory-style', + choices=['group', 'hostvars'], + default='group', + help='Inventory style: group (default, one group per app) or hostvars (list as hostvar)' + ) + parser.add_argument( + '-c', '--categories-file', + default=os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'roles', 'categories.yml')), + help='Path to roles/categories.yml (default: roles/categories.yml at project root)' + ) + parser.add_argument( + '-r', '--roles-dir', + default=os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'roles')), + help='Path to roles/ directory (default: roles/ at project root)' + ) + parser.add_argument( + '-o', '--output', + help='Write output to file instead of stdout' + ) + parser.add_argument( + '-i', '--ignore', + action='append', + default=[], + help='Application ID(s) to ignore (can be specified multiple times or comma-separated)' + ) + args = parser.parse_args() + + try: + apps = get_all_invokable_apps( + categories_file=args.categories_file, + roles_dir=args.roles_dir + ) + except Exception as e: + sys.stderr.write(f"Error: {e}\n") + sys.exit(1) + + # Combine all ignore arguments into a flat set + ignore_ids = set() + for entry in args.ignore: + ignore_ids.update(i.strip() for i in entry.split(',') if i.strip()) + + if ignore_ids: + apps = [app for app in apps if app not in ignore_ids] + + # Build the requested inventory style + if args.inventory_style == 'group': + inventory = build_group_inventory(apps, args.host) + else: + inventory = build_hostvar_inventory(apps, args.host) + + # Output in the chosen format + if args.format == 'json': + output = json.dumps(inventory, indent=2) + else: + output = yaml.safe_dump(inventory, default_flow_style=False) + + if args.output: + with open(args.output, 'w') as f: + f.write(output) + else: + print(output) + +if __name__ == '__main__': + main() diff --git a/build/lib/cli/build/role_include.py b/build/lib/cli/build/role_include.py new file mode 100644 index 00000000..6586d98f --- /dev/null +++ b/build/lib/cli/build/role_include.py @@ -0,0 +1,224 @@ +#!/usr/bin/env python3 + +import os +import sys +import yaml +import argparse +from collections import defaultdict, deque + +def find_roles(roles_dir, prefixes=None): + """ + Find all roles in the given directory whose names start with + any of the provided prefixes. If prefixes is empty or None, + include all roles. 
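    For example (illustrative prefix), prefixes=["web-app-"] yields only roles such as
    roles/web-app-nextcloud, each returned as a (role_path, meta_file) tuple; roles
    without a meta/main.yml are skipped either way.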
+ """ + for entry in os.listdir(roles_dir): + if prefixes: + if not any(entry.startswith(pref) for pref in prefixes): + continue + path = os.path.join(roles_dir, entry) + meta_file = os.path.join(path, 'meta', 'main.yml') + if os.path.isdir(path) and os.path.isfile(meta_file): + yield path, meta_file + +def load_run_after(meta_file): + """Load the 'run_after' from the meta/main.yml of a role.""" + with open(meta_file, 'r') as f: + data = yaml.safe_load(f) or {} + return data.get('galaxy_info', {}).get('run_after', []) + +def load_application_id(role_path): + """Load the application_id from the vars/main.yml of the role.""" + vars_file = os.path.join(role_path, 'vars', 'main.yml') + if os.path.exists(vars_file): + with open(vars_file, 'r') as f: + data = yaml.safe_load(f) or {} + return data.get('application_id') + return None + +def build_dependency_graph(roles_dir, prefixes=None): + """ + Build a dependency graph where each key is a role name and + its value is a list of roles that depend on it. + Also return in_degree counts and the roles metadata map. + """ + graph = defaultdict(list) + in_degree = defaultdict(int) + roles = {} + + for role_path, meta_file in find_roles(roles_dir, prefixes): + run_after = load_run_after(meta_file) + application_id = load_application_id(role_path) + role_name = os.path.basename(role_path) + + roles[role_name] = { + 'role_name': role_name, + 'run_after': run_after, + 'application_id': application_id, + 'path': role_path + } + + for dependency in run_after: + graph[dependency].append(role_name) + in_degree[role_name] += 1 + + if role_name not in in_degree: + in_degree[role_name] = 0 + + return graph, in_degree, roles + +def find_cycle(roles): + """ + Detect a cycle in the run_after relations: + roles: dict mapping role_name -> { 'run_after': [...], ... } + Returns a list of role_names forming the cycle (with the start repeated at end), or None. + """ + visited = set() + stack = set() + + def dfs(node, path): + visited.add(node) + stack.add(node) + path.append(node) + for dep in roles.get(node, {}).get('run_after', []): + if dep not in visited: + res = dfs(dep, path) + if res: + return res + elif dep in stack: + idx = path.index(dep) + return path[idx:] + [dep] + stack.remove(node) + path.pop() + return None + + for role in roles: + if role not in visited: + cycle = dfs(role, []) + if cycle: + return cycle + return None + +def topological_sort(graph, in_degree, roles=None): + """ + Perform topological sort on the dependency graph. + If a cycle is detected, raise an Exception with detailed debug info. + """ + from collections import deque + + queue = deque([r for r, d in in_degree.items() if d == 0]) + sorted_roles = [] + local_in = dict(in_degree) + + while queue: + role = queue.popleft() + sorted_roles.append(role) + for nbr in graph.get(role, []): + local_in[nbr] -= 1 + if local_in[nbr] == 0: + queue.append(nbr) + + if len(sorted_roles) != len(in_degree): + # Something went wrong: likely a cycle + cycle = find_cycle(roles or {}) + unsorted = [r for r in in_degree if r not in sorted_roles] + + header = "❌ Dependency resolution failed" + if cycle: + reason = f"Circular dependency detected: {' -> '.join(cycle)}" + else: + reason = "Unresolved dependencies among roles (possible cycle or missing role)." 
+ + details = [] + if unsorted: + details.append("Unsorted roles and their declared run_after dependencies:") + for r in unsorted: + deps = roles.get(r, {}).get('run_after', []) + details.append(f" - {r} depends on {deps!r}") + + graph_repr = f"Full dependency graph: {dict(graph)!r}" + + raise Exception("\n".join([header, reason] + details + [graph_repr])) + + return sorted_roles + +def print_dependency_tree(graph): + """Print the dependency tree visually on the console.""" + def print_node(role, indent=0): + print(" " * indent + role) + for dep in graph.get(role, []): + print_node(dep, indent + 1) + + all_roles = set(graph.keys()) + dependent = {r for deps in graph.values() for r in deps} + roots = all_roles - dependent + + for root in roots: + print_node(root) + +def gen_condi_role_incl(roles_dir, prefixes=None): + """ + Generate playbook entries based on the sorted order. + Raises a ValueError if application_id is missing. + """ + graph, in_degree, roles = build_dependency_graph(roles_dir, prefixes) + sorted_names = topological_sort(graph, in_degree, roles) + + entries = [] + for role_name in sorted_names: + role = roles[role_name] + + if role.get('application_id') is None: + vars_file = os.path.join(role['path'], 'vars', 'main.yml') + raise ValueError(f"'application_id' missing in {vars_file}") + + app_id = role['application_id'] + entries.append( + f"- name: setup {app_id}\n" + f" when: ('{app_id}' | application_allowed(group_names, allowed_applications))\n" + f" include_role:\n" + f" name: {role_name}\n" + ) + entries.append( + f"- name: flush handlers after {app_id}\n" + f" meta: flush_handlers\n" + ) + + return entries + +def main(): + parser = argparse.ArgumentParser( + description='Generate an Ansible playbook include file from Docker roles, sorted by run_after order.' + ) + parser.add_argument('roles_dir', help='Path to directory containing role folders') + parser.add_argument( + '-p', '--prefix', + action='append', + help='Only include roles whose names start with any of these prefixes; can be specified multiple times' + ) + parser.add_argument('-o', '--output', default=None, + help='Output file path (default: stdout)') + parser.add_argument('-t', '--tree', action='store_true', + help='Display the dependency tree of roles and exit') + + args = parser.parse_args() + prefixes = args.prefix or [] + + if args.tree: + graph, _, _ = build_dependency_graph(args.roles_dir, prefixes) + print_dependency_tree(graph) + sys.exit(0) + + entries = gen_condi_role_incl(args.roles_dir, prefixes) + output = ''.join(entries) + + if args.output: + os.makedirs(os.path.dirname(args.output), exist_ok=True) + with open(args.output, 'w') as f: + f.write(output) + print(f"Playbook entries written to {args.output}") + else: + print(output) + +if __name__ == '__main__': + main() diff --git a/build/lib/cli/build/roles_list.py b/build/lib/cli/build/roles_list.py new file mode 100644 index 00000000..40a2bb4a --- /dev/null +++ b/build/lib/cli/build/roles_list.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python3 +""" +Generate a JSON file listing all Ansible role directories. 
+ +Usage: + python roles_list.py [--roles-dir path/to/roles] [--output path/to/roles/list.json | console] +""" +import os +import json +import argparse + + +def find_roles(roles_dir: str): + """Return sorted list of role names under roles_dir.""" + return sorted([ + entry for entry in os.listdir(roles_dir) + if os.path.isdir(os.path.join(roles_dir, entry)) + ]) + + +def write_roles_list(roles, out_file): + """Write the list of roles to out_file as JSON.""" + os.makedirs(os.path.dirname(out_file), exist_ok=True) + with open(out_file, 'w', encoding='utf-8') as f: + json.dump(roles, f, indent=2) + print(f"Wrote roles list to {out_file}") + + +def main(): + # Determine default roles_dir relative to this script: ../../.. -> roles + script_dir = os.path.dirname(os.path.abspath(__file__)) + default_roles_dir = os.path.abspath( + os.path.join(script_dir, '..', '..', 'roles') + ) + default_output = os.path.join(default_roles_dir, 'list.json') + + parser = argparse.ArgumentParser(description='Generate roles/list.json') + parser.add_argument( + '--roles-dir', '-r', + default=default_roles_dir, + help=f'Directory containing role subfolders (default: {default_roles_dir})' + ) + parser.add_argument( + '--output', '-o', + default=default_output, + help=( + 'Output path for roles list JSON ' + '(or "console" to print to stdout, default: %(default)s)' + ) + ) + args = parser.parse_args() + + if not os.path.isdir(args.roles_dir): + parser.error(f"Roles directory not found: {args.roles_dir}") + + roles = find_roles(args.roles_dir) + + if args.output.lower() == 'console': + # Print JSON to stdout + print(json.dumps(roles, indent=2)) + else: + write_roles_list(roles, args.output) + +if __name__ == '__main__': + main() diff --git a/build/lib/cli/build/tree.py b/build/lib/cli/build/tree.py new file mode 100644 index 00000000..0288a391 --- /dev/null +++ b/build/lib/cli/build/tree.py @@ -0,0 +1,203 @@ +#!/usr/bin/env python3 +import os +import argparse +import json +from typing import Dict, Any, Optional, Iterable, Tuple +from concurrent.futures import ProcessPoolExecutor, as_completed + +from cli.build.graph import build_mappings, output_graph + + +def find_roles(roles_dir: str) -> Iterable[Tuple[str, str]]: + """ + Yield (role_name, role_path) for all roles in the given roles_dir. + """ + for entry in os.listdir(roles_dir): + path = os.path.join(roles_dir, entry) + if os.path.isdir(path): + yield entry, path + + +def process_role( + role_name: str, + roles_dir: str, + depth: int, + shadow_folder: Optional[str], + output: str, + preview: bool, + verbose: bool, + no_include_role: bool, # currently unused, kept for CLI compatibility + no_import_role: bool, # currently unused, kept for CLI compatibility + no_dependencies: bool, # currently unused, kept for CLI compatibility + no_run_after: bool, # currently unused, kept for CLI compatibility +) -> None: + """ + Worker function: build graphs and (optionally) write meta/tree.json for a single role. + + Note: + This version no longer adds a custom top-level "dependencies" bucket. + Only the graphs returned by build_mappings() are written. 
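    The written meta/tree.json therefore maps each graph key produced by
    build_mappings() (e.g. "include_role_to", "run_after_from") to a
    {"nodes": [...], "links": [...]} object.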
+ """ + role_path = os.path.join(roles_dir, role_name) + + if verbose: + print(f"[worker] Processing role: {role_name}") + + # Build the full graph structure (all dep types / directions) for this role + graphs: Dict[str, Any] = build_mappings( + start_role=role_name, + roles_dir=roles_dir, + max_depth=depth, + ) + + # Preview mode: dump graphs to console instead of writing tree.json + if preview: + for key, data in graphs.items(): + if verbose: + print(f"[worker] Previewing graph '{key}' for role '{role_name}'") + # In preview mode we always output as console + output_graph(data, "console", role_name, key) + return + + # Non-preview: write meta/tree.json for this role + if shadow_folder: + tree_file = os.path.join(shadow_folder, role_name, "meta", "tree.json") + else: + tree_file = os.path.join(role_path, "meta", "tree.json") + + os.makedirs(os.path.dirname(tree_file), exist_ok=True) + with open(tree_file, "w", encoding="utf-8") as f: + json.dump(graphs, f, indent=2) + + print(f"Wrote {tree_file}") + + +def main(): + script_dir = os.path.dirname(os.path.abspath(__file__)) + default_roles_dir = os.path.abspath(os.path.join(script_dir, "..", "..", "roles")) + + parser = argparse.ArgumentParser( + description="Generate all graphs for each role and write meta/tree.json" + ) + parser.add_argument( + "-d", + "--role_dir", + default=default_roles_dir, + help=f"Path to roles directory (default: {default_roles_dir})", + ) + parser.add_argument( + "-D", + "--depth", + type=int, + default=0, + help="Max recursion depth (>0) or <=0 to stop on cycle", + ) + parser.add_argument( + "-o", + "--output", + choices=["yaml", "json", "console"], + default="json", + help="Output format for preview mode", + ) + parser.add_argument( + "-p", + "--preview", + action="store_true", + help="Preview graphs to console instead of writing files", + ) + parser.add_argument( + "-s", + "--shadow-folder", + type=str, + default=None, + help="If set, writes tree.json to this shadow folder instead of the role's actual meta/ folder", + ) + parser.add_argument( + "-v", + "--verbose", + action="store_true", + help="Enable verbose logging", + ) + + # Toggles (kept for CLI compatibility, currently only meaningful for future extensions) + parser.add_argument( + "--no-include-role", + action="store_true", + help="Reserved: do not include include_role in custom dependency bucket", + ) + parser.add_argument( + "--no-import-role", + action="store_true", + help="Reserved: do not include import_role in custom dependency bucket", + ) + parser.add_argument( + "--no-dependencies", + action="store_true", + help="Reserved: do not include meta dependencies in custom dependency bucket", + ) + parser.add_argument( + "--no-run-after", + action="store_true", + help="Reserved: do not include run_after in custom dependency bucket", + ) + + args = parser.parse_args() + + if args.verbose: + print(f"Roles directory: {args.role_dir}") + print(f"Max depth: {args.depth}") + print(f"Output format: {args.output}") + print(f"Preview mode: {args.preview}") + print(f"Shadow folder: {args.shadow_folder}") + + roles = [role_name for role_name, _ in find_roles(args.role_dir)] + + # For preview, run sequentially to avoid completely interleaved output. 
+ if args.preview: + for role_name in roles: + process_role( + role_name=role_name, + roles_dir=args.role_dir, + depth=args.depth, + shadow_folder=args.shadow_folder, + output=args.output, + preview=True, + verbose=args.verbose, + no_include_role=args.no_include_role, + no_import_role=args.no_import_role, + no_dependencies=args.no_dependencies, + no_run_after=args.no_run_after, + ) + return + + # Non-preview: roles are processed in parallel + with ProcessPoolExecutor() as executor: + futures = { + executor.submit( + process_role, + role_name, + args.role_dir, + args.depth, + args.shadow_folder, + args.output, + False, # preview=False in parallel mode + args.verbose, + args.no_include_role, + args.no_import_role, + args.no_dependencies, + args.no_run_after, + ): role_name + for role_name in roles + } + + for future in as_completed(futures): + role_name = futures[future] + try: + future.result() + except Exception as exc: + # Do not crash the whole run; report the failing role instead. + print(f"[ERROR] Role '{role_name}' failed: {exc}") + + +if __name__ == "__main__": + main() diff --git a/build/lib/cli/create/__init__.py b/build/lib/cli/create/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/build/lib/cli/create/credentials.py b/build/lib/cli/create/credentials.py new file mode 100644 index 00000000..f972d52c --- /dev/null +++ b/build/lib/cli/create/credentials.py @@ -0,0 +1,317 @@ +#!/usr/bin/env python3 +""" +Selectively add & vault NEW credentials in your inventory, preserving comments +and formatting. Existing values are left untouched unless --force is used. + +Usage example: + infinito create credentials \ + --role-path roles/web-app-akaunting \ + --inventory-file host_vars/echoserver.yml \ + --vault-password-file .pass/echoserver.txt \ + --set credentials.database_password=mysecret + +With snippet mode (no file changes, just YAML output): + + infinito create credentials \ + --role-path roles/web-app-akaunting \ + --inventory-file host_vars/echoserver.yml \ + --vault-password-file .pass/echoserver.txt \ + --snippet +""" + +import argparse +import sys +from pathlib import Path +from typing import Dict, Any, Union + +from ruamel.yaml import YAML +from ruamel.yaml.comments import CommentedMap + +from module_utils.manager.inventory import InventoryManager +from module_utils.handler.vault import VaultHandler # uses your existing handler + + +# ---------- helpers ---------- + +def ask_for_confirmation(key: str) -> bool: + """Prompt the user for confirmation to overwrite an existing value.""" + confirmation = input( + f"Are you sure you want to overwrite the value for '{key}'? (y/n): " + ).strip().lower() + return confirmation == 'y' + + +def ensure_map(node: CommentedMap, key: str) -> CommentedMap: + """ + Ensure node[key] exists and is a mapping (CommentedMap) for round-trip safety. + """ + if key not in node or not isinstance(node.get(key), CommentedMap): + node[key] = CommentedMap() + return node[key] + + +def _is_ruamel_vault(val: Any) -> bool: + """Detect if a ruamel scalar already carries the !vault tag.""" + try: + return getattr(val, 'tag', None) == '!vault' + except Exception: + return False + + +def _is_vault_encrypted(val: Any) -> bool: + """ + Detect if value is already a vault string or a ruamel !vault scalar. + Accept both '$ANSIBLE_VAULT' and '!vault' markers. 
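    For example, a ruamel scalar tagged !vault and a plain string starting with
    "$ANSIBLE_VAULT;1.1;AES256" are both treated as already encrypted, while
    ordinary plaintext returns False.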
+ """ + if _is_ruamel_vault(val): + return True + if isinstance(val, str) and ("$ANSIBLE_VAULT" in val or "!vault" in val): + return True + return False + + +def _vault_body(text: str) -> str: + """ + Return only the vault body starting from the first line that contains + '$ANSIBLE_VAULT'. If not found, return the original text. + Also strips any leading '!vault |' header if present. + """ + lines = text.splitlines() + for i, ln in enumerate(lines): + if "$ANSIBLE_VAULT" in ln: + return "\n".join(lines[i:]) + return text + + +def _make_vault_scalar_from_text(text: str) -> Any: + """ + Build a ruamel object representing a literal block scalar tagged with !vault + by parsing a tiny YAML snippet. This avoids depending on yaml_set_tag(). + """ + body = _vault_body(text) + indented = " " + body.replace("\n", "\n ") # proper block scalar indentation + snippet = f"v: !vault |\n{indented}\n" + y = YAML(typ="rt") + return y.load(snippet)["v"] + + +def to_vault_block(vault_handler: VaultHandler, value: Union[str, Any], label: str) -> Any: + """ + Return a ruamel scalar tagged as !vault. If the input value is already + vault-encrypted (string contains $ANSIBLE_VAULT or is a !vault scalar), reuse/wrap. + Otherwise, encrypt plaintext via ansible-vault. + + Special rule: + - Empty strings ("") are NOT encrypted and are returned as plain "". + """ + # Empty strings should not be encrypted + if isinstance(value, str) and value == "": + return "" + + # Already a ruamel !vault scalar β†’ reuse + if _is_ruamel_vault(value): + return value + + # Already an encrypted string (may include '!vault |' or just the header) + if isinstance(value, str) and ("$ANSIBLE_VAULT" in value or "!vault" in value): + return _make_vault_scalar_from_text(value) + + # Plaintext β†’ encrypt now + snippet = vault_handler.encrypt_string(str(value), label) + return _make_vault_scalar_from_text(snippet) + +def parse_overrides(pairs: list[str]) -> Dict[str, str]: + """ + Parse --set key=value pairs into a dict. + Supports both 'credentials.key=val' and 'key=val' (short) forms. + """ + out: Dict[str, str] = {} + for pair in pairs: + k, v = pair.split("=", 1) + out[k.strip()] = v.strip() + return out + + +# ---------- main ---------- + +def main() -> int: + parser = argparse.ArgumentParser( + description="Selectively add & vault NEW credentials in your inventory, preserving comments/formatting." + ) + parser.add_argument("--role-path", required=True, help="Path to your role") + parser.add_argument("--inventory-file", required=True, help="Host vars file to update") + parser.add_argument("--vault-password-file", required=True, help="Vault password file") + parser.add_argument( + "--set", nargs="*", default=[], + help="Override values key[.subkey]=VALUE (applied to NEW keys; with --force also to existing)" + ) + parser.add_argument( + "-f", "--force", action="store_true", + help="Allow overrides to replace existing values (will ask per key unless combined with --yes)" + ) + parser.add_argument( + "-y", "--yes", action="store_true", + help="Non-interactive: assume 'yes' for all overwrite confirmations when --force is used" + ) + parser.add_argument( + "--snippet", + action="store_true", + help=( + "Do not modify the inventory file. Instead, print a YAML snippet with " + "the generated credentials to stdout. The snippet contains only the " + "application's credentials (and ansible_become_password if provided)." 
+ ), + ) + parser.add_argument( + "--allow-empty-plain", + action="store_true", + help=( + "Allow 'plain' credentials in the schema without an explicit --set override. " + "Missing plain values will be set to an empty string before encryption." + ), + ) + args = parser.parse_args() + + overrides = parse_overrides(args.set) + + # Initialize inventory manager (provides schema + app_id + vault) + manager = InventoryManager( + role_path=Path(args.role_path), + inventory_path=Path(args.inventory_file), + vault_pw=args.vault_password_file, + overrides=overrides, + allow_empty_plain=args.allow_empty_plain, + ) + + yaml_rt = YAML(typ="rt") + yaml_rt.preserve_quotes = True + + # Get schema-applied structure (defaults etc.) for *non-destructive* merge + schema_inventory: Dict[str, Any] = manager.apply_schema() + schema_apps = schema_inventory.get("applications", {}) + schema_app_block = schema_apps.get(manager.app_id, {}) + schema_creds = schema_app_block.get("credentials", {}) if isinstance(schema_app_block, dict) else {} + + # ------------------------------------------------------------------------- + # SNIPPET MODE: only build a YAML fragment and print to stdout, no file I/O + # ------------------------------------------------------------------------- + if args.snippet: + # Build a minimal structure: + # applications: + # : + # credentials: + # key: !vault | + # ... + # ansible_become_password: !vault | ... + snippet_data = CommentedMap() + apps_snip = ensure_map(snippet_data, "applications") + app_block_snip = ensure_map(apps_snip, manager.app_id) + creds_snip = ensure_map(app_block_snip, "credentials") + + for key, default_val in schema_creds.items(): + # Priority: --set exact key β†’ default from schema β†’ empty string + ov = overrides.get(f"credentials.{key}", None) + if ov is None: + ov = overrides.get(key, None) + + if ov is not None: + value_for_key: Union[str, Any] = ov + else: + if _is_vault_encrypted(default_val): + creds_snip[key] = to_vault_block(manager.vault_handler, default_val, key) + continue + value_for_key = "" if default_val is None else str(default_val) + + creds_snip[key] = to_vault_block(manager.vault_handler, value_for_key, key) + + # Optional ansible_become_password only if provided via overrides + if "ansible_become_password" in overrides: + snippet_data["ansible_become_password"] = to_vault_block( + manager.vault_handler, + overrides["ansible_become_password"], + "ansible_become_password", + ) + + yaml_rt.dump(snippet_data, sys.stdout) + return 0 + + # ------------------------------------------------------------------------- + # DEFAULT MODE: modify the inventory file on disk (previous behavior) + # ------------------------------------------------------------------------- + + # 1) Load existing inventory with ruamel (round-trip) + with open(args.inventory_file, "r", encoding="utf-8") as f: + data = yaml_rt.load(f) # CommentedMap or None + if data is None: + data = CommentedMap() + + # 2) Ensure structural path exists + apps = ensure_map(data, "applications") + app_block = ensure_map(apps, manager.app_id) + creds = ensure_map(app_block, "credentials") + + # 3) Add ONLY missing credential keys (respect existing values) + newly_added_keys = set() + for key, default_val in schema_creds.items(): + if key in creds: + # Existing β†’ do not touch (preserve plaintext/vault/formatting/comments) + continue + + # Value to use for the new key + # Priority: --set exact key β†’ default from schema β†’ empty string + ov = overrides.get(f"credentials.{key}", None) + if ov is None: + ov 
= overrides.get(key, None) + + if ov is not None: + value_for_new_key: Union[str, Any] = ov + else: + if _is_vault_encrypted(default_val): + # Schema already provides a vault value β†’ take it as-is + creds[key] = to_vault_block(manager.vault_handler, default_val, key) + newly_added_keys.add(key) + continue + value_for_new_key = "" if default_val is None else str(default_val) + + # Insert as !vault literal (encrypt if needed) + creds[key] = to_vault_block(manager.vault_handler, value_for_new_key, key) + newly_added_keys.add(key) + + # 4) ansible_become_password: only add if missing; + # never rewrite an existing one unless --force (+ confirm/--yes) and override provided. + if "ansible_become_password" not in data: + val = overrides.get("ansible_become_password", None) + if val is not None: + data["ansible_become_password"] = to_vault_block( + manager.vault_handler, val, "ansible_become_password" + ) + else: + if args.force and "ansible_become_password" in overrides: + do_overwrite = args.yes or ask_for_confirmation("ansible_become_password") + if do_overwrite: + data["ansible_become_password"] = to_vault_block( + manager.vault_handler, overrides["ansible_become_password"], "ansible_become_password" + ) + + # 5) Overrides for existing credential keys (only with --force) + if args.force: + for ov_key, ov_val in overrides.items(): + # Accept both 'credentials.key' and bare 'key' + key = ov_key.split(".", 1)[1] if ov_key.startswith("credentials.") else ov_key + if key in creds: + # If we just added it in this run, don't ask again or rewrap + if key in newly_added_keys: + continue + if args.yes or ask_for_confirmation(key): + creds[key] = to_vault_block(manager.vault_handler, ov_val, key) + + # 6) Write back with ruamel (preserve formatting & comments) + with open(args.inventory_file, "w", encoding="utf-8") as f: + yaml_rt.dump(data, f) + + print(f"βœ… Added new credentials without touching existing formatting/comments β†’ {args.inventory_file}") + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/build/lib/cli/create/inventory.py b/build/lib/cli/create/inventory.py new file mode 100644 index 00000000..7b7d2c5a --- /dev/null +++ b/build/lib/cli/create/inventory.py @@ -0,0 +1,1173 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +""" +Create or update a full Ansible inventory for a single host and automatically +generate credentials for all selected applications. + +This subcommand: + +1. Uses `build inventory full` to generate a dynamic inventory for the given + host containing all invokable applications. +2. Optionally filters the resulting groups by: + - --include: only listed application_ids are kept + - --exclude: listed application_ids are removed + - --roles: legacy include filter (used only if --include/--exclude are not set) +3. Merges the generated inventory into an existing inventory file, without + deleting or overwriting unrelated entries. +4. Ensures `host_vars/.yml` exists and stores base settings such as: + - PRIMARY_DOMAIN (optional) + - SSL_ENABLED + - networks.internet.ip4 + - networks.internet.ip6 + Existing keys are preserved (only missing keys are added). +5. For every application_id in the final inventory, uses: + - `meta/applications/role_name.py` to resolve the role path + - `create/credentials.py --snippet` to generate credentials YAML + snippets, and merges all snippets into host_vars in a single write. +6. 
If --vault-password-file is not provided, a file `.password` is created + in the inventory directory (if missing) and used as vault password file. +""" + +import argparse +import subprocess +import sys +from pathlib import Path +from typing import Dict, Any, List, Set, Optional, NoReturn +import concurrent.futures +import os +import secrets +import string +import json + +try: + import yaml +except ImportError: # pragma: no cover + raise SystemExit("Please `pip install pyyaml` to use `infinito create inventory`.") + +try: + from ruamel.yaml import YAML + from ruamel.yaml.comments import CommentedMap +except ImportError: # pragma: no cover + raise SystemExit("Please `pip install ruamel.yaml` to use `infinito create inventory`.") + +from module_utils.handler.vault import VaultHandler + +# --------------------------------------------------------------------------- +# Generic helpers +# --------------------------------------------------------------------------- + +def run_subprocess( + cmd: List[str], + capture_output: bool = False, + env: Optional[Dict[str, str]] = None, +) -> subprocess.CompletedProcess: + """ + Run a subprocess command and either stream output or capture it. + Raise SystemExit on non-zero return code. + """ + if capture_output: + result = subprocess.run(cmd, text=True, capture_output=True, env=env) + else: + result = subprocess.run(cmd, text=True, env=env) + if result.returncode != 0: + msg = f"Command failed: {' '.join(str(c) for c in cmd)}\n" + if capture_output: + msg += f"STDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}\n" + raise SystemExit(msg) + return result + +def deep_update_commented_map(target: CommentedMap, updates: Dict[str, Any]) -> None: + """ + Recursively merge updates into a ruamel CommentedMap. + + - If a value in updates is a mapping, it is merged into the existing mapping. + - Non-mapping values overwrite existing values. + """ + for key, value in updates.items(): + if isinstance(value, dict): + existing = target.get(key) + if not isinstance(existing, CommentedMap): + existing = CommentedMap() + target[key] = existing + deep_update_commented_map(existing, value) + else: + target[key] = value + + +def apply_vars_overrides(host_vars_file: Path, json_str: str) -> None: + """ + Apply JSON overrides to host_vars/.yml. + + Behavior: + - json_str must contain a JSON object at the top level. + - All keys in that object (possibly nested) are merged into the + existing document. + - Existing values are overwritten by values from the JSON. + - Non-existing keys are created. 
+ + Example: + --vars '{"SSL_ENABLED": false, "networks": {"internet": {"ip4": "10.0.0.10"}}}' + """ + try: + overrides = json.loads(json_str) + except json.JSONDecodeError as exc: + raise SystemExit(f"Invalid JSON passed to --vars: {exc}") from exc + + if not isinstance(overrides, dict): + raise SystemExit("JSON for --vars must be an object at the top level.") + + yaml_rt = YAML(typ="rt") + yaml_rt.preserve_quotes = True + + if host_vars_file.exists(): + with host_vars_file.open("r", encoding="utf-8") as f: + doc = yaml_rt.load(f) + if doc is None: + doc = CommentedMap() + else: + doc = CommentedMap() + + if not isinstance(doc, CommentedMap): + tmp = CommentedMap() + for k, v in dict(doc).items(): + tmp[k] = v + doc = tmp + + deep_update_commented_map(doc, overrides) + + host_vars_file.parent.mkdir(parents=True, exist_ok=True) + with host_vars_file.open("w", encoding="utf-8") as f: + yaml_rt.dump(doc, f) + + +def build_env_with_project_root(project_root: Path) -> Dict[str, str]: + """ + Return an environment dict where PYTHONPATH includes the project root. + This makes `module_utils` and other top-level packages importable when + running project scripts as subprocesses. + """ + env = os.environ.copy() + root_str = str(project_root) + existing = env.get("PYTHONPATH") + if existing: + if root_str not in existing.split(os.pathsep): + env["PYTHONPATH"] = root_str + os.pathsep + existing + else: + env["PYTHONPATH"] = root_str + return env + +def ensure_become_password( + host_vars_file: Path, + vault_password_file: Path, + become_password: Optional[str], +) -> None: + """ + Ensure ansible_become_password exists and is stored as a vaulted string + according to the following rules: + + - If become_password is provided: + Encrypt it with Ansible Vault and set/overwrite ansible_become_password. + - If become_password is not provided and ansible_become_password already exists: + Do nothing (respect the existing value, even if it is plain text). + - If become_password is not provided and ansible_become_password is missing: + Generate a random password, encrypt it, and set ansible_become_password. + + The encryption is done via module_utils.handler.vault.VaultHandler so that the + resulting value is a !vault tagged scalar in host_vars. 
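    The resulting host_vars entry then looks roughly like (vault body elided):

        ansible_become_password: !vault |
            $ANSIBLE_VAULT;1.1;AES256
            ...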
+ """ + yaml_rt = YAML(typ="rt") + yaml_rt.preserve_quotes = True + + # Load existing host_vars document (created earlier by ensure_host_vars_file) + if host_vars_file.exists(): + with host_vars_file.open("r", encoding="utf-8") as f: + doc = yaml_rt.load(f) + if doc is None: + doc = CommentedMap() + else: + doc = CommentedMap() + + if not isinstance(doc, CommentedMap): + tmp = CommentedMap() + for k, v in dict(doc).items(): + tmp[k] = v + doc = tmp + + current_value = doc.get("ansible_become_password") + + # Case 1: no explicit password provided, but value already exists β†’ respect it + if become_password is None and current_value is not None: + return + + # Case 2: explicit password provided β†’ use it + # Case 3: no password provided and no value present β†’ generate a random one + if become_password is not None: + plain_password = become_password + else: + plain_password = generate_random_password() + + # Use VaultHandler to encrypt the password via ansible-vault encrypt_string + handler = VaultHandler(str(vault_password_file)) + snippet_text = handler.encrypt_string(plain_password, "ansible_become_password") + + # Parse the snippet with ruamel.yaml to get the tagged !vault scalar node + snippet_yaml = YAML(typ="rt") + encrypted_doc = snippet_yaml.load(snippet_text) or CommentedMap() + encrypted_value = encrypted_doc.get("ansible_become_password") + if encrypted_value is None: + raise SystemExit( + "Failed to parse 'ansible_become_password' from ansible-vault output." + ) + + # Store the vaulted value in host_vars + doc["ansible_become_password"] = encrypted_value + + with host_vars_file.open("w", encoding="utf-8") as f: + yaml_rt.dump(doc, f) + + +def detect_project_root() -> Path: + """ + Detect project root assuming this file is at: /cli/create/inventory.py + """ + here = Path(__file__).resolve() + # .../repo/cli/create/inventory.py β†’ parents[2] == repo + return here.parents[2] + + +def load_yaml(path: Path) -> Dict[str, Any]: + if not path.exists(): + return {} + with path.open("r", encoding="utf-8") as f: + data = yaml.safe_load(f) or {} + if not isinstance(data, dict): + raise SystemExit(f"Expected a mapping at top-level in {path}, got {type(data)}") + return data + + +def dump_yaml(path: Path, data: Dict[str, Any]) -> None: + with path.open("w", encoding="utf-8") as f: + yaml.safe_dump(data, f, sort_keys=False, default_flow_style=False) + + +def parse_roles_list(raw_roles: Optional[List[str]]) -> Optional[Set[str]]: + """ + Parse a list of IDs supplied on the CLI. Supports: + --include web-app-nextcloud web-app-mastodon + --include web-app-nextcloud,web-app-mastodon + Same logic is reused for --exclude and --roles. + """ + if not raw_roles: + return None + result: Set[str] = set() + for token in raw_roles: + token = token.strip() + if not token: + continue + # Allow comma-separated tokens as well + for part in token.split(","): + part = part.strip() + if part: + result.add(part) + return result + + +def generate_random_password(length: int = 64) -> str: + """ + Generate a random password using ASCII letters and digits. 
+ """ + alphabet = string.ascii_letters + string.digits + return "".join(secrets.choice(alphabet) for _ in range(length)) + + +# --------------------------------------------------------------------------- +# Inventory generation (servers.yml via build/inventory/full.py) +# --------------------------------------------------------------------------- + +def generate_dynamic_inventory( + host: str, + roles_dir: Path, + categories_file: Path, + tmp_inventory: Path, + project_root: Path, +) -> Dict[str, Any]: + """ + Call `cli/build/inventory/full.py` directly to generate a dynamic inventory + YAML for the given host and return it as a Python dict. + """ + script = project_root / "cli" / "build" / "inventory" / "full.py" + env = build_env_with_project_root(project_root) + cmd = [ + sys.executable, + str(script), + "--host", host, + "--format", "yaml", + "--inventory-style", "group", + "-c", str(categories_file), + "-r", str(roles_dir), + "-o", str(tmp_inventory), + ] + run_subprocess(cmd, capture_output=False, env=env) + data = load_yaml(tmp_inventory) + tmp_inventory.unlink(missing_ok=True) + return data + + +def _filter_inventory_children( + inv_data: Dict[str, Any], + predicate, +) -> Dict[str, Any]: + """ + Generic helper: keep only children for which predicate(group_name, group_data) is True. + """ + all_block = inv_data.get("all", {}) + children = all_block.get("children", {}) or {} + + filtered_children: Dict[str, Any] = {} + for group_name, group_data in children.items(): + if predicate(group_name, group_data): + filtered_children[group_name] = group_data + + new_all = dict(all_block) + new_all["children"] = filtered_children + return {"all": new_all} + + +def filter_inventory_by_roles(inv_data: Dict[str, Any], roles_filter: Set[str]) -> Dict[str, Any]: + """ + Legacy: keep only groups whose names are in roles_filter. + """ + return _filter_inventory_children( + inv_data, + lambda group_name, _group_data: group_name in roles_filter, + ) + + +def filter_inventory_by_include(inv_data: Dict[str, Any], include_set: Set[str]) -> Dict[str, Any]: + """ + Keep only groups whose names are in include_set. + """ + return _filter_inventory_children( + inv_data, + lambda group_name, _group_data: group_name in include_set, + ) + + +def filter_inventory_by_ignore(inv_data: Dict[str, Any], ignore_set: Set[str]) -> Dict[str, Any]: + """ + Keep all groups except those whose names are in ignore_set. + """ + return _filter_inventory_children( + inv_data, + lambda group_name, _group_data: group_name not in ignore_set, + ) + + +def merge_inventories( + base: Dict[str, Any], + new: Dict[str, Any], + host: str, +) -> Dict[str, Any]: + """ + Merge `new` inventory into `base` inventory without deleting any + existing groups/hosts/vars. 
+ + For each group in `new`: + - ensure the group exists in `base` + - ensure `hosts` exists + - ensure the given `host` is present in that group's `hosts` + (keep existing hosts and host vars untouched) + """ + base_all = base.setdefault("all", {}) + base_children = base_all.setdefault("children", {}) + + new_all = new.get("all", {}) + new_children = new_all.get("children", {}) or {} + + for group_name, group_data in new_children.items(): + # Ensure group exists in base + base_group = base_children.setdefault(group_name, {}) + base_hosts = base_group.setdefault("hosts", {}) + + # Try to propagate host vars from new inventory if they exist + new_hosts = (group_data or {}).get("hosts", {}) or {} + host_vars = {} + if isinstance(new_hosts, dict) and host in new_hosts: + host_vars = new_hosts.get(host) or {} + + # Ensure the target host exists in this group + if host not in base_hosts: + base_hosts[host] = host_vars or {} + + return base + + +# --------------------------------------------------------------------------- +# host_vars helpers +# --------------------------------------------------------------------------- + +def ensure_host_vars_file( + host_vars_file: Path, + host: str, + primary_domain: Optional[str], + ssl_disabled: bool, + ip4: str, + ip6: str, +) -> None: + """ + Ensure host_vars/.yml exists and contains base settings. + + Important: Existing keys are NOT overwritten. Only missing keys are added: + - PRIMARY_DOMAIN (only if primary_domain is provided) + - SSL_ENABLED (true by default, false if --ssl-disabled is used) + - networks.internet.ip4 + - networks.internet.ip6 + + Uses ruamel.yaml so that custom tags like !vault are preserved and do not + break parsing (unlike PyYAML safe_load). + """ + yaml_rt = YAML(typ="rt") + yaml_rt.preserve_quotes = True + + if host_vars_file.exists(): + with host_vars_file.open("r", encoding="utf-8") as f: + data = yaml_rt.load(f) + if data is None: + data = CommentedMap() + else: + data = CommentedMap() + + if not isinstance(data, CommentedMap): + tmp = CommentedMap() + for k, v in dict(data).items(): + tmp[k] = v + data = tmp + + + # Ensure local Ansible connection settings for local hosts. + # This avoids SSH in CI containers (e.g. GitHub Actions) where no ssh client exists + # and we want Ansible to execute tasks directly on the controller. + local_hosts = {"localhost", "127.0.0.1", "::1"} + + if host in local_hosts: + # Only set if not already defined, to avoid overwriting manual settings. 
+ if "ansible_connection" not in data: + data["ansible_connection"] = "local" + + # Only set defaults; do NOT override existing values + if primary_domain is not None and "PRIMARY_DOMAIN" not in data: + data["PRIMARY_DOMAIN"] = primary_domain + + if "SSL_ENABLED" not in data: + # By default SSL is enabled; --ssl-disabled flips this to false + data["SSL_ENABLED"] = not ssl_disabled + + # networks.internet.ip4 / ip6 + networks = data.get("networks") + if not isinstance(networks, CommentedMap): + networks = CommentedMap() + data["networks"] = networks + + internet = networks.get("internet") + if not isinstance(internet, CommentedMap): + internet = CommentedMap() + networks["internet"] = internet + + if "ip4" not in internet: + internet["ip4"] = ip4 + if "ip6" not in internet: + internet["ip6"] = ip6 + + host_vars_file.parent.mkdir(parents=True, exist_ok=True) + with host_vars_file.open("w", encoding="utf-8") as f: + yaml_rt.dump(data, f) + + +def ensure_ruamel_map(node: CommentedMap, key: str) -> CommentedMap: + """ + Ensure node[key] exists and is a mapping (CommentedMap). + """ + if key not in node or not isinstance(node.get(key), CommentedMap): + node[key] = CommentedMap() + return node[key] + + +def get_path_administrator_home_from_group_vars(project_root: Path) -> str: + """ + Read PATH_ADMINISTRATOR_HOME from group_vars/all/06_paths.yml. + + Expected layout (relative to project_root): + + group_vars/ + all/ + 06_paths.yml + + If the file or variable is missing, fall back to '/home/administrator/' + and emit a warning to stderr. + """ + paths_file = project_root / "group_vars" / "all" / "06_paths.yml" + default_path = "/home/administrator/" + + if not paths_file.exists(): + print( + f"[WARN] group_vars paths file not found: {paths_file}. " + f"Falling back to PATH_ADMINISTRATOR_HOME={default_path}", + file=sys.stderr, + ) + return default_path + + try: + with paths_file.open("r", encoding="utf-8") as f: + data = yaml.safe_load(f) or {} + except Exception as exc: # pragma: no cover + print( + f"[WARN] Failed to load {paths_file}: {exc}. " + f"Falling back to PATH_ADMINISTRATOR_HOME={default_path}", + file=sys.stderr, + ) + return default_path + + value = data.get("PATH_ADMINISTRATOR_HOME", default_path) + if not isinstance(value, str) or not value: + print( + f"[WARN] PATH_ADMINISTRATOR_HOME missing or invalid in {paths_file}. " + f"Falling back to {default_path}", + file=sys.stderr, + ) + return default_path + + # Normalize: ensure it ends with exactly one trailing slash. + value = value.rstrip("/") + "/" + return value + + +def ensure_administrator_authorized_keys( + inventory_dir: Path, + host: str, + authorized_keys_spec: Optional[str], + project_root: Path, +) -> None: + """ + Ensure that the administrator's authorized_keys file exists and contains + all keys provided via --authorized-keys. + + Behavior: + - If authorized_keys_spec is None β†’ do nothing. + - If authorized_keys_spec is a path to an existing file: + read all non-empty, non-comment lines in that file as keys. + - Else: + treat authorized_keys_spec as literal key text, which may contain + one or more keys separated by newlines. + + The target file path mirrors the Ansible task in roles/user-administrator: + + src: "{{ inventory_dir }}/files/{{ inventory_hostname }}{{ PATH_ADMINISTRATOR_HOME }}.ssh/authorized_keys" + + We implement the same pattern here: + /files/.ssh/authorized_keys + + PATH_ADMINISTRATOR_HOME is read from group_vars/all/06_paths.yml so that + Python and Ansible share a single source of truth. 
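+
+    Illustrative call (the key string and paths are placeholder values, not
+    data from this repository):
+
+        ensure_administrator_authorized_keys(
+            inventory_dir=Path("inventories/galaxyserver"),
+            host="localhost",
+            authorized_keys_spec="ssh-ed25519 AAAAC3Nz... admin@example.org",
+            project_root=Path("."),
+        )
+
+    This appends the key to the resolved authorized_keys file only if an
+    identical (stripped) line is not already present.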
+ """ + if not authorized_keys_spec: + return + + # Read PATH_ADMINISTRATOR_HOME from group_vars/all/06_paths.yml + PATH_ADMINISTRATOR_HOME = get_path_administrator_home_from_group_vars(project_root) + + # Build relative path identical to the Ansible src: + # files/{{ inventory_hostname }}{{ PATH_ADMINISTRATOR_HOME }}.ssh/authorized_keys + rel_fragment = f"{host}{PATH_ADMINISTRATOR_HOME}.ssh/authorized_keys" + # remove leading slash so it becomes relative under files/ + rel_path = rel_fragment.lstrip("/") + target_path = inventory_dir / "files" / rel_path + target_path.parent.mkdir(parents=True, exist_ok=True) + + spec_path = Path(authorized_keys_spec) + if spec_path.exists() and spec_path.is_file(): + # Use keys from the referenced file. + source_text = spec_path.read_text(encoding="utf-8") + else: + # Treat the argument as literal key text. + source_text = authorized_keys_spec + + # Normalize incoming keys: one key per non-empty, non-comment line. + new_keys: List[str] = [] + for line in (source_text or "").splitlines(): + stripped = line.strip() + if not stripped or stripped.startswith("#"): + continue + new_keys.append(stripped) + + if not new_keys: + # Nothing to add. + return + + existing_lines: List[str] = [] + existing_keys: Set[str] = set() + + if target_path.exists(): + for line in target_path.read_text(encoding="utf-8").splitlines(): + existing_lines.append(line) + stripped = line.strip() + if stripped and not stripped.startswith("#"): + existing_keys.add(stripped) + + # Append only keys that are not yet present (by stripped line match). + for key in new_keys: + if key not in existing_keys: + existing_lines.append(key) + existing_keys.add(key) + + # Write back, ensuring a trailing newline. + final_text = "\n".join(existing_lines).rstrip() + "\n" + target_path.write_text(final_text, encoding="utf-8") + + +# --------------------------------------------------------------------------- +# Role resolution (meta/applications/role_name.py) +# --------------------------------------------------------------------------- + +def resolve_role_path( + application_id: str, + roles_dir: Path, + project_root: Path, +) -> Optional[Path]: + """ + Use `cli/meta/applications/role_name.py` to resolve the role path + for a given application_id. Returns an absolute Path or None on failure. + + We expect the helper to print either: + - a bare role folder name (e.g. 'web-app-nextcloud'), or + - a relative path like 'roles/web-app-nextcloud', or + - an absolute path. + + We try, in order: + 1) / + 2) / + 3) use printed as-is if absolute + """ + script = project_root / "cli" / "meta" / "applications" / "role_name.py" + env = build_env_with_project_root(project_root) + cmd = [ + sys.executable, + str(script), + application_id, + "-r", str(roles_dir), + ] + result = run_subprocess(cmd, capture_output=True, env=env) + raw = (result.stdout or "").strip() + + if not raw: + print(f"[WARN] Could not resolve role for application_id '{application_id}'. 
Skipping.", file=sys.stderr) + return None + + printed = Path(raw) + + # 1) If it's absolute, just use it + if printed.is_absolute(): + role_path = printed + else: + # 2) Prefer resolving below roles_dir + candidate = roles_dir / printed + if candidate.exists(): + role_path = candidate + else: + # 3) Fallback: maybe the helper already printed something like 'roles/web-app-nextcloud' + candidate2 = project_root / printed + if candidate2.exists(): + role_path = candidate2 + else: + print( + f"[WARN] Resolved role path does not exist after probing: " + f"{candidate} and {candidate2} (application_id={application_id})", + file=sys.stderr, + ) + return None + + if not role_path.exists(): + print(f"[WARN] Resolved role path does not exist: {role_path} (application_id={application_id})", file=sys.stderr) + return None + + return role_path + + +def fatal(msg: str) -> NoReturn: + """Print a fatal error and exit with code 1.""" + sys.stderr.write(f"[FATAL] {msg}\n") + sys.exit(1) + + +# --------------------------------------------------------------------------- +# Credentials generation via create/credentials.py --snippet +# --------------------------------------------------------------------------- + +def _generate_credentials_snippet_for_app( + app_id: str, + roles_dir: Path, + host_vars_file: Path, + vault_password_file: Path, + project_root: Path, + credentials_script: Path, +) -> Optional[CommentedMap]: + """ + Worker function for a single application_id: + + 1. Resolve role path via meta/applications/role_name.py. + 2. Skip if role path cannot be resolved. + 3. Skip if schema/main.yml does not exist. + 4. Call create/credentials.py with --snippet to get a YAML fragment. + + Returns a ruamel CommentedMap (snippet) or None on failure. + Errors are logged but do NOT abort the whole run. 
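+
+    Expected snippet shape (illustrative; the actual keys depend on the role's
+    schema/main.yml and on what create/credentials.py emits):
+
+        applications:
+          web-app-nextcloud:
+            credentials:
+              database_password: !vault |
+                $ANSIBLE_VAULT;1.1;AES256
+                ...
+        ansible_become_password: !vault |
+          $ANSIBLE_VAULT;1.1;AES256
+          ...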
+ """ + try: + role_path = resolve_role_path(app_id, roles_dir, project_root) + except SystemExit as exc: + sys.stderr.write(f"[ERROR] Failed to resolve role for {app_id}: {exc}\n") + return None + except Exception as exc: # pragma: no cover + sys.stderr.write( + f"[ERROR] Unexpected error while resolving role for {app_id}: {exc}\n" + ) + return None + + if role_path is None: + # resolve_role_path already logged a warning + return None + + schema_path = role_path / "schema" / "main.yml" + if not schema_path.exists(): + print( + f"[INFO] Skipping {app_id}: no schema/main.yml found at {schema_path}", + file=sys.stderr, + ) + return None + + cmd = [ + sys.executable, + str(credentials_script), + "--role-path", str(role_path), + "--inventory-file", str(host_vars_file), + "--vault-password-file", str(vault_password_file), + "--snippet", + "--allow-empty-plain", + ] + print(f"[INFO] Generating credentials snippet for {app_id} (role: {role_path})") + + env = build_env_with_project_root(project_root) + result = subprocess.run(cmd, text=True, capture_output=True, env=env) + if result.returncode != 0: + stdout = result.stdout or "" + stderr = result.stderr or "" + fatal( + f"Command failed ({result.returncode}): {' '.join(map(str, cmd))}\n" + f"STDOUT:\n{stdout}\nSTDERR:\n{stderr}" + ) + + snippet_text = (result.stdout or "").strip() + if not snippet_text: + # No output means nothing to merge + return None + + yaml_rt = YAML(typ="rt") + try: + data = yaml_rt.load(snippet_text) + except Exception as exc: # pragma: no cover + sys.stderr.write( + f"[ERROR] Failed to parse credentials snippet for {app_id}: {exc}\n" + f"Snippet was:\n{snippet_text}\n" + ) + return None + + if data is None: + return None + if not isinstance(data, CommentedMap): + # Normalize to CommentedMap + cm = CommentedMap() + for k, v in dict(data).items(): + cm[k] = v + return cm + + return data + + +def generate_credentials_for_roles( + application_ids: List[str], + roles_dir: Path, + host_vars_file: Path, + vault_password_file: Path, + project_root: Path, + workers: int = 4, +) -> None: + """ + Generate credentials for all given application_ids using create/credentials.py --snippet. + + Steps: + 1) In parallel, for each app_id: + - resolve role path + - skip roles without schema/main.yml + - run create/credentials.py --snippet + - return a YAML snippet (ruamel CommentedMap) + 2) Sequentially, merge all snippets into host_vars/.yml in a single write: + - applications..credentials. is added only if missing + - ansible_become_password is added only if missing + """ + if not application_ids: + print("[WARN] No application_ids to process for credential generation.", file=sys.stderr) + return + + credentials_script = project_root / "cli" / "create" / "credentials.py" + max_workers = max(1, workers) + print( + f"[INFO] Running credentials snippet generation for {len(application_ids)} " + f"applications with {max_workers} worker threads..." 
+ ) + + snippets: List[CommentedMap] = [] + + # 1) Parallel: collect snippets + with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: + future_to_app: Dict[concurrent.futures.Future, str] = {} + + for app_id in application_ids: + future = executor.submit( + _generate_credentials_snippet_for_app, + app_id, + roles_dir, + host_vars_file, + vault_password_file, + project_root, + credentials_script, + ) + future_to_app[future] = app_id + + for future in concurrent.futures.as_completed(future_to_app): + app_id = future_to_app[future] + try: + snippet = future.result() + except Exception as exc: + fatal(f"Worker for {app_id} failed with exception: {exc}") + + if snippet is not None: + snippets.append(snippet) + + if not snippets: + print("[WARN] No credentials snippets were generated.", file=sys.stderr) + return + + # 2) Sequential: merge snippets into host_vars + yaml_rt = YAML(typ="rt") + yaml_rt.preserve_quotes = True + + if host_vars_file.exists(): + with host_vars_file.open("r", encoding="utf-8") as f: + doc = yaml_rt.load(f) + if doc is None: + doc = CommentedMap() + else: + doc = CommentedMap() + + if not isinstance(doc, CommentedMap): + tmp = CommentedMap() + for k, v in dict(doc).items(): + tmp[k] = v + doc = tmp + + # Merge each snippet + for snippet in snippets: + apps_snip = snippet.get("applications", {}) or {} + if isinstance(apps_snip, dict): + apps_doc = ensure_ruamel_map(doc, "applications") + for app_id, app_block_snip in apps_snip.items(): + if not isinstance(app_block_snip, dict): + continue + app_doc = ensure_ruamel_map(apps_doc, app_id) + creds_doc = ensure_ruamel_map(app_doc, "credentials") + + creds_snip = app_block_snip.get("credentials", {}) or {} + if not isinstance(creds_snip, dict): + continue + + for key, val in creds_snip.items(): + # Only add missing keys; do not overwrite existing credentials + if key not in creds_doc: + creds_doc[key] = val + + # ansible_become_password: only add if missing + if "ansible_become_password" in snippet and "ansible_become_password" not in doc: + doc["ansible_become_password"] = snippet["ansible_become_password"] + + with host_vars_file.open("w", encoding="utf-8") as f: + yaml_rt.dump(doc, f) + + +# --------------------------------------------------------------------------- +# main +# --------------------------------------------------------------------------- + +def main(argv: Optional[List[str]] = None) -> None: + parser = argparse.ArgumentParser( + description=( + "Create or update a full inventory for a host and generate " + "credentials for all selected applications." + ) + ) + parser.add_argument( + "inventory_dir", + help="Inventory directory (e.g. inventories/galaxyserver).", + ) + parser.add_argument( + "--host", + required=False, + default="localhost", + help="Hostname to use in the inventory (default: localhost).", + ) + parser.add_argument( + "--primary-domain", + required=False, + default=None, + help="Primary domain for this host (e.g. infinito.nexus). Optional.", + ) + parser.add_argument( + "--ssl-disabled", + action="store_true", + help="Disable SSL for this host (sets SSL_ENABLED: false in host_vars).", + ) + parser.add_argument( + "--become-password", + required=False, + help=( + "Optional become password. If omitted and ansible_become_password is " + "missing, a random one is generated and vaulted. If omitted and " + "ansible_become_password already exists, it is left unchanged." 
+ ), + ) + parser.add_argument( + "--authorized-keys", + required=False, + help=( + "Optional SSH public keys for the 'administrator' account. " + "May be a literal key string (possibly with newlines) or a path " + "to a file containing one or more public keys. " + "All keys are ensured to exist in " + "files/.ssh/authorized_keys " + "under the inventory directory; missing keys are appended." + ), + ) + parser.add_argument( + "--vars", + required=False, + help=( + "Optional JSON string with additional values for host_vars/.yml. " + "The JSON must have an object at the top level. All keys from this " + "object (including nested ones) are merged into host_vars and " + "overwrite existing values." + ), + ) + parser.add_argument( + "--ip4", + default="127.0.0.1", + help="IPv4 address for networks.internet.ip4 (default: 127.0.0.1).", + ) + parser.add_argument( + "--ip6", + default="::1", + help='IPv6 address for networks.internet.ip6 (default: "::1").', + ) + parser.add_argument( + "--inventory-file", + help="Inventory YAML file path (default: /servers.yml).", + ) + parser.add_argument( + "--roles", + nargs="+", + help=( + "Optional legacy list of application_ids to include. " + "Used only if neither --include nor --exclude is specified. " + "Supports comma-separated values as well." + ), + ) + parser.add_argument( + "--include", + nargs="+", + help=( + "Only include the listed application_ids in the inventory. " + "Mutually exclusive with --exclude." + ), + ) + parser.add_argument( + "--exclude", + nargs="+", + help=( + "Exclude the listed application_ids from the inventory. " + "Mutually exclusive with --include." + ), + ) + parser.add_argument( + "--vault-password-file", + required=False, + default=None, + help=( + "Path to the Vault password file for credentials generation. " + "If omitted, /.password is created or reused." + ), + ) + parser.add_argument( + "--roles-dir", + help="Path to the roles/ directory (default: /roles).", + ) + parser.add_argument( + "--categories-file", + help="Path to roles/categories.yml (default: /categories.yml).", + ) + parser.add_argument( + "--workers", + type=int, + default=4, + help="Number of worker threads for parallel credentials snippet generation (default: 4).", + ) + + args = parser.parse_args(argv) + + # Parse include/exclude/roles lists + include_filter = parse_roles_list(args.include) + ignore_filter = parse_roles_list(args.exclude) + roles_filter = parse_roles_list(args.roles) + + # Enforce mutual exclusivity: only one of --include / --exclude may be used + if include_filter and ignore_filter: + fatal("Options --include and --exclude are mutually exclusive. 
Please use only one of them.") + + project_root = detect_project_root() + roles_dir = Path(args.roles_dir) if args.roles_dir else (project_root / "roles") + categories_file = Path(args.categories_file) if args.categories_file else (roles_dir / "categories.yml") + + inventory_dir = Path(args.inventory_dir).resolve() + inventory_dir.mkdir(parents=True, exist_ok=True) + + inventory_file = Path(args.inventory_file) if args.inventory_file else (inventory_dir / "servers.yml") + inventory_file = inventory_file.resolve() + + host_vars_dir = inventory_dir / "host_vars" + host_vars_file = host_vars_dir / f"{args.host}.yml" + + # Vault password file: use provided one, otherwise create/reuse .password in inventory_dir + if args.vault_password_file: + vault_password_file = Path(args.vault_password_file).resolve() + else: + vault_password_file = inventory_dir / ".password" + if not vault_password_file.exists(): + print(f"[INFO] No --vault-password-file provided. Creating {vault_password_file} ...") + password = generate_random_password() + with vault_password_file.open("w", encoding="utf-8") as f: + f.write(password + "\n") + try: + vault_password_file.chmod(0o600) + except PermissionError: + # Best-effort; ignore if chmod is not allowed + pass + else: + print(f"[INFO] Using existing vault password file: {vault_password_file}") + + tmp_inventory = inventory_dir / "_inventory_full_tmp.yml" + + # 1) Generate dynamic inventory via build/inventory/full.py + print("[INFO] Generating dynamic inventory via cli/build/inventory/full.py ...") + dyn_inv = generate_dynamic_inventory( + host=args.host, + roles_dir=roles_dir, + categories_file=categories_file, + tmp_inventory=tmp_inventory, + project_root=project_root, + ) + + # 2) Apply filters: include β†’ exclude β†’ legacy roles + if include_filter: + print(f"[INFO] Including only application_ids: {', '.join(sorted(include_filter))}") + dyn_inv = filter_inventory_by_include(dyn_inv, include_filter) + elif ignore_filter: + print(f"[INFO] Ignoring application_ids: {', '.join(sorted(ignore_filter))}") + dyn_inv = filter_inventory_by_ignore(dyn_inv, ignore_filter) + elif roles_filter: + print(f"[INFO] Filtering inventory to roles (legacy): {', '.join(sorted(roles_filter))}") + dyn_inv = filter_inventory_by_roles(dyn_inv, roles_filter) + + # Collect final application_ids from dynamic inventory for credential generation + dyn_all = dyn_inv.get("all", {}) + dyn_children = dyn_all.get("children", {}) or {} + application_ids = sorted(dyn_children.keys()) + + if not application_ids: + print("[WARN] No application_ids found in dynamic inventory after filtering. 
Nothing to do.", file=sys.stderr) + + # 3) Merge with existing inventory file (if any) + if inventory_file.exists(): + print(f"[INFO] Merging into existing inventory: {inventory_file}") + base_inv = load_yaml(inventory_file) + else: + print(f"[INFO] Creating new inventory file: {inventory_file}") + base_inv = {} + + merged_inv = merge_inventories(base_inv, dyn_inv, host=args.host) + dump_yaml(inventory_file, merged_inv) + + # 4) Ensure host_vars/.yml exists and has base settings + print(f"[INFO] Ensuring host_vars for host '{args.host}' at {host_vars_file}") + ensure_host_vars_file( + host_vars_file=host_vars_file, + host=args.host, + primary_domain=args.primary_domain, + ssl_disabled=args.ssl_disabled, + ip4=args.ip4, + ip6=args.ip6, + ) + + # 4b) Ensure ansible_become_password is vaulted according to CLI options + print(f"[INFO] Ensuring ansible_become_password for host '{args.host}'") + ensure_become_password( + host_vars_file=host_vars_file, + vault_password_file=vault_password_file, + become_password=args.become_password, + ) + + # 4c) Ensure administrator authorized_keys file contains keys from --authorized-keys + if args.authorized_keys: + print( + f"[INFO] Ensuring administrator authorized_keys for host '{args.host}' " + f"from spec: {args.authorized_keys}" + ) + ensure_administrator_authorized_keys( + inventory_dir=inventory_dir, + host=args.host, + authorized_keys_spec=args.authorized_keys, + project_root=project_root, + ) + + # 5) Generate credentials for all application_ids (snippets + single merge) + if application_ids: + print(f"[INFO] Generating credentials for {len(application_ids)} applications...") + generate_credentials_for_roles( + application_ids=application_ids, + roles_dir=roles_dir, + host_vars_file=host_vars_file, + vault_password_file=vault_password_file, + project_root=project_root, + workers=args.workers, + ) + if args.vars: + print( + f"[INFO] Applying JSON overrides to host_vars for host '{args.host}' " + f"via --vars" + ) + apply_vars_overrides( + host_vars_file=host_vars_file, + json_str=args.vars, + ) + + print("[INFO] Done. 
Inventory and host_vars updated without deleting existing values.") + + +if __name__ == "__main__": # pragma: no cover + main() diff --git a/build/lib/cli/create/role.py b/build/lib/cli/create/role.py new file mode 100644 index 00000000..75c2e4ce --- /dev/null +++ b/build/lib/cli/create/role.py @@ -0,0 +1,166 @@ +#!/usr/bin/env python3 +import argparse +import shutil +import ipaddress +import difflib +from jinja2 import Environment, FileSystemLoader +from ruamel.yaml import YAML + +import sys, os +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) +from module_utils.entity_name_utils import get_entity_name + +# Paths to the group-vars files +PORTS_FILE = './group_vars/all/10_ports.yml' +NETWORKS_FILE = './group_vars/all/09_networks.yml' +ROLE_TEMPLATE_DIR = './templates/roles/web-app' +ROLES_DIR = './roles' + +yaml = YAML() +yaml.preserve_quotes = True + + +def load_yaml_with_comments(path): + with open(path) as f: + return yaml.load(f) + + +def dump_yaml_with_comments(data, path): + with open(path, 'w') as f: + yaml.dump(data, f) + + +def get_next_network(networks_dict, prefixlen): + """Select the next contiguous subnet, based on the highest existing subnet + one network offset.""" + nets = [] + local = networks_dict['defaults_networks']['local'] + for name, info in local.items(): + # info is a dict with 'subnet' key + net = ipaddress.ip_network(info['subnet']) + if net.prefixlen == prefixlen: + nets.append(net) + if not nets: + raise RuntimeError(f"No existing /{prefixlen} subnets to base allocation on.") + nets.sort(key=lambda n: int(n.network_address)) + last = nets[-1] + offset = last.num_addresses + next_net = ipaddress.ip_network((int(last.network_address) + offset, prefixlen)) + return next_net + + +def get_next_port(ports_dict, category): + """Assign the next port by taking the max existing plus one.""" + loc = ports_dict['ports']['localhost'][category] + existing = [int(v) for v in loc.values()] + return (max(existing) + 1) if existing else 1 + + +def prompt_conflict(dst_file): + print(f"Conflict detected: {dst_file}") + print("[1] overwrite, [2] skip, [3] merge") + choice = None + while choice not in ('1', '2', '3'): + choice = input("Enter 1, 2, or 3: ").strip() + return choice + + +def render_templates(src_dir, dst_dir, context): + env = Environment(loader=FileSystemLoader(src_dir), keep_trailing_newline=True, autoescape=False) + env.filters['bool'] = lambda x: bool(x) + env.filters['get_entity_name'] = get_entity_name + + for root, _, files in os.walk(src_dir): + rel = os.path.relpath(root, src_dir) + target = os.path.join(dst_dir, rel) + os.makedirs(target, exist_ok=True) + for fn in files: + tpl = env.get_template(os.path.join(rel, fn)) + rendered = tpl.render(**context) + out = fn[:-3] if fn.endswith('.j2') else fn + dst_file = os.path.join(target, out) + + if os.path.exists(dst_file): + choice = prompt_conflict(dst_file) + if choice == '2': + print(f"Skipping {dst_file}") + continue + if choice == '3': + with open(dst_file) as f_old: + old_lines = f_old.readlines() + new_lines = rendered.splitlines(keepends=True) + additions = [l for l in new_lines if l not in old_lines] + if additions: + with open(dst_file, 'a') as f: + f.writelines(additions) + print(f"Merged {len(additions)} lines into {dst_file}") + else: + print(f"No new lines to merge into {dst_file}") + continue + # overwrite + print(f"Overwriting {dst_file}") + with open(dst_file, 'w') as f: + f.write(rendered) + else: + # create new file + with open(dst_file, 'w') as f: + 
f.write(rendered) + + +def main(): + # Load dynamic port categories + ports_data = load_yaml_with_comments(PORTS_FILE) + categories = list(ports_data['ports']['localhost'].keys()) + + parser = argparse.ArgumentParser( + description="Create or update a Docker Ansible role, and globally assign network and ports with comments preserved" + ) + parser.add_argument('-a', '--application-id', required=True, help="Unique application ID") + parser.add_argument('-n', '--network', choices=['24', '28'], required=True, help="Network prefix length (/24 or /28)") + parser.add_argument('-p', '--ports', nargs='+', choices=categories, required=True, help=f"Port categories to assign (allowed: {', '.join(categories)})") + args = parser.parse_args() + + app = args.application_id + role = f"web-app-{app}" + role_dir = os.path.join(ROLES_DIR, role) + + if os.path.exists(role_dir): + if input(f"Role {role} exists. Continue? [y/N]: ").strip().lower() != 'y': + print("Aborting.") + sys.exit(1) + else: + os.makedirs(role_dir) + + # 1) Render all templates with conflict handling + render_templates(ROLE_TEMPLATE_DIR, role_dir, {'application_id': app, 'role_name': role, 'database_type': 0}) + print(f"β†’ Templates applied to {role_dir}") + + # 2) Update global networks file, preserving comments + networks = load_yaml_with_comments(NETWORKS_FILE) + prefix = int(args.network) + new_net = get_next_network(networks, prefix) + networks['defaults_networks']['local'][app] = {'subnet': str(new_net)} + shutil.copy(NETWORKS_FILE, NETWORKS_FILE + '.bak') + dump_yaml_with_comments(networks, NETWORKS_FILE) + print(f"β†’ Assigned network {new_net} in {NETWORKS_FILE}") + + # 3) Update global ports file, preserving comments + ports_data = load_yaml_with_comments(PORTS_FILE) + assigned = {} + for cat in args.ports: + loc = ports_data['ports']['localhost'].setdefault(cat, {}) + if app in loc: + print(f"β†’ Existing port for {cat} and {app}: {loc[app]}, skipping.") + else: + pnum = get_next_port(ports_data, cat) + loc[app] = pnum + assigned[cat] = pnum + + if assigned: + shutil.copy(PORTS_FILE, PORTS_FILE + '.bak') + dump_yaml_with_comments(ports_data, PORTS_FILE) + print(f"β†’ Assigned ports {assigned} in {PORTS_FILE}") + else: + print("β†’ No new ports assigned.") + +if __name__ == '__main__': + main() diff --git a/build/lib/cli/deploy/__init__.py b/build/lib/cli/deploy/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/build/lib/cli/deploy/container.py b/build/lib/cli/deploy/container.py new file mode 100644 index 00000000..83c615d3 --- /dev/null +++ b/build/lib/cli/deploy/container.py @@ -0,0 +1,372 @@ +import argparse +import os +import subprocess +import sys +import time +import uuid +from typing import List, Tuple + + +WORKDIR_DEFAULT = "/opt/src/infinito" + + +def ensure_image(image: str, rebuild: bool = False, no_cache: bool = False) -> None: + """ + Handle Docker image creation rules: + - rebuild=True => always rebuild + - rebuild=False & image missing => build once + - no_cache=True => add '--no-cache' to docker build + """ + build_args = ["docker", "build", "--network=host", "--pull"] + if no_cache: + build_args.append("--no-cache") + build_args += ["-t", image, "."] + + if rebuild: + print(f">>> Forcing rebuild of Docker image '{image}'...") + subprocess.run(build_args, check=True) + print(f">>> Docker image '{image}' rebuilt (forced).") + return + + print(f">>> Checking if Docker image '{image}' exists...") + result = subprocess.run( + ["docker", "image", "inspect", image], + stdout=subprocess.DEVNULL, + 
stderr=subprocess.DEVNULL, + ) + if result.returncode == 0: + print(f">>> Docker image '{image}' already exists.") + return + + print(f">>> Docker image '{image}' not found. Building it...") + subprocess.run(build_args, check=True) + print(f">>> Docker image '{image}' successfully built.") + + +def docker_exec( + container: str, + args: List[str], + workdir: str | None = None, + check: bool = True, +) -> subprocess.CompletedProcess: + """ + Helper to run `docker exec` with optional working directory. + """ + cmd = ["docker", "exec"] + if workdir: + cmd += ["-w", workdir] + cmd.append(container) + cmd += args + + return subprocess.run(cmd, check=check) + + +def wait_for_inner_docker(container: str, timeout: int = 60) -> None: + """ + Poll `docker exec docker info` until inner dockerd is ready. + """ + print(">>> Waiting for inner Docker daemon inside CI container...") + for _ in range(timeout): + result = subprocess.run( + ["docker", "exec", container, "docker", "info"], + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) + if result.returncode == 0: + print(">>> Inner Docker daemon is UP.") + return + time.sleep(1) + + raise RuntimeError("Inner Docker daemon did not become ready in time") + + +def start_ci_container( + image: str, + build: bool, + rebuild: bool, + no_cache: bool, + name: str | None = None, +) -> str: + """ + Start a CI container running dockerd inside. + + Returns the container name. + """ + if build or rebuild: + ensure_image(image, rebuild=rebuild, no_cache=no_cache) + + if not name: + name = f"infinito-ci-{uuid.uuid4().hex[:8]}" + + print(f">>> Starting CI container '{name}' with inner dockerd...") + subprocess.run( + [ + "docker", + "run", + "-d", + "--name", + name, + "--network=host", + "--privileged", + "--cgroupns=host", + image, + "dockerd", + "--debug", + "--host=unix:///var/run/docker.sock", + "--storage-driver=vfs", + ], + check=True, + ) + + wait_for_inner_docker(name) + print(f">>> CI container '{name}' started and inner dockerd is ready.") + return name + + +def run_in_container( + image: str, + build: bool, + rebuild: bool, + no_cache: bool, + inventory_args: List[str], + deploy_args: List[str], + name: str | None = None, +) -> None: + """ + Full CI "run" mode: + - start CI container with dockerd + - run cli.create.inventory (with forwarded inventory_args) + - ensure CI vault password file + - run cli.deploy.dedicated (with forwarded deploy_args) + - always remove container at the end + """ + container_name = None + try: + container_name = start_ci_container( + image=image, + build=build, + rebuild=rebuild, + no_cache=no_cache, + name=name, + ) + + # 1) Create CI inventory + print(">>> Creating CI inventory inside container (cli.create.inventory)...") + inventory_cmd: List[str] = [ + "python3", + "-m", + "cli.create.inventory", + "inventories/github-ci", + "--host", + "localhost", + "--ssl-disabled", + ] + inventory_cmd.extend(inventory_args) + + docker_exec( + container_name, + inventory_cmd, + workdir=WORKDIR_DEFAULT, + check=True, + ) + + # 2) Ensure vault password file exists + print(">>> Ensuring CI vault password file exists...") + docker_exec( + container_name, + [ + "sh", + "-c", + "mkdir -p inventories/github-ci && " + "[ -f inventories/github-ci/.password ] || " + "printf '%s\n' 'ci-vault-password' > inventories/github-ci/.password", + ], + workdir=WORKDIR_DEFAULT, + check=True, + ) + + # 3) Run dedicated deploy + print(">>> Running cli.deploy.dedicated inside CI container...") + cmd = [ + "python3", + "-m", + "cli.deploy.dedicated", + 
"inventories/github-ci/servers.yml", + "-p", + "inventories/github-ci/.password", + *deploy_args, + ] + result = docker_exec(container_name, cmd, workdir=WORKDIR_DEFAULT, check=False) + + if result.returncode != 0: + raise subprocess.CalledProcessError(result.returncode, cmd) + + print(">>> Deployment finished successfully inside CI container.") + + finally: + if container_name: + print(f">>> Cleaning up CI container '{container_name}'...") + subprocess.run( + ["docker", "rm", "-f", container_name], + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) + + +def stop_container(name: str) -> None: + print(f">>> Stopping container '{name}'...") + subprocess.run(["docker", "stop", name], check=True) + print(f">>> Container '{name}' stopped.") + + +def remove_container(name: str) -> None: + print(f">>> Removing container '{name}'...") + subprocess.run(["docker", "rm", "-f", name], check=True) + print(f">>> Container '{name}' removed.") + + +def exec_in_container(name: str, cmd_args: List[str], workdir: str | None = WORKDIR_DEFAULT) -> int: + if not cmd_args: + print("Error: exec mode requires a command to run inside the container.", file=sys.stderr) + return 1 + + print(f">>> Executing command in container '{name}': {' '.join(cmd_args)}") + result = docker_exec(name, cmd_args, workdir=workdir, check=False) + return result.returncode + + +def split_inventory_and_deploy_args(rest: List[str]) -> Tuple[List[str], List[str]]: + """ + Split remaining arguments into: + - inventory_args: passed to cli.create.inventory + - deploy_args: passed to cli.deploy.dedicated + + Convention: + - [inventory-args ...] -- [deploy-args ...] + - If no '--' is present: inventory_args = [], deploy_args = all rest. + """ + if not rest: + return [], [] + + if "--" in rest: + idx = rest.index("--") + inventory_args = rest[:idx] + deploy_args = rest[idx + 1 :] + else: + inventory_args = [] + deploy_args = rest + + return inventory_args, deploy_args + + +def main() -> int: + # Capture raw arguments without program name + raw_argv = sys.argv[1:] + + # Split container-args vs forwarded args using first "--" + if "--" in raw_argv: + sep_index = raw_argv.index("--") + container_argv = raw_argv[:sep_index] + rest = raw_argv[sep_index + 1:] + else: + container_argv = raw_argv + rest = [] + + parser = argparse.ArgumentParser( + prog="infinito-deploy-container", + description=( + "Run Ansible deploy inside an infinito Docker image with an inner " + "Docker daemon (dockerd + vfs) and auto-generated CI inventory.\n\n" + "Usage (run mode):\n" + " python -m cli.deploy.container run [container-opts] -- \\\n" + " [inventory-args ...] -- [deploy-args ...]\n\n" + "Example:\n" + " python -m cli.deploy.container run --build -- \\\n" + " --include svc-db-mariadb -- \\\n" + " -T server --debug\n" + ) + ) + + parser.add_argument( + "mode", + choices=["run", "start", "stop", "exec", "remove"], + help="Container mode: run, start, stop, exec, remove." 
+ ) + + parser.add_argument("--image", default=os.environ.get("INFINITO_IMAGE", "infinito:latest")) + parser.add_argument("--build", action="store_true") + parser.add_argument("--rebuild", action="store_true") + parser.add_argument("--no-cache", action="store_true") + parser.add_argument("--name") + + # Parse only container-level arguments + args = parser.parse_args(container_argv) + + mode = args.mode + + # --- RUN MODE --- + if mode == "run": + inventory_args, deploy_args = split_inventory_and_deploy_args(rest) + + if not deploy_args: + print( + "Error: missing deploy arguments in run mode.\n" + "Use: container run [opts] -- [inventory args] -- [deploy args]", + file=sys.stderr + ) + return 1 + + try: + run_in_container( + image=args.image, + build=args.build, + rebuild=args.rebuild, + no_cache=args.no_cache, + inventory_args=inventory_args, + deploy_args=deploy_args, + name=args.name, + ) + except subprocess.CalledProcessError as exc: + print(f"[ERROR] Deploy failed with exit code {exc.returncode}", file=sys.stderr) + return exc.returncode + + return 0 + + # --- START MODE --- + if mode == "start": + try: + name = start_ci_container( + image=args.image, + build=args.build, + rebuild=args.rebuild, + no_cache=args.no_cache, + name=args.name, + ) + except Exception as exc: + print(f"[ERROR] {exc}", file=sys.stderr) + return 1 + + print(f">>> Started CI container: {name}") + return 0 + + # For stop/remove/exec, a container name is mandatory + if not args.name: + print(f"Error: '{mode}' requires --name", file=sys.stderr) + return 1 + + if mode == "stop": + stop_container(args.name) + return 0 + + if mode == "remove": + remove_container(args.name) + return 0 + + if mode == "exec": + return exec_in_container(args.name, rest) + + print(f"Unknown mode: {mode}", file=sys.stderr) + return 1 + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/build/lib/cli/deploy/dedicated.py b/build/lib/cli/deploy/dedicated.py new file mode 100644 index 00000000..4414e9aa --- /dev/null +++ b/build/lib/cli/deploy/dedicated.py @@ -0,0 +1,367 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +""" +Infinito.Nexus Deploy CLI + +This script is the main entrypoint for running the Ansible playbook with +dynamic MODE_* flags, automatic inventory validation, and optional build/test +phases. It supports partial deployments, dynamic MODE flag generation, +inventory validation, and structured execution flow. 
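+
+Illustrative invocation (inventory path, password file and flags are examples;
+the available MODE_* switches are generated from group_vars/all/01_modes.yml):
+
+    python3 -m cli.deploy.dedicated inventories/github-ci/servers.yml \
+        -p inventories/github-ci/.password -T server --debug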
+""" + +import argparse +import subprocess +import os +import datetime +import sys +import re +from typing import Optional, Dict, Any, List + + +# -------------------------------------------------------------------------------------- +# Path resolution +# -------------------------------------------------------------------------------------- + +# Current file: .../cli/deploy/deploy.py +SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) # β†’ cli/deploy +CLI_ROOT = os.path.dirname(SCRIPT_DIR) # β†’ cli +REPO_ROOT = os.path.dirname(CLI_ROOT) # β†’ project root + + +# -------------------------------------------------------------------------------------- +# Main execution logic +# -------------------------------------------------------------------------------------- + +def run_ansible_playbook( + inventory: str, + modes: Dict[str, Any], + limit: Optional[str] = None, + allowed_applications: Optional[List[str]] = None, + password_file: Optional[str] = None, + verbose: int = 0, + skip_build: bool = False, + skip_tests: bool = False, + logs: bool = False, + diff: bool = False, +) -> None: + """Run ansible-playbook with the given parameters and execution modes.""" + start_time = datetime.datetime.now() + print(f"\n▢️ Script started at: {start_time.isoformat()}\n") + + # --------------------------------------------------------- + # 1) Cleanup Phase + # --------------------------------------------------------- + if modes.get("MODE_CLEANUP", False): + cleanup_cmd = ["make", "clean-keep-logs"] if logs else ["make", "clean"] + print(f"\n🧹 Running cleanup ({' '.join(cleanup_cmd)})...\n") + subprocess.run(cleanup_cmd, check=True) + else: + print("\n🧹 Cleanup skipped (MODE_CLEANUP not set or False)\n") + + # --------------------------------------------------------- + # 2) Build Phase + # --------------------------------------------------------- + if not skip_build: + print("\nπŸ› οΈ Running project build (make setup)...\n") + subprocess.run(["make", "setup"], check=True) + else: + print("\nπŸ› οΈ Build skipped (--skip-build)\n") + + # The Ansible playbook is located in the repo root + playbook_path = os.path.join(REPO_ROOT, "playbook.yml") + + # --------------------------------------------------------- + # 3) Inventory Validation Phase + # --------------------------------------------------------- + if modes.get("MODE_ASSERT", None) is False: + print("\nπŸ” Inventory assertion explicitly disabled (MODE_ASSERT=false)\n") + else: + print("\nπŸ” Validating inventory before deployment...\n") + validator_path = os.path.join(CLI_ROOT, "validate", "inventory.py") + try: + subprocess.run( + [sys.executable, validator_path, os.path.dirname(inventory)], + check=True, + ) + except subprocess.CalledProcessError: + print( + "\n[ERROR] Inventory validation failed. 
Aborting deployment.\n", + file=sys.stderr, + ) + sys.exit(1) + + # --------------------------------------------------------- + # 4) Test Phase + # --------------------------------------------------------- + if not skip_tests: + print("\nπŸ§ͺ Running tests (make test-messy)...\n") + subprocess.run(["make", "test-messy"], check=True) + else: + print("\nπŸ§ͺ Tests skipped (--skip-tests)\n") + + # --------------------------------------------------------- + # 5) Build ansible-playbook command + # --------------------------------------------------------- + cmd: List[str] = ["ansible-playbook", "-i", inventory, playbook_path] + + # Limit hosts + if limit: + cmd.extend(["-l", limit]) + + # Allowed applications (partial deployment) + if allowed_applications: + joined = ",".join(allowed_applications) + cmd.extend(["-e", f"allowed_applications={joined}"]) + + # MODE_* flags + for key, value in modes.items(): + val = str(value).lower() if isinstance(value, bool) else str(value) + cmd.extend(["-e", f"{key}={val}"]) + + # Vault password file + if password_file: + cmd.extend(["--vault-password-file", password_file]) + + # Enable diff mode + if diff: + cmd.append("--diff") + + # MODE_DEBUG β†’ enforce high verbosity + if modes.get("MODE_DEBUG", False): + verbose = max(verbose, 3) + + # Verbosity flags + if verbose: + cmd.append("-" + "v" * verbose) + + print("\nπŸš€ Launching Ansible Playbook...\n") + result = subprocess.run(cmd) + + if result.returncode != 0: + print( + f"\n[ERROR] ansible-playbook exited with status {result.returncode}\n", + file=sys.stderr, + ) + sys.exit(result.returncode) + + end_time = datetime.datetime.now() + + print(f"\nβœ… Script ended at: {end_time.isoformat()}\n") + print(f"⏱️ Total execution time: {end_time - start_time}\n") + + +# -------------------------------------------------------------------------------------- +# Application ID validation +# -------------------------------------------------------------------------------------- + +def validate_application_ids(inventory: str, app_ids: List[str]) -> None: + """Validate requested application IDs using ValidDeployId.""" + if not app_ids: + return + + from module_utils.valid_deploy_id import ValidDeployId + + validator = ValidDeployId() + invalid = validator.validate(inventory, app_ids) + + if invalid: + print("\n[ERROR] Some application_ids are invalid for this inventory:\n") + for app_id, status in invalid.items(): + reasons = [] + if not status.get("allowed", True): + reasons.append("not allowed by configuration") + if not status.get("in_inventory", True): + reasons.append("not present in inventory") + print(f" - {app_id}: {', '.join(reasons)}") + sys.exit(1) + + +# -------------------------------------------------------------------------------------- +# MODE_* parsing logic +# -------------------------------------------------------------------------------------- + +MODE_LINE_RE = re.compile( + r"""^\s*(?P[A-Z0-9_]+)\s*:\s*(?P.+?)\s*(?:#\s*(?P.*))?\s*$""" +) + + +def _parse_bool_literal(text: str) -> Optional[bool]: + """Convert simple true/false/yes/no/on/off into boolean.""" + t = text.strip().lower() + if t in ("true", "yes", "on"): + return True + if t in ("false", "no", "off"): + return False + return None + + +def load_modes_from_yaml(modes_yaml_path: str) -> List[Dict[str, Any]]: + """Load MODE_* definitions from YAML-like key/value file.""" + modes: List[Dict[str, Any]] = [] + + if not os.path.exists(modes_yaml_path): + raise FileNotFoundError(f"Modes file not found: {modes_yaml_path}") + + with 
open(modes_yaml_path, "r", encoding="utf-8") as fh: + for line in fh: + line = line.rstrip() + if not line or line.lstrip().startswith("#"): + continue + + m = MODE_LINE_RE.match(line) + if not m: + continue + + key = m.group("key") + val = m.group("value").strip() + cmt = (m.group("cmt") or "").strip() + + if not key.startswith("MODE_"): + continue + + default_bool = _parse_bool_literal(val) + + modes.append( + { + "name": key, + "default": default_bool, + "help": cmt or f"Toggle {key}", + } + ) + + return modes + + +# -------------------------------------------------------------------------------------- +# Dynamic argparse mode injection +# -------------------------------------------------------------------------------------- + +def add_dynamic_mode_args( + parser: argparse.ArgumentParser, modes_meta: List[Dict[str, Any]] +) -> Dict[str, Dict[str, Any]]: + """ + Add command-line arguments dynamically based on MODE_* metadata. + """ + + spec: Dict[str, Dict[str, Any]] = {} + + for m in modes_meta: + name = m["name"] + default = m["default"] + desc = m["help"] + short = name.replace("MODE_", "").lower() + + if default is True: + # MODE_FOO: true β†’ --skip-foo disables it + opt = f"--skip-{short}" + dest = f"skip_{short}" + parser.add_argument(opt, action="store_true", dest=dest, help=desc) + spec[name] = {"dest": dest, "default": True, "kind": "bool_true"} + + elif default is False: + # MODE_BAR: false β†’ --bar enables it + opt = f"--{short}" + dest = short + parser.add_argument(opt, action="store_true", dest=dest, help=desc) + spec[name] = {"dest": dest, "default": False, "kind": "bool_false"} + + else: + # Explicit: MODE_XYZ: null β†’ --xyz true|false + opt = f"--{short}" + dest = short + parser.add_argument(opt, choices=["true", "false"], dest=dest, help=desc) + spec[name] = {"dest": dest, "default": None, "kind": "explicit"} + + return spec + + +def build_modes_from_args( + spec: Dict[str, Dict[str, Any]], args_namespace: argparse.Namespace +) -> Dict[str, Any]: + """Resolve CLI arguments into a MODE_* dictionary.""" + modes: Dict[str, Any] = {} + + for mode_name, info in spec.items(): + dest = info["dest"] + kind = info["kind"] + value = getattr(args_namespace, dest, None) + + if kind == "bool_true": + modes[mode_name] = False if value else True + + elif kind == "bool_false": + modes[mode_name] = True if value else False + + else: # explicit + if value is not None: + modes[mode_name] = (value == "true") + + return modes + + +# -------------------------------------------------------------------------------------- +# Main entrypoint +# -------------------------------------------------------------------------------------- + +def main() -> None: + parser = argparse.ArgumentParser( + description="Deploy the Infinito.Nexus stack using ansible-playbook." + ) + + # Standard arguments + parser.add_argument("inventory", help="Path to the inventory file.") + parser.add_argument("-l", "--limit", help="Limit execution to certain hosts or groups.") + parser.add_argument( + "-T", "--host-type", choices=["server", "desktop"], default="server", + help="Specify target type: server or desktop." + ) + parser.add_argument( + "-p", "--password-file", + help="Vault password file for encrypted variables." 
+ ) + parser.add_argument("-B", "--skip-build", action="store_true", help="Skip build phase.") + parser.add_argument("-t", "--skip-tests", action="store_true", help="Skip test phase.") + parser.add_argument( + "-i", "--id", nargs="+", default=[], dest="id", + help="List of application_ids for partial deployment." + ) + parser.add_argument( + "-v", "--verbose", action="count", default=0, + help="Increase verbosity (e.g. -vvv)." + ) + parser.add_argument("--logs", action="store_true", help="Keep logs during cleanup.") + parser.add_argument("--diff", action="store_true", help="Enable Ansible diff mode.") + + # Dynamic MODE_* parsing + modes_yaml_path = os.path.join(REPO_ROOT, "group_vars", "all", "01_modes.yml") + modes_meta = load_modes_from_yaml(modes_yaml_path) + modes_spec = add_dynamic_mode_args(parser, modes_meta) + + args = parser.parse_args() + + # Validate application IDs + validate_application_ids(args.inventory, args.id) + + # Build final mode map + modes = build_modes_from_args(modes_spec, args) + modes["MODE_LOGS"] = args.logs + modes["host_type"] = args.host_type + + # Run playbook + run_ansible_playbook( + inventory=args.inventory, + modes=modes, + limit=args.limit, + allowed_applications=args.id, + password_file=args.password_file, + verbose=args.verbose, + skip_build=args.skip_build, + skip_tests=args.skip_tests, + logs=args.logs, + diff=args.diff, + ) + + +if __name__ == "__main__": + main() diff --git a/build/lib/cli/encrypt/__init__.py b/build/lib/cli/encrypt/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/build/lib/cli/encrypt/inventory.py b/build/lib/cli/encrypt/inventory.py new file mode 100644 index 00000000..c909a7d1 --- /dev/null +++ b/build/lib/cli/encrypt/inventory.py @@ -0,0 +1,66 @@ +import argparse +import subprocess +import sys +from pathlib import Path +import yaml +from typing import Dict, Any +from module_utils.handler.vault import VaultHandler, VaultScalar +from module_utils.handler.yaml import YamlHandler +from yaml.dumper import SafeDumper + +def ask_for_confirmation(key: str) -> bool: + """Prompt the user for confirmation to overwrite an existing value.""" + confirmation = input(f"Do you want to encrypt the value for '{key}'? (y/n): ").strip().lower() + return confirmation == 'y' + + +def encrypt_recursively(data: Any, vault_handler: VaultHandler, ask_confirmation: bool = True, prefix: str = "") -> Any: + """Recursively encrypt values in the data.""" + if isinstance(data, dict): + for key, value in data.items(): + new_prefix = f"{prefix}.{key}" if prefix else key + data[key] = encrypt_recursively(value, vault_handler, ask_confirmation, new_prefix) + elif isinstance(data, list): + for i, item in enumerate(data): + data[i] = encrypt_recursively(item, vault_handler, ask_confirmation, prefix) + elif isinstance(data, str): + # Only encrypt if it's not already vaulted + if not data.lstrip().startswith("$ANSIBLE_VAULT"): + if ask_confirmation: + # Ask for confirmation before encrypting if not `--all` + if not ask_for_confirmation(prefix): + print(f"Skipping encryption for '{prefix}'.") + return data + encrypted_value = vault_handler.encrypt_string(data, prefix) + lines = encrypted_value.splitlines() + indent = len(lines[1]) - len(lines[1].lstrip()) + body = "\n".join(line[indent:] for line in lines[1:]) + return VaultScalar(body) # Store encrypted value as VaultScalar + return data + + +def main(): + parser = argparse.ArgumentParser( + description="Encrypt all fields, ask for confirmation unless --all is specified." 
+ ) + parser.add_argument("--inventory-file", required=True, help="Host vars file to update") + parser.add_argument("--vault-password-file", required=True, help="Vault password file") + parser.add_argument("--all", action="store_true", help="Encrypt all fields without confirmation") + args = parser.parse_args() + + # Initialize the VaultHandler and load the inventory + vault_handler = VaultHandler(vault_password_file=args.vault_password_file) + updated_inventory = YamlHandler.load_yaml(Path(args.inventory_file)) + + # 1) Encrypt all fields recursively + updated_inventory = encrypt_recursively(updated_inventory, vault_handler, ask_confirmation=not args.all) + + # 2) Save the updated inventory to file + with open(args.inventory_file, "w", encoding="utf-8") as f: + yaml.dump(updated_inventory, f, sort_keys=False, Dumper=SafeDumper) + + print(f"βœ… Inventory selectively vaulted β†’ {args.inventory_file}") + + +if __name__ == "__main__": + main() diff --git a/build/lib/cli/fix/__init__.py b/build/lib/cli/fix/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/build/lib/cli/fix/ini_py.py b/build/lib/cli/fix/ini_py.py new file mode 100644 index 00000000..491ee0d0 --- /dev/null +++ b/build/lib/cli/fix/ini_py.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python3 + +""" +This script creates __init__.py files in every subdirectory under the specified +folder relative to the project root. +""" + +import os +import argparse + + +def create_init_files(root_folder): + """ + Walk through all subdirectories of root_folder and create an __init__.py file + in each directory if it doesn't already exist. + """ + for dirpath, dirnames, filenames in os.walk(root_folder): + init_file = os.path.join(dirpath, '__init__.py') + if not os.path.exists(init_file): + open(init_file, 'w').close() + print(f"Created: {init_file}") + else: + print(f"Skipped (already exists): {init_file}") + + +def main(): + parser = argparse.ArgumentParser( + description='Create __init__.py files in every subdirectory.' + ) + parser.add_argument( + 'folder', + help='Relative path to the target folder (e.g., cli/fix)' + ) + args = parser.parse_args() + + # Determine the absolute path based on the current working directory + root_folder = os.path.abspath(args.folder) + + if not os.path.isdir(root_folder): + print(f"Error: The folder '{args.folder}' does not exist or is not a directory.") + exit(1) + + create_init_files(root_folder) + + +if __name__ == '__main__': + main() diff --git a/build/lib/cli/fix/move_unnecessary_dependencies.py b/build/lib/cli/fix/move_unnecessary_dependencies.py new file mode 100644 index 00000000..93d8843c --- /dev/null +++ b/build/lib/cli/fix/move_unnecessary_dependencies.py @@ -0,0 +1,480 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +""" +Move unnecessary meta dependencies to guarded include_role/import_role +for better performance, while preserving YAML comments, quotes, and layout. + +Heuristic (matches tests/integration/test_unnecessary_role_dependencies.py): +- A dependency is considered UNNECESSARY if: + * The consumer does NOT use provider variables in defaults/vars/handlers + (no early-var need), AND + * In tasks, any usage of provider vars or provider-handler notifications + occurs only AFTER an include/import of the provider in the same file, + OR there is no usage at all. + +Action: +- Remove such dependencies from roles//meta/main.yml. +- Prepend a guarded include block to roles//tasks/01_core.yml (preferred) + or roles//tasks/main.yml if 01_core.yml is absent. 
+- If multiple dependencies are moved for a role, use a loop over include_role. + +Notes: +- Creates .bak backups for modified YAML files. +- Requires ruamel.yaml to preserve comments/quotes everywhere. +""" + +import argparse +import glob +import os +import re +import shutil +import sys +from typing import Dict, Set, List, Tuple, Optional + +# --- Require ruamel.yaml for full round-trip preservation --- +try: + from ruamel.yaml import YAML + from ruamel.yaml.comments import CommentedMap, CommentedSeq + from ruamel.yaml.scalarstring import SingleQuotedScalarString + _HAVE_RUAMEL = True +except Exception: + _HAVE_RUAMEL = False + +if not _HAVE_RUAMEL: + print("[ERR] ruamel.yaml is required to preserve comments/quotes. Install with: pip install ruamel.yaml", file=sys.stderr) + sys.exit(3) + +yaml_rt = YAML() +yaml_rt.preserve_quotes = True +yaml_rt.width = 10**9 # prevent line wrapping + +# ---------------- Utilities ---------------- + +def _backup(path: str): + if os.path.exists(path): + shutil.copy2(path, path + ".bak") + +def read_text(path: str) -> str: + try: + with open(path, "r", encoding="utf-8") as f: + return f.read() + except Exception: + return "" + +def load_yaml_rt(path: str): + try: + with open(path, "r", encoding="utf-8") as f: + data = yaml_rt.load(f) + return data if data is not None else CommentedMap() + except FileNotFoundError: + return CommentedMap() + except Exception as e: + print(f"[WARN] Failed to parse YAML: {path}: {e}", file=sys.stderr) + return CommentedMap() + +def dump_yaml_rt(data, path: str): + _backup(path) + with open(path, "w", encoding="utf-8") as f: + yaml_rt.dump(data, f) + +def roles_root(project_root: str) -> str: + return os.path.join(project_root, "roles") + +def iter_role_dirs(project_root: str) -> List[str]: + root = roles_root(project_root) + return [d for d in glob.glob(os.path.join(root, "*")) if os.path.isdir(d)] + +def role_name_from_dir(role_dir: str) -> str: + return os.path.basename(role_dir.rstrip(os.sep)) + +def path_if_exists(*parts) -> Optional[str]: + p = os.path.join(*parts) + return p if os.path.exists(p) else None + +def gather_yaml_files(base: str, patterns: List[str]) -> List[str]: + files: List[str] = [] + for pat in patterns: + files.extend(glob.glob(os.path.join(base, pat), recursive=True)) + return [f for f in files if os.path.isfile(f)] + +def sq(v: str): + """Return a single-quoted scalar (ruamel) for consistent quoting.""" + return SingleQuotedScalarString(v) + +# ---------------- Providers: vars & handlers ---------------- + +def flatten_keys(data) -> Set[str]: + out: Set[str] = set() + if isinstance(data, dict): + for k, v in data.items(): + if isinstance(k, str): + out.add(k) + out |= flatten_keys(v) + elif isinstance(data, list): + for item in data: + out |= flatten_keys(item) + return out + +def collect_role_defined_vars(role_dir: str) -> Set[str]: + """Vars a role 'provides': defaults/vars keys + set_fact keys in tasks.""" + provided: Set[str] = set() + + for rel in ("defaults/main.yml", "vars/main.yml"): + p = path_if_exists(role_dir, rel) + if p: + data = load_yaml_rt(p) + provided |= flatten_keys(data) + + # set_fact keys + task_files = gather_yaml_files(os.path.join(role_dir, "tasks"), ["**/*.yml", "*.yml"]) + for tf in task_files: + data = load_yaml_rt(tf) + if isinstance(data, list): + for task in data: + if isinstance(task, dict) and "set_fact" in task and isinstance(task["set_fact"], dict): + provided |= set(task["set_fact"].keys()) + + noisy = {"when", "name", "vars", "tags", "register"} + return {v for v 
in provided if isinstance(v, str) and v and v not in noisy}
+
+def collect_role_handler_names(role_dir: str) -> Set[str]:
+    """Handler names defined by a role (for notify detection)."""
+    handler_file = path_if_exists(role_dir, "handlers/main.yml")
+    if not handler_file:
+        return set()
+    data = load_yaml_rt(handler_file)
+    names: Set[str] = set()
+    if isinstance(data, list):
+        for task in data:
+            if isinstance(task, dict):
+                nm = task.get("name")
+                if isinstance(nm, str) and nm.strip():
+                    names.add(nm.strip())
+    return names
+
+# ---------------- Consumers: usage scanning ----------------
+
+def find_var_positions(text: str, varname: str) -> List[int]:
+    """Return byte offsets for occurrences of varname (word-ish boundary)."""
+    positions: List[int] = []
+    if not varname:
+        return positions
+    # Match the variable name on word-ish boundaries.
+    pattern = re.compile(rf"(?<![\w_]){re.escape(varname)}(?![\w_])")
+    for m in pattern.finditer(text):
+        positions.append(m.start())
+    return positions
+
+def first_var_use_offset_in_text(text: str, provided_vars: Set[str]) -> Optional[int]:
+    first: Optional[int] = None
+    for v in provided_vars:
+        for off in find_var_positions(text, v):
+            if first is None or off < first:
+                first = off
+    return first
+
+def first_include_offset_for_role(text: str, producer_role: str) -> Optional[int]:
+    """
+    Find earliest include/import of a given role in this YAML text.
+    Handles compact dict and block styles.
+    """
+    pattern = re.compile(
+        r"(include_role|import_role)\s*:\s*\{[^}]*\bname\s*:\s*['\"]?" +
+        re.escape(producer_role) + r"['\"]?[^}]*\}"
+        r"|"
+        r"(include_role|import_role)\s*:\s*\n(?:\s+[a-z_]+\s*:\s*.*\n)*\s*name\s*:\s*['\"]?" +
+        re.escape(producer_role) + r"['\"]?",
+        re.IGNORECASE,
+    )
+    m = pattern.search(text)
+    return m.start() if m else None
+
+def find_notify_offsets_for_handlers(text: str, handler_names: Set[str]) -> List[int]:
+    """
+    Heuristic: for each handler name, find occurrences where 'notify' appears within
+    the preceding ~200 chars. Works for single string or list-style notify blocks.
+    """
+    if not handler_names:
+        return []
+    offsets: List[int] = []
+    for h in handler_names:
+        for m in re.finditer(re.escape(h), text):
+            start = m.start()
+            back = max(0, start - 200)
+            context = text[back:start]
+            if re.search(r"notify\s*:", context):
+                offsets.append(start)
+    return sorted(offsets)
+
+def parse_meta_dependencies(role_dir: str) -> List[str]:
+    meta = path_if_exists(role_dir, "meta/main.yml")
+    if not meta:
+        return []
+    data = load_yaml_rt(meta)
+    dd = data.get("dependencies")
+    deps: List[str] = []
+    if isinstance(dd, list):
+        for item in dd:
+            if isinstance(item, str):
+                deps.append(item)
+            elif isinstance(item, dict) and "role" in item:
+                deps.append(str(item["role"]))
+            elif isinstance(item, dict) and "name" in item:
+                deps.append(str(item["name"]))
+    return deps
+
+# ---------------- Fix application ----------------
+
+def sanitize_run_once_var(role_name: str) -> str:
+    """
+    Generate run_once variable name from role name.
+    Example: 'sys-front-inj-logout' -> 'run_once_sys_front_inj_logout'
+    """
+    return "run_once_" + role_name.replace("-", "_")
+
+def build_include_block_yaml(consumer_role: str, moved_deps: List[str]) -> List[dict]:
+    """
+    Build a guarded block that includes one or many roles.
+    This block will be prepended to tasks/01_core.yml or tasks/main.yml.
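+
+    Illustrative shape of the generated block (single-dependency case), where
+    '<dep>' stands for the moved dependency and the guard variable is derived
+    from the consumer role name:
+
+        - name: Load former meta dependencies once
+          block:
+            - name: Include dependency '<dep>'
+              include_role:
+                name: <dep>
+            - set_fact:
+                run_once_<consumer_role>: true
+          when: run_once_<consumer_role> is not defined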
+ """ + guard_var = sanitize_run_once_var(consumer_role) + + if len(moved_deps) == 1: + inner_tasks = [ + { + "name": f"Include dependency '{moved_deps[0]}'", + "include_role": {"name": moved_deps[0]}, + } + ] + else: + inner_tasks = [ + { + "name": "Include dependencies", + "include_role": {"name": "{{ item }}"}, + "loop": moved_deps, + } + ] + + # Always set the run_once fact at the end + inner_tasks.append({"set_fact": {guard_var: True}}) + + # Correct Ansible block structure + block_task = { + "name": "Load former meta dependencies once", + "block": inner_tasks, + "when": f"{guard_var} is not defined", + } + + return [block_task] + +def prepend_tasks(tasks_path: str, new_tasks, dry_run: bool): + """ + Prepend new_tasks (CommentedSeq) to an existing tasks YAML list while preserving comments. + If the file does not exist, create it with new_tasks. + """ + if os.path.exists(tasks_path): + existing = load_yaml_rt(tasks_path) + if isinstance(existing, list): + combined = CommentedSeq() + for item in new_tasks: + combined.append(item) + for item in existing: + combined.append(item) + elif isinstance(existing, dict): + # Rare case: tasks file with a single mapping; coerce to list + combined = CommentedSeq() + for item in new_tasks: + combined.append(item) + combined.append(existing) + else: + combined = new_tasks + else: + os.makedirs(os.path.dirname(tasks_path), exist_ok=True) + combined = new_tasks + + if dry_run: + print(f"[DRY-RUN] Would write {tasks_path} with {len(new_tasks)} prepended task(s).") + return + + dump_yaml_rt(combined, tasks_path) + print(f"[OK] Updated {tasks_path} (prepended {len(new_tasks)} task(s)).") + +def update_meta_remove_deps(meta_path: str, remove: List[str], dry_run: bool): + """ + Remove entries from meta.dependencies while leaving the rest of the file intact. + Quotes, comments, key order, and line breaks are preserved. + Returns True if a change would be made (or was made when not in dry-run). + """ + if not os.path.exists(meta_path): + return False + + doc = load_yaml_rt(meta_path) + deps = doc.get("dependencies") + if not isinstance(deps, list): + return False + + def dep_name(item): + if isinstance(item, dict): + return item.get("role") or item.get("name") + return item + + keep = CommentedSeq() + removed = [] + for item in deps: + name = dep_name(item) + if name in remove: + removed.append(name) + else: + keep.append(item) + + if not removed: + return False + + if keep: + doc["dependencies"] = keep + else: + if "dependencies" in doc: + del doc["dependencies"] + + if dry_run: + print(f"[DRY-RUN] Would rewrite {meta_path}; removed: {', '.join(removed)}") + return True + + dump_yaml_rt(doc, meta_path) + print(f"[OK] Rewrote {meta_path}; removed: {', '.join(removed)}") + return True + +def dependency_is_unnecessary(consumer_dir: str, + consumer_name: str, + producer_name: str, + provider_vars: Set[str], + provider_handlers: Set[str]) -> bool: + """Apply heuristic to decide if we can move this dependency.""" + # 1) Early usage in defaults/vars/handlers? If yes -> necessary + defaults_files = [p for p in [ + path_if_exists(consumer_dir, "defaults/main.yml"), + path_if_exists(consumer_dir, "vars/main.yml"), + path_if_exists(consumer_dir, "handlers/main.yml"), + ] if p] + for p in defaults_files: + text = read_text(p) + if first_var_use_offset_in_text(text, provider_vars) is not None: + return False # needs meta dep + + # 2) Tasks: any usage before include/import? 
If yes -> keep meta dep + task_files = gather_yaml_files(os.path.join(consumer_dir, "tasks"), ["**/*.yml", "*.yml"]) + for p in task_files: + text = read_text(p) + if not text: + continue + include_off = first_include_offset_for_role(text, producer_name) + var_use_off = first_var_use_offset_in_text(text, provider_vars) + notify_offs = find_notify_offsets_for_handlers(text, provider_handlers) + + if var_use_off is not None: + if include_off is None or include_off > var_use_off: + return False # used before include + + for noff in notify_offs: + if include_off is None or include_off > noff: + return False # notify before include + + # If we get here: no early use, and either no usage at all or usage after include + return True + +def process_role(role_dir: str, + providers_index: Dict[str, Tuple[Set[str], Set[str]]], + only_role: Optional[str], + dry_run: bool) -> bool: + """ + Returns True if any change suggested/made for this role. + """ + consumer_name = role_name_from_dir(role_dir) + if only_role and only_role != consumer_name: + return False + + meta_deps = parse_meta_dependencies(role_dir) + if not meta_deps: + return False + + # Build provider vars/handlers accessors + moved: List[str] = [] + for producer in meta_deps: + # Only consider local roles we can analyze + producer_dir = path_if_exists(os.path.dirname(role_dir), producer) or path_if_exists(os.path.dirname(roles_root(os.path.dirname(role_dir))), "roles", producer) + if producer not in providers_index: + # Unknown/external role β†’ skip (we cannot verify safety) + continue + pvars, phandlers = providers_index[producer] + if dependency_is_unnecessary(role_dir, consumer_name, producer, pvars, phandlers): + moved.append(producer) + + if not moved: + return False + + # 1) Remove from meta + meta_path = os.path.join(role_dir, "meta", "main.yml") + update_meta_remove_deps(meta_path, moved, dry_run=dry_run) + + # 2) Prepend include block to tasks/01_core.yml or tasks/main.yml + target_tasks = path_if_exists(role_dir, "tasks/01_core.yml") + if not target_tasks: + target_tasks = os.path.join(role_dir, "tasks", "main.yml") + include_block = build_include_block_yaml(consumer_name, moved) + prepend_tasks(target_tasks, include_block, dry_run=dry_run) + return True + +def build_providers_index(all_roles: List[str]) -> Dict[str, Tuple[Set[str], Set[str]]]: + """ + Map role_name -> (provided_vars, handler_names) + """ + index: Dict[str, Tuple[Set[str], Set[str]]] = {} + for rd in all_roles: + rn = role_name_from_dir(rd) + index[rn] = (collect_role_defined_vars(rd), collect_role_handler_names(rd)) + return index + +def main(): + parser = argparse.ArgumentParser( + description="Move unnecessary meta dependencies to guarded include_role for performance (preserve comments/quotes)." 
+ ) + parser.add_argument( + "--project-root", + default=os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")), + help="Path to project root (default: two levels up from this script).", + ) + parser.add_argument( + "--role", + dest="only_role", + default=None, + help="Only process a specific role name (e.g., 'docker-core').", + ) + parser.add_argument( + "--dry-run", + action="store_true", + help="Analyze and print planned changes without modifying files.", + ) + args = parser.parse_args() + + roles = iter_role_dirs(args.project_root) + if not roles: + print(f"[ERR] No roles found under {roles_root(args.project_root)}", file=sys.stderr) + sys.exit(2) + + providers_index = build_providers_index(roles) + + changed_any = False + for role_dir in roles: + changed = process_role(role_dir, providers_index, args.only_role, args.dry_run) + changed_any = changed_any or changed + + if not changed_any: + print("[OK] No unnecessary meta dependencies to move (per heuristic).") + else: + if args.dry_run: + print("[DRY-RUN] Completed analysis. No files were changed.") + else: + print("[OK] Finished moving unnecessary dependencies.") + +if __name__ == "__main__": + main() diff --git a/build/lib/cli/fix/tabs.py b/build/lib/cli/fix/tabs.py new file mode 100644 index 00000000..efabe3e1 --- /dev/null +++ b/build/lib/cli/fix/tabs.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python3 + +import os +import argparse +from pathlib import Path + +FILES_FIXED = [] + +def fix_tabs_in_file(file_path): + """Replaces tab characters with two spaces in the specified file.""" + with open(file_path, "r", encoding="utf-8") as f: + lines = f.readlines() + + if any('\t' in line for line in lines): + fixed_lines = [line.replace('\t', ' ') for line in lines] + with open(file_path, "w", encoding="utf-8") as f: + f.writelines(fixed_lines) + FILES_FIXED.append(str(file_path)) + +def find_yml_files(path): + """Yield all .yml files under a given path recursively.""" + for file in path.rglob("*.yml"): + if file.is_file(): + yield file + +def main(): + parser = argparse.ArgumentParser( + description="Fix tab characters in all .yml files under a given path (recursively)." + ) + parser.add_argument( + "path", + nargs="?", + default="./", + help="Base path to search for .yml files (default: ./)" + ) + args = parser.parse_args() + + base_path = Path(args.path).resolve() + + if not base_path.exists(): + print(f"❌ Path does not exist: {base_path}") + exit(1) + + print(f"πŸ” Searching for .yml files under: {base_path}\n") + + for yml_file in find_yml_files(base_path): + fix_tabs_in_file(yml_file) + + if FILES_FIXED: + print("βœ… Fixed tab characters in the following files:") + for f in FILES_FIXED: + print(f" - {f}") + else: + print("βœ… No tabs found in any .yml files.") + +if __name__ == "__main__": + main() diff --git a/build/lib/cli/fix/vars_main_files.py b/build/lib/cli/fix/vars_main_files.py new file mode 100644 index 00000000..6a99892b --- /dev/null +++ b/build/lib/cli/fix/vars_main_files.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python3 +""" +Script to ensure each Ansible role under ../roles/ with a given prefix has a vars/main.yml +containing the correct application_id. Can preview actions or overwrite mismatches. 
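+
+Example invocation (one possible way to run it; adjust the path to your checkout):
+
+    python3 cli/fix/vars_main_files.py --prefix web- --preview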
+""" +import argparse +import sys +import yaml +from pathlib import Path + +# Directory containing roles; can be overridden by tests +MODULE_DIR = Path(__file__).resolve().parent +ROLES_DIR = (MODULE_DIR.parent.parent / "roles").resolve() + +def process_role(role_dir: Path, prefix: str, preview: bool, overwrite: bool): + name = role_dir.name + if not name.startswith(prefix): + return + # Expected application_id is role name minus prefix + expected_id = name[len(prefix):] + vars_dir = role_dir / "vars" + vars_file = vars_dir / "main.yml" + if vars_file.exists(): + # Load existing variables + try: + existing = yaml.safe_load(vars_file.read_text()) or {} + except yaml.YAMLError as e: + print(f"Error parsing YAML in {vars_file}: {e}", file=sys.stderr) + return + actual_id = existing.get("application_id") + if actual_id == expected_id: + # Already correct + return + if overwrite: + # Update only application_id + existing["application_id"] = expected_id + if preview: + print(f"[PREVIEW] Would update {vars_file}: application_id -> {expected_id}") + else: + with open(vars_file, "w") as f: + yaml.safe_dump(existing, f, default_flow_style=False, sort_keys=False) + print(f"Updated {vars_file}: application_id -> {expected_id}") + else: + print(f"Mismatch in {vars_file}: application_id='{actual_id}', expected='{expected_id}'") + else: + # Create new vars/main.yml + if preview: + print(f"[PREVIEW] Would create {vars_file} with application_id: {expected_id}") + else: + vars_dir.mkdir(parents=True, exist_ok=True) + content = {"application_id": expected_id} + with open(vars_file, "w") as f: + yaml.safe_dump(content, f, default_flow_style=False, sort_keys=False) + print(f"Created {vars_file} with application_id: {expected_id}") + + +def run(prefix: str, preview: bool = False, overwrite: bool = False): + """ + Ensure vars/main.yml for roles under ROLES_DIR with the given prefix has correct application_id. + """ + for role in sorted(Path(ROLES_DIR).iterdir()): + if role.is_dir(): + process_role(role, prefix, preview, overwrite) + + +def main(): + parser = argparse.ArgumentParser( + description="Ensure vars/main.yml for roles with a given prefix has correct application_id" + ) + parser.add_argument( + "--prefix", required=True, + help="Role name prefix to filter (e.g. 'web-', 'svc-', 'desk-')" + ) + parser.add_argument( + "--preview", action="store_true", + help="Show what would be done without making changes" + ) + parser.add_argument( + "--overwrite", action="store_true", + help="If vars/main.yml exists but application_id mismatches, overwrite only that key" + ) + args = parser.parse_args() + + # Run processing + run(prefix=args.prefix, preview=args.preview, overwrite=args.overwrite) + + +if __name__ == "__main__": + main() diff --git a/build/lib/cli/integration/deploy_localhost.py b/build/lib/cli/integration/deploy_localhost.py new file mode 100644 index 00000000..55866bda --- /dev/null +++ b/build/lib/cli/integration/deploy_localhost.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python3 +""" +Run the full localhost integration flow entirely inside the infinito Docker container, +without writing any artifacts to the host filesystem. +Catches missing schema/config errors during credential vaulting and skips those apps. 
+""" +import subprocess +import os +import sys + +def main(): + repo = os.path.abspath(os.getcwd()) + + bash_script = ''' +set -e + +ART=/integration-artifacts +mkdir -p "$ART" +echo testpassword > "$ART/vaultpw.txt" + +# 1) Generate inventory +python3 -m cli.build.inventory.full \ + --host localhost \ + --inventory-style hostvars \ + --format yaml \ + --output "$ART/inventory.yml" + +# 2) Credentials per-app +apps=$(python3 <&1) || rc=$?; rc=${rc:-0} + + if [ "$rc" -eq 0 ]; then + echo "βœ… Credentials generated for $app" + elif echo "$output" | grep -q "No such file or directory"; then + echo "⚠️ Skipping $app (no schema/config)" + elif echo "$output" | grep -q "Plain algorithm for"; then + # Collect all plain-algo keys + keys=( $(echo "$output" | grep -oP "Plain algorithm for '\K[^']+") ) + overrides=() + for key in "${keys[@]}"; do + if [[ "$key" == *api_key ]]; then + val=$(python3 - << 'PY' +import random, string +print(''.join(random.choices(string.ascii_letters+string.digits, k=32))) +PY +) + elif [[ "$key" == *password ]]; then + val=$(python3 - << 'PY' +import random, string +print(''.join(random.choices(string.ascii_letters+string.digits, k=12))) +PY +) + else + val=$(python3 - << 'PY' +import random, string +print(''.join(random.choices(string.ascii_letters+string.digits, k=16))) +PY +) + fi + echo " β†’ Overriding $key=$val" + overrides+=("--set" "$key=$val") + done + # Retry with overrides + echo "πŸ”„ Retrying with overrides..." + retry_out=$(python3 -m cli.create.credentials \ + --role-path "/repo/roles/$app" \ + --inventory-file "$ART/inventory.yml" \ + --vault-password-file "$ART/vaultpw.txt" \ + "${overrides[@]}" \ + --force 2>&1) || retry_rc=$?; retry_rc=${retry_rc:-0} + if [ "$retry_rc" -eq 0 ]; then + echo "βœ… Credentials generated for $app (with overrides)" + else + echo "❌ Override failed for $app:" + echo "$retry_out" + fi + else + echo "❌ Credential error for $app:" + echo "$output" + fi +done + +# 3) Show generated files +ls -R "$ART" 2>/dev/null + +echo " +===== inventory.yml =====" +cat "$ART/inventory.yml" + +echo " +===== vaultpw.txt =====" +cat "$ART/vaultpw.txt" + +# 4) Deploy +python3 -m cli.deploy \ + "$ART/inventory.yml" \ + --limit localhost \ + --vault-password-file "$ART/vaultpw.txt" \ + --verbose +''' + + cmd = [ + "docker", "run", "--rm", + "-v", f"{repo}:/repo", + "-w", "/repo", + "--entrypoint", "bash", + "infinito:latest", + "-c", bash_script + ] + print(f"\033[96m> {' '.join(cmd)}\033[0m") + rc = subprocess.call(cmd) + sys.exit(rc) + +if __name__ == '__main__': + main() diff --git a/build/lib/cli/make.py b/build/lib/cli/make.py new file mode 100644 index 00000000..b9c19525 --- /dev/null +++ b/build/lib/cli/make.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 +""" +CLI wrapper for Makefile targets within Infinito.Nexus. +Invokes `make` commands in the project root directory. 
+""" +import argparse +import os +import subprocess +import sys + + +def main(): + parser = argparse.ArgumentParser( + prog='infinito make', + description='Run Makefile targets for Infinito.Nexus project' + ) + parser.add_argument( + 'targets', + nargs=argparse.REMAINDER, + help='Make targets and options to pass to `make`' + ) + args = parser.parse_args() + + # Default to 'build' if no target is specified + make_args = args.targets or ['build'] + + # Determine repository root (one level up from cli/) + script_dir = os.path.dirname(os.path.realpath(__file__)) + repo_root = os.path.abspath(os.path.join(script_dir, os.pardir)) + + # Check for Makefile + makefile_path = os.path.join(repo_root, 'Makefile') + if not os.path.isfile(makefile_path): + print(f"Error: Makefile not found in {repo_root}", file=sys.stderr) + sys.exit(1) + + # Invoke make in repo root + cmd = ['make'] + make_args + try: + result = subprocess.run(cmd, cwd=repo_root) + sys.exit(result.returncode) + except FileNotFoundError: + print("Error: 'make' command not found. Please install make.", file=sys.stderr) + sys.exit(1) + except KeyboardInterrupt: + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/build/lib/cli/meta/__init__.py b/build/lib/cli/meta/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/build/lib/cli/meta/applications/__init__.py b/build/lib/cli/meta/applications/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/build/lib/cli/meta/applications/all.py b/build/lib/cli/meta/applications/all.py new file mode 100644 index 00000000..58292ee2 --- /dev/null +++ b/build/lib/cli/meta/applications/all.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 +# cli/meta/applications/all.py + +import argparse +import sys + +# Import the Ansible filter implementation +try: + from filter_plugins.get_all_application_ids import get_all_application_ids +except ImportError: + sys.stderr.write("Filter plugin `get_all_application_ids` not found. Ensure `filter_plugins/get_all_application_ids.py` is in your PYTHONPATH.\n") + sys.exit(1) + + +def find_application_ids(): + """ + Legacy function retained for reference. + Delegates to the `get_all_application_ids` filter plugin. + """ + return get_all_application_ids() + + +def main(): + parser = argparse.ArgumentParser( + description='Output a list of all application_id values defined in roles/*/vars/main.yml' + ) + parser.parse_args() + + try: + ids = find_application_ids() + except Exception as e: + sys.stderr.write(f"Error retrieving application IDs: {e}\n") + sys.exit(1) + + for app_id in ids: + print(app_id) + + +if __name__ == '__main__': + main() diff --git a/build/lib/cli/meta/applications/in_group_deps.py b/build/lib/cli/meta/applications/in_group_deps.py new file mode 100644 index 00000000..bc7c5143 --- /dev/null +++ b/build/lib/cli/meta/applications/in_group_deps.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python3 +""" +CLI wrapper for applications_if_group_and_deps filter. +""" +import argparse +import sys +import os +import yaml +from filter_plugins.applications_if_group_and_deps import FilterModule + + +def find_role_dirs_by_app_id(app_ids, roles_dir): + """ + Map application_ids to role directory names based on vars/main.yml in each role. 
+ """ + mapping = {} + for role in os.listdir(roles_dir): + role_path = os.path.join(roles_dir, role) + vars_file = os.path.join(role_path, 'vars', 'main.yml') + if not os.path.isfile(vars_file): + continue + try: + with open(vars_file) as f: + data = yaml.safe_load(f) or {} + except Exception: + continue + app_id = data.get('application_id') + if isinstance(app_id, str) and app_id: + mapping[app_id] = role + # Translate each requested app_id to role dir if exists + dirs = [] + for gid in app_ids: + if gid in mapping: + dirs.append(mapping[gid]) + else: + # keep original if it matches a directory + if os.path.isdir(os.path.join(roles_dir, gid)): + dirs.append(gid) + return dirs + + +def main(): + parser = argparse.ArgumentParser( + description="Filter applications by group names (role dirs or application_ids) and their recursive role dependencies." + ) + parser.add_argument( + "-a", "--applications", + type=str, + required=True, + help="Path to YAML file defining the applications dict." + ) + parser.add_argument( + "-g", "--groups", + nargs='+', + required=True, + help="List of group names to filter by (role directory names or application_ids)." + ) + args = parser.parse_args() + + # Load applications + try: + with open(args.applications) as f: + data = yaml.safe_load(f) + except Exception as e: + print(f"Error loading applications file: {e}", file=sys.stderr) + sys.exit(1) + + # Unwrap under 'applications' key if present + if isinstance(data, dict) and 'applications' in data and isinstance(data['applications'], dict): + applications = data['applications'] + else: + applications = data + + if not isinstance(applications, dict): + print( + f"Expected applications YAML to contain a mapping (or 'applications' mapping), got {type(applications).__name__}", + file=sys.stderr + ) + sys.exit(1) + + # Determine roles_dir relative to project root + script_dir = os.path.dirname(__file__) + project_root = os.path.abspath(os.path.join(script_dir, '..', '..', '..')) + roles_dir = os.path.join(project_root, 'roles') + + # Map user-provided groups (which may be application_ids) to role directory names + group_dirs = find_role_dirs_by_app_id(args.groups, roles_dir) + if not group_dirs: + print(f"No matching role directories found for groups: {args.groups}", file=sys.stderr) + sys.exit(1) + + # Run filter using role directory names + try: + filtered = FilterModule().applications_if_group_and_deps( + applications, + group_dirs + ) + except Exception as e: + print(f"Error running filter: {e}", file=sys.stderr) + sys.exit(1) + + # Output result as YAML + print(yaml.safe_dump(filtered, default_flow_style=False)) + + +if __name__ == '__main__': + main() diff --git a/build/lib/cli/meta/applications/invokable.py b/build/lib/cli/meta/applications/invokable.py new file mode 100644 index 00000000..fdfc2915 --- /dev/null +++ b/build/lib/cli/meta/applications/invokable.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python3 +# cli/meta/applications/invokable.py + +import argparse +import sys +import os + +# Import filter plugin for get_all_invokable_apps +try: + from filter_plugins.get_all_invokable_apps import get_all_invokable_apps +except ImportError: + # Try to adjust sys.path if running outside Ansible + sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) + try: + from filter_plugins.get_all_invokable_apps import get_all_invokable_apps + except ImportError: + sys.stderr.write("Could not import filter_plugins.get_all_invokable_apps. 
Check your PYTHONPATH.\n") + sys.exit(1) + +def main(): + parser = argparse.ArgumentParser( + description='List all invokable applications (application_ids) based on invokable paths from categories.yml and available roles.' + ) + parser.add_argument( + '-c', '--categories-file', + default=os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'roles', 'categories.yml')), + help='Path to roles/categories.yml (default: roles/categories.yml at project root)' + ) + parser.add_argument( + '-r', '--roles-dir', + default=os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'roles')), + help='Path to roles/ directory (default: roles/ at project root)' + ) + args = parser.parse_args() + + try: + result = get_all_invokable_apps( + categories_file=args.categories_file, + roles_dir=args.roles_dir + ) + except Exception as e: + sys.stderr.write(f"Error: {e}\n") + sys.exit(1) + + for app_id in result: + print(app_id) + +if __name__ == '__main__': + main() diff --git a/build/lib/cli/meta/applications/role_name.py b/build/lib/cli/meta/applications/role_name.py new file mode 100644 index 00000000..3d23825d --- /dev/null +++ b/build/lib/cli/meta/applications/role_name.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python3 +""" +CLI Script: get_role_folder_cli.py + +This script determines the appropriate Ansible role folder based on the provided application_id +by inspecting each role's vars/main.yml within the roles directory. By default, it assumes the +roles directory is located at the project root, relative to this script's location. + +""" +import os +import sys +import argparse +import yaml + + +def get_role(application_id, roles_path): + """ + Find the role directory under `roles_path` whose vars/main.yml contains the specified application_id. + + :param application_id: The application_id to match. + :param roles_path: Path to the roles directory. + :return: The name of the matching role directory. + :raises RuntimeError: If no match is found or if an error occurs while reading files. 
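+
+    Example (illustrative): if roles/web-app-nextcloud/vars/main.yml contains
+    'application_id: nextcloud', then get_role('nextcloud', roles_path) returns
+    'web-app-nextcloud'.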
+ """ + if not os.path.isdir(roles_path): + raise RuntimeError(f"Roles path not found: {roles_path}") + + for role in sorted(os.listdir(roles_path)): + role_dir = os.path.join(roles_path, role) + vars_file = os.path.join(role_dir, 'vars', 'main.yml') + if os.path.isfile(vars_file): + try: + with open(vars_file, 'r') as f: + data = yaml.safe_load(f) or {} + except Exception as e: + raise RuntimeError(f"Failed to load {vars_file}: {e}") + + if data.get('application_id') == application_id: + return role + + raise RuntimeError(f"No role found with application_id '{application_id}' in {roles_path}") + + +def main(): + parser = argparse.ArgumentParser( + description='Determine the Ansible role folder by application_id' + ) + parser.add_argument( + 'application_id', + help='The application_id defined in vars/main.yml to search for' + ) + parser.add_argument( + '-r', '--roles-path', + default=os.path.join( + os.path.dirname(os.path.realpath(__file__)), + os.pardir, os.pardir, os.pardir, + 'roles' + ), + help='Path to the roles directory (default: roles/ at project root)' + ) + + args = parser.parse_args() + + try: + folder = get_role(args.application_id, args.roles_path) + print(folder) + sys.exit(0) + except RuntimeError as err: + print(f"Error: {err}", file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/build/lib/cli/meta/categories/invokable.py b/build/lib/cli/meta/categories/invokable.py new file mode 100644 index 00000000..8d0d199c --- /dev/null +++ b/build/lib/cli/meta/categories/invokable.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python3 +""" +CLI for extracting invokable or non-invokable role paths from a nested roles YAML file using argparse. +Assumes a default roles file at the project root if none is provided. +""" + +import os +import sys + +# ─── Determine project root ─── +if "__file__" in globals(): + project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +else: + project_root = os.getcwd() + +# Ensure project root on PYTHONPATH so 'filter_plugins' can be imported +sys.path.insert(0, project_root) + +import argparse +import yaml +from filter_plugins.invokable_paths import get_invokable_paths, get_non_invokable_paths + + +def main(): + parser = argparse.ArgumentParser( + description="Extract invokable or non-invokable role paths from a nested roles YAML file." + ) + parser.add_argument( + "roles_file", + nargs='?', + default=None, + help="Path to the roles YAML file (default: roles/categories.yml at project root)" + ) + parser.add_argument( + "--suffix", "-s", + help="Optional suffix to append to each path.", + default=None + ) + + mode_group = parser.add_mutually_exclusive_group() + mode_group.add_argument( + "--non-invokable", "-n", + action='store_true', + help="List paths where 'invokable' is False or not set." + ) + mode_group.add_argument( + "--invokable", "-i", + action='store_true', + help="List paths where 'invokable' is True. 
(default behavior)" + ) + + args = parser.parse_args() + + # Default to invokable if neither flag is provided + list_non = args.non_invokable + list_inv = args.invokable or not (args.non_invokable or args.invokable) + + try: + if list_non: + paths = get_non_invokable_paths(args.roles_file, args.suffix) + else: + paths = get_invokable_paths(args.roles_file, args.suffix) + except FileNotFoundError as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + except yaml.YAMLError as e: + print(f"Error parsing YAML: {e}", file=sys.stderr) + sys.exit(1) + except ValueError as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + + for p in paths: + print(p) + + +if __name__ == "__main__": + main() diff --git a/build/lib/cli/meta/j2/__init__.py b/build/lib/cli/meta/j2/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/build/lib/cli/meta/j2/compiler.py b/build/lib/cli/meta/j2/compiler.py new file mode 100644 index 00000000..e3030f8f --- /dev/null +++ b/build/lib/cli/meta/j2/compiler.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python3 +import argparse +import os +import re +import sys + +# Projekt-Root: vier Ebenen ΓΌber diesem File +PROJECT_ROOT = os.path.dirname( + os.path.dirname( + os.path.dirname( + os.path.dirname(__file__) + ) + ) +) + +INCLUDE_RE = re.compile(r"^(\s*)\{%\s*include\s*['\"]([^'\"]+)['\"]\s*%\}") + +def expand_includes(rel_path, seen=None): + """ + Liest die Datei rel_path (relative zum PROJECT_ROOT), + ersetzt rekursiv alle "{% include 'path' %}"-Zeilen durch den + Inhalt der jeweiligen Datei (mit gleicher EinrΓΌckung). + """ + if seen is None: + seen = set() + rp = rel_path.replace("\\", "/") + if rp in seen: + raise RuntimeError(f"Circular include detected: {rp}") + seen.add(rp) + + abs_path = os.path.join(PROJECT_ROOT, rp) + if not os.path.isfile(abs_path): + raise FileNotFoundError(f"Template not found: {rp}") + + output_lines = [] + for line in open(abs_path, encoding="utf-8"): + m = INCLUDE_RE.match(line) + if not m: + output_lines.append(line.rstrip("\n")) + else: + indent, inc_rel = m.group(1), m.group(2) + # rekursiver Aufruf + for inc_line in expand_includes(inc_rel, seen): + output_lines.append(indent + inc_line) + seen.remove(rp) + return output_lines + +def parse_args(): + p = argparse.ArgumentParser( + description="Expand all {% include '...' %} directives in a Jinja2 template (no variable rendering)." 
+ ) + p.add_argument("template", help="Template path relative to project root") + p.add_argument( + "--out", + help="If given, write output to this file instead of stdout", + default=None + ) + return p.parse_args() + +def main(): + args = parse_args() + + try: + lines = expand_includes(args.template) + text = "\n".join(lines) + if args.out: + with open(args.out, "w", encoding="utf-8") as f: + f.write(text + "\n") + else: + print(text) + except Exception as e: + sys.stderr.write(f"Error: {e}\n") + sys.exit(1) + +if __name__ == "__main__": + main() diff --git a/build/lib/cli/setup/__init__.py b/build/lib/cli/setup/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/build/lib/cli/setup/applications.py b/build/lib/cli/setup/applications.py new file mode 100644 index 00000000..98d2ab6d --- /dev/null +++ b/build/lib/cli/setup/applications.py @@ -0,0 +1,212 @@ +#!/usr/bin/env python3 +import argparse +import yaml +import sys +import time +from pathlib import Path + +# Ensure project root on PYTHONPATH so module_utils is importable +repo_root = Path(__file__).resolve().parent.parent.parent +sys.path.insert(0, str(repo_root)) + +# Add lookup_plugins for application_gid +plugin_path = repo_root / "lookup_plugins" +sys.path.insert(0, str(plugin_path)) + +from module_utils.dict_renderer import DictRenderer +from application_gid import LookupModule + +def load_yaml_file(path: Path) -> dict: + if not path.exists(): + return {} + with path.open("r", encoding="utf-8") as f: + return yaml.safe_load(f) or {} + +class DefaultsGenerator: + def __init__(self, roles_dir: Path, output_file: Path, verbose: bool, timeout: float): + self.roles_dir = roles_dir + self.output_file = output_file + self.verbose = verbose + self.renderer = DictRenderer(verbose=verbose, timeout=timeout) + self.gid_lookup = LookupModule() + + def log(self, message: str): + if self.verbose: + print(f"[DefaultsGenerator] {message}") + + def run(self): + result = {"defaults_applications": {}} + + for role_dir in sorted(self.roles_dir.iterdir()): + role_name = role_dir.name + vars_main = role_dir / "vars" / "main.yml" + config_file = role_dir / "config" / "main.yml" + + if not vars_main.exists(): + self.log(f"Skipping {role_name}: vars/main.yml missing") + continue + + vars_data = load_yaml_file(vars_main) + application_id = vars_data.get("application_id") + if not application_id: + self.log(f"Skipping {role_name}: application_id not defined") + continue + + if not config_file.exists(): + self.log(f"Config missing for {role_name}, adding empty defaults for '{application_id}'") + result["defaults_applications"][application_id] = {} + continue + + config_data = load_yaml_file(config_file) + if not config_data: + # Empty or null config β†’ still register the application with empty defaults + self.log(f"Empty config for {role_name}, adding empty defaults for '{application_id}'") + result["defaults_applications"][application_id] = {} + continue + + # Existing non-empty config: keep current behavior + try: + gid_number = self.gid_lookup.run([application_id], roles_dir=str(self.roles_dir))[0] + except Exception as e: + print(f"Warning: failed to determine gid for '{application_id}': {e}", file=sys.stderr) + sys.exit(1) + + config_data["group_id"] = gid_number + result["defaults_applications"][application_id] = config_data + + # Inject users mapping as Jinja2 references (unchanged) + users_meta = load_yaml_file(role_dir / "users" / "main.yml") + users_data = users_meta.get("users", {}) + transformed = {user: f"{{{{ 
users[\"{user}\"] }}}}" for user in users_data} + if transformed: + result["defaults_applications"][application_id]["users"] = transformed + + # Render placeholders in entire result context + self.log("Starting placeholder rendering...") + try: + result = self.renderer.render(result) + except Exception as e: + print(f"Error during rendering: {e}", file=sys.stderr) + sys.exit(1) + + # Sort applications by application key for stable output + apps = result.get("defaults_applications", {}) + if isinstance(apps, dict) and apps: + result["defaults_applications"] = { + k: apps[k] for k in sorted(apps.keys()) + } + + # Write output + self.output_file.parent.mkdir(parents=True, exist_ok=True) + with self.output_file.open("w", encoding="utf-8") as f: + yaml.dump(result, f, sort_keys=False) + + # Print location of generated file (absolute if not under cwd) + try: + rel = self.output_file.relative_to(Path.cwd()) + except ValueError: + rel = self.output_file + print(f"βœ… Generated: {rel}") + + def test_empty_config_mapping_adds_empty_defaults(self): + """ + If a role has vars/main.yml and config/main.yml exists but contains an + empty mapping ({}), the generator must still emit an empty-dict entry + for that application_id. + """ + role_empty_cfg = self.roles_dir / "role-empty-config" + (role_empty_cfg / "vars").mkdir(parents=True, exist_ok=True) + (role_empty_cfg / "config").mkdir(parents=True, exist_ok=True) + + # application_id is defined… + (role_empty_cfg / "vars" / "main.yml").write_text( + "application_id: emptycfg\n", + encoding="utf-8", + ) + # …but config is an explicit empty mapping + (role_empty_cfg / "config" / "main.yml").write_text( + "{}\n", + encoding="utf-8", + ) + + result = subprocess.run( + [ + "python3", + str(self.script_path), + "--roles-dir", + str(self.roles_dir), + "--output-file", + str(self.output_file), + ], + capture_output=True, + text=True, + ) + self.assertEqual(result.returncode, 0, msg=result.stderr) + + data = yaml.safe_load(self.output_file.read_text()) + apps = data.get("defaults_applications", {}) + + self.assertIn("emptycfg", apps) + self.assertEqual( + apps["emptycfg"], + {}, + msg="Role with {} config should produce an empty defaults mapping", + ) + + def test_empty_config_file_adds_empty_defaults(self): + """ + If a role has vars/main.yml and config/main.yml exists but is an empty + file (or only whitespace), the generator must still emit an empty-dict + entry for that application_id. 
+ """ + role_empty_file = self.roles_dir / "role-empty-config-file" + (role_empty_file / "vars").mkdir(parents=True, exist_ok=True) + (role_empty_file / "config").mkdir(parents=True, exist_ok=True) + + (role_empty_file / "vars" / "main.yml").write_text( + "application_id: emptyfileapp\n", + encoding="utf-8", + ) + # Create an empty file (no YAML content at all) + (role_empty_file / "config" / "main.yml").write_text( + "", + encoding="utf-8", + ) + + result = subprocess.run( + [ + "python3", + str(self.script_path), + "--roles-dir", + str(self.roles_dir), + "--output-file", + str(self.output_file), + ], + capture_output=True, + text=True, + ) + self.assertEqual(result.returncode, 0, msg=result.stderr) + + data = yaml.safe_load(self.output_file.read_text()) + apps = data.get("defaults_applications", {}) + + self.assertIn("emptyfileapp", apps) + self.assertEqual( + apps["emptyfileapp"], + {}, + msg="Role with empty config file should produce an empty defaults mapping", + ) + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Generate defaults_applications YAML...") + parser.add_argument("--roles-dir", default="roles", help="Path to the roles directory") + parser.add_argument("--output-file", required=True, help="Path to output YAML file") + parser.add_argument("--verbose", action="store_true", help="Enable verbose logging") + parser.add_argument("--timeout", type=float, default=10.0, help="Timeout for rendering") + + args = parser.parse_args() + cwd = Path.cwd() + roles_dir = (cwd / args.roles_dir).resolve() + output_file = (cwd / args.output_file).resolve() + + DefaultsGenerator(roles_dir, output_file, args.verbose, args.timeout).run() diff --git a/build/lib/cli/setup/users.py b/build/lib/cli/setup/users.py new file mode 100644 index 00000000..cc7801d3 --- /dev/null +++ b/build/lib/cli/setup/users.py @@ -0,0 +1,253 @@ +#!/usr/bin/env python3 +import os +import sys +import argparse +import yaml +import glob +from collections import OrderedDict + + +def represent_str(dumper, data): + """ + Custom YAML string representer that forces double quotes around any string + containing a Jinja2 placeholder ({{ ... }}). + """ + if isinstance(data, str) and '{{' in data: + return dumper.represent_scalar( + 'tag:yaml.org,2002:str', + data, + style='"' + ) + return dumper.represent_scalar( + 'tag:yaml.org,2002:str', + data + ) + + +def build_users(defs, primary_domain, start_id, become_pwd): + """ + Construct user entries with auto-incremented UID/GID, default username/email, + and optional description. + + Args: + defs (OrderedDict): Mapping of user keys to their override settings. + primary_domain (str): The primary domain for email addresses (e.g. 'example.com'). + start_id (int): Starting number for UID/GID allocation (e.g. 1001). + become_pwd (str): Default password string for users without an override. + + Returns: + OrderedDict: Complete user definitions with all required fields filled in. + + Raises: + ValueError: If there are duplicate UIDs, usernames, or emails. 
+ """ + users = OrderedDict() + used_uids = set() + + # Collect any preset UIDs to avoid collisions + for key, overrides in defs.items(): + if 'uid' in overrides: + uid = overrides['uid'] + if uid in used_uids: + raise ValueError(f"Duplicate uid {uid} for user '{key}'") + used_uids.add(uid) + + next_uid = start_id + def allocate_uid(): + nonlocal next_uid + # Find the next free UID not already used + while next_uid in used_uids: + next_uid += 1 + free_uid = next_uid + used_uids.add(free_uid) + next_uid += 1 + return free_uid + + # Build each user entry + for key, overrides in defs.items(): + username = overrides.get('username', key) + email = overrides.get('email', f"{username}@{primary_domain}") + description = overrides.get('description') + roles = overrides.get('roles', []) + password = overrides.get('password', become_pwd) + reserved = overrides.get('reserved', False) + + # Determine UID and GID + if 'uid' in overrides: + uid = overrides['uid'] + else: + uid = allocate_uid() + gid = overrides.get('gid', uid) + + entry = { + 'username': username, + 'email': email, + 'password': password, + 'uid': uid, + 'gid': gid, + 'roles': roles + } + if description is not None: + entry['description'] = description + + if reserved: + entry['reserved'] = reserved + + users[key] = entry + + # Ensure uniqueness of usernames and emails + seen_usernames = set() + seen_emails = set() + + for key, entry in users.items(): + un = entry['username'] + em = entry['email'] + if un in seen_usernames: + raise ValueError(f"Duplicate username '{un}' in merged users") + if em in seen_emails: + raise ValueError(f"Duplicate email '{em}' in merged users") + seen_usernames.add(un) + seen_emails.add(em) + + return users + + +def load_user_defs(roles_directory): + """ + Scan all roles/*/users/main.yml files and merge any 'users:' sections. + + Args: + roles_directory (str): Path to the directory containing role subdirectories. + + Returns: + OrderedDict: Merged user definitions from all roles. + + Raises: + ValueError: On invalid format or conflicting override values. + """ + pattern = os.path.join(roles_directory, '*/users/main.yml') + files = sorted(glob.glob(pattern)) + merged = OrderedDict() + + for filepath in files: + with open(filepath, 'r') as f: + data = yaml.safe_load(f) or {} + users = data.get('users', {}) + if not isinstance(users, dict): + continue + + for key, overrides in users.items(): + if not isinstance(overrides, dict): + raise ValueError(f"Invalid definition for user '{key}' in {filepath}") + + if key not in merged: + merged[key] = overrides.copy() + else: + existing = merged[key] + for field, value in overrides.items(): + if field in existing and existing[field] != value: + raise ValueError( + f"Conflict for user '{key}': field '{field}' has existing value '{existing[field]}', tried to set '{value}' in {filepath}" + ) + existing.update(overrides) + + return merged + + +def dictify(data): + """ + Recursively convert OrderedDict to regular dict for YAML dumping. + """ + if isinstance(data, OrderedDict): + return {k: dictify(v) for k, v in data.items()} + if isinstance(data, dict): + return {k: dictify(v) for k, v in data.items()} + if isinstance(data, list): + return [dictify(v) for v in data] + return data + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Generate a users.yml by merging all roles/*/users/main.yml definitions.' + ) + parser.add_argument( + '--roles-dir', '-r', required=True, + help='Directory containing roles (e.g., roles/*/users/main.yml).' 
+ ) + parser.add_argument( + '--output', '-o', required=True, + help='Path to the output YAML file (e.g., users.yml).' + ) + parser.add_argument( + '--start-id', '-s', type=int, default=1001, + help='Starting UID/GID number (default: 1001).' + ) + parser.add_argument( + '--reserved-usernames', '-e', + help='Comma-separated list of usernames to reserve.', + default=None + ) + return parser.parse_args() + + +def main(): + args = parse_args() + primary_domain = '{{ SYSTEM_EMAIL.DOMAIN }}' + become_pwd = '{{ lookup("password", "/dev/null length=42 chars=ascii_letters,digits") }}' + + try: + definitions = load_user_defs(args.roles_dir) + except ValueError as e: + print(f"Error merging user definitions: {e}", file=sys.stderr) + sys.exit(1) + + # Add reserved/ users if specified + if args.reserved_usernames: + for name in args.reserved_usernames.split(','): + user_key = name.strip() + if not user_key: + continue + if user_key in definitions: + print( + f"Warning: reserved user '{user_key}' already defined; skipping (not changing existing definition).", + file=sys.stderr + ) + else: + definitions[user_key] = {} + # Mark user as reserved + definitions[user_key]["reserved"] = True + try: + users = build_users( + definitions, + primary_domain, + args.start_id, + become_pwd + ) + except ValueError as e: + print(f"Error building user entries: {e}", file=sys.stderr) + sys.exit(1) + + # Sort users by key for deterministic output + if isinstance(users, dict) and users: + users = OrderedDict(sorted(users.items())) + + # Convert OrderedDict into plain dict for YAML + default_users = {'default_users': users} + plain_data = dictify(default_users) + + # Register custom string representer + yaml.SafeDumper.add_representer(str, represent_str) + + # Dump the YAML file + with open(args.output, 'w') as f: + yaml.safe_dump( + plain_data, + f, + default_flow_style=False, + sort_keys=False, + width=120 + ) + +if __name__ == '__main__': + main() diff --git a/build/lib/cli/validate/__init__.py b/build/lib/cli/validate/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/build/lib/cli/validate/inventory.py b/build/lib/cli/validate/inventory.py new file mode 100644 index 00000000..bd67add0 --- /dev/null +++ b/build/lib/cli/validate/inventory.py @@ -0,0 +1,154 @@ +#!/usr/bin/env python3 +import argparse +import sys +import yaml +import re +from pathlib import Path + +# Ensure imports work when run directly +script_dir = Path(__file__).resolve().parent +repo_root = script_dir.parent.parent +sys.path.insert(0, str(repo_root)) + +from cli.meta.applications.all import find_application_ids + +def load_yaml_file(path): + try: + with open(path, 'r', encoding='utf-8') as f: + content = f.read() + content = re.sub(r'(?m)^([ \t]*[^\s:]+):\s*!vault[\s\S]+?(?=^\S|\Z)', r"\1: \"\"\n", content) + return yaml.safe_load(content) + except Exception as e: + print(f"Warning: Could not parse {path}: {e}", file=sys.stderr) + return None + + +def recursive_keys(d, prefix=''): + keys = set() + if isinstance(d, dict): + for k, v in d.items(): + full = f"{prefix}.{k}" if prefix else k + keys.add(full) + keys.update(recursive_keys(v, full)) + return keys + + +def compare_application_keys(applications, defaults, source): + errs = [] + for app_id, conf in applications.items(): + if app_id not in defaults: + errs.append(f"{source}: Unknown application '{app_id}' (not in defaults_applications)") + continue + default = defaults[app_id] + app_keys = recursive_keys(conf) + def_keys = recursive_keys(default) + for key in app_keys: + if 
key.startswith('credentials'): + continue + if key not in def_keys: + errs.append(f"{source}: Missing default for {app_id}: {key}") + return errs + + +def compare_user_keys(users, default_users, source): + errs = [] + for user, conf in users.items(): + if user not in default_users: + print(f"Warning: {source}: Unknown user '{user}' (not in default_users)", file=sys.stderr) + continue + def_conf = default_users[user] + for key in conf: + if key in ('password','credentials','mailu_token'): + continue + if key not in def_conf: + errs.append(f"Missing default for user '{user}': key '{key}'") + return errs + + +def load_inventory_files(inv_dir): + all_data = {} + p = Path(inv_dir) + for f in p.glob('*.yml'): + data = load_yaml_file(f) + if isinstance(data, dict): + apps = data.get('applications') or data.get('defaults_applications') + if apps: + all_data[str(f)] = apps + for d in p.glob('*_vars'): + if d.is_dir(): + for f in d.rglob('*.yml'): + data = load_yaml_file(f) + if isinstance(data, dict): + apps = data.get('applications') or data.get('defaults_applications') + if apps: + all_data[str(f)] = apps + return all_data + + +def validate_host_keys(app_ids, inv_dir): + errs = [] + p = Path(inv_dir) + # Scan all top-level YAMLs for 'all.children' + for f in p.glob('*.yml'): + data = load_yaml_file(f) + if not isinstance(data, dict): + continue + all_node = data.get('all', {}) + children = all_node.get('children') + if not isinstance(children, dict): + continue + for grp in children.keys(): + if grp not in app_ids: + errs.append(f"{f}: Invalid group '{grp}' (not in application_ids)") + return errs + + +def find_single_file(pattern): + c = list(Path('group_vars/all').glob(pattern)) + if len(c)!=1: + raise RuntimeError(f"Expected exactly one {pattern} in group_vars/all, found {len(c)}") + return c[0] + + +def main(): + p = argparse.ArgumentParser() + p.add_argument('inventory_dir') + args = p.parse_args() + # defaults + dfile = find_single_file('*_applications.yml') + ufile = find_single_file('*users.yml') + ddata = load_yaml_file(dfile) or {} + udata = load_yaml_file(ufile) or {} + defaults = ddata.get('defaults_applications',{}) + default_users = udata.get('default_users',{}) + if not defaults: + print(f"Error: No 'defaults_applications' found in {dfile}", file=sys.stderr) + sys.exit(1) + if not default_users: + print(f"Error: No 'default_users' found in {ufile}", file=sys.stderr) + sys.exit(1) + app_errs = [] + inv_files = load_inventory_files(args.inventory_dir) + for src, apps in inv_files.items(): + app_errs.extend(compare_application_keys(apps, defaults, src)) + user_errs = [] + for fpath in Path(args.inventory_dir).rglob('*.yml'): + data = load_yaml_file(fpath) + if isinstance(data, dict) and 'users' in data: + errs = compare_user_keys(data['users'], default_users, str(fpath)) + for e in errs: + print(e, file=sys.stderr) + user_errs.extend(errs) + host_errs = validate_host_keys(find_application_ids(), args.inventory_dir) + app_errs.extend(host_errs) + if app_errs or user_errs: + if app_errs: + print('Validation failed with the following issues:') + for e in app_errs: + print(f"- {e}") + sys.exit(1) + print('Inventory directory is valid against defaults and hosts.') + sys.exit(0) + +if __name__=='__main__': + main() diff --git a/build/lib/cli/vault.py b/build/lib/cli/vault.py new file mode 100644 index 00000000..383c6325 --- /dev/null +++ b/build/lib/cli/vault.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python3 + +import argparse +import subprocess + +def run_ansible_vault(action, filename, 
password_file): + cmd = ["ansible-vault", action, filename, "--vault-password-file", password_file] + subprocess.run(cmd, check=True) + +def main(): + parser = argparse.ArgumentParser(description="Manage Ansible Vault") + parser.add_argument("action", choices=["edit", "decrypt", "encrypt"], help="Vault action") + parser.add_argument("filename", help="File to process") + parser.add_argument("--password-file", required=True, help="Path to the Vault password file") + args = parser.parse_args() + + run_ansible_vault(args.action, args.filename, args.password_file) + +if __name__ == "__main__": + main() diff --git a/build/lib/filter_plugins/active_docker.py b/build/lib/filter_plugins/active_docker.py new file mode 100644 index 00000000..83dd2a36 --- /dev/null +++ b/build/lib/filter_plugins/active_docker.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- +""" +Ansible filter to count active docker services for current host. + +Active means: +- application key is in group_names +- application key matches prefix regex (default: ^(web-|svc-).* ) +- under applications[app]['docker']['services'] each service is counted if: + - 'enabled' is True, OR + - 'enabled' is missing/undefined (treated as active) + +Returns an integer. If ensure_min_one=True, returns at least 1. +""" + +import re +from typing import Any, Dict, Mapping, Iterable + + +def _is_mapping(x: Any) -> bool: + # be liberal: Mapping covers dict-like; fallback to dict check + try: + return isinstance(x, Mapping) + except Exception: + return isinstance(x, dict) + + +def active_docker_container_count(applications: Mapping[str, Any], + group_names: Iterable[str], + prefix_regex: str = r'^(web-|svc-).*', + ensure_min_one: bool = False) -> int: + if not _is_mapping(applications): + return 1 if ensure_min_one else 0 + + group_set = set(group_names or []) + try: + pattern = re.compile(prefix_regex) + except re.error: + pattern = re.compile(r'^(web-|svc-).*') # fallback + + count = 0 + + for app_key, app_val in applications.items(): + # host selection + name prefix + if app_key not in group_set: + continue + if not pattern.match(str(app_key)): + continue + + docker = app_val.get('docker') if _is_mapping(app_val) else None + services = docker.get('services') if _is_mapping(docker) else None + if not _is_mapping(services): + # sometimes roles define a single service name string; ignore + continue + + for _svc_name, svc_cfg in services.items(): + if not _is_mapping(svc_cfg): + # allow shorthand like: service: {} or image string -> counts as enabled + count += 1 + continue + enabled = svc_cfg.get('enabled', True) + if isinstance(enabled, bool): + if enabled: + count += 1 + else: + # non-bool enabled -> treat "truthy" as enabled + if bool(enabled): + count += 1 + + if ensure_min_one and count < 1: + return 1 + return count + + +class FilterModule(object): + def filters(self): + return { + # usage: {{ applications | active_docker_container_count(group_names) }} + 'active_docker_container_count': active_docker_container_count, + } diff --git a/build/lib/filter_plugins/application_allowed.py b/build/lib/filter_plugins/application_allowed.py new file mode 100644 index 00000000..3a3f5d04 --- /dev/null +++ b/build/lib/filter_plugins/application_allowed.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python3 + +# Provides a filter to control which applications (roles) should be deployed + +from ansible.errors import AnsibleFilterError + + +def application_allowed(application_id: str, group_names: list, allowed_applications: list = []): + """ + Return True if: + - application_id 
exists in group_names, AND + - either allowed_applications is not provided (or empty), OR application_id is in allowed_applications. + + Parameters: + application_id (str): Name of the application/role to check. + group_names (list): List of groups the current host belongs to. + allowed_applications (list, optional): List of application IDs to allow. + + Returns: + bool: True if this application is allowed to deploy, False otherwise. + """ + # Ensure group_names is iterable + if not isinstance(group_names, (list, tuple)): + raise AnsibleFilterError(f"Expected group_names to be a list, str or tuple, got {type(group_names)}") + + # Must be part of the host's groups + if application_id not in group_names: + return False + + # If allowed_applications provided, only allow if ID is in that list + if allowed_applications: + if not isinstance(allowed_applications, (list, tuple, str)): + raise AnsibleFilterError(f"allowed_applications must be a list or tuple if provided, got {type(allowed_applications)}") + return application_id in allowed_applications + + # No filter provided β†’ allow all in group_names + return True + + +class FilterModule(object): + def filters(self): + return { + 'application_allowed': application_allowed, + } diff --git a/build/lib/filter_plugins/applications_if_group_and_deps.py b/build/lib/filter_plugins/applications_if_group_and_deps.py new file mode 100644 index 00000000..54572b5b --- /dev/null +++ b/build/lib/filter_plugins/applications_if_group_and_deps.py @@ -0,0 +1,102 @@ +from ansible.errors import AnsibleFilterError +import os +import yaml + + +class FilterModule(object): + def filters(self): + return { + 'applications_if_group_and_deps': self.applications_if_group_and_deps, + } + + def applications_if_group_and_deps(self, applications, group_names): + """ + Return only those applications whose key is either: + 1) directly in group_names, or + 2) the application_id of any role reachable (recursively) + from any group in group_names via meta/dependencies. 
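+
+        Illustrative example: if group_names contains 'web-app-foo' and that role's
+        meta/main.yml lists 'svc-db-postgres' as a dependency, applications keyed
+        'web-app-foo' as well as those keyed by either role's application_id are
+        kept.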
+ """ + self._validate_inputs(applications, group_names) + + roles_dir = self._get_roles_directory() + + included_roles = self._collect_reachable_roles(group_names, roles_dir) + included_app_ids = self._gather_application_ids(included_roles, roles_dir) + + return self._filter_applications(applications, group_names, included_app_ids) + + def _validate_inputs(self, applications, group_names): + """Validate the inputs for correct types.""" + if not isinstance(applications, dict): + raise AnsibleFilterError(f"Expected applications as dict, got {type(applications).__name__}") + if not isinstance(group_names, (list, tuple)): + raise AnsibleFilterError(f"Expected group_names as list/tuple, got {type(group_names).__name__}") + + def _get_roles_directory(self): + """Locate and return the roles directory.""" + plugin_dir = os.path.dirname(__file__) + project_root = os.path.abspath(os.path.join(plugin_dir, '..')) + return os.path.join(project_root, 'roles') + + def _collect_reachable_roles(self, group_names, roles_dir): + """Recursively collect all roles reachable from the given groups via meta/dependencies.""" + included_roles = set() + for group in group_names: + self._collect_roles_from_group(group, included_roles, roles_dir) + return included_roles + + def _collect_roles_from_group(self, group, seen, roles_dir): + """Recursively collect roles from a specific group.""" + if group in seen: + return + seen.add(group) + + meta_file = os.path.join(roles_dir, group, 'meta', 'main.yml') + if not os.path.isfile(meta_file): + return + + try: + with open(meta_file) as f: + meta = yaml.safe_load(f) or {} + except Exception: + return + + for dep in meta.get('dependencies', []): + dep_name = self._get_dependency_name(dep) + if dep_name: + self._collect_roles_from_group(dep_name, seen, roles_dir) + + def _get_dependency_name(self, dependency): + """Extract the dependency role name from the meta data.""" + if isinstance(dependency, str): + return dependency + elif isinstance(dependency, dict): + return dependency.get('role') or dependency.get('name') + return None + + def _gather_application_ids(self, included_roles, roles_dir): + """Gather application_ids from the roles.""" + included_app_ids = set() + for role in included_roles: + vars_file = os.path.join(roles_dir, role, 'vars', 'main.yml') + if not os.path.isfile(vars_file): + continue + try: + with open(vars_file) as f: + vars_data = yaml.safe_load(f) or {} + except Exception: + continue + + app_id = vars_data.get('application_id') + if isinstance(app_id, str) and app_id: + included_app_ids.add(app_id) + + return included_app_ids + + def _filter_applications(self, applications, group_names, included_app_ids): + """Filter and return the applications that match the conditions.""" + result = {} + for app_key, cfg in applications.items(): + if app_key in group_names or app_key in included_app_ids: + result[app_key] = cfg + return result diff --git a/build/lib/filter_plugins/canonical_domains_map.py b/build/lib/filter_plugins/canonical_domains_map.py new file mode 100644 index 00000000..19041e9c --- /dev/null +++ b/build/lib/filter_plugins/canonical_domains_map.py @@ -0,0 +1,116 @@ +from ansible.errors import AnsibleFilterError +import sys +import os + +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) +from module_utils.entity_name_utils import get_entity_name +from module_utils.role_dependency_resolver import RoleDependencyResolver +from typing import Iterable + + +class FilterModule(object): + def filters(self): + return 
{'canonical_domains_map': self.canonical_domains_map} + + def canonical_domains_map( + self, + apps, + PRIMARY_DOMAIN, + *, + recursive: bool = False, + roles_base_dir: str | None = None, + seed: Iterable[str] | None = None, + ): + """ + Build { app_id: [canonical domains...] }. + + Rekursiv werden nur include_role, import_role und meta/main.yml:dependencies verfolgt. + 'run_after' wird hier absichtlich ignoriert. + """ + if not isinstance(apps, dict): + raise AnsibleFilterError(f"'apps' must be a dict, got {type(apps).__name__}") + + app_keys = set(apps.keys()) + seed_keys = set(seed) if seed is not None else app_keys + + if recursive: + roles_base_dir = roles_base_dir or os.path.join(os.getcwd(), "roles") + if not os.path.isdir(roles_base_dir): + raise AnsibleFilterError( + f"roles_base_dir '{roles_base_dir}' not found or not a directory." + ) + + resolver = RoleDependencyResolver(roles_base_dir) + discovered_roles = resolver.resolve_transitively( + start_roles=seed_keys, + resolve_include_role=True, + resolve_import_role=True, + resolve_dependencies=True, + resolve_run_after=False, + max_depth=None, + ) + # all discovered roles that actually have config entries in `apps` + target_apps = discovered_roles & app_keys + else: + target_apps = seed_keys + + result = {} + seen_domains = {} + + for app_id in sorted(target_apps): + cfg = apps.get(app_id) + if cfg is None: + continue + if not str(app_id).startswith(("web-", "svc-db-")): + continue + if not isinstance(cfg, dict): + raise AnsibleFilterError( + f"Invalid configuration for application '{app_id}': expected dict, got {cfg!r}" + ) + + domains_cfg = cfg.get('server', {}).get('domains', {}) + if not domains_cfg or 'canonical' not in domains_cfg: + self._add_default_domain(app_id, PRIMARY_DOMAIN, seen_domains, result) + continue + + canonical_domains = domains_cfg['canonical'] + self._process_canonical_domains(app_id, canonical_domains, seen_domains, result) + + return result + + def _add_default_domain(self, app_id, PRIMARY_DOMAIN, seen_domains, result): + entity_name = get_entity_name(app_id) + default_domain = f"{entity_name}.{PRIMARY_DOMAIN}" + if default_domain in seen_domains: + raise AnsibleFilterError( + f"Domain '{default_domain}' is already configured for " + f"'{seen_domains[default_domain]}' and '{app_id}'" + ) + seen_domains[default_domain] = app_id + result[app_id] = [default_domain] + + def _process_canonical_domains(self, app_id, canonical_domains, seen_domains, result): + if isinstance(canonical_domains, dict): + for _, domain in canonical_domains.items(): + self._validate_and_check_domain(app_id, domain, seen_domains) + result[app_id] = canonical_domains.copy() + elif isinstance(canonical_domains, list): + for domain in canonical_domains: + self._validate_and_check_domain(app_id, domain, seen_domains) + result[app_id] = list(canonical_domains) + else: + raise AnsibleFilterError( + f"Unexpected type for 'server.domains.canonical' in application '{app_id}': " + f"{type(canonical_domains).__name__}" + ) + + def _validate_and_check_domain(self, app_id, domain, seen_domains): + if not isinstance(domain, str) or not domain.strip(): + raise AnsibleFilterError( + f"Invalid domain entry in 'canonical' for application '{app_id}': {domain!r}" + ) + if domain in seen_domains: + raise AnsibleFilterError( + f"Domain '{domain}' is already configured for '{seen_domains[domain]}' and '{app_id}'" + ) + seen_domains[domain] = app_id diff --git a/build/lib/filter_plugins/csp_filters.py b/build/lib/filter_plugins/csp_filters.py new file mode 
100644 index 00000000..e5c8f355 --- /dev/null +++ b/build/lib/filter_plugins/csp_filters.py @@ -0,0 +1,335 @@ +from ansible.errors import AnsibleFilterError +import hashlib +import base64 +import sys +import os + +# Ensure module_utils is importable when this filter runs from Ansible +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) +from module_utils.config_utils import get_app_conf +from module_utils.get_url import get_url + + +def _dedup_preserve(seq): + """Return a list with stable order and unique items.""" + seen = set() + out = [] + for x in seq: + if x not in seen: + seen.add(x) + out.append(x) + return out + +def _sort_tokens(tokens): + """ + Return a deterministically ordered list of CSP tokens. + - de-duplicates while preserving relative order + - then sorts lexicographically + - keeps 'self' as the first token if present + """ + uniq = _dedup_preserve(tokens) + if not uniq: + return uniq + + # Lexicographically sort all tokens + uniq = sorted(uniq) + + # Ensure "'self'" is always first if present + if "'self'" in uniq: + uniq.remove("'self'") + uniq.insert(0, "'self'") + + return uniq + +class FilterModule(object): + """ + Jinja filters for building a robust, CSP3-aware Content-Security-Policy header. + Safari/CSP2 compatibility is ensured by merging the -elem/-attr variants into the base + directives (style-src, script-src). We intentionally do NOT mirror back into -elem/-attr + to allow true CSP3 granularity on modern browsers. + """ + + def filters(self): + return { + 'build_csp_header': self.build_csp_header, + } + + # ------------------------------- + # Helpers + # ------------------------------- + + @staticmethod + def is_feature_enabled(applications: dict, feature: str, application_id: str) -> bool: + """ + Returns True if applications[application_id].features[feature] is truthy. + """ + return get_app_conf( + applications, + application_id, + 'features.' + feature, + False, + False + ) + + @staticmethod + def get_csp_whitelist(applications, application_id, directive): + """ + Returns a list of additional whitelist entries for a given directive. + Accepts both scalar and list in config; always returns a list. + """ + wl = get_app_conf( + applications, + application_id, + 'server.csp.whitelist.' + directive, + False, + [] + ) + if isinstance(wl, list): + return wl + if wl: + return [wl] + return [] + + @staticmethod + def get_csp_flags(applications, application_id, directive): + """ + Returns CSP flag tokens (e.g., "'unsafe-eval'", "'unsafe-inline'") for a directive, + merging sane defaults with app config. + + Defaults: + - For styles we enable 'unsafe-inline' by default (style-src, style-src-elem, style-src-attr), + because many apps rely on inline styles / style attributes. + - For scripts we do NOT enable 'unsafe-inline' by default. + """ + default_flags = {} + if directive in ('style-src', 'style-src-elem', 'style-src-attr'): + default_flags = {'unsafe-inline': True} + + configured = get_app_conf( + applications, + application_id, + 'server.csp.flags.' + directive, + False, + {} + ) + + merged = {**default_flags, **configured} + + tokens = [] + for flag_name, enabled in merged.items(): + if enabled: + tokens.append(f"'{flag_name}'") + return tokens + + @staticmethod + def get_csp_inline_content(applications, application_id, directive): + """ + Returns inline script/style snippets to hash for a given directive. + Accepts both scalar and list in config; always returns a list. 
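+
+        Illustrative config shape (hypothetical value):
+        server.csp.hashes.script-src-elem: "console.log('ready');"
+        -> returns ["console.log('ready');"], which build_csp_header hashes
+        via get_csp_hash when 'unsafe-inline' is not set for that directive.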
+ """ + snippets = get_app_conf( + applications, + application_id, + 'server.csp.hashes.' + directive, + False, + [] + ) + if isinstance(snippets, list): + return snippets + if snippets: + return [snippets] + return [] + + @staticmethod + def get_csp_hash(content): + """ + Computes the SHA256 hash of the given inline content and returns + a CSP token like "'sha256-'". + """ + try: + digest = hashlib.sha256(content.encode('utf-8')).digest() + b64 = base64.b64encode(digest).decode('utf-8') + return f"'sha256-{b64}'" + except Exception as exc: + raise AnsibleFilterError(f"get_csp_hash failed: {exc}") + + # ------------------------------- + # Main builder + # ------------------------------- + + def build_csp_header( + self, + applications, + application_id, + domains, + web_protocol='https', + matomo_feature_name='matomo' + ): + """ + Builds the Content-Security-Policy header value dynamically based on application settings. + + Key points: + - CSP3-aware: supports base/elem/attr for styles and scripts. + - Safari/CSP2 fallback: base directives (style-src, script-src) always include + the union of their -elem/-attr variants. + - We do NOT mirror back into -elem/-attr; finer CSP3 rules remain effective + on modern browsers if you choose to use them. + - If the app explicitly disables a token on the *base* (e.g. style-src.unsafe-inline: false), + that token is removed from the merged base even if present in elem/attr. + - Inline hashes are added ONLY if that directive does NOT include 'unsafe-inline'. + - Whitelists/flags/hashes read from: + server.csp.whitelist. + server.csp.flags. + server.csp.hashes. + - β€œSmart defaults”: + * internal CDN for style/script elem and connect + * Matomo endpoints (if feature enabled) for script-elem/connect + * Simpleicons (if feature enabled) for connect + * reCAPTCHA (if feature enabled) for script-elem/frame-src + * frame-ancestors extended for desktop/logout/keycloak if enabled + """ + try: + directives = [ + 'default-src', + 'connect-src', + 'frame-ancestors', + 'frame-src', + 'script-src', + 'script-src-elem', + 'script-src-attr', + 'style-src', + 'style-src-elem', + 'style-src-attr', + 'font-src', + 'worker-src', + 'manifest-src', + 'media-src', + ] + + tokens_by_dir = {} + explicit_flags_by_dir = {} + + for directive in directives: + # Collect explicit flags (to later respect explicit "False" on base during merge) + explicit_flags = get_app_conf( + applications, + application_id, + 'server.csp.flags.' 
+ directive, + False, + {} + ) + explicit_flags_by_dir[directive] = explicit_flags + + tokens = ["'self'"] + + # Flags (with sane defaults) + flags = self.get_csp_flags(applications, application_id, directive) + tokens += flags + + # Internal CDN defaults for selected directives + if directive in ('script-src-elem', 'connect-src', 'style-src-elem', 'style-src'): + tokens.append(get_url(domains, 'web-svc-cdn', web_protocol)) + + # Matomo (if enabled) + if directive in ('script-src-elem', 'connect-src'): + if self.is_feature_enabled(applications, matomo_feature_name, application_id): + tokens.append(get_url(domains, 'web-app-matomo', web_protocol)) + + # Simpleicons (if enabled) – typically used via connect-src (fetch) + if directive == 'connect-src': + if self.is_feature_enabled(applications, 'simpleicons', application_id): + tokens.append(get_url(domains, 'web-svc-simpleicons', web_protocol)) + + # reCAPTCHA (if enabled) – scripts + frames + if self.is_feature_enabled(applications, 'recaptcha', application_id): + if directive in ('script-src-elem', 'frame-src'): + tokens.append('https://www.gstatic.com') + tokens.append('https://www.google.com') + + # hCaptcha (if enabled) – scripts + frames + if self.is_feature_enabled(applications, 'hcaptcha', application_id): + if directive in ('script-src-elem'): + tokens.append('https://www.hcaptcha.com') + tokens.append('https://js.hcaptcha.com') + if directive in ('frame-src'): + tokens.append('https://newassets.hcaptcha.com/') + + # Frame ancestors (desktop + logout) + if directive == 'frame-ancestors': + if self.is_feature_enabled(applications, 'desktop', application_id): + # Allow being embedded by the desktop app domain's site + domain = domains.get('web-app-desktop')[0] + sld_tld = ".".join(domain.split(".")[-2:]) # e.g., example.com + tokens.append(f"{sld_tld}") + if self.is_feature_enabled(applications, 'logout', application_id): + tokens.append(get_url(domains, 'web-svc-logout', web_protocol)) + tokens.append(get_url(domains, 'web-app-keycloak', web_protocol)) + + # Logout support requires inline handlers (script-src-attr) + if directive in ('script-src-attr','script-src-elem'): + if self.is_feature_enabled(applications, 'logout', application_id): + tokens.append("'unsafe-inline'") + + + # Custom whitelist + tokens += self.get_csp_whitelist(applications, application_id, directive) + + # Inline hashes (only if this directive does NOT include 'unsafe-inline') + if "'unsafe-inline'" not in tokens: + for snippet in self.get_csp_inline_content(applications, application_id, directive): + tokens.append(self.get_csp_hash(snippet)) + + tokens_by_dir[directive] = _dedup_preserve(tokens) + + # ---------------------------------------------------------- + # CSP3 families β†’ ensure CSP2 fallback (Safari-safe) + # Merge style/script families so base contains union of elem/attr. + # Respect explicit disables on the base (e.g. unsafe-inline=False). + # Do NOT mirror back into elem/attr (keep granularity). + # ---------------------------------------------------------- + def _strip_if_disabled(unioned_tokens, explicit_flags, name): + """ + Remove a token (e.g. 'unsafe-inline') from the unioned token list + if it is explicitly disabled in the base directive flags. 
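+
+            Example (sketch):
+            _strip_if_disabled(["'self'", "'unsafe-inline'"],
+                               {'unsafe-inline': False}, 'unsafe-inline')
+            -> ["'self'"]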
+ """ + if isinstance(explicit_flags, dict) and explicit_flags.get(name) is False: + tok = f"'{name}'" + return [t for t in unioned_tokens if t != tok] + return unioned_tokens + + def merge_family(base_key, elem_key, attr_key): + base = tokens_by_dir.get(base_key, []) + elem = tokens_by_dir.get(elem_key, []) + attr = tokens_by_dir.get(attr_key, []) + union = _dedup_preserve(base + elem + attr) + + # Respect explicit disables on the base + explicit_base = explicit_flags_by_dir.get(base_key, {}) + # The most relevant flags for script/style: + for flag_name in ('unsafe-inline', 'unsafe-eval'): + union = _strip_if_disabled(union, explicit_base, flag_name) + + tokens_by_dir[base_key] = union # write back only to base + + merge_family('style-src', 'style-src-elem', 'style-src-attr') + merge_family('script-src', 'script-src-elem', 'script-src-attr') + + # ---------------------------------------------------------- + # Assemble header + # ---------------------------------------------------------- + # Sort tokens per directive for deterministic output + for directive, toks in list(tokens_by_dir.items()): + tokens_by_dir[directive] = _sort_tokens(toks) + + parts = [] + for directive in directives: + if directive in tokens_by_dir: + parts.append(f"{directive} {' '.join(tokens_by_dir[directive])};") + + # Keep permissive img-src for data/blob + any host (as before) + parts.append("img-src * data: blob:;") + + return ' '.join(parts) + + except Exception as exc: + raise AnsibleFilterError(f"build_csp_header failed: {exc}") diff --git a/build/lib/filter_plugins/csp_hashes.py b/build/lib/filter_plugins/csp_hashes.py new file mode 100644 index 00000000..47f44a68 --- /dev/null +++ b/build/lib/filter_plugins/csp_hashes.py @@ -0,0 +1,31 @@ +from ansible.errors import AnsibleFilterError +import copy + +def append_csp_hash(applications, application_id, code_one_liner): + """ + Ensures that applications[application_id].csp.hashes['script-src-elem'] + exists and appends the given one-liner (if not already present). + """ + if not isinstance(applications, dict): + raise AnsibleFilterError("`applications` must be a dict") + if application_id not in applications: + raise AnsibleFilterError(f"Unknown application_id: {application_id}") + + apps = copy.deepcopy(applications) + app = apps[application_id] + server = app.setdefault('server', {}) + csp = server.setdefault('csp', {}) + hashes = csp.setdefault('hashes', {}) + + existing = hashes.get('script-src-elem', []) + if code_one_liner not in existing: + existing.append(code_one_liner) + hashes['script-src-elem'] = existing + + return apps + +class FilterModule(object): + def filters(self): + return { + 'append_csp_hash': append_csp_hash + } diff --git a/build/lib/filter_plugins/docker_service_enabled.py b/build/lib/filter_plugins/docker_service_enabled.py new file mode 100644 index 00000000..460727c7 --- /dev/null +++ b/build/lib/filter_plugins/docker_service_enabled.py @@ -0,0 +1,25 @@ +class FilterModule(object): + ''' Custom filter to safely check if a docker service is enabled for an application_id ''' + + def filters(self): + return { + 'is_docker_service_enabled': self.is_docker_service_enabled + } + + @staticmethod + def is_docker_service_enabled(applications, application_id, service_name): + """ + Returns True if applications[application_id].docker.services[service_name].enabled is truthy, + otherwise returns False (even if intermediate keys are missing). 
+ """ + try: + return bool( + applications + and application_id in applications + and applications[application_id].get('docker', {}) + .get('services', {}) + .get(service_name, {}) + .get('enabled', False) + ) + except Exception: + return False diff --git a/build/lib/filter_plugins/domain_redirect_mappings.py b/build/lib/filter_plugins/domain_redirect_mappings.py new file mode 100644 index 00000000..09008b76 --- /dev/null +++ b/build/lib/filter_plugins/domain_redirect_mappings.py @@ -0,0 +1,97 @@ +from ansible.errors import AnsibleFilterError +import sys, os +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) +from module_utils.entity_name_utils import get_entity_name + +class FilterModule(object): + def filters(self): + return {'domain_mappings': self.domain_mappings} + + def domain_mappings(self, apps, primary_domain, auto_build_alias): + """ + Build a flat list of redirect mappings for all apps: + - source: each alias domain + - target: the first canonical domain + Skip mappings where source == target, since they make no sense. + """ + def parse_entry(domains_cfg, key, app_id): + if key not in domains_cfg: + return None + entry = domains_cfg[key] + if isinstance(entry, dict): + values = list(entry.values()) + elif isinstance(entry, list): + values = entry + else: + raise AnsibleFilterError( + f"Unexpected type for 'domains.{key}' in application '{app_id}': {type(entry).__name__}" + ) + for d in values: + if not isinstance(d, str) or not d.strip(): + raise AnsibleFilterError( + f"Invalid domain entry in '{key}' for application '{app_id}': {d!r}" + ) + return values + + def default_domain(app_id:str, primary:str): + subdomain = get_entity_name(app_id) + return f"{subdomain}.{primary}" + + # 1) Compute canonical domains per app (always as a list) + canonical_map = {} + for app_id, cfg in apps.items(): + domains_cfg = cfg.get('server',{}).get('domains',{}) + entry = domains_cfg.get('canonical') + if entry is None: + canonical_map[app_id] = [default_domain(app_id, primary_domain)] + elif isinstance(entry, dict): + canonical_map[app_id] = list(entry.values()) + elif isinstance(entry, list): + canonical_map[app_id] = list(entry) + else: + raise AnsibleFilterError( + f"Unexpected type for 'server.domains.canonical' in application '{app_id}': {type(entry).__name__}" + ) + + # 2) Compute alias domains per app + alias_map = {} + for app_id, cfg in apps.items(): + domains_cfg = cfg.get('server',{}).get('domains',{}) + if domains_cfg is None: + alias_map[app_id] = [] + continue + if isinstance(domains_cfg, dict) and not domains_cfg: + alias_map[app_id] = [default_domain(app_id, primary_domain)] + continue + + aliases = parse_entry(domains_cfg, 'aliases', app_id) or [] + default = default_domain(app_id, primary_domain) + has_aliases = 'aliases' in domains_cfg + has_canonical = 'canonical' in domains_cfg + + if has_aliases: + if default not in aliases: + aliases.append(default) + elif has_canonical: + canon = canonical_map.get(app_id, []) + if default not in canon and default not in aliases and auto_build_alias: + aliases.append(default) + + alias_map[app_id] = aliases + + # 3) Build flat list of {source, target} entries, + # skipping self-mappings + mappings = [] + for app_id, sources in alias_map.items(): + canon_list = canonical_map.get(app_id, []) + target = canon_list[0] if canon_list else default_domain(app_id, primary_domain) + for src in sources: + if src == target: + # skip self-redirects + continue + mappings.append({ + 'source': src, + 'target': target + }) + 
+ return mappings diff --git a/build/lib/filter_plugins/domain_tools.py b/build/lib/filter_plugins/domain_tools.py new file mode 100644 index 00000000..ec55f814 --- /dev/null +++ b/build/lib/filter_plugins/domain_tools.py @@ -0,0 +1,19 @@ +# filter_plugins/domain_tools.py +# Returns the DNS zone (SLD.TLD) from a hostname. +# Pure-Python, no external deps; handles simple cases. For exotic TLDs use tldextract (see note). +from ansible.errors import AnsibleFilterError + +def to_zone(hostname: str) -> str: + if not isinstance(hostname, str) or not hostname.strip(): + raise AnsibleFilterError("to_zone: hostname must be a non-empty string") + parts = hostname.strip(".").split(".") + if len(parts) < 2: + raise AnsibleFilterError(f"to_zone: '{hostname}' has no TLD part") + # naive default: last two labels -> SLD.TLD + return ".".join(parts[-2:]) + +class FilterModule(object): + def filters(self): + return { + "to_zone": to_zone, + } diff --git a/build/lib/filter_plugins/generate_all_domains.py b/build/lib/filter_plugins/generate_all_domains.py new file mode 100644 index 00000000..41d6b894 --- /dev/null +++ b/build/lib/filter_plugins/generate_all_domains.py @@ -0,0 +1,31 @@ +from ansible.errors import AnsibleFilterError + +class FilterModule(object): + def filters(self): + return {'generate_all_domains': self.generate_all_domains} + + def generate_all_domains(self, domains_dict, include_www:bool=True): + """ + Transform a dict of domains (values: str, list, dict) into a flat list, + optionally add 'www.' prefixes, dedupe and sort alphabetically. + """ + # lokaler Helfer zum Flatten + def _flatten(domains): + flat = [] + for v in (domains or {}).values(): + if isinstance(v, str): + flat.append(v) + elif isinstance(v, list): + flat.extend(v) + elif isinstance(v, dict): + flat.extend(v.values()) + return flat + + try: + flat = _flatten(domains_dict) + if include_www: + original = list(flat) + flat.extend([f"www.{d}" for d in original]) + return sorted(set(flat)) + except Exception as exc: + raise AnsibleFilterError(f"generate_all_domains failed: {exc}") diff --git a/build/lib/filter_plugins/generate_base_sld_domains.py b/build/lib/filter_plugins/generate_base_sld_domains.py new file mode 100644 index 00000000..cc2253ef --- /dev/null +++ b/build/lib/filter_plugins/generate_base_sld_domains.py @@ -0,0 +1,44 @@ +import re +from ansible.errors import AnsibleFilterError + +class FilterModule(object): + def filters(self): + return {'generate_base_sld_domains': self.generate_base_sld_domains} + + def generate_base_sld_domains(self, domains_list): + """ + Given a list of hostnames, extract the second-level domain (SLD.TLD) for any hostname + with two or more labels, return single-label hostnames as-is, and reject IPs, + empty or malformed strings, and non-strings. Deduplicate and sort. + """ + if not isinstance(domains_list, list): + raise AnsibleFilterError( + f"generate_base_sld_domains expected a list, got {type(domains_list).__name__}" + ) + + ip_pattern = re.compile(r'^\d{1,3}(?:\.\d{1,3}){3}$') + results = set() + + for hostname in domains_list: + # type check + if not isinstance(hostname, str): + raise AnsibleFilterError(f"Invalid domain entry (not a string): {hostname!r}") + + # malformed or empty + if not hostname or hostname.startswith('.') or hostname.endswith('.') or '..' 
in hostname: + raise AnsibleFilterError(f"Invalid domain entry (malformed): {hostname!r}") + + # IP addresses disallowed + if ip_pattern.match(hostname): + raise AnsibleFilterError(f"IP addresses not allowed: {hostname!r}") + + # single-label hostnames + labels = hostname.split('.') + if len(labels) == 1: + results.add(hostname) + else: + # always keep only the last two labels (SLD.TLD) + sld = ".".join(labels[-2:]) + results.add(sld) + + return sorted(results) \ No newline at end of file diff --git a/build/lib/filter_plugins/get_all_application_ids.py b/build/lib/filter_plugins/get_all_application_ids.py new file mode 100644 index 00000000..e498d95b --- /dev/null +++ b/build/lib/filter_plugins/get_all_application_ids.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 +# filter_plugins/get_all_application_ids.py + +import glob +import os +import yaml + + +def get_all_application_ids(roles_dir='roles'): + """ + Ansible filter to retrieve all unique application_id values + defined in roles/*/vars/main.yml files. + + :param roles_dir: Base directory for Ansible roles (default: 'roles') + :return: Sorted list of unique application_id strings + """ + pattern = os.path.join(roles_dir, '*', 'vars', 'main.yml') + app_ids = [] + + for filepath in glob.glob(pattern): + try: + with open(filepath, 'r', encoding='utf-8') as f: + data = yaml.safe_load(f) + except Exception: + continue + + if isinstance(data, dict) and 'application_id' in data: + app_ids.append(data['application_id']) + + return sorted(set(app_ids)) + + +class FilterModule(object): + """ + Ansible filter plugin for retrieving application IDs. + """ + def filters(self): + return { + 'get_all_application_ids': get_all_application_ids + } diff --git a/build/lib/filter_plugins/get_all_invokable_apps.py b/build/lib/filter_plugins/get_all_invokable_apps.py new file mode 100644 index 00000000..58eea296 --- /dev/null +++ b/build/lib/filter_plugins/get_all_invokable_apps.py @@ -0,0 +1,54 @@ +import os +import yaml + +def get_all_invokable_apps( + categories_file=None, + roles_dir=None +): + """ + Return all application_ids (or role names) for roles whose directory names match invokable paths from categories.yml. 
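+
+    Illustrative example: if categories.yml marks 'web-app' as invokable and
+    roles/web-app-nextcloud/vars/main.yml sets application_id: nextcloud,
+    the returned list contains 'nextcloud'.
+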
+ :param categories_file: Path to categories.yml (default: roles/categories.yml at project root) + :param roles_dir: Path to roles directory (default: roles/ at project root) + :return: List of application_ids (or role names) + """ + # Resolve defaults + here = os.path.dirname(os.path.abspath(__file__)) + project_root = os.path.abspath(os.path.join(here, '..')) + if not categories_file: + categories_file = os.path.join(project_root, 'roles', 'categories.yml') + if not roles_dir: + roles_dir = os.path.join(project_root, 'roles') + + # Get invokable paths + from filter_plugins.invokable_paths import get_invokable_paths + invokable_paths = get_invokable_paths(categories_file) + if not invokable_paths: + return [] + + result = [] + if not os.path.isdir(roles_dir): + return [] + + for role in sorted(os.listdir(roles_dir)): + role_path = os.path.join(roles_dir, role) + if not os.path.isdir(role_path): + continue + if any(role == p or role.startswith(p + '-') for p in invokable_paths): + vars_file = os.path.join(role_path, 'vars', 'main.yml') + if os.path.isfile(vars_file): + try: + with open(vars_file, 'r', encoding='utf-8') as f: + data = yaml.safe_load(f) or {} + app_id = data.get('application_id', role) + except Exception: + app_id = role + else: + app_id = role + result.append(app_id) + return sorted(result) + +class FilterModule(object): + def filters(self): + return { + 'get_all_invokable_apps': get_all_invokable_apps + } diff --git a/build/lib/filter_plugins/get_app_conf.py b/build/lib/filter_plugins/get_app_conf.py new file mode 100644 index 00000000..fb4e6392 --- /dev/null +++ b/build/lib/filter_plugins/get_app_conf.py @@ -0,0 +1,10 @@ +import sys, os +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) +from module_utils.config_utils import get_app_conf, AppConfigKeyError,ConfigEntryNotSetError + +class FilterModule(object): + ''' Infinito.Nexus application config extraction filters ''' + def filters(self): + return { + 'get_app_conf': get_app_conf, + } diff --git a/build/lib/filter_plugins/get_category_entries.py b/build/lib/filter_plugins/get_category_entries.py new file mode 100644 index 00000000..77f9cd55 --- /dev/null +++ b/build/lib/filter_plugins/get_category_entries.py @@ -0,0 +1,31 @@ +# Custom Ansible filter to get all role names under "roles/" with a given prefix. + +import os + +def get_category_entries(prefix, roles_path="roles"): + """ + Returns a list of role names under the given roles_path + that start with the specified prefix. + + :param prefix: String prefix to match role names. + :param roles_path: Path to the roles directory (default: 'roles'). + :return: List of matching role names. 
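+
+    Example (illustrative): with roles/web-app-nextcloud and
+    roles/svc-db-postgres on disk, get_category_entries('web-app')
+    -> ['web-app-nextcloud'].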
+ """ + if not os.path.isdir(roles_path): + return [] + + roles = [] + for entry in os.listdir(roles_path): + full_path = os.path.join(roles_path, entry) + if os.path.isdir(full_path) and entry.startswith(prefix): + roles.append(entry) + + return sorted(roles) + +class FilterModule(object): + """ Custom filters for Ansible """ + + def filters(self): + return { + "get_category_entries": get_category_entries + } diff --git a/build/lib/filter_plugins/get_docker_image.py b/build/lib/filter_plugins/get_docker_image.py new file mode 100644 index 00000000..74ae5165 --- /dev/null +++ b/build/lib/filter_plugins/get_docker_image.py @@ -0,0 +1,19 @@ +def get_docker_image(applications, application_id, image_key:str=None): + image_key = image_key if image_key else application_id + docker = applications.get(application_id, {}).get("docker", {}) + version = docker.get("versions", {}).get(image_key) + image = docker.get("images", {}).get(image_key) + + if not image: + raise ValueError(f"Missing image for {application_id}:{image_key}") + + if not version: + raise ValueError(f"Missing version for {application_id}:{image_key}") + + return f"{image}:{version}" + +class FilterModule(object): + def filters(self): + return { + 'get_docker_image': get_docker_image, + } diff --git a/build/lib/filter_plugins/get_docker_paths.py b/build/lib/filter_plugins/get_docker_paths.py new file mode 100644 index 00000000..87467d1c --- /dev/null +++ b/build/lib/filter_plugins/get_docker_paths.py @@ -0,0 +1,34 @@ +import sys, os +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) +from module_utils.entity_name_utils import get_entity_name + +def get_docker_paths(application_id: str, path_docker_compose_instances: str) -> dict: + """ + Build the docker_compose dict based on + path_docker_compose_instances and application_id. + Uses get_entity_name to extract the entity name from application_id. 
+ """ + entity = get_entity_name(application_id) + base = f"{path_docker_compose_instances}{entity}/" + + return { + 'directories': { + 'instance': base, + 'env': f"{base}.env/", + 'services': f"{base}services/", + 'volumes': f"{base}volumes/", + 'config': f"{base}config/", + }, + 'files': { + 'env': f"{base}.env/env", + 'docker_compose': f"{base}docker-compose.yml", + 'docker_compose_override': f"{base}docker-compose.override.yml", + 'dockerfile': f"{base}Dockerfile", + } + } + +class FilterModule(object): + def filters(self): + return { + 'get_docker_paths': get_docker_paths, + } diff --git a/build/lib/filter_plugins/get_domain.py b/build/lib/filter_plugins/get_domain.py new file mode 100644 index 00000000..be435346 --- /dev/null +++ b/build/lib/filter_plugins/get_domain.py @@ -0,0 +1,19 @@ +#!/usr/bin/python +import os +import sys +from ansible.errors import AnsibleFilterError + +class FilterModule(object): + def filters(self): + plugin_dir = os.path.dirname(__file__) + project_root = os.path.dirname(plugin_dir) + module_utils = os.path.join(project_root, 'module_utils') + if module_utils not in sys.path: + sys.path.append(module_utils) + + try: + from domain_utils import get_domain + except ImportError as e: + raise AnsibleFilterError(f"could not import domain_utils: {e}") + + return {'get_domain': get_domain} diff --git a/build/lib/filter_plugins/get_entity_name.py b/build/lib/filter_plugins/get_entity_name.py new file mode 100644 index 00000000..088d3560 --- /dev/null +++ b/build/lib/filter_plugins/get_entity_name.py @@ -0,0 +1,9 @@ +import sys, os +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) +from module_utils.entity_name_utils import get_entity_name + +class FilterModule(object): + def filters(self): + return { + 'get_entity_name': get_entity_name, + } diff --git a/build/lib/filter_plugins/get_role.py b/build/lib/filter_plugins/get_role.py new file mode 100644 index 00000000..1849a544 --- /dev/null +++ b/build/lib/filter_plugins/get_role.py @@ -0,0 +1,48 @@ +''' +Ansible filter plugin: get_role + +This filter inspects each role under the given roles directory, loads its vars/main.yml, +and returns the role folder name whose application_id matches the provided value. +''' + +from ansible.errors import AnsibleFilterError +import os +import yaml + + +def get_role(application_id, roles_path='roles'): + """ + Find the role directory under `roles_path` whose vars/main.yml contains the given application_id. + + :param application_id: The application_id to match. + :param roles_path: Path to the roles directory (default: 'roles'). + :return: The name of the matching role directory. + :raises AnsibleFilterError: If vars file is unreadable or no match is found. 
+ """ + if not os.path.isdir(roles_path): + raise AnsibleFilterError(f"Roles path not found: {roles_path}") + + for role in os.listdir(roles_path): + role_dir = os.path.join(roles_path, role) + vars_file = os.path.join(role_dir, 'vars', 'main.yml') + if os.path.isfile(vars_file): + try: + with open(vars_file, 'r') as f: + data = yaml.safe_load(f) or {} + except Exception as e: + raise AnsibleFilterError(f"Failed to load {vars_file}: {e}") + + if data.get('application_id') == application_id: + return role + + raise AnsibleFilterError(f"No role found with application_id '{application_id}' in {roles_path}") + + +class FilterModule(object): + """ + Register the get_role filter + """ + def filters(self): + return { + 'get_role': get_role, + } diff --git a/build/lib/filter_plugins/get_service_name.py b/build/lib/filter_plugins/get_service_name.py new file mode 100644 index 00000000..2eeafd23 --- /dev/null +++ b/build/lib/filter_plugins/get_service_name.py @@ -0,0 +1,37 @@ +""" +Custom Ansible filter to build a systemctl unit name (always lowercase). + +Rules: +- If `systemctl_id` ends with '@': drop the '@' and return + "{systemctl_id_without_at}.{software_name}@{suffix_handling}". +- Else: return "{systemctl_id}.{software_name}{suffix_handling}". + +Suffix handling: +- Default "" β†’ automatically pick: + - ".service" if no '@' in systemctl_id + - ".timer" if '@' in systemctl_id +- Explicit False β†’ no suffix at all +- Any string β†’ ".{suffix}" (lowercased) +""" + +def get_service_name(systemctl_id, software_name, suffix=""): + sid = str(systemctl_id).strip().lower() + software_name = str(software_name).strip().lower() + + # Determine suffix + if suffix is False: + sfx = "" # no suffix at all + elif suffix == "" or suffix is None: + sfx = ".service" + else: + sfx = str(suffix).strip().lower() + + if sid.endswith("@"): + base = sid[:-1] # drop the trailing '@' + return f"{base}.{software_name}@{sfx}" + else: + return f"{sid}.{software_name}{sfx}" + +class FilterModule(object): + def filters(self): + return {"get_service_name": get_service_name} diff --git a/build/lib/filter_plugins/get_service_script_path.py b/build/lib/filter_plugins/get_service_script_path.py new file mode 100644 index 00000000..b091df1d --- /dev/null +++ b/build/lib/filter_plugins/get_service_script_path.py @@ -0,0 +1,24 @@ +# filter_plugins/get_service_script_path.py +# Custom Ansible filter to generate service script paths. + +def get_service_script_path(systemctl_id, script_type): + """ + Build the path to a service script based on systemctl_id and type. + + :param systemctl_id: The identifier of the system service. + :param script_type: The script type/extension (e.g., sh, py, yml). + :return: The full path string. 
+ """ + if not systemctl_id or not script_type: + raise ValueError("Both systemctl_id and script_type are required") + + return f"/opt/scripts/systemctl/{systemctl_id}/script.{script_type}" + + +class FilterModule(object): + """ Custom filters for Ansible """ + + def filters(self): + return { + "get_service_script_path": get_service_script_path + } diff --git a/build/lib/filter_plugins/get_url.py b/build/lib/filter_plugins/get_url.py new file mode 100644 index 00000000..3322d4f6 --- /dev/null +++ b/build/lib/filter_plugins/get_url.py @@ -0,0 +1,11 @@ +#!/usr/bin/python +import sys, os +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) +from module_utils.get_url import get_url + +class FilterModule(object): + ''' Infinito.Nexus application config extraction filters ''' + def filters(self): + return { + 'get_url': get_url, + } diff --git a/build/lib/filter_plugins/has_env.py b/build/lib/filter_plugins/has_env.py new file mode 100644 index 00000000..3a93b298 --- /dev/null +++ b/build/lib/filter_plugins/has_env.py @@ -0,0 +1,14 @@ +import os + +def has_env(application_id, base_dir='.'): + """ + Check if env.j2 exists under roles/{{ application_id }}/templates/env.j2 + """ + path = os.path.join(base_dir, 'roles', application_id, 'templates', 'env.j2') + return os.path.isfile(path) + +class FilterModule(object): + def filters(self): + return { + 'has_env': has_env, + } diff --git a/build/lib/filter_plugins/invokable_paths.py b/build/lib/filter_plugins/invokable_paths.py new file mode 100644 index 00000000..f8662edf --- /dev/null +++ b/build/lib/filter_plugins/invokable_paths.py @@ -0,0 +1,113 @@ +import os +import yaml +from typing import Dict, List, Optional + +def get_invokable_paths( + roles_file: Optional[str] = None, + suffix: Optional[str] = None +) -> List[str]: + """ + Load nested roles YAML and return dash-joined paths where 'invokable' is True. Appends suffix if provided. + """ + if not roles_file: + script_dir = os.path.dirname(os.path.abspath(__file__)) + project_root = os.path.dirname(script_dir) + roles_file = os.path.join(project_root, 'roles', 'categories.yml') + + try: + with open(roles_file, 'r') as f: + data = yaml.safe_load(f) or {} + except FileNotFoundError: + raise FileNotFoundError(f"Roles file not found: {roles_file}") + except yaml.YAMLError as e: + raise yaml.YAMLError(f"Error parsing YAML {roles_file}: {e}") + + if not isinstance(data, dict): + raise ValueError("YAML root is not a dictionary") + + roles = data + if 'roles' in roles and isinstance(roles['roles'], dict) and len(roles) == 1: + roles = roles['roles'] + + def _recurse(subroles: Dict[str, dict], parent: List[str] = None) -> List[str]: + parent = parent or [] + found: List[str] = [] + METADATA = {'title', 'description', 'icon', 'invokable'} + + for key, cfg in subroles.items(): + path = parent + [key] + if cfg.get('invokable', False): + p = '-'.join(path) + if suffix: + p += suffix + found.append(p) + + children = { + ck: cv for ck, cv in cfg.items() + if ck not in METADATA and isinstance(cv, dict) + } + if children: + found.extend(_recurse(children, path)) + return found + + return _recurse(roles) + + +def get_non_invokable_paths( + roles_file: Optional[str] = None, + suffix: Optional[str] = None +) -> List[str]: + """ + Load nested roles YAML and return dash-joined paths where 'invokable' is False or missing. + Appends suffix if provided. 
+ """ + if not roles_file: + script_dir = os.path.dirname(os.path.abspath(__file__)) + project_root = os.path.dirname(script_dir) + roles_file = os.path.join(project_root, 'roles', 'categories.yml') + + try: + with open(roles_file, 'r') as f: + data = yaml.safe_load(f) or {} + except FileNotFoundError: + raise FileNotFoundError(f"Roles file not found: {roles_file}") + except yaml.YAMLError as e: + raise yaml.YAMLError(f"Error parsing YAML {roles_file}: {e}") + + if not isinstance(data, dict): + raise ValueError("YAML root is not a dictionary") + + roles = data + if 'roles' in roles and isinstance(roles['roles'], dict) and len(roles) == 1: + roles = roles['roles'] + + def _recurse_non(subroles: Dict[str, dict], parent: List[str] = None) -> List[str]: + parent = parent or [] + found: List[str] = [] + METADATA = {'title', 'description', 'icon', 'invokable'} + + for key, cfg in subroles.items(): + path = parent + [key] + p = '-'.join(path) + inv = cfg.get('invokable', False) + if not inv: + entry = p + (suffix or "") + found.append(entry) + + children = { + ck: cv for ck, cv in cfg.items() + if ck not in METADATA and isinstance(cv, dict) + } + if children: + found.extend(_recurse_non(children, path)) + return found + + return _recurse_non(roles) + + +class FilterModule: + def filters(self): + return { + 'invokable_paths': get_invokable_paths, + 'non_invokable_paths': get_non_invokable_paths + } diff --git a/build/lib/filter_plugins/memory_filters.py b/build/lib/filter_plugins/memory_filters.py new file mode 100644 index 00000000..5c14eca2 --- /dev/null +++ b/build/lib/filter_plugins/memory_filters.py @@ -0,0 +1,179 @@ +from __future__ import annotations + +import sys, os, re +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) + +from ansible.errors import AnsibleFilterError +from module_utils.config_utils import get_app_conf +from module_utils.entity_name_utils import get_entity_name + +# Regex and unit conversion table +_UNIT_RE = re.compile(r'^\s*(\d+(?:\.\d+)?)\s*([kKmMgGtT]?[bB]?)?\s*$') +_FACTORS = { + '': 1, 'b': 1, + 'k': 1024, 'kb': 1024, + 'm': 1024**2, 'mb': 1024**2, + 'g': 1024**3, 'gb': 1024**3, + 't': 1024**4, 'tb': 1024**4, +} + +# ------------------------------------------------------ +# Helpers: unit conversion +# ------------------------------------------------------ + +def _to_bytes(v: str) -> int: + """Convert a human-readable size string (e.g., '2g', '512m') to bytes.""" + if v is None: + raise AnsibleFilterError("memory_filters: size value is None") + + s = str(v).strip() + m = _UNIT_RE.match(s) + if not m: + raise AnsibleFilterError(f"memory_filters: invalid size '{v}'") + + num, unit = m.group(1), (m.group(2) or '').lower() + + try: + val = float(num) + except ValueError as e: + raise AnsibleFilterError(f"memory_filters: invalid numeric size '{v}'") from e + + factor = _FACTORS.get(unit) + if factor is None: + raise AnsibleFilterError(f"memory_filters: unknown unit in '{v}'") + + return int(val * factor) + + +def _to_mb(v: str) -> int: + """Convert human-readable size to megabytes.""" + return max(0, _to_bytes(v) // (1024 * 1024)) + + +# ------------------------------------------------------ +# JVM-specific helpers +# ------------------------------------------------------ + +def _svc(app_id: str) -> str: + """Resolve the internal service name for JVM-based applications.""" + return get_entity_name(app_id) + + +def _mem_limit_mb(apps: dict, app_id: str) -> int: + """Resolve mem_limit for the JVM service of the given application.""" + svc = 
_svc(app_id) + raw = get_app_conf(apps, app_id, f"docker.services.{svc}.mem_limit") + mb = _to_mb(raw) + + if mb <= 0: + raise AnsibleFilterError( + f"memory_filters: mem_limit for '{svc}' must be > 0 MB (got '{raw}')" + ) + return mb + + +def _mem_res_mb(apps: dict, app_id: str) -> int: + """Resolve mem_reservation for the JVM service of the given application.""" + svc = _svc(app_id) + raw = get_app_conf(apps, app_id, f"docker.services.{svc}.mem_reservation") + mb = _to_mb(raw) + + if mb <= 0: + raise AnsibleFilterError( + f"memory_filters: mem_reservation for '{svc}' must be > 0 MB (got '{raw}')" + ) + return mb + + +def jvm_max_mb(apps: dict, app_id: str) -> int: + """ + Compute recommended JVM Xmx in MB using: + Xmx = min( + floor(0.7 * mem_limit), + mem_limit - 1024, + 12288 + ) + with a lower bound of 1024 MB. + """ + limit_mb = _mem_limit_mb(apps, app_id) + c1 = (limit_mb * 7) // 10 + c2 = max(0, limit_mb - 1024) + c3 = 12288 + + return max(1024, min(c1, c2, c3)) + + +def jvm_min_mb(apps: dict, app_id: str) -> int: + """ + Compute recommended JVM Xms in MB using: + Xms = min( + floor(Xmx / 2), + mem_reservation, + Xmx + ) + with a lower bound of 512 MB. + """ + xmx = jvm_max_mb(apps, app_id) + res = _mem_res_mb(apps, app_id) + + return max(512, min(xmx // 2, res, xmx)) + + +# ------------------------------------------------------ +# Redis-specific helpers (always service name "redis") +# ------------------------------------------------------ + +def _redis_mem_limit_mb(apps: dict, app_id: str, default_mb: int = 256) -> int: + """ + Resolve mem_limit for the Redis service of an application. + Unlike JVM-based services, Redis always uses the service name "redis". + + If no mem_limit is defined, fall back to default_mb. + """ + raw = get_app_conf( + apps, + app_id, + "docker.services.redis.mem_limit", + strict=False, + default=f"{default_mb}m", + ) + + mb = _to_mb(raw) + + if mb <= 0: + raise AnsibleFilterError( + f"memory_filters: mem_limit for 'redis' must be > 0 MB (got '{raw}')" + ) + + return mb + + +def redis_maxmemory_mb( + apps: dict, + app_id: str, + factor: float = 0.8, + min_mb: int = 64 +) -> int: + """ + Compute recommended Redis `maxmemory` in MB. + + * factor: fraction of allowed memory used for Redis data (default 0.8) + * min_mb: minimum floor value (default 64 MB) + + maxmemory = max(min_mb, floor(factor * mem_limit)) + """ + limit_mb = _redis_mem_limit_mb(apps, app_id) + return max(min_mb, int(limit_mb * factor)) + + +# ------------------------------------------------------ +# Filter module +# ------------------------------------------------------ + +class FilterModule(object): + def filters(self): + return { + "jvm_max_mb": jvm_max_mb, + "jvm_min_mb": jvm_min_mb, + "redis_maxmemory_mb": redis_maxmemory_mb, + } diff --git a/build/lib/filter_plugins/merge_mapping.py b/build/lib/filter_plugins/merge_mapping.py new file mode 100644 index 00000000..60d79997 --- /dev/null +++ b/build/lib/filter_plugins/merge_mapping.py @@ -0,0 +1,42 @@ +# filter_plugins/merge_mapping.py + +from ansible.errors import AnsibleFilterError + +def merge_mapping(list1, list2, key_name='source'): + """ + Merge two lists of dicts on a given key. + - list1, list2: each must be a List[Dict] + - key_name: the field to match on + If both lists contain an item with the same key_name value, + their dictionaries are merged (fields from list2 overwrite or add to list1). 
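+
+    Example (illustrative):
+    merge_mapping([{'source': 'a.example.org', 'target': 'x'}],
+                  [{'source': 'a.example.org', 'target': 'y'}])
+    -> [{'source': 'a.example.org', 'target': 'y'}]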
+ """ + if not isinstance(list1, list) or not isinstance(list2, list): + raise AnsibleFilterError("merge_mapping expects two lists") + + merged = {} + # First, copy items from list1 + for item in list1: + if key_name not in item: + raise AnsibleFilterError(f"Item {item} is missing the key '{key_name}'") + merged[item[key_name]] = item.copy() + + # Then merge in items from list2 + for item in list2: + if key_name not in item: + raise AnsibleFilterError(f"Item {item} is missing the key '{key_name}'") + k = item[key_name] + if k in merged: + # update will overwrite existing fields or add new ones + merged[k].update(item) + else: + merged[k] = item.copy() + + # Return as a list of dicts again + return list(merged.values()) + + +class FilterModule(object): + def filters(self): + return { + 'merge_mapping': merge_mapping, + } diff --git a/build/lib/filter_plugins/merge_with_defaults.py b/build/lib/filter_plugins/merge_with_defaults.py new file mode 100644 index 00000000..82e7fd78 --- /dev/null +++ b/build/lib/filter_plugins/merge_with_defaults.py @@ -0,0 +1,39 @@ +def merge_with_defaults(defaults, customs): + """ + Recursively merge two dicts (customs into defaults). + For each top-level key in customs, ensure all dict keys from defaults are present (at least empty dict). + Customs always take precedence. + """ + def merge_dict(d1, d2): + # Recursively merge d2 into d1, d2 wins + result = dict(d1) if d1 else {} + for k, v in (d2 or {}).items(): + if k in result and isinstance(result[k], dict) and isinstance(v, dict): + result[k] = merge_dict(result[k], v) + else: + result[k] = v + return result + + merged = {} + # Union of all app-keys + all_keys = set(defaults or {}).union(set(customs or {})) + for app_key in all_keys: + base = (defaults or {}).get(app_key, {}) + override = (customs or {}).get(app_key, {}) + + # Step 1: merge override into base + result = merge_dict(base, override) + + # Step 2: ensure all dict keys from base exist in result (at least {}) + for k, v in (base or {}).items(): + if isinstance(v, dict) and k not in result: + result[k] = {} + merged[app_key] = result + return merged + +class FilterModule(object): + '''Custom merge filter for Infinito.Nexus: merge_with_defaults''' + def filters(self): + return { + 'merge_with_defaults': merge_with_defaults, + } diff --git a/build/lib/filter_plugins/node_autosize.py b/build/lib/filter_plugins/node_autosize.py new file mode 100644 index 00000000..424385e2 --- /dev/null +++ b/build/lib/filter_plugins/node_autosize.py @@ -0,0 +1,141 @@ +# filter_plugins/node_autosize.py +# Reuse app config to derive sensible Node.js heap sizes for containers. +# +# Usage example (Jinja): +# {{ applications | node_max_old_space_size('web-app-nextcloud', 'whiteboard') }} +# +# Heuristics (defaults): +# - candidate = 35% of mem_limit +# - min = 768 MB (required minimum) +# - cap = min(3072 MB, 60% of mem_limit) +# +# NEW: If mem_limit (container cgroup RAM) is smaller than min_mb, we raise an +# exception β€” to prevent a misconfiguration where Node's heap could exceed the cgroup +# and be OOM-killed. 
+ +from __future__ import annotations +import re +from ansible.errors import AnsibleFilterError + +# Import the shared config resolver from module_utils +try: + from module_utils.config_utils import get_app_conf, AppConfigKeyError +except Exception as e: + raise AnsibleFilterError( + f"Failed to import get_app_conf from module_utils.config_utils: {e}" + ) + +_SIZE_RE = re.compile(r"^\s*(\d+(?:\.\d+)?)\s*([kmgtp]?i?b?)?\s*$", re.IGNORECASE) +_MULT = { + "": 1, + "b": 1, + "k": 10**3, "kb": 10**3, + "m": 10**6, "mb": 10**6, + "g": 10**9, "gb": 10**9, + "t": 10**12, "tb": 10**12, + "p": 10**15, "pb": 10**15, + "kib": 1024, + "mib": 1024**2, + "gib": 1024**3, + "tib": 1024**4, + "pib": 1024**5, +} + + +def _to_bytes(val): + """Convert numeric or string memory limits (e.g. '512m', '2GiB') to bytes.""" + if val is None or val == "": + return None + if isinstance(val, (int, float)): + return int(val) + if not isinstance(val, str): + raise AnsibleFilterError(f"Unsupported mem_limit type: {type(val).__name__}") + m = _SIZE_RE.match(val) + if not m: + raise AnsibleFilterError(f"Unrecognized mem_limit string: {val!r}") + num = float(m.group(1)) + unit = (m.group(2) or "").lower() + if unit not in _MULT: + raise AnsibleFilterError(f"Unknown unit in mem_limit: {unit!r}") + return int(num * _MULT[unit]) + + +def _mb(bytes_val: int) -> int: + """Return decimal MB (10^6) as integer β€” Node expects MB units.""" + return int(round(bytes_val / 10**6)) + + +def _compute_old_space_mb( + total_mb: int, pct: float, min_mb: int, hardcap_mb: int, safety_cap_pct: float +) -> int: + """ + Compute Node.js old-space heap (MB) with safe minimum and cap handling. + + NOTE: The calling function ensures total_mb >= min_mb; here we only + apply the sizing heuristics and caps. + """ + candidate = int(total_mb * float(pct)) + safety_cap = int(total_mb * float(safety_cap_pct)) + final_cap = min(int(hardcap_mb), safety_cap) + + # Enforce minimum first; only apply cap if it's above the minimum + candidate = max(candidate, int(min_mb)) + if final_cap >= int(min_mb): + candidate = min(candidate, final_cap) + + # Never below a tiny hard floor + return max(candidate, 128) + + +def node_max_old_space_size( + applications: dict, + application_id: str, + service_name: str, + pct: float = 0.35, + min_mb: int = 768, + hardcap_mb: int = 3072, + safety_cap_pct: float = 0.60, +) -> int: + """ + Derive Node.js --max-old-space-size (MB) from the service's mem_limit in app config. + + Looks up: docker.services..mem_limit for the given application_id. + + Raises: + AnsibleFilterError if mem_limit is missing/invalid OR if mem_limit (MB) < min_mb. + """ + try: + mem_limit = get_app_conf( + applications=applications, + application_id=application_id, + config_path=f"docker.services.{service_name}.mem_limit", + strict=True, + default=None, + ) + except AppConfigKeyError as e: + raise AnsibleFilterError(str(e)) + + if mem_limit in (None, False, ""): + raise AnsibleFilterError( + f"mem_limit not set for application '{application_id}', service '{service_name}'" + ) + + total_bytes = _to_bytes(mem_limit) + total_mb = _mb(total_bytes) + + # NEW: guardrail β€” refuse to size a heap larger than the cgroup limit + if total_mb < int(min_mb): + raise AnsibleFilterError( + f"mem_limit ({total_mb} MB) is below the required minimum heap ({int(min_mb)} MB) " + f"for application '{application_id}', service '{service_name}'. " + f"Increase mem_limit or lower min_mb." 
+ ) + + return _compute_old_space_mb(total_mb, pct, min_mb, hardcap_mb, safety_cap_pct) + + +class FilterModule(object): + def filters(self): + return { + "node_max_old_space_size": node_max_old_space_size, + } diff --git a/build/lib/filter_plugins/redirect_filters.py b/build/lib/filter_plugins/redirect_filters.py new file mode 100644 index 00000000..0c686d9a --- /dev/null +++ b/build/lib/filter_plugins/redirect_filters.py @@ -0,0 +1,36 @@ +from ansible.errors import AnsibleFilterError + +class FilterModule(object): + """ + Custom filters for redirect domain mappings + """ + + def filters(self): + return { + "add_redirect_if_group": self.add_redirect_if_group, + } + + @staticmethod + def add_redirect_if_group(redirect_list, group, source, target, group_names): + """ + Append {"source": source, "target": target} to *redirect_list* + **only** if *group* is contained in *group_names*. + + Usage in Jinja: + {{ redirect_list + | add_redirect_if_group('lam', + 'ldap.' ~ PRIMARY_DOMAIN, + domains | get_domain('web-app-lam'), + group_names) }} + """ + try: + # Make a copy so we don’t mutate the original list in place + redirects = list(redirect_list) + + if group in group_names: + redirects.append({"source": source, "target": target}) + + return redirects + + except Exception as exc: + raise AnsibleFilterError(f"add_redirect_if_group failed: {exc}") diff --git a/build/lib/filter_plugins/reserved_users.py b/build/lib/filter_plugins/reserved_users.py new file mode 100644 index 00000000..6c4986be --- /dev/null +++ b/build/lib/filter_plugins/reserved_users.py @@ -0,0 +1,53 @@ +from ansible.errors import AnsibleFilterError +import re + + +def reserved_usernames(users_dict): + """ + Return a list of usernames where reserved: true. + Usernames are regex-escaped to be safely embeddable. + """ + if not isinstance(users_dict, dict): + raise AnsibleFilterError("reserved_usernames expects a dictionary.") + + results = [] + + for _key, user in users_dict.items(): + if not isinstance(user, dict): + continue + if not user.get("reserved", False): + continue + username = user.get("username") + if username: + results.append(re.escape(str(username))) + + return results + + +def non_reserved_users(users_dict): + """ + Return a dict of users where reserved != true. 
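+
+    Example (illustrative):
+    non_reserved_users({'alice': {'username': 'alice'},
+                        'root': {'username': 'root', 'reserved': True}})
+    -> {'alice': {'username': 'alice'}}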
+ """ + if not isinstance(users_dict, dict): + raise AnsibleFilterError("non_reserved_users expects a dictionary.") + + results = {} + + for key, user in users_dict.items(): + if not isinstance(user, dict): + continue + if user.get("reserved", False): + continue + results[key] = user + + return results + + +class FilterModule(object): + """User filters for extracting reserved and non-reserved subsets.""" + + def filters(self): + return { + "reserved_usernames": reserved_usernames, + "non_reserved_users": non_reserved_users, + } diff --git a/build/lib/filter_plugins/resource_filter.py b/build/lib/filter_plugins/resource_filter.py new file mode 100644 index 00000000..00d6116b --- /dev/null +++ b/build/lib/filter_plugins/resource_filter.py @@ -0,0 +1,40 @@ +# filter_plugins/resource_filter.py +from __future__ import annotations + +import sys, os +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) + +from module_utils.config_utils import get_app_conf, AppConfigKeyError, ConfigEntryNotSetError # noqa: F401 +from module_utils.entity_name_utils import get_entity_name + +from ansible.errors import AnsibleFilterError + + +def resource_filter( + applications: dict, + application_id: str, + key: str, + service_name: str, + hard_default, +): + """ + Lookup order: + 1) docker.services.. + 2) hard_default (mandatory) + + - service_name may be "" β†’ will resolve to get_entity_name(application_id). + - hard_default is mandatory (no implicit None). + - required=False always. + """ + try: + primary_service = service_name if service_name != "" else get_entity_name(application_id) + return get_app_conf(applications, application_id, f"docker.services.{primary_service}.{key}", False, hard_default) + except (AppConfigKeyError, ConfigEntryNotSetError) as e: + raise AnsibleFilterError(str(e)) + + +class FilterModule(object): + def filters(self): + return { + "resource_filter": resource_filter, + } diff --git a/build/lib/filter_plugins/role_path_by_app_id.py b/build/lib/filter_plugins/role_path_by_app_id.py new file mode 100644 index 00000000..a15878ed --- /dev/null +++ b/build/lib/filter_plugins/role_path_by_app_id.py @@ -0,0 +1,86 @@ +import os +import glob +import yaml +from ansible.errors import AnsibleFilterError + + +def abs_role_path_by_application_id(application_id): + """ + Searches all roles/*/vars/main.yml for application_id and returns + the absolute path of the role that matches. Raises an error if + zero or more than one match is found. + """ + base_dir = os.getcwd() + pattern = os.path.join(base_dir, 'roles', '*', 'vars', 'main.yml') + matches = [] + + for filepath in glob.glob(pattern): + try: + with open(filepath, 'r') as f: + data = yaml.safe_load(f) or {} + except Exception: + continue + + if data.get('application_id') == application_id: + role_dir = os.path.dirname(os.path.dirname(filepath)) + abs_path = os.path.abspath(role_dir) + matches.append(abs_path) + + if len(matches) > 1: + raise AnsibleFilterError( + f"Multiple roles found with application_id='{application_id}': {matches}. " + "The application_id must be unique." + ) + if not matches: + raise AnsibleFilterError( + f"No role found with application_id='{application_id}'." + ) + + return matches[0] + + +def rel_role_path_by_application_id(application_id): + """ + Searches all roles/*/vars/main.yml for application_id and returns + the relative path (from the project root) of the role that matches. + Raises an error if zero or more than one match is found. 
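+
+ Example (illustrative): rel_role_path_by_application_id('web-app-lam')
+ -> 'roles/web-app-lam', assuming that role's vars/main.yml declares
+ application_id: web-app-lam.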
+ """ + base_dir = os.getcwd() + pattern = os.path.join(base_dir, 'roles', '*', 'vars', 'main.yml') + matches = [] + + for filepath in glob.glob(pattern): + try: + with open(filepath, 'r') as f: + data = yaml.safe_load(f) or {} + except Exception: + continue + + if data.get('application_id') == application_id: + role_dir = os.path.dirname(os.path.dirname(filepath)) + rel_path = os.path.relpath(role_dir, base_dir) + matches.append(rel_path) + + if len(matches) > 1: + raise AnsibleFilterError( + f"Multiple roles found with application_id='{application_id}': {matches}. " + "The application_id must be unique." + ) + if not matches: + raise AnsibleFilterError( + f"No role found with application_id='{application_id}'." + ) + + return matches[0] + + +class FilterModule(object): + """ + Provides the filters `abs_role_path_by_application_id` and + `rel_role_path_by_application_id`. + """ + def filters(self): + return { + 'abs_role_path_by_application_id': abs_role_path_by_application_id, + 'rel_role_path_by_application_id': rel_role_path_by_application_id, + } diff --git a/build/lib/filter_plugins/text_filters.py b/build/lib/filter_plugins/text_filters.py new file mode 100644 index 00000000..58967e21 --- /dev/null +++ b/build/lib/filter_plugins/text_filters.py @@ -0,0 +1,42 @@ +from ansible.errors import AnsibleFilterError +import re + +def to_one_liner(s): + """ + Collapse any multi-line string into a single line, + trim extra whitespace, and remove JavaScript comments. + Supports removal of both '//' line comments and '/*...*/' block comments, + but preserves '//' inside string literals and templating expressions. + """ + if not isinstance(s, str): + raise AnsibleFilterError("to_one_liner() expects a string") + + # 1) Remove block comments /* ... */ + no_block_comments = re.sub(r'/\*.*?\*/', '', s, flags=re.DOTALL) + + # 2) Extract string literals to protect them from comment removal + string_pattern = re.compile(r"'(?:\\.|[^'\\])*'|\"(?:\\.|[^\"\\])*\"") + literals = [] + def _extract(match): + idx = len(literals) + literals.append(match.group(0)) + return f"__STR{idx}__" + temp = string_pattern.sub(_extract, no_block_comments) + + # 3) Remove line comments // ... + temp = re.sub(r'//.*$', '', temp, flags=re.MULTILINE) + + # 4) Restore string literals + for idx, lit in enumerate(literals): + temp = temp.replace(f"__STR{idx}__", lit) + + # 5) Collapse all whitespace + one_liner = re.sub(r'\s+', ' ', temp).strip() + + return one_liner + +class FilterModule(object): + def filters(self): + return { + 'to_one_liner': to_one_liner, + } diff --git a/build/lib/filter_plugins/timeout_start_sec_for_domains.py b/build/lib/filter_plugins/timeout_start_sec_for_domains.py new file mode 100644 index 00000000..144dedfa --- /dev/null +++ b/build/lib/filter_plugins/timeout_start_sec_for_domains.py @@ -0,0 +1,67 @@ +# filter_plugins/timeout_start_sec_for_domains.py (nur Kern geΓ€ndert) +from ansible.errors import AnsibleFilterError + +class FilterModule(object): + def filters(self): + return { + "timeout_start_sec_for_domains": self.timeout_start_sec_for_domains, + } + + def timeout_start_sec_for_domains( + self, + domains_dict, + include_www=True, + per_domain_seconds=25, + overhead_seconds=30, + min_seconds=120, + max_seconds=3600, + ): + """ + Args: + domains_dict (dict | list[str] | str): Either the domain mapping dict + (values can be str | list[str] | dict[str,str]) or an already + flattened list of domains, or a single domain string. + include_www (bool): If true, add 'www.' for non-www entries. + ... 
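+
+ Worked example (illustrative): 10 base domains with include_www=True yield
+ 20 unique domains, so the result is 30 + 25 * 20 = 530 seconds, which
+ already lies inside the [120, 3600] clamp.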
+ """ + try: + # Local flattener for dict inputs (like your generate_all_domains source) + def _flatten_from_dict(domains_map): + flat = [] + for v in (domains_map or {}).values(): + if isinstance(v, str): + flat.append(v) + elif isinstance(v, list): + flat.extend(v) + elif isinstance(v, dict): + flat.extend(v.values()) + return flat + + # Accept dict | list | str + if isinstance(domains_dict, dict): + flat = _flatten_from_dict(domains_dict) + elif isinstance(domains_dict, list): + flat = list(domains_dict) + elif isinstance(domains_dict, str): + flat = [domains_dict] + else: + raise AnsibleFilterError( + "Expected 'domains_dict' to be dict | list | str." + ) + + if include_www: + base_unique = sorted(set(flat)) + www_variants = [f"www.{d}" for d in base_unique if not str(d).lower().startswith("www.")] + flat.extend(www_variants) + + unique_domains = sorted(set(flat)) + count = len(unique_domains) + + raw = overhead_seconds + per_domain_seconds * count + clamped = max(min_seconds, min(max_seconds, int(raw))) + return clamped + + except AnsibleFilterError: + raise + except Exception as exc: + raise AnsibleFilterError(f"timeout_start_sec_for_domains failed: {exc}") diff --git a/build/lib/filter_plugins/to_primary_domain.py b/build/lib/filter_plugins/to_primary_domain.py new file mode 100644 index 00000000..75583076 --- /dev/null +++ b/build/lib/filter_plugins/to_primary_domain.py @@ -0,0 +1,30 @@ +from ansible.errors import AnsibleFilterError + +try: + import tld + from tld.exceptions import TldDomainNotFound, TldBadUrl +except ImportError: + raise AnsibleFilterError("The 'tld' Python package is required for the to_primary_domain filter. Install with 'pip install tld'.") + +class FilterModule(object): + ''' Custom filter to extract the primary/zone domain from a full domain name ''' + + def filters(self): + return { + 'to_primary_domain': self.to_primary_domain, + } + + def to_primary_domain(self, domain): + """ + Converts a full domain or subdomain into its primary/zone domain. + E.g. 'foo.bar.example.co.uk' -> 'example.co.uk' + """ + if not isinstance(domain, str): + raise AnsibleFilterError("Input to to_primary_domain must be a string") + try: + res = tld.get_fld(domain, fix_protocol=True) + if not res: + raise AnsibleFilterError(f"Could not extract primary domain from: {domain}") + return res + except (TldDomainNotFound, TldBadUrl) as exc: + raise AnsibleFilterError(str(exc)) diff --git a/build/lib/filter_plugins/url_join.py b/build/lib/filter_plugins/url_join.py new file mode 100644 index 00000000..bfa62109 --- /dev/null +++ b/build/lib/filter_plugins/url_join.py @@ -0,0 +1,146 @@ +""" +Ansible filter plugin that safely joins URL components from a list. +- Requires a valid '://' in the first element (any RFC-3986-ish scheme) +- Preserves the double slash after the scheme, collapses other duplicate slashes +- Supports query parts introduced by elements starting with '?' or '&' + * first query element uses '?', subsequent use '&' (regardless of given prefix) + * each query element must be exactly one 'key=value' pair + * query elements may only appear after path elements; once query starts, no more path parts +- Raises specific AnsibleFilterError messages for common misuse +""" + +import re +from ansible.errors import AnsibleFilterError + +_SCHEME_RE = re.compile(r'^([a-zA-Z][a-zA-Z0-9+.\-]*://)(.*)$') +_QUERY_PAIR_RE = re.compile(r'^[^&=?#]+=[^&?#]*$') # key=value (no '&', no extra '?' 
or '#') + +def _to_str_or_error(obj, index): + """Cast to str, raising a specific AnsibleFilterError with index context.""" + try: + return str(obj) + except Exception as e: + raise AnsibleFilterError( + f"url_join: unable to convert part at index {index} to string: {e}" + ) + +def url_join(parts): + """ + Join a list of URL parts, URL-aware (scheme, path, query). + + Args: + parts (list|tuple): URL segments. First element MUST include '://'. + Path elements are plain strings. + Query elements must start with '?' or '&' and contain exactly one 'key=value'. + + Returns: + str: Joined URL. + + Raises: + AnsibleFilterError: with specific, descriptive messages. + """ + # --- basic input validation --- + if parts is None: + raise AnsibleFilterError("url_join: parts must be a non-empty list; got None") + if not isinstance(parts, (list, tuple)): + raise AnsibleFilterError( + f"url_join: parts must be a list/tuple; got {type(parts).__name__}" + ) + if len(parts) == 0: + raise AnsibleFilterError("url_join: parts must be a non-empty list") + + # --- first element must carry a scheme --- + first_raw = parts[0] + if first_raw is None: + raise AnsibleFilterError( + "url_join: first element must include a scheme like 'https://'; got None" + ) + + first_str = _to_str_or_error(first_raw, 0) + m = _SCHEME_RE.match(first_str) + if not m: + raise AnsibleFilterError( + "url_join: first element must start with '://', e.g. 'https://example.com'; " + f"got '{first_str}'" + ) + + scheme = m.group(1) # e.g., 'https://', 'ftp://', 'myapp+v1://' + after_scheme = m.group(2).lstrip('/') # strip only leading slashes right after scheme + + # --- iterate parts: collect path parts until first query part; then only query parts allowed --- + path_parts = [] + query_pairs = [] + in_query = False + + for i, p in enumerate(parts): + if p is None: + # skip None silently (consistent with path_join-ish behavior) + continue + + s = _to_str_or_error(p, i) + + # disallow additional scheme in later parts + if i > 0 and "://" in s: + raise AnsibleFilterError( + f"url_join: only the first element may contain a scheme; part at index {i} " + f"looks like a URL with scheme ('{s}')." + ) + + # first element: replace with remainder after scheme and continue + if i == 0: + s = after_scheme + + # check if this is a query element (starts with ? or &) + if s.startswith('?') or s.startswith('&'): + in_query = True + raw_pair = s[1:] # strip the leading ? 
or & + if raw_pair == '': + raise AnsibleFilterError( + f"url_join: query element at index {i} is empty; expected '?key=value' or '&key=value'" + ) + # Disallow multiple pairs in a single element; enforce exactly one key=value + if '&' in raw_pair: + raise AnsibleFilterError( + f"url_join: query element at index {i} must contain exactly one 'key=value' pair " + f"without '&'; got '{s}'" + ) + if not _QUERY_PAIR_RE.match(raw_pair): + raise AnsibleFilterError( + f"url_join: query element at index {i} must match 'key=value' (no extra '?', '&', '#'); got '{s}'" + ) + query_pairs.append(raw_pair) + else: + # non-query element + if in_query: + # once query started, no more path parts allowed + raise AnsibleFilterError( + f"url_join: path element found at index {i} after query parameters started; " + f"query parts must come last" + ) + # normal path part: strip slashes to avoid duplicate '/' + path_parts.append(s.strip('/')) + + # normalize path: remove empty chunks + path_parts = [p for p in path_parts if p != ''] + + # --- build result --- + # path portion + if path_parts: + joined_path = "/".join(path_parts) + base = scheme + joined_path + else: + # no path beyond scheme + base = scheme + + # query portion + if query_pairs: + base = base + "?" + "&".join(query_pairs) + + return base + + +class FilterModule(object): + def filters(self): + return { + 'url_join': url_join, + } diff --git a/build/lib/filter_plugins/volume_path.py b/build/lib/filter_plugins/volume_path.py new file mode 100644 index 00000000..c35581d8 --- /dev/null +++ b/build/lib/filter_plugins/volume_path.py @@ -0,0 +1,21 @@ +from ansible.errors import AnsibleFilterError + +def docker_volume_path(volume_name: str) -> str: + """ + Returns the absolute filesystem path of a Docker volume. + + Example: + "akaunting_data" -> "/var/lib/docker/volumes/akaunting_data/_data/" + """ + if not volume_name or not isinstance(volume_name, str): + raise AnsibleFilterError(f"Invalid volume name: {volume_name}") + + return f"/var/lib/docker/volumes/{volume_name}/_data/" + +class FilterModule(object): + """Docker volume path filters.""" + + def filters(self): + return { + "docker_volume_path": docker_volume_path, + } diff --git a/build/lib/library/__init__.py b/build/lib/library/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/build/lib/library/cert_check_exists.py b/build/lib/library/cert_check_exists.py new file mode 100644 index 00000000..56237f14 --- /dev/null +++ b/build/lib/library/cert_check_exists.py @@ -0,0 +1,26 @@ + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.cert_utils import CertUtils + +def main(): + module_args = dict( + domain=dict(type='str', required=True), + cert_base_path=dict(type='str', required=False, default='/etc/letsencrypt/live'), + debug=dict(type='bool', required=False, default=False), + ) + + module = AnsibleModule( + argument_spec=module_args + ) + + domain = module.params['domain'] + cert_base_path = module.params['cert_base_path'] + debug = module.params['debug'] + + folder = CertUtils.find_cert_for_domain(domain, cert_base_path, debug) + exists = folder is not None + + module.exit_json(exists=exists) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/build/lib/library/cert_folder_find.py b/build/lib/library/cert_folder_find.py new file mode 100644 index 00000000..3d15edc8 --- /dev/null +++ b/build/lib/library/cert_folder_find.py @@ -0,0 +1,27 @@ +from ansible.module_utils.basic import AnsibleModule +from 
ansible.module_utils.cert_utils import CertUtils + +def main(): + module_args = dict( + domain=dict(type='str', required=True), + cert_base_path=dict(type='str', required=False, default='/etc/letsencrypt/live'), + debug=dict(type='bool', required=False, default=False), + ) + + module = AnsibleModule( + argument_spec=module_args + ) + + domain = module.params['domain'] + cert_base_path = module.params['cert_base_path'] + debug = module.params['debug'] + + folder = CertUtils.find_cert_for_domain(domain, cert_base_path, debug) + + if folder is None: + module.fail_json(msg=f"No certificate covering domain {domain} found.") + else: + module.exit_json(folder=folder) + +if __name__ == '__main__': + main() diff --git a/build/lib/lookup_plugins/__init__.py b/build/lib/lookup_plugins/__init__.py new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/build/lib/lookup_plugins/__init__.py @@ -0,0 +1 @@ + diff --git a/build/lib/lookup_plugins/application_gid.py b/build/lib/lookup_plugins/application_gid.py new file mode 100644 index 00000000..2f7f5066 --- /dev/null +++ b/build/lib/lookup_plugins/application_gid.py @@ -0,0 +1,42 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import yaml + +from ansible.plugins.lookup import LookupBase +from ansible.errors import AnsibleError + +class LookupModule(LookupBase): + + def run(self, terms, variables=None, **kwargs): + application_id = terms[0] + base_gid = kwargs.get('base_gid', 10000) + roles_dir = kwargs.get('roles_dir', 'roles') + + if not os.path.isdir(roles_dir): + raise AnsibleError(f"Roles directory '{roles_dir}' not found") + + matched_roles = [] + + for root, dirs, files in os.walk(roles_dir): + if os.path.basename(root) == "vars" and "main.yml" in files: + vars_path = os.path.join(root, "main.yml") + try: + with open(vars_path, 'r') as f: + data = yaml.safe_load(f) or {} + app_id = data.get('application_id') + if app_id: + matched_roles.append((app_id, vars_path)) + except Exception as e: + raise AnsibleError(f"Error parsing {vars_path}: {e}") + + # sort alphabetically by application_id + sorted_ids = sorted(app_id for app_id, _ in matched_roles) + + try: + index = sorted_ids.index(application_id) + except ValueError: + raise AnsibleError(f"Application ID '{application_id}' not found in any role") + + return [base_gid + index] diff --git a/build/lib/lookup_plugins/colorscheme.py b/build/lib/lookup_plugins/colorscheme.py new file mode 100644 index 00000000..31535dde --- /dev/null +++ b/build/lib/lookup_plugins/colorscheme.py @@ -0,0 +1,10 @@ +from ansible.plugins.lookup import LookupBase +from colorscheme_generator import generate_full_palette + +class LookupModule(LookupBase): + def run(self, terms, variables=None, **kwargs): + base_color = terms[0] + count = kwargs.get('count') + shades = kwargs.get('shades') + invert_lightness = kwargs.get('invert_lightness', False) + return [generate_full_palette(base_color, count=count, shades=shades, invert_lightness=invert_lightness)] diff --git a/build/lib/lookup_plugins/local_mtime_qs.py b/build/lib/lookup_plugins/local_mtime_qs.py new file mode 100644 index 00000000..6e8f4aa2 --- /dev/null +++ b/build/lib/lookup_plugins/local_mtime_qs.py @@ -0,0 +1,53 @@ +from __future__ import annotations +from ansible.plugins.lookup import LookupBase +from ansible.errors import AnsibleError +import os + +class LookupModule(LookupBase): + """ + Return a cache-busting string based on the LOCAL file's mtime. 
+ + Usage (single path β†’ string via Jinja): + {{ lookup('local_mtime_qs', '/path/to/file.css') }} + -> "?version=1712323456" + + Options: + param (str): query parameter name (default: "version") + mode (str): "qs" (default) β†’ returns "?=" + "epoch" β†’ returns "" + + Multiple paths (returns list, one result per term): + {{ lookup('local_mtime_qs', '/a.js', '/b.js', param='v') }} + """ + + def run(self, terms, variables=None, **kwargs): + if not terms: + return [] + + param = kwargs.get('param', 'version') + mode = kwargs.get('mode', 'qs') + + if mode not in ('qs', 'epoch'): + raise AnsibleError("local_mtime_qs: 'mode' must be 'qs' or 'epoch'") + + results = [] + for term in terms: + path = os.path.abspath(os.path.expanduser(str(term))) + + # Fail fast if path is missing or not a regular file + if not os.path.exists(path): + raise AnsibleError(f"local_mtime_qs: file does not exist: {path}") + if not os.path.isfile(path): + raise AnsibleError(f"local_mtime_qs: not a regular file: {path}") + + try: + mtime = int(os.stat(path).st_mtime) + except OSError as e: + raise AnsibleError(f"local_mtime_qs: cannot stat '{path}': {e}") + + if mode == 'qs': + results.append(f"?{param}={mtime}") + else: # mode == 'epoch' + results.append(str(mtime)) + + return results diff --git a/build/lib/module_utils/__init__.py b/build/lib/module_utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/build/lib/module_utils/cert_utils.py b/build/lib/module_utils/cert_utils.py new file mode 100644 index 00000000..2ab7d032 --- /dev/null +++ b/build/lib/module_utils/cert_utils.py @@ -0,0 +1,206 @@ +#!/usr/bin/python + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os +import subprocess +import time +from datetime import datetime + +class CertUtils: + _domain_cert_mapping = None + _cert_snapshot = None + + @staticmethod + def run_openssl(cert_path): + try: + output = subprocess.check_output( + ['openssl', 'x509', '-in', cert_path, '-noout', '-text'], + universal_newlines=True + ) + return output + except subprocess.CalledProcessError: + return "" + + @staticmethod + def run_openssl_dates(cert_path): + """ + Returns (not_before_ts, not_after_ts) as POSIX timestamps or (None, None) on failure. 
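+
+ Note: %Z parsing yields a naive datetime, so .timestamp() interprets the
+ 'GMT' values in the host's local timezone; within this module the timestamps
+ are only compared against each other, so the constant offset does not
+ affect the newest-first ordering.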
+ """ + try: + output = subprocess.check_output( + ['openssl', 'x509', '-in', cert_path, '-noout', '-startdate', '-enddate'], + universal_newlines=True + ) + nb, na = None, None + for line in output.splitlines(): + line = line.strip() + if line.startswith('notBefore='): + nb = line.split('=', 1)[1].strip() + elif line.startswith('notAfter='): + na = line.split('=', 1)[1].strip() + def _parse(openssl_dt): + # OpenSSL format example: "Oct 10 12:34:56 2025 GMT" + return int(datetime.strptime(openssl_dt, "%b %d %H:%M:%S %Y %Z").timestamp()) + return (_parse(nb) if nb else None, _parse(na) if na else None) + except Exception: + return (None, None) + + @staticmethod + def extract_sans(cert_text): + dns_entries = [] + in_san = False + for line in cert_text.splitlines(): + line = line.strip() + if 'X509v3 Subject Alternative Name:' in line: + in_san = True + continue + if in_san: + if not line: + break + dns_entries += [e.strip().replace('DNS:', '') for e in line.split(',') if e.strip()] + return dns_entries + + @staticmethod + def list_cert_files(cert_base_path): + cert_files = [] + for root, dirs, files in os.walk(cert_base_path): + if 'cert.pem' in files: + cert_files.append(os.path.join(root, 'cert.pem')) + return cert_files + + @staticmethod + def matches(domain, san): + """RFC compliant SAN matching.""" + if san.startswith('*.'): + base = san[2:] + # Wildcard matches ONLY one additional label + if domain == base: + return False + if domain.endswith('.' + base) and domain.count('.') == base.count('.') + 1: + return True + return False + else: + return domain == san + + @classmethod + def build_snapshot(cls, cert_base_path): + snapshot = [] + for cert_file in cls.list_cert_files(cert_base_path): + try: + stat = os.stat(cert_file) + snapshot.append((cert_file, stat.st_mtime, stat.st_size)) + except FileNotFoundError: + continue + snapshot.sort() + return snapshot + + @classmethod + def snapshot_changed(cls, cert_base_path): + current_snapshot = cls.build_snapshot(cert_base_path) + if cls._cert_snapshot != current_snapshot: + cls._cert_snapshot = current_snapshot + return True + return False + + @classmethod + def refresh_cert_mapping(cls, cert_base_path, debug=False): + """ + Build mapping: SAN -> list of entries + entry = { + 'folder': str, + 'cert_path': str, + 'mtime': float, + 'not_before': int|None, + 'not_after': int|None, + 'is_wildcard': bool + } + """ + cert_files = cls.list_cert_files(cert_base_path) + mapping = {} + for cert_path in cert_files: + cert_text = cls.run_openssl(cert_path) + if not cert_text: + continue + sans = cls.extract_sans(cert_text) + folder = os.path.basename(os.path.dirname(cert_path)) + try: + mtime = os.stat(cert_path).st_mtime + except FileNotFoundError: + mtime = 0.0 + nb, na = cls.run_openssl_dates(cert_path) + + for san in sans: + entry = { + 'folder': folder, + 'cert_path': cert_path, + 'mtime': mtime, + 'not_before': nb, + 'not_after': na, + 'is_wildcard': san.startswith('*.'), + } + mapping.setdefault(san, []).append(entry) + + cls._domain_cert_mapping = mapping + if debug: + print(f"[DEBUG] Refreshed domain-to-cert mapping (counts): " + f"{ {k: len(v) for k, v in mapping.items()} }") + + @classmethod + def ensure_cert_mapping(cls, cert_base_path, debug=False): + if cls._domain_cert_mapping is None or cls.snapshot_changed(cert_base_path): + cls.refresh_cert_mapping(cert_base_path, debug) + + @staticmethod + def _score_entry(entry): + """ + Return tuple used for sorting newest-first: + (not_before or -inf, mtime) + """ + nb = entry.get('not_before') + 
mtime = entry.get('mtime', 0.0) + return (nb if nb is not None else -1, mtime) + + @classmethod + def find_cert_for_domain(cls, domain, cert_base_path, debug=False): + cls.ensure_cert_mapping(cert_base_path, debug) + + candidates_exact = [] + candidates_wild = [] + + for san, entries in cls._domain_cert_mapping.items(): + if san == domain: + candidates_exact.extend(entries) + elif san.startswith('*.'): + base = san[2:] + if domain.count('.') == base.count('.') + 1 and domain.endswith('.' + base): + candidates_wild.extend(entries) + + def _pick_newest(entries): + if not entries: + return None + # newest by (not_before, mtime) + best = max(entries, key=cls._score_entry) + return best + + best_exact = _pick_newest(candidates_exact) + best_wild = _pick_newest(candidates_wild) + + if best_exact and debug: + print(f"[DEBUG] Best exact match for {domain}: {best_exact['folder']} " + f"(not_before={best_exact['not_before']}, mtime={best_exact['mtime']})") + if best_wild and debug: + print(f"[DEBUG] Best wildcard match for {domain}: {best_wild['folder']} " + f"(not_before={best_wild['not_before']}, mtime={best_wild['mtime']})") + + # Prefer exact if it exists; otherwise wildcard + chosen = best_exact or best_wild + + if chosen: + return chosen['folder'] + + if debug: + print(f"[DEBUG] No certificate folder found for {domain}") + + return None diff --git a/build/lib/module_utils/config_utils.py b/build/lib/module_utils/config_utils.py new file mode 100644 index 00000000..b54f48ec --- /dev/null +++ b/build/lib/module_utils/config_utils.py @@ -0,0 +1,152 @@ +import os +import re +import yaml +from ansible.errors import AnsibleFilterError +from collections.abc import Mapping + +from ansible.errors import AnsibleUndefinedVariable +try: + from ansible.utils.unsafe_proxy import AnsibleUndefined +except ImportError: + class AnsibleUndefined: pass + +class AppConfigKeyError(AnsibleFilterError, ValueError): + """ + Raised when a required application config key is missing (strict mode). + Compatible with Ansible error handling and Python ValueError. + """ + pass + +class ConfigEntryNotSetError(AppConfigKeyError): + """ + Raised when a config entry is defined in schema but not set in application. 
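+
+ Example (illustrative): a key such as credentials.database_password that is
+ declared in roles/<application_id>/schema/main.yml but left unset in the
+ application's configuration raises this error in strict mode.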
+ """ + pass + + +def get_app_conf(applications, application_id, config_path, strict=True, default=None, skip_missing_app=False): + # Path to the schema file for this application + schema_path = os.path.join('roles', application_id, 'schema', 'main.yml') + + def schema_defines(path): + if not os.path.isfile(schema_path): + return False + with open(schema_path) as f: + schema = yaml.safe_load(f) or {} + node = schema + for part in path.split('.'): + key_match = re.match(r"^([a-zA-Z0-9_-]+)", part) + if not key_match: + return False + k = key_match.group(1) + if isinstance(node, dict) and k in node: + node = node[k] + else: + return False + return True + + def access(obj, key, path_trace): + # Match either 'key' or 'key[index]' + m = re.match(r"^([a-zA-Z0-9_-]+)(?:\[(\d+)\])?$", key) + if not m: + raise AppConfigKeyError( + f"Invalid key format in config_path: '{key}'\n" + f"Full path so far: {'.'.join(path_trace)}\n" + f"application_id: {application_id}\n" + f"config_path: {config_path}" + ) + k, idx = m.group(1), m.group(2) + + if (hasattr(obj, '__class__') and obj.__class__.__name__ == 'AnsibleUndefined') \ + or isinstance(obj, AnsibleUndefinedVariable): + if not strict: + return default if default is not None else False + raise AppConfigKeyError( + f"Key '{k}' is undefined at '{'.'.join(path_trace)}'\n" + f" actual type: {type(obj).__name__}\n" + f" repr(obj): {obj!r}\n" + f" repr(applications): {applications!r}\n" + f"application_id: {application_id}\n" + f"config_path: {config_path}" + ) + + # Access dict key + if isinstance(obj, Mapping): + if k not in obj: + # Non-strict mode: always return default on missing key + if not strict: + return default if default is not None else False + # Schema-defined but unset: strict raises ConfigEntryNotSetError + trace_path = '.'.join(path_trace[1:]) + if schema_defines(trace_path): + raise ConfigEntryNotSetError( + f"Config entry '{trace_path}' is defined in schema at '{schema_path}' but not set in application '{application_id}'." 
+ ) + # Generic missing-key error + raise AppConfigKeyError( + f"Key '{k}' not found in dict at '{key}'\n" + f"Full path so far: {'.'.join(path_trace)}\n" + f"Current object: {repr(obj)}\n" + f"application_id: {application_id}\n" + f"config_path: {config_path}" + ) + obj = obj[k] + else: + if not strict: + return default if default is not None else False + raise AppConfigKeyError( + f"Expected dict for '{k}', got {type(obj).__name__} at '{key}'\n" + f"Full path so far: {'.'.join(path_trace)}\n" + f"Current object: {repr(obj)}\n" + f"application_id: {application_id}\n" + f"config_path: {config_path}" + ) + + # If index was provided, access list element + if idx is not None: + if not isinstance(obj, list): + if not strict: + return default if default is not None else False + raise AppConfigKeyError( + f"Expected list for '{k}[{idx}]', got {type(obj).__name__}\n" + f"Full path so far: {'.'.join(path_trace)}\n" + f"Current object: {repr(obj)}\n" + f"application_id: {application_id}\n" + f"config_path: {config_path}" + ) + i = int(idx) + if i >= len(obj): + if not strict: + return default if default is not None else False + raise AppConfigKeyError( + f"Index {i} out of range for list at '{k}'\n" + f"Full path so far: {'.'.join(path_trace)}\n" + f"Current object: {repr(obj)}\n" + f"application_id: {application_id}\n" + f"config_path: {config_path}" + ) + obj = obj[i] + return obj + + # Begin traversal + path_trace = [f"applications[{repr(application_id)}]"] + try: + obj = applications[application_id] + except KeyError: + if skip_missing_app: + # Simply return default instead of failing + return default if default is not None else False + raise AppConfigKeyError( + f"Application ID '{application_id}' not found in applications dict.\n" + f"path_trace: {path_trace}\n" + f"applications keys: {list(applications.keys())}\n" + f"config_path: {config_path}" + ) + + for part in config_path.split('.'): + path_trace.append(part) + obj = access(obj, part, path_trace) + if obj is False and not strict: + return default if default is not None else False + return obj + diff --git a/build/lib/module_utils/dict_renderer.py b/build/lib/module_utils/dict_renderer.py new file mode 100644 index 00000000..f2869267 --- /dev/null +++ b/build/lib/module_utils/dict_renderer.py @@ -0,0 +1,119 @@ +import re +import time +from typing import Any, Dict, Union, List, Set + +class DictRenderer: + """ + Resolves placeholders in the form << path >> within nested dictionaries, + supporting hyphens, numeric list indexing, and quoted keys via ['key'] or ["key"]. 
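+
+ Example (illustrative):
+ {"url": "<< server['host'] >>/api", "server": {"host": "https://example.com"}}
+ renders the value of "url" to "https://example.com/api".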
+ """ + # Match << path >> where path contains no whitespace or closing > + PATTERN = re.compile(r"<<\s*(?P[^\s>]+)\s*>>") + # Tokenizes a path into unquoted keys, single-quoted, double-quoted keys, or numeric indices + TOKEN_REGEX = re.compile( + r"(?P[\w\-]+)" + r"|\['(?P[^']+)'\]" + r"|\[\"(?P[^\"]+)\"\]" + r"|\[(?P\d+)\]" + ) + + def __init__(self, verbose: bool = False, timeout: float = 10.0): + self.verbose = verbose + self.timeout = timeout + + def render(self, data: Union[Dict[str, Any], List[Any]]) -> Union[Dict[str, Any], List[Any]]: + start = time.monotonic() + self.root = data + rendered = data + pass_num = 0 + + while True: + pass_num += 1 + if self.verbose: + print(f"[DictRenderer] Pass {pass_num} starting...") + rendered, changed = self._render_pass(rendered) + if not changed: + if self.verbose: + print(f"[DictRenderer] No more placeholders after pass {pass_num}.") + break + if time.monotonic() - start > self.timeout: + raise TimeoutError(f"Rendering exceeded timeout of {self.timeout} seconds") + + # After all passes, raise error on unresolved placeholders + unresolved = self.find_unresolved(rendered) + if unresolved: + raise ValueError(f"Unresolved placeholders: {', '.join(sorted(unresolved))}") + + return rendered + + def _render_pass(self, obj: Any) -> (Any, bool): + if isinstance(obj, dict): + new = {} + changed = False + for k, v in obj.items(): + nv, ch = self._render_pass(v) + new[k] = nv + changed = changed or ch + return new, changed + if isinstance(obj, list): + new_list = [] + changed = False + for item in obj: + ni, ch = self._render_pass(item) + new_list.append(ni) + changed = changed or ch + return new_list, changed + if isinstance(obj, str): + def repl(m): + path = m.group('path') + val = self._lookup(path) + if val is not None: + if self.verbose: + print(f"[DictRenderer] Resolving <<{path}>> -> {val}") + return str(val) + return m.group(0) + new_str = self.PATTERN.sub(repl, obj) + return new_str, new_str != obj + return obj, False + + def _lookup(self, path: str) -> Any: + current = self.root + for m in self.TOKEN_REGEX.finditer(path): + if m.group('key') is not None: + if isinstance(current, dict): + current = current.get(m.group('key')) + else: + return None + elif m.group('qkey') is not None: + if isinstance(current, dict): + current = current.get(m.group('qkey')) + else: + return None + elif m.group('dkey') is not None: + if isinstance(current, dict): + current = current.get(m.group('dkey')) + else: + return None + elif m.group('idx') is not None: + idx = int(m.group('idx')) + if isinstance(current, list) and 0 <= idx < len(current): + current = current[idx] + else: + return None + if current is None: + return None + return current + + def find_unresolved(self, data: Any) -> Set[str]: + """Return all paths of unresolved << placeholders in data.""" + unresolved: Set[str] = set() + if isinstance(data, dict): + for v in data.values(): + unresolved |= self.find_unresolved(v) + elif isinstance(data, list): + for item in data: + unresolved |= self.find_unresolved(item) + elif isinstance(data, str): + for m in self.PATTERN.finditer(data): + unresolved.add(m.group('path')) + return unresolved diff --git a/build/lib/module_utils/domain_utils.py b/build/lib/module_utils/domain_utils.py new file mode 100644 index 00000000..2812b136 --- /dev/null +++ b/build/lib/module_utils/domain_utils.py @@ -0,0 +1,52 @@ +# filter_plugins/domain_utils.py +from ansible.errors import AnsibleFilterError + +def get_domain(domains, application_id): + """ + Return the domain for 
application_id from the domains mapping: + - If value is a string, return it. + - If value is a dict, return its first value. + - If value is a list, return its first element. + - Otherwise, raise an error. + """ + if not isinstance(domains, dict): + raise AnsibleFilterError(f"'domains' must be a dict, got {type(domains).__name__}") + + if application_id not in domains: + raise AnsibleFilterError(f"application_id '{application_id}' not found in domains mapping") + + val = domains[application_id] + + # String case + if isinstance(val, str): + if not val: + raise AnsibleFilterError(f"domains['{application_id}'] is an empty string") + return val + + # Dict case + if isinstance(val, dict): + try: + first_val = next(iter(val.values())) + except StopIteration: + raise AnsibleFilterError(f"domains['{application_id}'] dict is empty") + if not isinstance(first_val, str) or not first_val: + raise AnsibleFilterError( + f"first value of domains['{application_id}'] must be a non-empty string, got {first_val!r}" + ) + return first_val + + # List case + if isinstance(val, list): + if not val: + raise AnsibleFilterError(f"domains['{application_id}'] list is empty") + first = val[0] + if not isinstance(first, str) or not first: + raise AnsibleFilterError( + f"first element of domains['{application_id}'] must be a non-empty string, got {first!r}" + ) + return first + + # Unsupported type + raise AnsibleFilterError( + f"domains['{application_id}'] has unsupported type {type(val).__name__}, must be str, dict or list" + ) diff --git a/build/lib/module_utils/entity_name_utils.py b/build/lib/module_utils/entity_name_utils.py new file mode 100644 index 00000000..57169903 --- /dev/null +++ b/build/lib/module_utils/entity_name_utils.py @@ -0,0 +1,49 @@ +import os +import yaml + +def load_categories_tree(categories_file): + with open(categories_file, 'r', encoding='utf-8') as f: + categories = yaml.safe_load(f)['roles'] + return categories + +def flatten_categories(tree, prefix=''): + """Flattens nested category tree to all possible category paths.""" + result = [] + for k, v in tree.items(): + current = f"{prefix}-{k}" if prefix else k + result.append(current) + if isinstance(v, dict): + for sk, sv in v.items(): + if isinstance(sv, dict): + result.extend(flatten_categories({sk: sv}, current)) + return result + +def get_entity_name(role_name): + """ + Get the entity name from a role name by removing the + longest matching category path from categories.yml. 
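+
+ Example (illustrative): with a 'web-app' category path defined in
+ categories.yml, get_entity_name('web-app-lam') returns 'lam'.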
+ """ + possible_locations = [ + os.path.join(os.getcwd(), 'roles', 'categories.yml'), + os.path.join(os.path.dirname(__file__), '..', 'roles', 'categories.yml'), + 'roles/categories.yml', + ] + categories_file = None + for loc in possible_locations: + if os.path.exists(loc): + categories_file = loc + break + if not categories_file: + return role_name + + categories_tree = load_categories_tree(categories_file) + all_category_paths = flatten_categories(categories_tree) + + role_name_lc = role_name.lower() + all_category_paths = [cat.lower() for cat in all_category_paths] + for cat in sorted(all_category_paths, key=len, reverse=True): + if role_name_lc.startswith(cat + "-"): + return role_name[len(cat) + 1:] + if role_name_lc == cat: + return "" + return role_name diff --git a/build/lib/module_utils/get_url.py b/build/lib/module_utils/get_url.py new file mode 100644 index 00000000..96cd320d --- /dev/null +++ b/build/lib/module_utils/get_url.py @@ -0,0 +1,18 @@ +from ansible.errors import AnsibleFilterError +import sys, os + +def get_url(domains, application_id, protocol): + plugin_dir = os.path.dirname(__file__) + project_root = os.path.dirname(plugin_dir) + module_utils = os.path.join(project_root, 'module_utils') + if module_utils not in sys.path: + sys.path.append(module_utils) + + try: + from domain_utils import get_domain + except ImportError as e: + raise AnsibleFilterError(f"could not import domain_utils: {e}") + + if not isinstance(protocol, str): + raise AnsibleFilterError("Protocol must be a string") + return f"{protocol}://{ get_domain(domains, application_id) }" \ No newline at end of file diff --git a/build/lib/module_utils/handler/__init__.py b/build/lib/module_utils/handler/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/build/lib/module_utils/handler/vault.py b/build/lib/module_utils/handler/vault.py new file mode 100644 index 00000000..2dbaaf22 --- /dev/null +++ b/build/lib/module_utils/handler/vault.py @@ -0,0 +1,50 @@ +import subprocess +from typing import Any, Dict + +from yaml.loader import SafeLoader +from yaml.dumper import SafeDumper + +class VaultScalar(str): + """A subclass of str to represent vault-encrypted strings.""" + pass + +def _vault_constructor(loader, node): + """Custom constructor to handle !vault tag as plain text.""" + return node.value + +def _vault_representer(dumper, data): + """Custom representer to dump VaultScalar as literal blocks.""" + return dumper.represent_scalar('!vault', data, style='|') + +SafeLoader.add_constructor('!vault', _vault_constructor) +SafeDumper.add_representer(VaultScalar, _vault_representer) + +class VaultHandler: + def __init__(self, vault_password_file: str): + self.vault_password_file = vault_password_file + + def encrypt_string(self, value: str, name: str) -> str: + """Encrypt a string using ansible-vault.""" + cmd = [ + "ansible-vault", "encrypt_string", + value, f"--name={name}", + "--vault-password-file", self.vault_password_file + ] + proc = subprocess.run(cmd, capture_output=True, text=True) + if proc.returncode != 0: + raise RuntimeError(f"ansible-vault encrypt_string failed:\n{proc.stderr}") + return proc.stdout + + def encrypt_leaves(self, branch: Dict[str, Any], vault_pw: str): + """Recursively encrypt all leaves (plain text values) under the credentials section.""" + for key, value in branch.items(): + if isinstance(value, dict): + self.encrypt_leaves(value, vault_pw) # Recurse into nested dictionaries + else: + # Skip if already vaulted (i.e., starts with $ANSIBLE_VAULT) + if 
isinstance(value, str) and not value.lstrip().startswith("$ANSIBLE_VAULT"): + snippet = self.encrypt_string(value, key) + lines = snippet.splitlines() + indent = len(lines[1]) - len(lines[1].lstrip()) + body = "\n".join(line[indent:] for line in lines[1:]) + branch[key] = VaultScalar(body) # Store encrypted value as VaultScalar diff --git a/build/lib/module_utils/handler/yaml.py b/build/lib/module_utils/handler/yaml.py new file mode 100644 index 00000000..ed523cd0 --- /dev/null +++ b/build/lib/module_utils/handler/yaml.py @@ -0,0 +1,23 @@ +import yaml +from yaml.loader import SafeLoader +from typing import Any, Dict +from module_utils.handler.vault import VaultScalar + +class YamlHandler: + @staticmethod + def load_yaml(path) -> Dict: + """Load the YAML file and wrap existing !vault entries.""" + text = path.read_text() + data = yaml.load(text, Loader=SafeLoader) or {} + return YamlHandler.wrap_existing_vaults(data) + + @staticmethod + def wrap_existing_vaults(node: Any) -> Any: + """Recursively wrap any str that begins with '$ANSIBLE_VAULT' in a VaultScalar so it dumps as a literal block.""" + if isinstance(node, dict): + return {k: YamlHandler.wrap_existing_vaults(v) for k, v in node.items()} + if isinstance(node, list): + return [YamlHandler.wrap_existing_vaults(v) for v in node] + if isinstance(node, str) and node.lstrip().startswith("$ANSIBLE_VAULT"): + return VaultScalar(node) + return node \ No newline at end of file diff --git a/build/lib/module_utils/manager/__init__.py b/build/lib/module_utils/manager/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/build/lib/module_utils/manager/inventory.py b/build/lib/module_utils/manager/inventory.py new file mode 100644 index 00000000..2901f585 --- /dev/null +++ b/build/lib/module_utils/manager/inventory.py @@ -0,0 +1,185 @@ +import secrets +import hashlib +import bcrypt +from pathlib import Path +from typing import Dict +from module_utils.handler.yaml import YamlHandler +from module_utils.handler.vault import VaultHandler, VaultScalar +import string +import sys +import base64 + +class InventoryManager: + def __init__( + self, + role_path: Path, + inventory_path: Path, + vault_pw: str, + overrides: Dict[str, str], + allow_empty_plain: bool = False, + ): + """Initialize the Inventory Manager.""" + self.role_path = role_path + self.inventory_path = inventory_path + self.vault_pw = vault_pw + self.overrides = overrides + self.allow_empty_plain = allow_empty_plain + self.inventory = YamlHandler.load_yaml(inventory_path) + self.schema = YamlHandler.load_yaml(role_path / "schema" / "main.yml") + self.app_id = self.load_application_id(role_path) + + self.vault_handler = VaultHandler(vault_pw) + + def load_application_id(self, role_path: Path) -> str: + """Load the application ID from the role's vars/main.yml file.""" + vars_file = role_path / "vars" / "main.yml" + data = YamlHandler.load_yaml(vars_file) + app_id = data.get("application_id") + if not app_id: + print(f"ERROR: 'application_id' missing in {vars_file}", file=sys.stderr) + sys.exit(1) + return app_id + + def apply_schema(self) -> Dict: + """Apply the schema and return the updated inventory.""" + apps = self.inventory.setdefault("applications", {}) + target = apps.setdefault(self.app_id, {}) + + # Load the data from vars/main.yml + vars_file = self.role_path / "config" / "main.yml" + data = YamlHandler.load_yaml(vars_file) + + # Check if 'central-database' is enabled in the features section of data + if "features" in data: + if "central_database" in data["features"] 
and data["features"]["central_database"]: + # Add 'central_database' value (password) to credentials + target.setdefault("credentials", {})["database_password"] = self.generate_value("alphanumeric") + if "oauth2" in data["features"] and data["features"]["oauth2"]: + target.setdefault("credentials", {})["oauth2_proxy_cookie_secret"] = self.generate_value("random_hex_16") + + # Apply recursion only for the `credentials` section + self.recurse_credentials(self.schema, target) + return self.inventory + + def recurse_credentials(self, branch: dict, dest: dict, prefix: str = ""): + """Recursively process only the 'credentials' section and generate values.""" + for key, meta in branch.items(): + full_key = f"{prefix}.{key}" if prefix else key + + # Only process 'credentials' section for encryption + if prefix == "credentials" and isinstance(meta, dict) and all( + k in meta for k in ("description", "algorithm", "validation") + ): + alg = meta["algorithm"] + if alg == "plain": + # Must be supplied via --set, unless allow_empty_plain=True + if full_key not in self.overrides: + if self.allow_empty_plain: + plain = "" + else: + print( + f"ERROR: Plain algorithm for '{full_key}' requires override via --set {full_key}=", + file=sys.stderr, + ) + sys.exit(1) + else: + plain = self.overrides[full_key] + else: + plain = self.overrides.get(full_key, self.generate_value(alg)) + + # Check if the value is already vaulted or if it's a dictionary + existing_value = dest.get(key) + + # If existing_value is a dictionary, print a warning and skip encryption + if isinstance(existing_value, dict): + print(f"Skipping encryption for '{key}', as it is a dictionary.", file=sys.stderr) + continue + + # Check if the value is a VaultScalar and already vaulted + if existing_value and isinstance(existing_value, VaultScalar): + print(f"Skipping encryption for '{key}', as it is already vaulted.", file=sys.stderr) + continue + + # Empty strings should *not* be encrypted + if plain == "": + dest[key] = "" + continue + + # Encrypt only if it's not already vaulted + snippet = self.vault_handler.encrypt_string(plain, key) + lines = snippet.splitlines() + indent = len(lines[1]) - len(lines[1].lstrip()) + body = "\n".join(line[indent:] for line in lines[1:]) + dest[key] = VaultScalar(body) + + elif isinstance(meta, dict): + sub = dest.setdefault(key, {}) + self.recurse_credentials(meta, sub, full_key) + else: + dest[key] = meta + + def generate_secure_alphanumeric(self, length: int) -> str: + """Generate a cryptographically secure random alphanumeric string of the given length.""" + characters = string.ascii_letters + string.digits # a-zA-Z0-9 + return ''.join(secrets.choice(characters) for _ in range(length)) + + def generate_value(self, algorithm: str) -> str: + """ + Generate a random secret value according to the specified algorithm. + + Supported algorithms: + β€’ "random_hex" + – Returns a 64-byte (512-bit) secure random string, encoded as 128 hexadecimal characters. + – Use when you need maximum entropy in a hex-only format. + + β€’ "sha256" + – Generates 32 random bytes, hashes them with SHA-256, and returns a 64-character hex digest. + – Good for when you want a fixed-length (256-bit) hash output. + + β€’ "sha1" + – Generates 20 random bytes, hashes them with SHA-1, and returns a 40-character hex digest. + – Only use in legacy contexts; SHA-1 is considered weaker than SHA-256. + + β€’ "bcrypt" + – Creates a random 16-byte URL-safe password, then applies a bcrypt hash. 
+ – Suitable for storing user-style passwords where bcrypt verification is needed. + + β€’ "alphanumeric" + – Produces a 64-character string drawn from [A–Z, a–z, 0–9]. + – Offers β‰ˆ380 bits of entropy; human-friendly charset. + + β€’ "base64_prefixed_32" + – Generates 32 random bytes, encodes them in Base64, and prefixes the result with "base64:". + – Useful when downstream systems expect a Base64 format. + + β€’ "random_hex_16" + – Returns 16 random bytes (128 bits) encoded as 32 hexadecimal characters. + – Handy for shorter tokens or salts. + + Returns: + A securely generated string according to the chosen algorithm. + """ + if algorithm == "random_hex": + return secrets.token_hex(64) + if algorithm == "random_hex_32": + return secrets.token_hex(32) + if algorithm == "sha256": + return hashlib.sha256(secrets.token_bytes(32)).hexdigest() + if algorithm == "sha1": + return hashlib.sha1(secrets.token_bytes(20)).hexdigest() + if algorithm == "bcrypt": + # Generate a random password and hash it with bcrypt + pw = secrets.token_urlsafe(16).encode() + raw_hash = bcrypt.hashpw(pw, bcrypt.gensalt()).decode() + # Replace every '$' with a random lowercase alphanumeric character + alnum = string.digits + string.ascii_lowercase + escaped = "".join(secrets.choice(alnum) if ch == '$' else ch for ch in raw_hash) + return escaped + if algorithm == "alphanumeric": + return self.generate_secure_alphanumeric(64) + if algorithm == "base64_prefixed_32": + return "base64:" + base64.b64encode(secrets.token_bytes(32)).decode() + if algorithm == "random_hex_16": + # 16 Bytes β†’ 32 Hex-Characters + return secrets.token_hex(16) + return "undefined" diff --git a/build/lib/module_utils/role_dependency_resolver.py b/build/lib/module_utils/role_dependency_resolver.py new file mode 100644 index 00000000..cadb7c5a --- /dev/null +++ b/build/lib/module_utils/role_dependency_resolver.py @@ -0,0 +1,296 @@ +import os +import fnmatch +import re +from typing import Dict, Set, Iterable, Tuple, Optional + +import yaml + + +class RoleDependencyResolver: + _RE_PURE_JINJA = re.compile(r"\s*\{\{\s*[^}]+\s*\}\}\s*$") + + def __init__(self, roles_dir: str): + self.roles_dir = roles_dir + + # -------------------------- public API -------------------------- + + def resolve_transitively( + self, + start_roles: Iterable[str], + *, + resolve_include_role: bool = True, + resolve_import_role: bool = True, + resolve_dependencies: bool = True, + resolve_run_after: bool = False, + max_depth: Optional[int] = None, + ) -> Set[str]: + to_visit = list(dict.fromkeys(start_roles)) + visited: Set[str] = set() + depth: Dict[str, int] = {} + + for r in to_visit: + depth[r] = 0 + + while to_visit: + role = to_visit.pop() + cur_d = depth.get(role, 0) + if role in visited: + continue + visited.add(role) + + if max_depth is not None and cur_d >= max_depth: + continue + + for dep in self.get_role_dependencies( + role, + resolve_include_role=resolve_include_role, + resolve_import_role=resolve_import_role, + resolve_dependencies=resolve_dependencies, + resolve_run_after=resolve_run_after, + ): + if dep not in visited: + to_visit.append(dep) + depth[dep] = cur_d + 1 + + return visited + + def get_role_dependencies( + self, + role_name: str, + *, + resolve_include_role: bool = True, + resolve_import_role: bool = True, + resolve_dependencies: bool = True, + resolve_run_after: bool = False, + ) -> Set[str]: + role_path = os.path.join(self.roles_dir, role_name) + if not os.path.isdir(role_path): + return set() + + deps: Set[str] = set() + + if 
resolve_include_role or resolve_import_role: + includes, imports = self._scan_tasks(role_path) + if resolve_include_role: + deps |= includes + if resolve_import_role: + deps |= imports + + if resolve_dependencies: + deps |= self._extract_meta_dependencies(role_path) + + if resolve_run_after: + deps |= self._extract_meta_run_after(role_path) + + return deps + + # -------------------------- scanning helpers -------------------------- + + def _scan_tasks(self, role_path: str) -> Tuple[Set[str], Set[str]]: + tasks_dir = os.path.join(role_path, "tasks") + include_roles: Set[str] = set() + import_roles: Set[str] = set() + + if not os.path.isdir(tasks_dir): + return include_roles, import_roles + + all_roles = self._list_role_dirs(self.roles_dir) + + candidates = [] + for root, _, files in os.walk(tasks_dir): + for f in files: + if f.endswith(".yml") or f.endswith(".yaml"): + candidates.append(os.path.join(root, f)) + + for file_path in candidates: + try: + with open(file_path, "r", encoding="utf-8") as f: + docs = list(yaml.safe_load_all(f)) + except Exception: + inc, imp = self._tolerant_scan_file(file_path, all_roles) + include_roles |= inc + import_roles |= imp + continue + + for doc in docs or []: + if not isinstance(doc, list): + continue + for task in doc: + if not isinstance(task, dict): + continue + if "include_role" in task: + include_roles |= self._extract_from_task(task, "include_role", all_roles) + if "import_role" in task: + import_roles |= self._extract_from_task(task, "import_role", all_roles) + + return include_roles, import_roles + + def _extract_from_task(self, task: dict, key: str, all_roles: Iterable[str]) -> Set[str]: + roles: Set[str] = set() + spec = task.get(key) + if not isinstance(spec, dict): + return roles + + name = spec.get("name") + loop_val = self._collect_loop_values(task) + + if loop_val is not None: + for item in self._iter_flat(loop_val): + cand = self._role_from_loop_item(item, name_template=name) + if cand: + roles.add(cand) + + if isinstance(name, str) and name.strip() and not self._is_pure_jinja_var(name): + pattern = self._jinja_to_glob(name) if ("{{" in name and "}}" in name) else name + self._match_glob_into(pattern, all_roles, roles) + return roles + + if isinstance(name, str) and name.strip(): + if "{{" in name and "}}" in name: + if self._is_pure_jinja_var(name): + return roles + pattern = self._jinja_to_glob(name) + self._match_glob_into(pattern, all_roles, roles) + else: + roles.add(name.strip()) + + return roles + + def _collect_loop_values(self, task: dict): + for k in ("loop", "with_items", "with_list", "with_flattened"): + if k in task: + return task[k] + return None + + def _iter_flat(self, value): + if isinstance(value, list): + for v in value: + if isinstance(v, list): + for x in v: + yield x + else: + yield v + + def _role_from_loop_item(self, item, name_template=None) -> Optional[str]: + tmpl = (name_template or "").strip() if isinstance(name_template, str) else "" + + if isinstance(item, str): + if tmpl in ("{{ item }}", "{{item}}") or not tmpl or "item" in tmpl: + return item.strip() + return None + + if isinstance(item, dict): + for k in ("role", "name"): + v = item.get(k) + if isinstance(v, str) and v.strip(): + if tmpl in (f"{{{{ item.{k} }}}}", f"{{{{item.{k}}}}}") or not tmpl or "item" in tmpl: + return v.strip() + return None + + def _match_glob_into(self, pattern: str, all_roles: Iterable[str], out: Set[str]): + if "*" in pattern or "?" 
in pattern or "[" in pattern: + for r in all_roles: + if fnmatch.fnmatch(r, pattern): + out.add(r) + else: + out.add(pattern) + + def test_jinja_mixed_name_glob_matching(self): + """ + include_role: + name: "prefix-{{ item }}-suffix" + loop: [x, y] + Existing roles: prefix-x-suffix, prefix-y-suffix, prefix-z-suffix + + Expectation: + - NO raw loop items ('x', 'y') end up as roles + - Glob matching resolves to all three concrete roles + """ + make_role(self.roles_dir, "A") + for rn in ["prefix-x-suffix", "prefix-y-suffix", "prefix-z-suffix"]: + make_role(self.roles_dir, rn) + + write( + os.path.join(self.roles_dir, "A", "tasks", "main.yml"), + """ + - name: jinja-mixed glob + include_role: + name: "prefix-{{ item }}-suffix" + loop: + - x + - y + """ + ) + + r = RoleDependencyResolver(self.roles_dir) + deps = r.get_role_dependencies("A") + + # ensure no raw loop items leak into the results + self.assertNotIn("x", deps) + self.assertNotIn("y", deps) + + # only the resolved role names should be present + self.assertEqual( + deps, + {"prefix-x-suffix", "prefix-y-suffix", "prefix-z-suffix"}, + ) + + + # -------------------------- meta helpers -------------------------- + + def _extract_meta_dependencies(self, role_path: str) -> Set[str]: + deps: Set[str] = set() + meta_main = os.path.join(role_path, "meta", "main.yml") + if not os.path.isfile(meta_main): + return deps + try: + with open(meta_main, "r", encoding="utf-8") as f: + meta = yaml.safe_load(f) or {} + raw_deps = meta.get("dependencies", []) + if isinstance(raw_deps, list): + for item in raw_deps: + if isinstance(item, str): + deps.add(item.strip()) + elif isinstance(item, dict): + r = item.get("role") + if isinstance(r, str) and r.strip(): + deps.add(r.strip()) + except Exception: + pass + return deps + + def _extract_meta_run_after(self, role_path: str) -> Set[str]: + deps: Set[str] = set() + meta_main = os.path.join(role_path, "meta", "main.yml") + if not os.path.isfile(meta_main): + return deps + try: + with open(meta_main, "r", encoding="utf-8") as f: + meta = yaml.safe_load(f) or {} + galaxy_info = meta.get("galaxy_info", {}) + run_after = galaxy_info.get("run_after", []) + if isinstance(run_after, list): + for item in run_after: + if isinstance(item, str) and item.strip(): + deps.add(item.strip()) + except Exception: + pass + return deps + + # -------------------------- small utils -------------------------- + + def _list_role_dirs(self, roles_dir: str) -> list[str]: + return [ + d for d in os.listdir(roles_dir) + if os.path.isdir(os.path.join(roles_dir, d)) + ] + + @classmethod + def _is_pure_jinja_var(cls, s: str) -> bool: + return bool(cls._RE_PURE_JINJA.fullmatch(s or "")) + + @staticmethod + def _jinja_to_glob(s: str) -> str: + pattern = re.sub(r"\{\{[^}]+\}\}", "*", s or "") + pattern = re.sub(r"\*{2,}", "*", pattern) + return pattern.strip() diff --git a/build/lib/module_utils/sounds.py b/build/lib/module_utils/sounds.py new file mode 100644 index 00000000..9dd0eaec --- /dev/null +++ b/build/lib/module_utils/sounds.py @@ -0,0 +1,259 @@ +import os +import warnings +from typing import Optional + +_SOUND_DISABLED_REASON: Optional[str] = None +_SOUND_WARNED: bool = False + + +def _warn_sound_disabled_once() -> None: + """ + Emit the 'Sound support disabled' warning at most once per Python process. + + Important: + - Do NOT warn at import time (avoids noisy unit test output). + - Warn only when a sound function is actually called. 
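+
+ The reason string is captured by the ImportError handler at the bottom of
+ this module and is surfaced lazily here, the first time a DummySound
+ method is invoked.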
+ """ + global _SOUND_WARNED + + if _SOUND_WARNED: + return + + if not _SOUND_DISABLED_REASON: + return + + _SOUND_WARNED = True + warnings.warn( + f"Sound support disabled: {_SOUND_DISABLED_REASON}", + RuntimeWarning, + stacklevel=2, + ) + + +class DummySound: + @staticmethod + def play_start_sound() -> None: + _warn_sound_disabled_once() + + @staticmethod + def play_infinito_intro_sound() -> None: + _warn_sound_disabled_once() + + @staticmethod + def play_finished_successfully_sound() -> None: + _warn_sound_disabled_once() + + @staticmethod + def play_finished_failed_sound() -> None: + _warn_sound_disabled_once() + + @staticmethod + def play_warning_sound() -> None: + _warn_sound_disabled_once() + + +try: + import numpy as np + import simpleaudio as sa + import shutil + import subprocess + import tempfile + import wave as wavmod + + class Sound: + """ + Sound effects for the application. + """ + + fs = 44100 + complexity_factor = 10 + max_length = 2.0 + + @staticmethod + def _generate_complex_wave( + frequency: float, + duration: float, + harmonics: int | None = None, + ) -> np.ndarray: + if harmonics is None: + harmonics = Sound.complexity_factor + + t = np.linspace(0, duration, int(Sound.fs * duration), False) + wave = np.zeros_like(t) + + for n in range(1, harmonics + 1): + wave += (1 / n) * np.sin(2 * np.pi * frequency * n * t) + + # ADSR envelope + attack = int(0.02 * Sound.fs) + release = int(0.05 * Sound.fs) + env = np.ones_like(wave) + env[:attack] = np.linspace(0, 1, attack) + env[-release:] = np.linspace(1, 0, release) + + wave *= env + wave /= np.max(np.abs(wave)) + return (wave * (2**15 - 1)).astype(np.int16) + + @staticmethod + def _crossfade(w1: np.ndarray, w2: np.ndarray, fade_len: int) -> np.ndarray: + fade_len = min(fade_len, len(w1), len(w2)) + if fade_len <= 0: + return np.concatenate([w1, w2]) + + fade_out = np.linspace(1, 0, fade_len) + fade_in = np.linspace(0, 1, fade_len) + + w1_end = w1[-fade_len:].astype(np.float32) * fade_out + w2_start = w2[:fade_len].astype(np.float32) * fade_in + middle = (w1_end + w2_start).astype(np.int16) + + return np.concatenate([w1[:-fade_len], middle, w2[fade_len:]]) + + @staticmethod + def _play_via_system(wave: np.ndarray) -> None: + with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as f: + fname = f.name + + try: + with wavmod.open(fname, "wb") as w: + w.setnchannels(1) + w.setsampwidth(2) + w.setframerate(Sound.fs) + w.writeframes(wave.tobytes()) + + def run(cmd: list[str]) -> bool: + return ( + subprocess.run( + cmd, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + check=False, + ).returncode + == 0 + ) + + if shutil.which("pw-play") and run(["pw-play", fname]): + return + if shutil.which("paplay") and run(["paplay", fname]): + return + if shutil.which("aplay") and run(["aplay", "-q", fname]): + return + if shutil.which("ffplay") and run(["ffplay", "-autoexit", "-nodisp", fname]): + return + + play_obj = sa.play_buffer(wave, 1, 2, Sound.fs) + play_obj.wait_done() + + finally: + try: + os.unlink(fname) + except Exception: + pass + + @staticmethod + def _play(wave: np.ndarray) -> None: + backend = os.getenv("INFINITO_AUDIO_BACKEND", "auto").lower() + + if backend == "system": + Sound._play_via_system(wave) + return + + if backend == "simpleaudio": + play_obj = sa.play_buffer(wave, 1, 2, Sound.fs) + play_obj.wait_done() + return + + # auto + try: + play_obj = sa.play_buffer(wave, 1, 2, Sound.fs) + play_obj.wait_done() + except Exception: + Sound._play_via_system(wave) + + @classmethod + def 
play_infinito_intro_sound(cls) -> None: + build_time = 10.0 + celebr_time = 12.0 + breakdown_time = 10.0 + overlap = 3.0 + + bass_seg = 0.125 + melody_seg = 0.25 + bass_freq = 65.41 + melody_freqs = [261.63, 293.66, 329.63, 392.00, 440.00, 523.25] + + steps = int(build_time / (bass_seg + melody_seg)) + build_seq: list[np.ndarray] = [] + + for i in range(steps): + amp = (i + 1) / steps + b = cls._generate_complex_wave(bass_freq, bass_seg).astype(np.float32) * amp + m = cls._generate_complex_wave( + melody_freqs[i % len(melody_freqs)], melody_seg + ).astype(np.float32) * amp + build_seq.append(b.astype(np.int16)) + build_seq.append(m.astype(np.int16)) + + build_wave = np.concatenate(build_seq) + + roots = [523.25, 349.23, 233.08, 155.56, 103.83, 69.30, 46.25] + chord_time = celebr_time / len(roots) + celebr_seq: list[np.ndarray] = [] + + for root in roots: + t = np.linspace(0, chord_time, int(cls.fs * chord_time), False) + chord = sum(np.sin(2 * np.pi * f * t) for f in [root, root * 5 / 4, root * 3 / 2]) + chord /= np.max(np.abs(chord)) + celebr_seq.append((chord * (2**15 - 1)).astype(np.int16)) + + celebr_wave = np.concatenate(celebr_seq) + breakdown_wave = np.concatenate(list(reversed(build_seq))) + + fade_samples = int(overlap * cls.fs) + bc = cls._crossfade(build_wave, celebr_wave, fade_samples) + full = cls._crossfade(bc, breakdown_wave, fade_samples) + + cls._play(full) + + @classmethod + def play_start_sound(cls) -> None: + freqs = [523.25, 659.26, 783.99, 880.00, 1046.50, 1174.66] + cls._prepare_and_play(freqs) + + @classmethod + def play_finished_successfully_sound(cls) -> None: + freqs = [523.25, 587.33, 659.26, 783.99, 880.00, 987.77] + cls._prepare_and_play(freqs) + + @classmethod + def play_finished_failed_sound(cls) -> None: + freqs = [880.00, 830.61, 783.99, 659.26, 622.25, 523.25] + durations = [0.4, 0.3, 0.25, 0.25, 0.25, 0.25] + cls._prepare_and_play(freqs, durations) + + @classmethod + def play_warning_sound(cls) -> None: + freqs = [700.00, 550.00, 750.00, 500.00, 800.00, 450.00] + cls._prepare_and_play(freqs) + + @classmethod + def _prepare_and_play( + cls, freqs: list[float], durations: list[float] | None = None + ) -> None: + count = len(freqs) + + if durations is None: + durations = [cls.max_length / count] * count + else: + total = sum(durations) + durations = [d * cls.max_length / total for d in durations] + + waves = [cls._generate_complex_wave(f, d) for f, d in zip(freqs, durations)] + cls._play(np.concatenate(waves)) + +except ImportError as exc: + # Do NOT warn at import time β€” this module is used in many unit tests / subprocess calls. + # Warn only when a sound method is actually invoked. + _SOUND_DISABLED_REASON = str(exc) + Sound = DummySound diff --git a/build/lib/module_utils/valid_deploy_id.py b/build/lib/module_utils/valid_deploy_id.py new file mode 100644 index 00000000..9cbe8861 --- /dev/null +++ b/build/lib/module_utils/valid_deploy_id.py @@ -0,0 +1,89 @@ +# File: module_utils/valid_deploy_id.py +""" +Utility for validating deployment application IDs against defined roles and inventory. +""" +import os +import yaml +import glob +import configparser + +from filter_plugins.get_all_application_ids import get_all_application_ids + +class ValidDeployId: + def __init__(self, roles_dir='roles'): + # Load all known application IDs from roles + self.valid_ids = set(get_all_application_ids(roles_dir)) + + def validate(self, inventory_path, ids): + """ + Validate a list of application IDs against both role definitions and inventory. 
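+
+        Arguments:
+          inventory_path: path to an Ansible inventory file (INI or YAML).
+          ids: iterable of application IDs to check.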
+ Returns a dict mapping invalid IDs to their presence status. + Example: + { + "app1": {"in_roles": False, "in_inventory": True}, + "app2": {"in_roles": True, "in_inventory": False} + } + """ + invalid = {} + for app_id in ids: + in_roles = app_id in self.valid_ids + in_inventory = self._exists_in_inventory(inventory_path, app_id) + if not (in_roles and in_inventory): + invalid[app_id] = { + 'in_roles': in_roles, + 'in_inventory': in_inventory + } + return invalid + + def _exists_in_inventory(self, inventory_path, app_id): + _, ext = os.path.splitext(inventory_path) + if ext in ('.yml', '.yaml'): + return self._search_yaml_keys(inventory_path, app_id) + else: + return self._search_ini_sections(inventory_path, app_id) + + def _search_ini_sections(self, inventory_path, app_id): + """ + Manually parse INI inventory for sections and host lists. + Returns True if app_id matches a section name or a host in a section. + """ + present = False + with open(inventory_path, 'r', encoding='utf-8') as f: + current_section = None + for raw in f: + line = raw.strip() + # Skip blanks and comments + if not line or line.startswith(('#', ';')): + continue + # Section header + if line.startswith('[') and line.endswith(']'): + current_section = line[1:-1].strip() + if current_section == app_id: + return True + continue + # Host or variable line under a section + if current_section: + # Split on commas or whitespace + for part in [p.strip() for p in line.replace(',', ' ').split()]: + if part == app_id: + return True + return False + + def _search_yaml_keys(self, inventory_path, app_id): + with open(inventory_path, 'r', encoding='utf-8') as f: + data = yaml.safe_load(f) + return self._find_key(data, app_id) + + def _find_key(self, node, key): # recursive search + if isinstance(node, dict): + for k, v in node.items(): + # If key matches and maps to a dict or list, consider it present + if k == key and isinstance(v, (dict, list)): + return True + if self._find_key(v, key): + return True + elif isinstance(node, list): + for item in node: + if self._find_key(item, key): + return True + return False diff --git a/scripts/tests/code.sh b/scripts/tests/code.sh index 1c870d45..de034345 100755 --- a/scripts/tests/code.sh +++ b/scripts/tests/code.sh @@ -20,5 +20,6 @@ docker run --rm \ echo "PYTHON=${PYTHON}" export PATH="$(dirname "$PYTHON"):$PATH" # Ensure we really use the exported interpreter (and thus the global venv) + make setup "${PYTHON}" -m unittest discover -s tests/${TEST_TYPE} -t . -p "${TEST_PATTERN}" '
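For orientation, a minimal usage sketch of the ValidDeployId helper introduced above; the import path follows the module's own "# File: module_utils/valid_deploy_id.py" header, and the inventory path and application IDs are placeholders rather than values from the repository:

    from module_utils.valid_deploy_id import ValidDeployId

    validator = ValidDeployId(roles_dir="roles")
    # validate() returns only the invalid IDs, each mapped to its presence status
    invalid = validator.validate("inventory.yml", ["web-app-example", "no-such-app"])
    for app_id, status in invalid.items():
        print(f"{app_id}: in_roles={status['in_roles']} in_inventory={status['in_inventory']}")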