Compare commits

..

19 Commits

Author SHA1 Message Date
8608d89653 Implemented correct template for collabora 2025-08-20 09:07:33 +02:00
a4f39ac732 Renamed webserver roles to more speakable names 2025-08-20 08:54:17 +02:00
9cfb8f3a60 Various optimizations for Collabora 2025-08-20 08:34:12 +02:00
3e5344a46c Optimized Collabora CSP for Nextcloud 2025-08-20 07:03:02 +02:00
ec07d1a20b Added logic to start docker compose pull just once per directory 2025-08-20 07:02:27 +02:00
594d9417d1 handlers(docker): add once-per-directory docker compose pull with lockfile
- Introduced a new handler 'docker compose pull' that runs only once per
  {{ docker_compose.directories.instance }} directory by using a lock
  file under /run/ansible/compose-pull.
- Ensures idempotency by marking the task as changed only when a pull
  was actually executed.
- Restricted execution with 'when: MODE_UPDATE | bool'.
- Improves update workflow by avoiding redundant docker pulls during
  the same Ansible run.

Reference: ChatGPT discussion
https://chatgpt.com/share/68a55151-959c-800f-8b70-160ffe43e776
2025-08-20 06:42:49 +02:00
dc125e4843 Solved path bug 2025-08-20 06:18:52 +02:00
39a54294dd Moved update commands to nextcloud role 2025-08-20 06:07:33 +02:00
a57fe718de Optimized spacing 2025-08-20 05:49:35 +02:00
b6aec5fe33 Optimized features 2025-08-20 05:39:49 +02:00
de07d890dc Solved 'sys-ctl-bkp-docker-2-loc' bug 2025-08-20 05:25:24 +02:00
e27f355697 Solved tabulator bug 2025-08-20 05:02:16 +02:00
790762d397 Renamed some web apps to web services 2025-08-20 05:00:24 +02:00
4ce681e643 Add integration test: ensure roles including 'sys-service' define system_service_id
This test scans all roles for tasks including:
  - include_role:
      name: sys-service

If present, the role must define a non-empty 'system_service_id' in vars/main.yml.
Helps enforce consistency and prevent misconfiguration.

Ref: https://chatgpt.com/share/68a536e5-c384-800f-937a-f9d91249950c
2025-08-20 04:46:27 +02:00
55cf3d0d8e Solved unit performance tests 2025-08-20 04:35:46 +02:00
2708b67751 Optimized webserver on failure 2025-08-20 04:12:42 +02:00
f477ee3731 Deactivated redis, moved version to correct place for web-svc-collabora 2025-08-20 03:40:37 +02:00
6d70f78989 fix(domain-filters): support dependency expansion via seed param
- Added missing 'Iterable' import in 'canonical_domains_map' to avoid NameError.
- Introduced 'seed' parameter so the filter can start traversal from current play apps
  while still emitting canonical domains for discovered dependencies (e.g. web-svc-collabora).
- Updated 01_constructor.yml to pass full 'applications' and a clean 'seed' list
  (using dict2items → key) instead of '.keys()' method calls, fixing integration
  test error: 'reference to application keys is invalid'.

This resolves issues where collabora domains were missing and integration tests failed.

Ref: https://chatgpt.com/share/68a51f9b-3924-800f-a41b-803d8dd10397
2025-08-20 03:07:14 +02:00
b867a52471 Refactor and extend role dependency resolution:
- Introduced module_utils/role_dependency_resolver.py with full support for include_role, import_role, meta dependencies, and run_after.
- Refactored cli/build/tree.py to use RoleDependencyResolver (added toggles for include/import/dependencies/run_after).
- Extended filter_plugins/canonical_domains_map.py with optional 'recursive' mode (ignores run_after by design).
- Updated roles/web-app-nextcloud to properly include Collabora dependency.
- Added comprehensive unittests under tests/unit/module_utils for RoleDependencyResolver.

Ref: https://chatgpt.com/share/68a519c8-8e54-800f-83c0-be38546620d9
2025-08-20 02:42:07 +02:00
192 changed files with 1299 additions and 540 deletions
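
The role/`system_service_id` integration test described in commit 4ce681e643 is not among the files shown below. A minimal sketch of such a check, assuming the test runs from the repository root and roles live under `roles/` (all names here are illustrative, not the actual test file):

import glob
import os
import yaml

ROLES_DIR = "roles"  # assumed repository layout

def roles_including_sys_service():
    # Yield the name of every role whose tasks include 'sys-service' via include_role.
    pattern = os.path.join(ROLES_DIR, "*", "tasks", "**", "*.yml")
    for tasks_file in glob.glob(pattern, recursive=True):
        with open(tasks_file, encoding="utf-8") as fh:
            try:
                docs = list(yaml.safe_load_all(fh))
            except yaml.YAMLError:
                continue
        for doc in docs:
            if not isinstance(doc, list):
                continue
            for task in doc:
                if isinstance(task, dict):
                    spec = task.get("include_role")
                    if isinstance(spec, dict) and spec.get("name") == "sys-service":
                        yield tasks_file.split(os.sep)[1]

def test_roles_using_sys_service_define_system_service_id():
    for role in sorted(set(roles_including_sys_service())):
        vars_file = os.path.join(ROLES_DIR, role, "vars", "main.yml")
        assert os.path.isfile(vars_file), f"{role}: vars/main.yml is missing"
        with open(vars_file, encoding="utf-8") as fh:
            role_vars = yaml.safe_load(fh) or {}
        assert role_vars.get("system_service_id"), f"{role}: system_service_id missing or empty"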

View File

@@ -2,174 +2,45 @@
 import os
 import argparse
 import json
-import fnmatch
-import re
 from typing import Dict, Any
-import yaml
 from cli.build.graph import build_mappings, output_graph
+from module_utils.role_dependency_resolver import RoleDependencyResolver


 def find_roles(roles_dir: str):
-    """Yield (role_name, role_path) for every subfolder in roles_dir."""
     for entry in os.listdir(roles_dir):
         path = os.path.join(roles_dir, entry)
         if os.path.isdir(path):
             yield entry, path


-def _is_pure_jinja_var(s: str) -> bool:
-    """Check if string is exactly a single {{ var }} expression."""
-    return bool(re.fullmatch(r"\s*\{\{\s*[^}]+\s*\}\}\s*", s))
-
-
-def _jinja_to_glob(s: str) -> str:
-    """Convert Jinja placeholders {{ ... }} into * for fnmatch."""
-    pattern = re.sub(r"\{\{[^}]+\}\}", "*", s)
-    pattern = re.sub(r"\*{2,}", "*", pattern)
-    return pattern.strip()
-
-
-def _list_role_dirs(roles_dir: str) -> list[str]:
-    """Return a list of role directory names inside roles_dir."""
-    return [
-        d for d in os.listdir(roles_dir)
-        if os.path.isdir(os.path.join(roles_dir, d))
-    ]
-
-
-def find_include_role_dependencies(role_path: str, roles_dir: str) -> set[str]:
-    """
-    Scan all tasks/*.yml(.yaml) files of a role and collect include_role dependencies.
-
-    Rules:
-      - loop/with_items with literal strings -> add those as roles
-      - name contains jinja AND surrounding literals -> convert to glob and match existing roles
-      - name is a pure jinja variable only -> ignore
-      - name is a pure literal -> add as-is
-    """
-    deps: set[str] = set()
-    tasks_dir = os.path.join(role_path, "tasks")
-    if not os.path.isdir(tasks_dir):
-        return deps
-
-    candidates = []
-    for root, _, files in os.walk(tasks_dir):
-        for f in files:
-            if f.endswith(".yml") or f.endswith(".yaml"):
-                candidates.append(os.path.join(root, f))
-
-    all_roles = _list_role_dirs(roles_dir)
-
-    def add_literal_loop_items(loop_val):
-        if isinstance(loop_val, list):
-            for item in loop_val:
-                if isinstance(item, str) and item.strip():
-                    deps.add(item.strip())
-
-    for file_path in candidates:
-        try:
-            with open(file_path, "r", encoding="utf-8") as f:
-                docs = list(yaml.safe_load_all(f))
-        except Exception:
-            # Be tolerant to any parsing issues; skip unreadable files
-            continue
-
-        for doc in docs:
-            if not isinstance(doc, list):
-                continue
-            for task in doc:
-                if not isinstance(task, dict):
-                    continue
-                if "include_role" not in task:
-                    continue
-
-                inc = task.get("include_role")
-                if not isinstance(inc, dict):
-                    continue
-
-                name = inc.get("name")
-                if not isinstance(name, str) or not name.strip():
-                    continue
-
-                # 1) Handle loop/with_items
-                loop_val = task.get("loop", task.get("with_items"))
-                if loop_val is not None:
-                    add_literal_loop_items(loop_val)
-                    # still check name for surrounding literals
-                    if not _is_pure_jinja_var(name):
-                        pattern = (
-                            _jinja_to_glob(name)
-                            if ("{{" in name and "}}" in name)
-                            else name
-                        )
-                        if "*" in pattern:
-                            for r in all_roles:
-                                if fnmatch.fnmatch(r, pattern):
-                                    deps.add(r)
-                    continue
-
-                # 2) No loop: evaluate name
-                if "{{" in name and "}}" in name:
-                    if _is_pure_jinja_var(name):
-                        continue  # ignore pure variable
-                    pattern = _jinja_to_glob(name)
-                    if "*" in pattern:
-                        for r in all_roles:
-                            if fnmatch.fnmatch(r, pattern):
-                                deps.add(r)
-                        continue
-                    else:
-                        deps.add(pattern)
-                else:
-                    # pure literal
-                    deps.add(name.strip())
-
-    return deps
-
-
 def main():
-    # default roles dir is ../../roles relative to this script
     script_dir = os.path.dirname(os.path.abspath(__file__))
-    default_roles_dir = os.path.abspath(
-        os.path.join(script_dir, "..", "..", "roles")
-    )
+    default_roles_dir = os.path.abspath(os.path.join(script_dir, "..", "..", "roles"))

     parser = argparse.ArgumentParser(
         description="Generate all graphs for each role and write meta/tree.json"
     )
-    parser.add_argument(
-        "-d", "--role_dir",
-        default=default_roles_dir,
-        help=f"Path to roles directory (default: {default_roles_dir})"
-    )
-    parser.add_argument(
-        "-D", "--depth",
-        type=int,
-        default=0,
-        help="Max recursion depth (>0) or <=0 to stop on cycle"
-    )
-    parser.add_argument(
-        "-o", "--output",
-        choices=["yaml", "json", "console"],
-        default="json",
-        help="Output format"
-    )
-    parser.add_argument(
-        "-p", "--preview",
-        action="store_true",
-        help="Preview graphs to console instead of writing files"
-    )
-    parser.add_argument(
-        "-s", "--shadow-folder",
-        type=str,
-        default=None,
-        help="If set, writes tree.json to this shadow folder instead of the role's actual meta/ folder"
-    )
-    parser.add_argument(
-        "-v", "--verbose",
-        action="store_true",
-        help="Enable verbose logging"
-    )
+    parser.add_argument("-d", "--role_dir", default=default_roles_dir,
+                        help=f"Path to roles directory (default: {default_roles_dir})")
+    parser.add_argument("-D", "--depth", type=int, default=0,
+                        help="Max recursion depth (>0) or <=0 to stop on cycle")
+    parser.add_argument("-o", "--output", choices=["yaml", "json", "console"],
+                        default="json", help="Output format")
+    parser.add_argument("-p", "--preview", action="store_true",
+                        help="Preview graphs to console instead of writing files")
+    parser.add_argument("-s", "--shadow-folder", type=str, default=None,
+                        help="If set, writes tree.json to this shadow folder instead of the role's actual meta/ folder")
+    parser.add_argument("-v", "--verbose", action="store_true", help="Enable verbose logging")
+
+    # Toggles
+    parser.add_argument("--no-include-role", action="store_true", help="Do not scan include_role")
+    parser.add_argument("--no-import-role", action="store_true", help="Do not scan import_role")
+    parser.add_argument("--no-dependencies", action="store_true", help="Do not read meta/main.yml dependencies")
+    parser.add_argument("--no-run-after", action="store_true",
+                        help="Do not read galaxy_info.run_after from meta/main.yml")

     args = parser.parse_args()

     if args.verbose:
@@ -179,6 +50,8 @@ def main():
         print(f"Preview mode: {args.preview}")
         print(f"Shadow folder: {args.shadow_folder}")

+    resolver = RoleDependencyResolver(args.role_dir)
+
     for role_name, role_path in find_roles(args.role_dir):
         if args.verbose:
             print(f"Processing role: {role_name}")
@@ -189,13 +62,26 @@ def main():
             max_depth=args.depth
         )

-        # add include_role dependencies from tasks
-        include_deps = find_include_role_dependencies(role_path, args.role_dir)
-        if include_deps:
+        # Direct deps (depth=1) collected separately for the buckets
+        inc_roles, imp_roles = resolver._scan_tasks(role_path)
+        meta_deps = resolver._extract_meta_dependencies(role_path)
+        run_after = set()
+        if not args.no_run_after:
+            run_after = resolver._extract_meta_run_after(role_path)
+
+        if any([not args.no_include_role and inc_roles,
+                not args.no_import_role and imp_roles,
+                not args.no_dependencies and meta_deps,
+                not args.no_run_after and run_after]):
             deps_root = graphs.setdefault("dependencies", {})
-            inc_list = set(deps_root.get("include_role", []))
-            inc_list.update(include_deps)
-            deps_root["include_role"] = sorted(inc_list)
+            if not args.no_include_role and inc_roles:
+                deps_root["include_role"] = sorted(inc_roles)
+            if not args.no_import_role and imp_roles:
+                deps_root["import_role"] = sorted(imp_roles)
+            if not args.no_dependencies and meta_deps:
+                deps_root["dependencies"] = sorted(meta_deps)
+            if not args.no_run_after and run_after:
+                deps_root["run_after"] = sorted(run_after)
             graphs["dependencies"] = deps_root

         if args.preview:
@@ -205,13 +91,11 @@ def main():
                 output_graph(data, "console", role_name, key)
         else:
             if args.shadow_folder:
-                tree_file = os.path.join(
-                    args.shadow_folder, role_name, "meta", "tree.json"
-                )
+                tree_file = os.path.join(args.shadow_folder, role_name, "meta", "tree.json")
             else:
                 tree_file = os.path.join(role_path, "meta", "tree.json")
             os.makedirs(os.path.dirname(tree_file), exist_ok=True)
-            with open(tree_file, "w") as f:
+            with open(tree_file, "w", encoding="utf-8") as f:
                 json.dump(graphs, f, indent=2)
             print(f"Wrote {tree_file}")
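
For orientation, the "dependencies" buckets that the refactored tree.py writes into each role's meta/tree.json have roughly this shape; the role names below are invented, empty buckets are omitted by the code, and the remaining top-level keys come from build_mappings():

example_tree = {
    "dependencies": {
        "include_role": ["srv-domain-provision", "web-svc-collabora"],
        "dependencies": ["cmp-db-docker"],
        "run_after": ["sys-ctl-alm-compose"],
    },
    # ... plus the graph keys produced by build_mappings() ...
}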

View File

@@ -4,31 +4,71 @@ import os
 sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

 from module_utils.entity_name_utils import get_entity_name
+from module_utils.role_dependency_resolver import RoleDependencyResolver
+from typing import Iterable


 class FilterModule(object):
     def filters(self):
         return {'canonical_domains_map': self.canonical_domains_map}

-    def canonical_domains_map(self, apps, PRIMARY_DOMAIN):
+    def canonical_domains_map(
+        self,
+        apps,
+        PRIMARY_DOMAIN,
+        *,
+        recursive: bool = False,
+        roles_base_dir: str | None = None,
+        seed: Iterable[str] | None = None,
+    ):
         """
-        Maps applications to their canonical domains, checking for conflicts
-        and ensuring all domains are valid and unique across applications.
+        Build { app_id: [canonical domains...] }.
+
+        Recursion follows only include_role, import_role and meta/main.yml dependencies.
+        'run_after' is deliberately ignored here.
         """
+        if not isinstance(apps, dict):
+            raise AnsibleFilterError(f"'apps' must be a dict, got {type(apps).__name__}")
+
+        app_keys = set(apps.keys())
+        seed_keys = set(seed) if seed is not None else app_keys
+
+        if recursive:
+            roles_base_dir = roles_base_dir or os.path.join(os.getcwd(), "roles")
+            if not os.path.isdir(roles_base_dir):
+                raise AnsibleFilterError(
+                    f"roles_base_dir '{roles_base_dir}' not found or not a directory."
+                )
+
+            resolver = RoleDependencyResolver(roles_base_dir)
+            discovered_roles = resolver.resolve_transitively(
+                start_roles=seed_keys,
+                resolve_include_role=True,
+                resolve_import_role=True,
+                resolve_dependencies=True,
+                resolve_run_after=False,
+                max_depth=None,
+            )
+            # all discovered roles that actually have config entries in `apps`
+            target_apps = discovered_roles & app_keys
+        else:
+            target_apps = seed_keys
+
         result = {}
         seen_domains = {}

-        for app_id, cfg in apps.items():
-            if app_id.startswith((
-                "web-",
-                "svc-db-"  # Database services can also be exposed to the internet. It is just listening to the port, but the domain is used for port mapping
-            )):
+        for app_id in sorted(target_apps):
+            cfg = apps.get(app_id)
+            if cfg is None:
+                continue
+            if not str(app_id).startswith(("web-", "svc-db-")):
+                continue
             if not isinstance(cfg, dict):
                 raise AnsibleFilterError(
-                    f"Invalid configuration for application '{app_id}': "
-                    f"expected a dict, got {cfg!r}"
+                    f"Invalid configuration for application '{app_id}': expected dict, got {cfg!r}"
                 )

-            domains_cfg = cfg.get('server',{}).get('domains',{})
+            domains_cfg = cfg.get('server', {}).get('domains', {})
             if not domains_cfg or 'canonical' not in domains_cfg:
                 self._add_default_domain(app_id, PRIMARY_DOMAIN, seen_domains, result)
                 continue
@@ -39,10 +79,6 @@ class FilterModule(object):
         return result

     def _add_default_domain(self, app_id, PRIMARY_DOMAIN, seen_domains, result):
-        """
-        Add the default domain for an application if no canonical domains are defined.
-        Ensures the domain is unique across applications.
-        """
         entity_name = get_entity_name(app_id)
         default_domain = f"{entity_name}.{PRIMARY_DOMAIN}"
         if default_domain in seen_domains:
@@ -54,40 +90,21 @@ class FilterModule(object):
         result[app_id] = [default_domain]

     def _process_canonical_domains(self, app_id, canonical_domains, seen_domains, result):
-        """
-        Process the canonical domains for an application, handling both lists and dicts,
-        and ensuring each domain is unique.
-        """
         if isinstance(canonical_domains, dict):
-            self._process_canonical_domains_dict(app_id, canonical_domains, seen_domains, result)
+            for _, domain in canonical_domains.items():
+                self._validate_and_check_domain(app_id, domain, seen_domains)
+            result[app_id] = canonical_domains.copy()
         elif isinstance(canonical_domains, list):
-            self._process_canonical_domains_list(app_id, canonical_domains, seen_domains, result)
+            for domain in canonical_domains:
+                self._validate_and_check_domain(app_id, domain, seen_domains)
+            result[app_id] = list(canonical_domains)
         else:
             raise AnsibleFilterError(
                 f"Unexpected type for 'server.domains.canonical' in application '{app_id}': "
                 f"{type(canonical_domains).__name__}"
             )

-    def _process_canonical_domains_dict(self, app_id, domains_dict, seen_domains, result):
-        """
-        Process a dictionary of canonical domains for an application.
-        """
-        for name, domain in domains_dict.items():
-            self._validate_and_check_domain(app_id, domain, seen_domains)
-        result[app_id] = domains_dict.copy()
-
-    def _process_canonical_domains_list(self, app_id, domains_list, seen_domains, result):
-        """
-        Process a list of canonical domains for an application.
-        """
-        for domain in domains_list:
-            self._validate_and_check_domain(app_id, domain, seen_domains)
-        result[app_id] = list(domains_list)
-
     def _validate_and_check_domain(self, app_id, domain, seen_domains):
-        """
-        Validate the domain and check if it has already been assigned to another application.
-        """
         if not isinstance(domain, str) or not domain.strip():
             raise AnsibleFilterError(
                 f"Invalid domain entry in 'canonical' for application '{app_id}': {domain!r}"
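
As a rough illustration of the new seed/recursive behaviour, calling the filter directly outside of Ansible; app ids, domains and the roles directory are made up, the repository root is assumed to be on sys.path, and web-app-nextcloud is assumed to pull in web-svc-collabora as in the commits above:

import os
from filter_plugins.canonical_domains_map import FilterModule

apps = {
    "web-app-nextcloud": {"server": {"domains": {"canonical": ["cloud.example.org"]}}},
    "web-svc-collabora": {},  # no canonical domains -> falls back to <entity>.<PRIMARY_DOMAIN>
}

fm = FilterModule()
domains = fm.canonical_domains_map(
    apps,
    "example.org",
    recursive=True,                              # follow include_role / import_role / meta dependencies
    roles_base_dir=os.path.join(os.getcwd(), "roles"),
    seed=["web-app-nextcloud"],                  # start traversal from the current play's apps
)
# e.g. {'web-app-nextcloud': ['cloud.example.org'], 'web-svc-collabora': ['collabora.example.org']}
print(domains)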

View File

@@ -6,3 +6,4 @@ PATH_SYSTEMCTL_SCRIPTS: "{{ [ PATH_ADMINISTRATOR_SCRIPTS, 'systemctl' ]
 PATH_DOCKER_COMPOSE_INSTANCES: "/opt/docker/"
 PATH_SYSTEM_LOCK_SCRIPT: "/opt/scripts/sys-lock.py"
 PATH_SYSTEM_SERVICE_DIR: "/etc/systemd/system"
+PATH_DOCKER_COMPOSE_PULL_LOCK_DIR: "/run/ansible/compose-pull/"

View File

@@ -88,7 +88,7 @@ defaults_networks:
     subnet: 192.168.103.96/28
   web-svc-simpleicons:
     subnet: 192.168.103.112/28
-  web-app-libretranslate:
+  web-svc-libretranslate:
     subnet: 192.168.103.128/28
   web-app-pretix:
     subnet: 192.168.103.144/28

View File

@@ -66,7 +66,7 @@ ports:
     web-svc-collabora: 8042
     web-app-mobilizon: 8043
     web-svc-simpleicons: 8044
-    web-app-libretranslate: 8045
+    web-svc-libretranslate: 8045
     web-app-pretix: 8046
     web-app-mig: 8047
     web-svc-logout: 8048

View File

@@ -0,0 +1,296 @@
import os
import fnmatch
import re
from typing import Dict, Set, Iterable, Tuple, Optional

import yaml


class RoleDependencyResolver:
    _RE_PURE_JINJA = re.compile(r"\s*\{\{\s*[^}]+\s*\}\}\s*$")

    def __init__(self, roles_dir: str):
        self.roles_dir = roles_dir

    # -------------------------- public API --------------------------

    def resolve_transitively(
        self,
        start_roles: Iterable[str],
        *,
        resolve_include_role: bool = True,
        resolve_import_role: bool = True,
        resolve_dependencies: bool = True,
        resolve_run_after: bool = False,
        max_depth: Optional[int] = None,
    ) -> Set[str]:
        to_visit = list(dict.fromkeys(start_roles))
        visited: Set[str] = set()
        depth: Dict[str, int] = {}

        for r in to_visit:
            depth[r] = 0

        while to_visit:
            role = to_visit.pop()
            cur_d = depth.get(role, 0)

            if role in visited:
                continue
            visited.add(role)

            if max_depth is not None and cur_d >= max_depth:
                continue

            for dep in self.get_role_dependencies(
                role,
                resolve_include_role=resolve_include_role,
                resolve_import_role=resolve_import_role,
                resolve_dependencies=resolve_dependencies,
                resolve_run_after=resolve_run_after,
            ):
                if dep not in visited:
                    to_visit.append(dep)
                    depth[dep] = cur_d + 1

        return visited

    def get_role_dependencies(
        self,
        role_name: str,
        *,
        resolve_include_role: bool = True,
        resolve_import_role: bool = True,
        resolve_dependencies: bool = True,
        resolve_run_after: bool = False,
    ) -> Set[str]:
        role_path = os.path.join(self.roles_dir, role_name)
        if not os.path.isdir(role_path):
            return set()

        deps: Set[str] = set()

        if resolve_include_role or resolve_import_role:
            includes, imports = self._scan_tasks(role_path)
            if resolve_include_role:
                deps |= includes
            if resolve_import_role:
                deps |= imports

        if resolve_dependencies:
            deps |= self._extract_meta_dependencies(role_path)

        if resolve_run_after:
            deps |= self._extract_meta_run_after(role_path)

        return deps

    # -------------------------- scanning helpers --------------------------

    def _scan_tasks(self, role_path: str) -> Tuple[Set[str], Set[str]]:
        tasks_dir = os.path.join(role_path, "tasks")
        include_roles: Set[str] = set()
        import_roles: Set[str] = set()

        if not os.path.isdir(tasks_dir):
            return include_roles, import_roles

        all_roles = self._list_role_dirs(self.roles_dir)

        candidates = []
        for root, _, files in os.walk(tasks_dir):
            for f in files:
                if f.endswith(".yml") or f.endswith(".yaml"):
                    candidates.append(os.path.join(root, f))

        for file_path in candidates:
            try:
                with open(file_path, "r", encoding="utf-8") as f:
                    docs = list(yaml.safe_load_all(f))
            except Exception:
                inc, imp = self._tolerant_scan_file(file_path, all_roles)
                include_roles |= inc
                import_roles |= imp
                continue

            for doc in docs or []:
                if not isinstance(doc, list):
                    continue
                for task in doc:
                    if not isinstance(task, dict):
                        continue
                    if "include_role" in task:
                        include_roles |= self._extract_from_task(task, "include_role", all_roles)
                    if "import_role" in task:
                        import_roles |= self._extract_from_task(task, "import_role", all_roles)

        return include_roles, import_roles

    def _extract_from_task(self, task: dict, key: str, all_roles: Iterable[str]) -> Set[str]:
        roles: Set[str] = set()
        spec = task.get(key)
        if not isinstance(spec, dict):
            return roles

        name = spec.get("name")
        loop_val = self._collect_loop_values(task)

        if loop_val is not None:
            for item in self._iter_flat(loop_val):
                cand = self._role_from_loop_item(item, name_template=name)
                if cand:
                    roles.add(cand)

            if isinstance(name, str) and name.strip() and not self._is_pure_jinja_var(name):
                pattern = self._jinja_to_glob(name) if ("{{" in name and "}}" in name) else name
                self._match_glob_into(pattern, all_roles, roles)
            return roles

        if isinstance(name, str) and name.strip():
            if "{{" in name and "}}" in name:
                if self._is_pure_jinja_var(name):
                    return roles
                pattern = self._jinja_to_glob(name)
                self._match_glob_into(pattern, all_roles, roles)
            else:
                roles.add(name.strip())

        return roles

    def _collect_loop_values(self, task: dict):
        for k in ("loop", "with_items", "with_list", "with_flattened"):
            if k in task:
                return task[k]
        return None

    def _iter_flat(self, value):
        if isinstance(value, list):
            for v in value:
                if isinstance(v, list):
                    for x in v:
                        yield x
                else:
                    yield v

    def _role_from_loop_item(self, item, name_template=None) -> Optional[str]:
        tmpl = (name_template or "").strip() if isinstance(name_template, str) else ""

        if isinstance(item, str):
            if tmpl in ("{{ item }}", "{{item}}") or not tmpl or "item" in tmpl:
                return item.strip()
            return None

        if isinstance(item, dict):
            for k in ("role", "name"):
                v = item.get(k)
                if isinstance(v, str) and v.strip():
                    if tmpl in (f"{{{{ item.{k} }}}}", f"{{{{item.{k}}}}}") or not tmpl or "item" in tmpl:
                        return v.strip()
        return None

    def _match_glob_into(self, pattern: str, all_roles: Iterable[str], out: Set[str]):
        if "*" in pattern or "?" in pattern or "[" in pattern:
            for r in all_roles:
                if fnmatch.fnmatch(r, pattern):
                    out.add(r)
        else:
            out.add(pattern)
def test_jinja_mixed_name_glob_matching(self):
    """
    include_role:
      name: "prefix-{{ item }}-suffix"
    loop: [x, y]
    Existing roles: prefix-x-suffix, prefix-y-suffix, prefix-z-suffix
    Expectation:
      - NO raw loop items ('x', 'y') end up as roles
      - Glob matching resolves to all three concrete roles
    """
    make_role(self.roles_dir, "A")
    for rn in ["prefix-x-suffix", "prefix-y-suffix", "prefix-z-suffix"]:
        make_role(self.roles_dir, rn)

    write(
        os.path.join(self.roles_dir, "A", "tasks", "main.yml"),
        """
        - name: jinja-mixed glob
          include_role:
            name: "prefix-{{ item }}-suffix"
          loop:
            - x
            - y
        """
    )

    r = RoleDependencyResolver(self.roles_dir)
    deps = r.get_role_dependencies("A")

    # ensure no raw loop items leak into the results
    self.assertNotIn("x", deps)
    self.assertNotIn("y", deps)
    # only the resolved role names should be present
    self.assertEqual(
        deps,
        {"prefix-x-suffix", "prefix-y-suffix", "prefix-z-suffix"},
    )
    # -------------------------- meta helpers --------------------------

    def _extract_meta_dependencies(self, role_path: str) -> Set[str]:
        deps: Set[str] = set()
        meta_main = os.path.join(role_path, "meta", "main.yml")
        if not os.path.isfile(meta_main):
            return deps
        try:
            with open(meta_main, "r", encoding="utf-8") as f:
                meta = yaml.safe_load(f) or {}
            raw_deps = meta.get("dependencies", [])
            if isinstance(raw_deps, list):
                for item in raw_deps:
                    if isinstance(item, str):
                        deps.add(item.strip())
                    elif isinstance(item, dict):
                        r = item.get("role")
                        if isinstance(r, str) and r.strip():
                            deps.add(r.strip())
        except Exception:
            pass
        return deps

    def _extract_meta_run_after(self, role_path: str) -> Set[str]:
        deps: Set[str] = set()
        meta_main = os.path.join(role_path, "meta", "main.yml")
        if not os.path.isfile(meta_main):
            return deps
        try:
            with open(meta_main, "r", encoding="utf-8") as f:
                meta = yaml.safe_load(f) or {}
            galaxy_info = meta.get("galaxy_info", {})
            run_after = galaxy_info.get("run_after", [])
            if isinstance(run_after, list):
                for item in run_after:
                    if isinstance(item, str) and item.strip():
                        deps.add(item.strip())
        except Exception:
            pass
        return deps

    # -------------------------- small utils --------------------------

    def _list_role_dirs(self, roles_dir: str) -> list[str]:
        return [
            d for d in os.listdir(roles_dir)
            if os.path.isdir(os.path.join(roles_dir, d))
        ]

    @classmethod
    def _is_pure_jinja_var(cls, s: str) -> bool:
        return bool(cls._RE_PURE_JINJA.fullmatch(s or ""))

    @staticmethod
    def _jinja_to_glob(s: str) -> str:
        pattern = re.sub(r"\{\{[^}]+\}\}", "*", s or "")
        pattern = re.sub(r"\*{2,}", "*", pattern)
        return pattern.strip()
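
A short usage sketch for the resolver; the role names are invented, and the private helpers are the same ones tree.py calls above:

from module_utils.role_dependency_resolver import RoleDependencyResolver

resolver = RoleDependencyResolver("roles")

# Direct (depth-1) dependencies of one role, split by mechanism, as tree.py does:
inc, imp = resolver._scan_tasks("roles/web-app-nextcloud")                  # include_role / import_role
meta_deps = resolver._extract_meta_dependencies("roles/web-app-nextcloud")  # meta/main.yml dependencies

# Transitive closure from a seed set, ignoring run_after (as canonical_domains_map does):
closure = resolver.resolve_transitively(
    start_roles=["web-app-nextcloud"],
    resolve_include_role=True,
    resolve_import_role=True,
    resolve_dependencies=True,
    resolve_run_after=False,
)
print(sorted(closure))  # e.g. ['web-app-nextcloud', 'web-svc-collabora', ...]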

View File

@@ -8,4 +8,4 @@ This role builds on `cmp-db-docker` by adding a reverse-proxy frontend for HTTP
   Leverages the `cmp-db-docker` role to stand up your containerized database (PostgreSQL, MariaDB, etc.) with backups and user management.
 - **Reverse Proxy**
-  Includes the `srv-proxy-6-6-domain` role to configure a proxy (e.g. nginx) for routing HTTP(S) traffic to your database UI or management endpoint.
+  Includes the `srv-domain-provision` role to configure a proxy (e.g. nginx) for routing HTTP(S) traffic to your database UI or management endpoint.

View File

@@ -1,7 +1,7 @@
 galaxy_info:
   author: "Kevin Veen-Birkenbach"
   description: >
-    Extends cmp-db-docker by adding an HTTP reverse proxy via srv-proxy-6-6-domain.
+    Extends cmp-db-docker by adding an HTTP reverse proxy via srv-domain-provision.
   company: |
     Kevin Veen-Birkenbach
     Consulting & Coaching Solutions

View File

@@ -8,9 +8,9 @@
   include_role:
     name: cmp-db-docker

-- name: "For '{{ application_id }}': include role srv-proxy-6-6-domain"
+- name: "For '{{ application_id }}': include role srv-domain-provision"
   include_role:
-    name: srv-proxy-6-6-domain
+    name: srv-domain-provision
   vars:
     domain: "{{ domains | get_domain(application_id) }}"
     http_port: "{{ ports.localhost.http[application_id] }}"

View File

@@ -8,4 +8,4 @@ This role combines the standard Docker Compose setup with a reverse-proxy for an
   Brings up containers, networks, and volumes via the `docker-compose` role.
 - **Reverse Proxy**
-  Uses the `srv-proxy-6-6-domain` role to expose your application under a custom domain and port.
+  Uses the `srv-domain-provision` role to expose your application under a custom domain and port.

View File

@@ -1,7 +1,7 @@
 galaxy_info:
   author: "Kevin Veen-Birkenbach"
   description: >
-    Combines the docker-compose role with srv-proxy-6-6-domain to
+    Combines the docker-compose role with srv-domain-provision to
     deploy applications behind a reverse proxy.
   company: |
     Kevin Veen-Birkenbach

View File

@@ -1,9 +1,9 @@
 # run_once_cmp_docker_proxy: deactivated
 # Load the proxy first, so that openresty handlers are flushed before the main docker compose
-- name: "For '{{ application_id }}': include role srv-proxy-6-6-domain"
+- name: "For '{{ application_id }}': include role srv-domain-provision"
   include_role:
-    name: srv-proxy-6-6-domain
+    name: srv-domain-provision
   vars:
     domain: "{{ domains | get_domain(application_id) }}"
     http_port: "{{ ports.localhost.http[application_id] }}"

View File

@@ -20,7 +20,7 @@ To offer a centralized, extensible system for managing containerized application
 - **Reset Logic:** Cleans previous Compose project files and data when `MODE_RESET` is enabled.
 - **Handlers for Runtime Control:** Automatically builds, sets up, or restarts containers based on handlers.
 - **Template-ready Service Files:** Predefined service base and health check templates.
-- **Integration Support:** Compatible with `srv-proxy-7-4-core` and other Infinito.Nexus service roles.
+- **Integration Support:** Compatible with `srv-proxy-core` and other Infinito.Nexus service roles.

 ## Administration Tips

View File

@@ -11,6 +11,30 @@
     - docker compose restart
     - docker compose just up

+- name: docker compose pull
+  shell: |
+    set -euo pipefail
+    lock="{{ [ PATH_DOCKER_COMPOSE_PULL_LOCK_DIR, docker_compose.directories.instance | hash('sha1') ] | path_join }}"
+    if [ ! -e "$lock" ]; then
+      mkdir -p "$(dirname "$lock")"
+      docker compose pull
+      : > "$lock"
+      echo "pulled"
+    fi
+  args:
+    chdir: "{{ docker_compose.directories.instance }}"
+    executable: /bin/bash
+  register: compose_pull
+  changed_when: "'pulled' in compose_pull.stdout"
+  environment:
+    COMPOSE_HTTP_TIMEOUT: 600
+    DOCKER_CLIENT_TIMEOUT: 600
+  when: MODE_UPDATE | bool
+  listen:
+    - docker compose up
+    - docker compose restart
+    - docker compose just up
+
 - name: Build docker compose
   shell: |
     set -euo pipefail
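
The once-per-directory behaviour comes from deriving one lock file per Compose instance directory under PATH_DOCKER_COMPOSE_PULL_LOCK_DIR, named after a SHA-1 of that directory's path. In plain Python, the assumed derivation is roughly (instance path invented for illustration):

import hashlib
import os

lock_dir = "/run/ansible/compose-pull/"   # PATH_DOCKER_COMPOSE_PULL_LOCK_DIR
instance = "/opt/docker/nextcloud/"       # docker_compose.directories.instance (example)
lock = os.path.join(lock_dir, hashlib.sha1(instance.encode()).hexdigest())
print(lock)  # /run/ansible/compose-pull/<sha1 of the instance path>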

View File

@@ -1,3 +1,8 @@
+- name: Remove all docker compose pull locks
+  file:
+    path: "{{ PATH_DOCKER_COMPOSE_PULL_LOCK_DIR }}"
+    state: absent
+
 - name: "Load docker container role"
   include_role:
     name: docker-container

View File

@@ -1,10 +1,10 @@
-# Role: srv-web-7-6-composer
+# Role: srv-composer

 This Ansible role composes and orchestrates all necessary HTTPS-layer tasks and HTML-content injections for your webserver domains. It integrates two key sub-roles into a unified workflow:

 1. **`sys-srv-web-inj-compose`**
    Injects global HTML snippets (CSS, Matomo tracking, iFrame notifier, custom JavaScript) into responses using Nginx `sub_filter`.
-2. **`srv-web-6-6-tls-core`**
+2. **`srv-tls-core`**
    Handles issuing, renewing, and managing TLS certificates via ACME/Certbot.

 By combining encryption setup with content enhancements, this role streamlines domain provisioning for secure, fully-featured HTTP/HTTPS delivery.

@@ -16,7 +16,7 @@ By combining encryption setup with content enhancements, this role streamlines d
 * **Content Injection**
   Adds global theming, analytics, and custom scripts before `</head>` and tracking noscript tags before `</body>`.
 * **Certificate Management**
-  Automates cert issuance and renewal via `srv-web-6-6-tls-core`.
+  Automates cert issuance and renewal via `srv-tls-core`.
 * **Idempotent Workflow**
   Ensures each component runs only once per domain.
 * **Simplified Playbooks**

View File

@@ -27,4 +27,4 @@ galaxy_info:
     - orchestration
   repository: "https://s.infinito.nexus/code"
   issue_tracker_url: "https://s.infinito.nexus/issues"
-  documentation: "https://s.infinito.nexus/code/roles/srv-web-7-6-composer"
+  documentation: "https://s.infinito.nexus/code/roles/srv-composer"

View File

@@ -0,0 +1,9 @@
+# run_once_srv_composer: deactivated
+
+- name: "include role sys-srv-web-inj-compose for '{{ domain }}'"
+  include_role:
+    name: sys-srv-web-inj-compose
+
+- name: "include role srv-tls-core for '{{ domain }}'"
+  include_role:
+    name: srv-tls-core

View File

@@ -18,4 +18,4 @@ galaxy_info:
     - performance
   repository: "https://s.infinito.nexus/code"
   issue_tracker_url: "https://s.infinito.nexus/issues"
-  documentation: "https://s.infinito.nexus/code/roles/srv-web-7-4-core"
+  documentation: "https://s.infinito.nexus/code/roles/srv-core"

View File

@@ -2,4 +2,4 @@
 - block:
     - include_tasks: 01_core.yml
     - include_tasks: utils/run_once.yml
-  when: run_once_srv_web_7_4_core is not defined
+  when: run_once_srv_core is not defined

View File

@@ -6,11 +6,11 @@ This role bootstraps **per-domain Nginx configuration**: it requests TLS certifi
 ## Overview

-A higher-level orchestration wrapper, *srv-proxy-6-6-domain* ties together several lower-level roles:
+A higher-level orchestration wrapper, *srv-domain-provision* ties together several lower-level roles:

 1. **`sys-srv-web-inj-compose`** applies global tweaks and includes.
-2. **`srv-web-6-6-tls-core`** obtains Let's Encrypt certificates.
-3. **Domain template deployment** copies a Jinja2 vHost from *srv-proxy-7-4-core*.
+2. **`srv-tls-core`** obtains Let's Encrypt certificates.
+3. **Domain template deployment** copies a Jinja2 vHost from *srv-proxy-core*.
 4. **`web-app-oauth2-proxy`** *(optional)* protects the site with OAuth2.

 The result is a complete, reproducible domain rollout in a single playbook task.

View File

@@ -2,4 +2,4 @@
 vhost_flavour: "basic"  # valid: basic | ws_generic

 # build the full template path from the flavour
-vhost_template_src: "roles/srv-proxy-7-4-core/templates/vhost/{{ vhost_flavour }}.conf.j2"
+vhost_template_src: "roles/srv-proxy-core/templates/vhost/{{ vhost_flavour }}.conf.j2"

View File

@@ -1,4 +1,4 @@
-# roles/srv-proxy-6-6-domain/tasks/02_enable_cf_dev_mode.yml
+# roles/srv-domain-provision/tasks/02_enable_cf_dev_mode.yml
 ---
 # Enables Cloudflare Development Mode (bypasses cache for ~3 hours).
 # Uses the same auth token as in 01_cleanup.yml: CLOUDFLARE_API_TOKEN

View File

@@ -1,10 +1,10 @@
 - block:
-    - name: Include dependency 'srv-proxy-7-4-core'
+    - name: Include dependency 'srv-proxy-core'
       include_role:
-        name: srv-proxy-7-4-core
-      when: run_once_srv_proxy_7_4_core is not defined
+        name: srv-proxy-core
+      when: run_once_srv_proxy_core is not defined
     - include_tasks: utils/run_once.yml
-  when: run_once_srv_proxy_6_6_domain is not defined
+  when: run_once_srv_domain_provision is not defined

 - include_tasks: "01_cloudflare.yml"
   when: DNS_PROVIDER == "cloudflare"
@@ -15,7 +15,7 @@
 - name: "include role for {{ domain }} to receive certificates and do the modification routines"
   include_role:
-    name: srv-web-7-6-composer
+    name: srv-composer

 - name: "Copy nginx config to {{ configuration_destination }}"
   template:

View File

@@ -1,23 +1,23 @@
 # Webserver HTTPS Provisioning 🚀

 ## Description

-The **srv-web-7-6-https** role extends a basic Nginx installation by wiring in everything you need to serve content over HTTPS:
+The **srv-https-stack** role extends a basic Nginx installation by wiring in everything you need to serve content over HTTPS:

 1. Ensures your Nginx server is configured for SSL/TLS.
 2. Pulls in Let's Encrypt ACME challenge handling.
 3. Applies global cleanup of unused domain configs.

-This role is built on top of your existing `srv-web-7-4-core` role, and it automates the end-to-end process of turning HTTP sites into secure HTTPS sites.
+This role is built on top of your existing `srv-core` role, and it automates the end-to-end process of turning HTTP sites into secure HTTPS sites.

 ---

 ## Overview

-When you apply **srv-web-7-6-https**, it will:
+When you apply **srv-https-stack**, it will:

-1. **Include** the `srv-web-7-4-core` role to install and configure Nginx.
+1. **Include** the `srv-core` role to install and configure Nginx.
 2. **Clean up** any stale vHost files under `sys-svc-cln-domains`.
-3. **Deploy** the Let's Encrypt challenge-and-redirect snippet from `srv-web-7-7-letsencrypt`.
+3. **Deploy** the Let's Encrypt challenge-and-redirect snippet from `srv-letsencrypt`.
 4. **Reload** Nginx automatically when any template changes.

 All tasks are idempotent—once your certificates are in place and your configuration is set, Ansible will skip unchanged steps on subsequent runs.

@@ -42,7 +42,7 @@ All tasks are idempotent—once your certificates are in place and your configur
 ## Requirements

-- A working `srv-web-7-4-core` setup.
+- A working `srv-core` setup.
 - DNS managed via Cloudflare (for CAA record tasks) or equivalent ACME DNS flow.
 - Variables:
   - `LETSENCRYPT_WEBROOT_PATH`

View File

@@ -3,8 +3,8 @@
   include_role:
     name: '{{ item }}'
   loop:
-    - srv-web-7-4-core
+    - srv-core
     - sys-svc-cln-domains
-    - srv-web-7-7-letsencrypt
+    - srv-letsencrypt
 - include_tasks: utils/run_once.yml
-  when: run_once_srv_web_7_6_https is not defined
+  when: run_once_srv_https_stack is not defined

View File

@@ -1,4 +1,4 @@
 - block:
     - include_tasks: 01_core.yml
     - include_tasks: utils/run_once.yml
-  when: run_once_srv_web_7_7_letsencrypt is not defined
+  when: run_once_srv_letsencrypt is not defined

View File

@@ -12,4 +12,4 @@ ssl_session_tickets on;
 add_header Strict-Transport-Security max-age=15768000;
 ssl_stapling on;
 ssl_stapling_verify on;
-{% include 'roles/srv-web-7-7-letsencrypt/templates/ssl_credentials.j2' %}
+{% include 'roles/srv-letsencrypt/templates/ssl_credentials.j2' %}

View File

@@ -16,7 +16,7 @@ The goal of this role is to deliver a **hassle-free, production-ready reverse pr
 ## Features

-- **Automatic TLS & HSTS** — integrates with the *srv-web-7-6-https* role for certificate management.
+- **Automatic TLS & HSTS** — integrates with the *srv-https-stack* role for certificate management.
 - **Flexible vHost templates** — *basic* and *ws_generic* flavours cover standard HTTP and WebSocket applications.
 - **Security headers** — sensible defaults plus optional X-Frame-Options / CSP based on application settings.
 - **WebSocket & HTTP/2 aware** — upgrades, keep-alive tuning, and gzip already configured.

View File

@@ -3,7 +3,7 @@
       include_role:
         name: '{{ item }}'
       loop:
-        - srv-web-7-6-https
-        - srv-web-7-4-core
+        - srv-https-stack
+        - srv-core
     - include_tasks: utils/run_once.yml
-  when: run_once_srv_proxy_7_4_core is not defined
+  when: run_once_srv_proxy_core is not defined

View File

@@ -1,6 +1,6 @@
 # Nginx Location Templates

-This directory contains Jinja2 templates for different Nginx `location` blocks, each designed to proxy and optimize different types of web traffic. These templates are used by the `srv-proxy-7-4-core` role to modularize and standardize reverse proxy configuration across a wide variety of applications.
+This directory contains Jinja2 templates for different Nginx `location` blocks, each designed to proxy and optimize different types of web traffic. These templates are used by the `srv-proxy-core` role to modularize and standardize reverse proxy configuration across a wide variety of applications.

 ---
View File

@@ -16,7 +16,7 @@ location {{location}}
     proxy_set_header X-Forwarded-Proto $scheme;
     proxy_set_header X-Forwarded-Port {{ WEB_PORT }};
-    {% include 'roles/srv-proxy-7-4-core/templates/headers/content_security_policy.conf.j2' %}
+    {% include 'roles/srv-proxy-core/templates/headers/content_security_policy.conf.j2' %}

     # WebSocket specific header
     proxy_http_version 1.1;

View File

@@ -13,7 +13,7 @@ server
     {{ proxy_extra_configuration }}
     {% endif %}

-    {% include 'roles/srv-web-7-7-letsencrypt/templates/ssl_header.j2' %}
+    {% include 'roles/srv-letsencrypt/templates/ssl_header.j2' %}

     {% if applications | get_app_conf(application_id, 'features.oauth2', False) %}
         {% set acl = applications | get_app_conf(application_id, 'oauth2_proxy.acl', False, {}) %}
@@ -22,38 +22,38 @@ server
             {# 1. Expose everything by default, then protect blacklisted paths #}
             {% set oauth2_proxy_enabled = false %}
             {% set location = "/" %}
-            {% include 'roles/srv-proxy-7-4-core/templates/location/html.conf.j2' %}
+            {% include 'roles/srv-proxy-core/templates/location/html.conf.j2' %}

             {% for loc in acl.blacklist %}
                 {% set oauth2_proxy_enabled = true %}
                 {% set location = loc %}
-                {% include 'roles/srv-proxy-7-4-core/templates/location/html.conf.j2' %}
+                {% include 'roles/srv-proxy-core/templates/location/html.conf.j2' %}
             {% endfor %}

         {% elif acl.whitelist is defined %}
             {# 2. Protect everything by default, then expose whitelisted paths #}
             {% set oauth2_proxy_enabled = true %}
             {% set location = "/" %}
-            {% include 'roles/srv-proxy-7-4-core/templates/location/html.conf.j2' %}
+            {% include 'roles/srv-proxy-core/templates/location/html.conf.j2' %}

             {% for loc in acl.whitelist %}
                 {% set oauth2_proxy_enabled = false %}
                 {% set location = loc %}
-                {% include 'roles/srv-proxy-7-4-core/templates/location/html.conf.j2' %}
+                {% include 'roles/srv-proxy-core/templates/location/html.conf.j2' %}
             {% endfor %}

         {% else %}
            {# 3. OAuth2 enabled but no (or empty) ACL — protect all #}
            {% set oauth2_proxy_enabled = true %}
            {% set location = "/" %}
-           {% include 'roles/srv-proxy-7-4-core/templates/location/html.conf.j2' %}
+           {% include 'roles/srv-proxy-core/templates/location/html.conf.j2' %}
        {% endif %}

    {% else %}
        {# 4. OAuth2 completely disabled — expose all #}
        {% set oauth2_proxy_enabled = false %}
        {% set location = "/" %}
-       {% include 'roles/srv-proxy-7-4-core/templates/location/html.conf.j2' %}
+       {% include 'roles/srv-proxy-core/templates/location/html.conf.j2' %}
    {% endif %}
 }

View File

@@ -6,7 +6,7 @@ map $http_upgrade $connection_upgrade {
 server {
     server_name {{ domain }};

-    {% include 'roles/srv-web-7-7-letsencrypt/templates/ssl_header.j2' %}
+    {% include 'roles/srv-letsencrypt/templates/ssl_header.j2' %}

     {% include 'roles/sys-srv-web-inj-compose/templates/server.conf.j2' %}

@@ -25,10 +25,10 @@ server {
     add_header Strict-Transport-Security "max-age=31536000";

-    {% include 'roles/srv-proxy-7-4-core/templates/location/html.conf.j2' %}
+    {% include 'roles/srv-proxy-core/templates/location/html.conf.j2' %}

     {% if location_ws is defined %}
-        {% include 'roles/srv-proxy-7-4-core/templates/location/ws.conf.j2' %}
+        {% include 'roles/srv-proxy-core/templates/location/ws.conf.j2' %}
     {% endif %}

     error_page 500 501 502 503 504 /500.html;

View File

@@ -1,10 +1,10 @@
 - block:
-    - name: Include dependency 'srv-web-7-6-https'
+    - name: Include dependency 'srv-https-stack'
       include_role:
-        name: srv-web-7-6-https
-      when: run_once_srv_web_7_6_https is not defined
+        name: srv-https-stack
+      when: run_once_srv_https_stack is not defined
     - include_tasks: utils/run_once.yml
-  when: run_once_srv_web_6_6_tls_core is not defined
+  when: run_once_srv_tls_core is not defined

 - name: "Include flavor '{{ CERTBOT_FLAVOR }}' for '{{ domain }}'"
   include_tasks: "{{ role_path }}/tasks/flavors/{{ CERTBOT_FLAVOR }}.yml"

View File

@@ -1,9 +0,0 @@
-# run_once_srv_web_7_6_composer: deactivated
-
-- name: "include role sys-srv-web-inj-compose for '{{ domain }}'"
-  include_role:
-    name: sys-srv-web-inj-compose
-
-- name: "include role srv-web-6-6-tls-core for '{{ domain }}'"
-  include_role:
-    name: srv-web-6-6-tls-core

View File

@@ -6,4 +6,3 @@ OnFailure={{ SYS_SERVICE_ON_FAILURE_COMPOSE }} {{ SYS_SERVICE_CLEANUP_BACKUPS_FA
 Type=oneshot
 ExecStartPre=/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(' ') }} --ignore {{ SYS_SERVICE_GROUP_BACKUPS | join(' ') }} --timeout "{{ SYS_TIMEOUT_BACKUP_SERVICES }}"
 ExecStart={{ system_service_script_exec }}

View File

@@ -2,5 +2,5 @@ server {
     listen {{ ports.public.ldaps['svc-db-openldap'] }} ssl;
     proxy_pass 127.0.0.1:{{ ports.localhost.ldap['svc-db-openldap'] }};
-    {% include 'roles/srv-web-7-7-letsencrypt/templates/ssl_credentials.j2' %}
+    {% include 'roles/srv-letsencrypt/templates/ssl_credentials.j2' %}
 }

View File

@@ -14,9 +14,10 @@
   include_tasks: 03_reset.yml
   when: MODE_RESET | bool

-- include_role:
+- name: "Execute system service setup for '{{ system_service_id }}'"
+  include_role:
     name: sys-service
   vars:
     system_service_copy_files: false
-    system_service_timer_enabled: false
+    system_service_timer_enabled: true
     system_service_on_calendar: "{{ SYS_SCHEDULE_BACKUP_DOCKER_TO_LOCAL }}"

View File

@@ -8,3 +8,4 @@
   vars:
     system_service_on_calendar: "{{SYS_SCHEDULE_HEALTH_BTRFS}}"
     system_service_timer_enabled: true
+    system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"

View File

@@ -15,3 +15,4 @@
   vars:
     system_service_on_calendar: "{{ SYS_SCHEDULE_HEALTH_CSP_CRAWLER }}"
     system_service_timer_enabled: true
+    system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"

View File

@@ -0,0 +1,11 @@
+- name: Include dependency 'sys-ctl-alm-compose'
+  include_role:
+    name: sys-ctl-alm-compose
+  when: run_once_sys_ctl_alm_compose is not defined
+
+- include_role:
+    name: sys-service
+  vars:
+    system_service_on_calendar: "{{ SYS_SCHEDULE_HEALTH_DISC_SPACE }}"
+    system_service_timer_enabled: true
+    system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"

View File

@@ -1,13 +1,4 @@
 - block:
-    - name: Include dependency 'sys-ctl-alm-compose'
-      include_role:
-        name: sys-ctl-alm-compose
-      when: run_once_sys_ctl_alm_compose is not defined
+    - include_tasks: 01_core.yml
     - include_tasks: utils/run_once.yml
   when: run_once_sys_ctl_hlth_disc_space is not defined
-
-- include_role:
-    name: sys-service
-  vars:
-    system_service_on_calendar: "{{ SYS_SCHEDULE_HEALTH_DISC_SPACE }}"
-    system_service_timer_enabled: true

View File

@@ -8,3 +8,4 @@
   vars:
     system_service_timer_enabled: true
     system_service_on_calendar: "{{ SYS_SCHEDULE_HEALTH_DOCKER_CONTAINER }}"
+    system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"

View File

@@ -8,3 +8,4 @@
   vars:
     system_service_on_calendar: "{{SYS_SCHEDULE_HEALTH_DOCKER_VOLUMES}}"
     system_service_timer_enabled: true
+    system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"

View File

@@ -8,3 +8,4 @@
   vars:
     system_service_on_calendar: "{{SYS_SCHEDULE_HEALTH_JOURNALCTL}}"
     system_service_timer_enabled: true
+    system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"

View File

@@ -6,5 +6,6 @@
 - include_role:
     name: sys-service
   vars:
+    system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
     system_service_on_calendar: "{{ SYS_SCHEDULE_HEALTH_MSMTP }}"
     system_service_timer_enabled: true

View File

@@ -1,20 +1,20 @@
-- block:
-    - name: Include dependencies
-      include_role:
-        name: '{{ item }}'
-      loop:
-        - dev-python-pip
-        - sys-ctl-alm-compose
-    - include_tasks: utils/run_once.yml
-  when: run_once_sys_ctl_hlth_webserver is not defined
+- name: Include dependencies
+  include_role:
+    name: '{{ item }}'
+  loop:
+    - dev-python-pip
+    - sys-ctl-alm-compose

 - name: Install required Python modules
   community.general.pacman:
     name: python-requests
     state: present

+- meta: flush_handlers
+
 - include_role:
     name: sys-service
   vars:
-    system_service_on_calendar: "{{SYS_SCHEDULE_HEALTH_NGINX}}"
+    system_service_on_calendar: "{{ SYS_SCHEDULE_HEALTH_NGINX }}"
     system_service_timer_enabled: true
+    system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"

View File

@@ -1,6 +1,8 @@
 - name: Include dependency 'sys-ctl-alm-compose'
   include_role:
     name: sys-ctl-alm-compose
+  vars:
+    flush_handlers: true
   when: run_once_sys_ctl_alm_compose is not defined

 - include_role:
@@ -10,3 +12,4 @@
     system_service_on_calendar: "{{ SYS_SCHEDULE_MAINTANANCE_LETSENCRYPT_DEPLOY }}"
     persistent: "true"
     system_service_timer_enabled: true
+    system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"

View File

@@ -3,7 +3,7 @@
     name: '{{ item }}'
   loop:
     - sys-svc-certbot
-    - srv-web-7-4-core
+    - srv-core
     - sys-ctl-alm-compose

 - name: install certbot
@@ -19,3 +19,4 @@
     system_service_on_calendar: "{{ SYS_SCHEDULE_MAINTANANCE_LETSENCRYPT_RENEW }}"
     persistent: true
     system_service_timer_enabled: true
+    system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"

View File

@@ -15,3 +15,4 @@
     system_service_copy_files: false
     system_service_on_calendar: "{{SYS_SCHEDULE_REPAIR_BTRFS_AUTO_BALANCER}}"
     system_service_timer_enabled: true
+    system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"

View File

@@ -8,3 +8,4 @@
   vars:
     system_service_on_calendar: "{{SYS_SCHEDULE_REPAIR_DOCKER_SOFT}}"
     system_service_timer_enabled: true
+    system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"

View File

@@ -7,8 +7,6 @@
   block:
     - name: "Load base routine for '{{ system_service_id }}'"
      include_tasks: 03_base.yml
-    - include_tasks: utils/run_once.yml
-      vars:
-        # Necessary to flush after every service which uses an 'system_service_id' otherwise wrong one will be used
-        flush_handlers: true
+    - name: "Flush system handlers for '{{ system_service_id }}'"
+      meta: flush_handlers
   when: system_service_id is defined

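For readers unfamiliar with the task type used above: `meta: flush_handlers` runs any handlers notified so far immediately, instead of waiting for the end of the play. A standalone illustration, with task and handler names made up for the example, looks like this:

# Standalone illustration of flushing notified handlers mid-play.
- hosts: localhost
  gather_facts: false
  tasks:
    - name: Write a file and notify a handler
      ansible.builtin.copy:
        dest: /tmp/flush-demo.txt
        content: "demo"
      notify: demo handler

    - name: Run all pending handlers right now
      ansible.builtin.meta: flush_handlers

    - name: Later tasks can rely on the handler having already run
      ansible.builtin.debug:
        msg: "continuing after handlers"
  handlers:
    - name: demo handler
      ansible.builtin.debug:
        msg: "handler executed before the play ended"
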
View File

@@ -3,10 +3,10 @@
     inj_enabled: "{{ applications | inj_enabled(application_id, SRV_WEB_INJ_COMP_FEATURES_ALL) }}"
 
 - block:
-    - name: Include dependency 'srv-web-7-4-core'
+    - name: Include dependency 'srv-core'
       include_role:
-        name: srv-web-7-4-core
-      when: run_once_srv_web_7_4_core is not defined
+        name: srv-core
+      when: run_once_srv_core is not defined
     - include_tasks: utils/run_once.yml
   when: run_once_sys_srv_web_inj_compose is not defined

View File

@@ -1,7 +1,7 @@
-- name: Include dependency 'srv-web-7-4-core'
+- name: Include dependency 'srv-core'
   include_role:
-    name: srv-web-7-4-core
-  when: run_once_srv_web_7_4_core is not defined
+    name: srv-core
+  when: run_once_srv_core is not defined
 
 - name: Generate color palette with colorscheme-generator
   set_fact:

View File

@@ -1,8 +1,8 @@
 - block:
-    - name: Include dependency 'srv-web-7-4-core'
+    - name: Include dependency 'srv-core'
       include_role:
-        name: srv-web-7-4-core
-      when: run_once_srv_web_7_4_core is not defined
+        name: srv-core
+      when: run_once_srv_core is not defined
     - include_tasks: 01_deploy.yml
     - include_tasks: utils/run_once.yml
   when: run_once_sys_srv_web_inj_desktop is not defined

View File

@@ -1,9 +1,9 @@
 - block:
-    - name: Include dependency 'srv-web-7-4-core'
+    - name: Include dependency 'srv-core'
       include_role:
-        name: srv-web-7-4-core
-      when: run_once_srv_web_7_4_core is not defined
+        name: srv-core
+      when: run_once_srv_core is not defined
     - include_tasks: utils/run_once.yml
   when: run_once_sys_srv_web_inj_javascript is not defined

View File

@@ -1,8 +1,8 @@
-- name: Include dependency 'srv-web-7-4-core'
+- name: Include dependency 'srv-core'
   include_role:
-    name: srv-web-7-4-core
+    name: srv-core
   when:
-    - run_once_srv_web_7_4_core is not defined
+    - run_once_srv_core is not defined
 
 - name: "deploy the logout.js"
   include_tasks: "02_deploy.yml"

View File

@@ -1,8 +1,8 @@
 - block:
-    - name: Include dependency 'srv-web-7-4-core'
+    - name: Include dependency 'srv-core'
       include_role:
-        name: srv-web-7-4-core
-      when: run_once_srv_web_7_4_core is not defined
+        name: srv-core
+      when: run_once_srv_core is not defined
     - include_tasks: utils/run_once.yml
   when: run_once_sys_srv_web_inj_matomo is not defined

View File

@@ -3,7 +3,7 @@
   include_role:
     name: '{{ item }}'
   loop:
-    - srv-web-7-4-core
+    - srv-core
 
 - name: Include task to remove deprecated nginx configs
   include_tasks: remove_deprecated_nginx_configs.yml

View File

@@ -160,23 +160,6 @@ def upgrade_listmonk():
     run_command('echo "y" | docker compose run -T application ./listmonk --upgrade')
     print("Upgrade complete.")
 
-def update_nextcloud():
-    """
-    Performs the necessary Nextcloud update procedures, including maintenance and app updates.
-    """
-    print("Start Nextcloud upgrade procedure.")
-    update_procedure("docker-compose exec -T -u www-data application /var/www/html/occ upgrade")
-    print("Start Nextcloud repairing procedure.")
-    update_procedure("docker-compose exec -T -u www-data application /var/www/html/occ maintenance:repair --include-expensive")
-    print("Start Nextcloud update procedure.")
-    update_procedure("docker-compose exec -T -u www-data application /var/www/html/occ app:update --all")
-    print("Start Nextcloud add-missing procedure.")
-    update_procedure("docker-compose exec -T -u www-data application /var/www/html/occ db:add-missing-columns")
-    update_procedure("docker-compose exec -T -u www-data application /var/www/html/occ db:add-missing-indices")
-    update_procedure("docker-compose exec -T -u www-data application /var/www/html/occ db:add-missing-primary-keys")
-    print("Deactivate Maintanance Mode")
-    update_procedure("docker-compose exec -T -u www-data application /var/www/html/occ maintenance:mode --off")
-
 def update_procedure(command):
     """
     Attempts to execute a command up to a maximum number of retries.
@@ -239,8 +222,6 @@ if __name__ == "__main__":
             upgrade_listmonk()
         elif os.path.basename(dir_path) == "mastodon":
             update_mastodon()
-        elif os.path.basename(dir_path) == "nextcloud":
-            update_nextcloud()
         # @todo implement dedicated procedure for bluesky
         # @todo implement dedicated procedure for taiga

View File

@@ -1,7 +1,7 @@
 ---
 - name: "For '{{ application_id }}': include role to receive certs & do modification routines"
   include_role:
-    name: srv-web-7-6-composer
+    name: srv-composer
   vars:
     domain: "{{ item }}"
     http_port: "{{ ports.localhost.http[application_id] }}"
@@ -17,7 +17,7 @@
 - name: "For '{{ application_id }}': configure {{ domains | get_domain(application_id) }}.conf"
   template:
-    src: roles/srv-proxy-7-4-core/templates/vhost/basic.conf.j2
+    src: roles/srv-proxy-core/templates/vhost/basic.conf.j2
     dest: "{{ NGINX.DIRECTORIES.HTTP.SERVERS }}{{ domains | get_domain(application_id) }}.conf"
   notify: restart openresty

View File

@@ -35,7 +35,7 @@ By default, BigBlueButton is deployed with best-practice hardening, modular secr
 ## System Requirements
 - Arch Linux with Docker, Compose, and Nginx roles pre-installed
-- DNS and reverse proxy configuration using `srv-proxy-7-4-core`
+- DNS and reverse proxy configuration using `srv-proxy-core`
 - Functional email system for Greenlight SMTP
 
 ## Important Resources

View File

@@ -3,7 +3,7 @@
   set_fact:
     proxy_extra_configuration: >-
       {{ lookup('ansible.builtin.template',
-         playbook_dir ~ '/roles/srv-proxy-7-4-core/templates/location/html.conf.j2') | trim }}
+         playbook_dir ~ '/roles/srv-proxy-core/templates/location/html.conf.j2') | trim }}
   vars:
     location: '^~ /html5client'
     oauth2_proxy_enabled: false

View File

@@ -2,9 +2,9 @@
   include_role:
     name: docker-compose
 
-- name: "include role srv-proxy-6-6-domain for {{ application_id }}"
+- name: "include role srv-domain-provision for {{ application_id }}"
   include_role:
-    name: srv-proxy-6-6-domain
+    name: srv-domain-provision
   vars:
     domain: "{{ item.domain }}"
     http_port: "{{ item.http_port }}"

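The `item.domain` / `item.http_port` references above imply that this include is executed in a loop over per-domain dictionaries. A minimal sketch of that looping pattern follows — the domain list and the debug task are stand-ins for the real role include, not values from this repository:

# Sketch of looping an include over per-domain dictionaries (data invented for illustration).
- hosts: localhost
  gather_facts: false
  vars:
    service_domains:
      - { domain: "example.org",       http_port: 8080 }
      - { domain: "files.example.org", http_port: 8081 }
  tasks:
    - name: Provision one vhost per entry (stand-in for the role include)
      ansible.builtin.debug:
        msg: "would provision {{ item.domain }} -> 127.0.0.1:{{ item.http_port }}"
      loop: "{{ service_domains }}"
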
View File

@@ -1,2 +0,0 @@
-application_id: "web-app-coturn"
-container_port: 3000

View File

@@ -1,8 +1,8 @@
 ---
-- name: "include role srv-proxy-6-6-domain for {{ application_id }}"
+- name: "include role srv-domain-provision for {{ application_id }}"
   include_role:
-    name: srv-proxy-6-6-domain
+    name: srv-domain-provision
   vars:
     domain: "{{ domains | get_domain(application_id) }}"
     http_port: "{{ ports.localhost.http[application_id] }}"

Some files were not shown because too many files have changed in this diff.