Mirror of https://github.com/kevinveenbirkenbach/computer-playbook.git
(synced 2025-09-08 19:27:18 +02:00)

Compare commits: 7d0502ebc5 ... master (252 commits)
@@ -189,7 +189,7 @@ def parse_args():

 def main():
     args = parse_args()
-    primary_domain = '{{ PRIMARY_DOMAIN }}'
+    primary_domain = '{{ SYSTEM_EMAIL.DOMAIN }}'
     become_pwd = '{{ lookup("password", "/dev/null length=42 chars=ascii_letters,digits") }}'

     try:
@@ -102,8 +102,10 @@ def find_cycle(roles):
 def topological_sort(graph, in_degree, roles=None):
     """
     Perform topological sort on the dependency graph.
-    If `roles` is provided, on error it will include detailed debug info.
+    If a cycle is detected, raise an Exception with detailed debug info.
     """
+    from collections import deque
+
     queue = deque([r for r, d in in_degree.items() if d == 0])
     sorted_roles = []
     local_in = dict(in_degree)
@@ -117,28 +119,26 @@ def topological_sort(graph, in_degree, roles=None):
                 queue.append(nbr)

     if len(sorted_roles) != len(in_degree):
+        # Something went wrong: likely a cycle
         cycle = find_cycle(roles or {})
-        if roles is not None:
-            if cycle:
-                header = f"Circular dependency detected: {' -> '.join(cycle)}"
-            else:
-                header = "Circular dependency detected among the roles!"
-
-            unsorted = [r for r in in_degree if r not in sorted_roles]
-            detail_lines = ["Unsorted roles and their dependencies:"]
+        unsorted = [r for r in in_degree if r not in sorted_roles]
+
+        header = "❌ Dependency resolution failed"
+        if cycle:
+            reason = f"Circular dependency detected: {' -> '.join(cycle)}"
+        else:
+            reason = "Unresolved dependencies among roles (possible cycle or missing role)."
+
+        details = []
+        if unsorted:
+            details.append("Unsorted roles and their declared run_after dependencies:")
             for r in unsorted:
                 deps = roles.get(r, {}).get('run_after', [])
-                detail_lines.append(f"  - {r} depends on {deps!r}")
-
-            detail_lines.append("Full dependency graph:")
-            detail_lines.append(f"  {dict(graph)!r}")
-
-            raise Exception("\n".join([header] + detail_lines))
-        else:
-            if cycle:
-                raise Exception(f"Circular dependency detected: {' -> '.join(cycle)}")
-            else:
-                raise Exception("Circular dependency detected among the roles!")
+                details.append(f"  - {r} depends on {deps!r}")
+
+        graph_repr = f"Full dependency graph: {dict(graph)!r}"
+
+        raise Exception("\n".join([header, reason] + details + [graph_repr]))

     return sorted_roles
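To see when the rewritten failure path triggers, here is a minimal, self-contained sketch of the same Kahn-style pass over a hypothetical two-role cycle (role names invented for illustration):

    from collections import deque

    graph = {"role-a": ["role-b"], "role-b": ["role-a"]}   # hypothetical cycle
    in_degree = {"role-a": 1, "role-b": 1}

    queue = deque(r for r, d in in_degree.items() if d == 0)  # empty: no zero-degree start node
    sorted_roles = []
    local_in = dict(in_degree)
    while queue:
        r = queue.popleft()
        sorted_roles.append(r)
        for nbr in graph.get(r, []):
            local_in[nbr] -= 1
            if local_in[nbr] == 0:
                queue.append(nbr)

    # len(sorted_roles) != len(in_degree), so the new code raises with the header,
    # a reason line, per-role run_after details, and the full graph repr.
    print(len(sorted_roles), "of", len(in_degree), "roles sorted")  # 0 of 2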
@@ -5,10 +5,10 @@ import json
 from typing import Dict, Any

 from cli.build.graph import build_mappings, output_graph
+from module_utils.role_dependency_resolver import RoleDependencyResolver


 def find_roles(roles_dir: str):
-    """Yield (role_name, role_path) for every subfolder in roles_dir."""
     for entry in os.listdir(roles_dir):
         path = os.path.join(roles_dir, entry)
         if os.path.isdir(path):
@@ -16,46 +16,31 @@ def find_roles(roles_dir: str):


 def main():
-    # default roles dir is ../../roles relative to this script
     script_dir = os.path.dirname(os.path.abspath(__file__))
-    default_roles_dir = os.path.abspath(os.path.join(script_dir, '..', '..', 'roles'))
+    default_roles_dir = os.path.abspath(os.path.join(script_dir, "..", "..", "roles"))

     parser = argparse.ArgumentParser(
         description="Generate all graphs for each role and write meta/tree.json"
     )
-    parser.add_argument(
-        '-d', '--role_dir',
-        default=default_roles_dir,
-        help=f"Path to roles directory (default: {default_roles_dir})"
-    )
-    parser.add_argument(
-        '-D', '--depth',
-        type=int,
-        default=0,
-        help="Max recursion depth (>0) or <=0 to stop on cycle"
-    )
-    parser.add_argument(
-        '-o', '--output',
-        choices=['yaml', 'json', 'console'],
-        default='json',
-        help="Output format"
-    )
-    parser.add_argument(
-        '-p', '--preview',
-        action='store_true',
-        help="Preview graphs to console instead of writing files"
-    )
-    parser.add_argument(
-        '-s', '--shadow-folder',
-        type=str,
-        default=None,
-        help="If set, writes tree.json to this shadow folder instead of the role's actual meta/ folder"
-    )
-    parser.add_argument(
-        '-v', '--verbose',
-        action='store_true',
-        help="Enable verbose logging"
-    )
+    parser.add_argument("-d", "--role_dir", default=default_roles_dir,
+                        help=f"Path to roles directory (default: {default_roles_dir})")
+    parser.add_argument("-D", "--depth", type=int, default=0,
+                        help="Max recursion depth (>0) or <=0 to stop on cycle")
+    parser.add_argument("-o", "--output", choices=["yaml", "json", "console"],
+                        default="json", help="Output format")
+    parser.add_argument("-p", "--preview", action="store_true",
+                        help="Preview graphs to console instead of writing files")
+    parser.add_argument("-s", "--shadow-folder", type=str, default=None,
+                        help="If set, writes tree.json to this shadow folder instead of the role's actual meta/ folder")
+    parser.add_argument("-v", "--verbose", action="store_true", help="Enable verbose logging")
+
+    # Toggles
+    parser.add_argument("--no-include-role", action="store_true", help="Do not scan include_role")
+    parser.add_argument("--no-import-role", action="store_true", help="Do not scan import_role")
+    parser.add_argument("--no-dependencies", action="store_true", help="Do not read meta/main.yml dependencies")
+    parser.add_argument("--no-run-after", action="store_true",
+                        help="Do not read galaxy_info.run_after from meta/main.yml")
+
     args = parser.parse_args()

     if args.verbose:
@@ -65,6 +50,8 @@ def main():
         print(f"Preview mode: {args.preview}")
         print(f"Shadow folder: {args.shadow_folder}")

+    resolver = RoleDependencyResolver(args.role_dir)
+
     for role_name, role_path in find_roles(args.role_dir):
         if args.verbose:
             print(f"Processing role: {role_name}")
@@ -75,24 +62,43 @@ def main():
             max_depth=args.depth
         )

+        # Direct deps (depth=1) - collected separately for the buckets
+        inc_roles, imp_roles = resolver._scan_tasks(role_path)
+        meta_deps = resolver._extract_meta_dependencies(role_path)
+        run_after = set()
+        if not args.no_run_after:
+            run_after = resolver._extract_meta_run_after(role_path)
+
+        if any([not args.no_include_role and inc_roles,
+                not args.no_import_role and imp_roles,
+                not args.no_dependencies and meta_deps,
+                not args.no_run_after and run_after]):
+            deps_root = graphs.setdefault("dependencies", {})
+            if not args.no_include_role and inc_roles:
+                deps_root["include_role"] = sorted(inc_roles)
+            if not args.no_import_role and imp_roles:
+                deps_root["import_role"] = sorted(imp_roles)
+            if not args.no_dependencies and meta_deps:
+                deps_root["dependencies"] = sorted(meta_deps)
+            if not args.no_run_after and run_after:
+                deps_root["run_after"] = sorted(run_after)
+            graphs["dependencies"] = deps_root
+
         if args.preview:
             for key, data in graphs.items():
                 if args.verbose:
                     print(f"Previewing graph '{key}' for role '{role_name}'")
-                output_graph(data, 'console', role_name, key)
+                output_graph(data, "console", role_name, key)
         else:
-            # Decide on output folder
             if args.shadow_folder:
-                tree_file = os.path.join(
-                    args.shadow_folder, role_name, 'meta', 'tree.json'
-                )
+                tree_file = os.path.join(args.shadow_folder, role_name, "meta", "tree.json")
             else:
-                tree_file = os.path.join(role_path, 'meta', 'tree.json')
+                tree_file = os.path.join(role_path, "meta", "tree.json")
             os.makedirs(os.path.dirname(tree_file), exist_ok=True)
-            with open(tree_file, 'w') as f:
+            with open(tree_file, "w", encoding="utf-8") as f:
                 json.dump(graphs, f, indent=2)
             print(f"Wrote {tree_file}")


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
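For orientation, this is the shape of the "dependencies" buckets the new loop writes into each role's meta/tree.json (role names are hypothetical; a bucket only appears when its scan is enabled and found something):

    import json

    example_tree = {
        "dependencies": {
            "include_role": ["sys-ctl-hlth-webserver"],  # from include_role scanning
            "import_role": ["sys-lock"],                 # from import_role scanning
            "dependencies": ["svc-db-postgres"],         # from meta/main.yml dependencies
            "run_after": ["web-app-keycloak"],           # from galaxy_info.run_after
        }
    }
    print(json.dumps(example_tree, indent=2))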
@@ -1,14 +1,29 @@
+#!/usr/bin/env python3
+"""
+Selectively add & vault NEW credentials in your inventory, preserving comments
+and formatting. Existing values are left untouched unless --force is used.
+
+Usage example:
+  infinito create credentials \
+    --role-path roles/web-app-akaunting \
+    --inventory-file host_vars/echoserver.yml \
+    --vault-password-file .pass/echoserver.txt \
+    --set credentials.database_password=mysecret
+"""
+
 import argparse
-import subprocess
 import sys
 from pathlib import Path
-import yaml
-from typing import Dict, Any
-from module_utils.manager.inventory import InventoryManager
-from module_utils.handler.vault import VaultHandler, VaultScalar
-from module_utils.handler.yaml import YamlHandler
-from yaml.dumper import SafeDumper
+from typing import Dict, Any, Union
+
+from ruamel.yaml import YAML
+from ruamel.yaml.comments import CommentedMap
+
+from module_utils.manager.inventory import InventoryManager
+from module_utils.handler.vault import VaultHandler  # uses your existing handler
+
+
+# ---------- helpers ----------

 def ask_for_confirmation(key: str) -> bool:
     """Prompt the user for confirmation to overwrite an existing value."""
@@ -18,35 +33,117 @@ def ask_for_confirmation(key: str) -> bool:
     return confirmation == 'y'


-def main():
+def ensure_map(node: CommentedMap, key: str) -> CommentedMap:
+    """
+    Ensure node[key] exists and is a mapping (CommentedMap) for round-trip safety.
+    """
+    if key not in node or not isinstance(node.get(key), CommentedMap):
+        node[key] = CommentedMap()
+    return node[key]
+
+
+def _is_ruamel_vault(val: Any) -> bool:
+    """Detect if a ruamel scalar already carries the !vault tag."""
+    try:
+        return getattr(val, 'tag', None) == '!vault'
+    except Exception:
+        return False
+
+
+def _is_vault_encrypted(val: Any) -> bool:
+    """
+    Detect if value is already a vault string or a ruamel !vault scalar.
+    Accept both '$ANSIBLE_VAULT' and '!vault' markers.
+    """
+    if _is_ruamel_vault(val):
+        return True
+    if isinstance(val, str) and ("$ANSIBLE_VAULT" in val or "!vault" in val):
+        return True
+    return False
+
+
+def _vault_body(text: str) -> str:
+    """
+    Return only the vault body starting from the first line that contains
+    '$ANSIBLE_VAULT'. If not found, return the original text.
+    Also strips any leading '!vault |' header if present.
+    """
+    lines = text.splitlines()
+    for i, ln in enumerate(lines):
+        if "$ANSIBLE_VAULT" in ln:
+            return "\n".join(lines[i:])
+    return text
+
+
+def _make_vault_scalar_from_text(text: str) -> Any:
+    """
+    Build a ruamel object representing a literal block scalar tagged with !vault
+    by parsing a tiny YAML snippet. This avoids depending on yaml_set_tag().
+    """
+    body = _vault_body(text)
+    indented = "  " + body.replace("\n", "\n  ")  # proper block scalar indentation
+    snippet = f"v: !vault |\n{indented}\n"
+    y = YAML(typ="rt")
+    return y.load(snippet)["v"]
+
+
+def to_vault_block(vault_handler: VaultHandler, value: Union[str, Any], label: str) -> Any:
+    """
+    Return a ruamel scalar tagged as !vault. If the input value is already
+    vault-encrypted (string contains $ANSIBLE_VAULT or is a !vault scalar), reuse/wrap.
+    Otherwise, encrypt plaintext via ansible-vault.
+    """
+    # Already a ruamel !vault scalar → reuse
+    if _is_ruamel_vault(value):
+        return value
+
+    # Already an encrypted string (may include '!vault |' or just the header)
+    if isinstance(value, str) and ("$ANSIBLE_VAULT" in value or "!vault" in value):
+        return _make_vault_scalar_from_text(value)
+
+    # Plaintext → encrypt now
+    snippet = vault_handler.encrypt_string(str(value), label)
+    return _make_vault_scalar_from_text(snippet)
+
+
+def parse_overrides(pairs: list[str]) -> Dict[str, str]:
+    """
+    Parse --set key=value pairs into a dict.
+    Supports both 'credentials.key=val' and 'key=val' (short) forms.
+    """
+    out: Dict[str, str] = {}
+    for pair in pairs:
+        k, v = pair.split("=", 1)
+        out[k.strip()] = v.strip()
+    return out
+
+
+# ---------- main ----------
+
+def main() -> int:
     parser = argparse.ArgumentParser(
-        description="Selectively vault credentials + become-password in your inventory."
+        description="Selectively add & vault NEW credentials in your inventory, preserving comments/formatting."
     )
+    parser.add_argument("--role-path", required=True, help="Path to your role")
+    parser.add_argument("--inventory-file", required=True, help="Host vars file to update")
+    parser.add_argument("--vault-password-file", required=True, help="Vault password file")
     parser.add_argument(
-        "--role-path", required=True, help="Path to your role"
-    )
-    parser.add_argument(
-        "--inventory-file", required=True, help="Host vars file to update"
-    )
-    parser.add_argument(
-        "--vault-password-file", required=True, help="Vault password file"
-    )
-    parser.add_argument(
-        "--set", nargs="*", default=[], help="Override values key.subkey=VALUE"
+        "--set", nargs="*", default=[],
+        help="Override values key[.subkey]=VALUE (applied to NEW keys; with --force also to existing)"
     )
     parser.add_argument(
         "-f", "--force", action="store_true",
-        help="Force overwrite without confirmation"
+        help="Allow overrides to replace existing values (will ask per key unless combined with --yes)"
+    )
+    parser.add_argument(
+        "-y", "--yes", action="store_true",
+        help="Non-interactive: assume 'yes' for all overwrite confirmations when --force is used"
     )
     args = parser.parse_args()

-    # Parse overrides
-    overrides = {
-        k.strip(): v.strip()
-        for pair in args.set for k, v in [pair.split("=", 1)]
-    }
+    overrides = parse_overrides(args.set)

-    # Initialize inventory manager
+    # Initialize inventory manager (provides schema + app_id + vault)
     manager = InventoryManager(
         role_path=Path(args.role_path),
         inventory_path=Path(args.inventory_file),
@@ -54,62 +151,90 @@ def main():
         overrides=overrides
     )

-    # Load existing credentials to preserve
-    existing_apps = manager.inventory.get("applications", {})
-    existing_creds = {}
-    if manager.app_id in existing_apps:
-        existing_creds = existing_apps[manager.app_id].get("credentials", {}).copy()
+    # 1) Load existing inventory with ruamel (round-trip)
+    yaml_rt = YAML(typ="rt")
+    yaml_rt.preserve_quotes = True

-    # Apply schema (may generate defaults)
-    updated_inventory = manager.apply_schema()
+    with open(args.inventory_file, "r", encoding="utf-8") as f:
+        data = yaml_rt.load(f)  # CommentedMap or None
+    if data is None:
+        data = CommentedMap()

-    # Restore existing database_password if present
-    apps = updated_inventory.setdefault("applications", {})
-    app_block = apps.setdefault(manager.app_id, {})
-    creds = app_block.setdefault("credentials", {})
-    if "database_password" in existing_creds:
-        creds["database_password"] = existing_creds["database_password"]
+    # 2) Get schema-applied structure (defaults etc.) for *non-destructive* merge
+    schema_inventory: Dict[str, Any] = manager.apply_schema()

-    # Store original plaintext values
-    original_plain = {key: str(val) for key, val in creds.items()}
+    # 3) Ensure structural path exists
+    apps = ensure_map(data, "applications")
+    app_block = ensure_map(apps, manager.app_id)
+    creds = ensure_map(app_block, "credentials")

-    for key, raw_val in list(creds.items()):
-        # Skip if already vaulted
-        if isinstance(raw_val, VaultScalar) or str(raw_val).lstrip().startswith("$ANSIBLE_VAULT"):
+    # 4) Determine defaults we could add
+    schema_apps = schema_inventory.get("applications", {})
+    schema_app_block = schema_apps.get(manager.app_id, {})
+    schema_creds = schema_app_block.get("credentials", {}) if isinstance(schema_app_block, dict) else {}
+
+    # 5) Add ONLY missing credential keys
+    newly_added_keys = set()
+    for key, default_val in schema_creds.items():
+        if key in creds:
+            # existing → do not touch (preserve plaintext/vault/formatting/comments)
             continue

-        # Determine plaintext
-        plain = original_plain.get(key, "")
-        if key in overrides and (args.force or ask_for_confirmation(key)):
-            plain = overrides[key]
+        # Value to use for the new key
+        # Priority: --set exact key → default from schema → empty string
+        ov = overrides.get(f"credentials.{key}", None)
+        if ov is None:
+            ov = overrides.get(key, None)

-        # Encrypt the plaintext
-        encrypted = manager.vault_handler.encrypt_string(plain, key)
-        lines = encrypted.splitlines()
-        indent = len(lines[1]) - len(lines[1].lstrip())
-        body = "\n".join(line[indent:] for line in lines[1:])
-        creds[key] = VaultScalar(body)
-
-    # Vault top-level become password if present
-    if "ansible_become_password" in updated_inventory:
-        val = str(updated_inventory["ansible_become_password"])
-        if val.lstrip().startswith("$ANSIBLE_VAULT"):
-            updated_inventory["ansible_become_password"] = VaultScalar(val)
+        if ov is not None:
+            value_for_new_key: Union[str, Any] = ov
         else:
-            snippet = manager.vault_handler.encrypt_string(
-                val, "ansible_become_password"
-            )
-            lines = snippet.splitlines()
-            indent = len(lines[1]) - len(lines[1].lstrip())
-            body = "\n".join(line[indent:] for line in lines[1:])
-            updated_inventory["ansible_become_password"] = VaultScalar(body)
+            if _is_vault_encrypted(default_val):
+                # Schema already provides a vault value → take it as-is
+                creds[key] = to_vault_block(manager.vault_handler, default_val, key)
+                newly_added_keys.add(key)
+                continue
+            value_for_new_key = "" if default_val is None else str(default_val)
+
+        # Insert as !vault literal (encrypt if needed)
+        creds[key] = to_vault_block(manager.vault_handler, value_for_new_key, key)
+        newly_added_keys.add(key)
+
+    # 6) ansible_become_password: only add if missing;
+    #    never rewrite an existing one unless --force (+ confirm/--yes) and override provided.
+    if "ansible_become_password" not in data:
+        val = overrides.get("ansible_become_password", None)
+        if val is not None:
+            data["ansible_become_password"] = to_vault_block(
+                manager.vault_handler, val, "ansible_become_password"
+            )
+    else:
+        if args.force and "ansible_become_password" in overrides:
+            do_overwrite = args.yes or ask_for_confirmation("ansible_become_password")
+            if do_overwrite:
+                data["ansible_become_password"] = to_vault_block(
+                    manager.vault_handler, overrides["ansible_become_password"], "ansible_become_password"
+                )

-    # Write back to file
+    # 7) Overrides for existing credential keys (only with --force)
+    if args.force:
+        for ov_key, ov_val in overrides.items():
+            # Accept both 'credentials.key' and bare 'key'
+            key = ov_key.split(".", 1)[1] if ov_key.startswith("credentials.") else ov_key
+            if key in creds:
+                # If we just added it in this run, don't ask again or rewrap
+                if key in newly_added_keys:
+                    continue
+                if args.yes or ask_for_confirmation(key):
+                    creds[key] = to_vault_block(manager.vault_handler, ov_val, key)
+
+    # 8) Write back with ruamel (preserve formatting & comments)
     with open(args.inventory_file, "w", encoding="utf-8") as f:
-        yaml.dump(updated_inventory, f, sort_keys=False, Dumper=SafeDumper)
+        yaml_rt.dump(data, f)

-    print(f"✅ Inventory selectively vaulted → {args.inventory_file}")
+    print(f"✅ Added new credentials without touching existing formatting/comments → {args.inventory_file}")
+    return 0


 if __name__ == "__main__":
-    main()
+    sys.exit(main())
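The core trick in _make_vault_scalar_from_text is to round-trip a tiny YAML snippet so ruamel hands back a !vault-tagged literal block scalar. A standalone sketch (the vault payload below is hypothetical and truncated):

    from ruamel.yaml import YAML
    import sys

    body = "$ANSIBLE_VAULT;1.1;AES256\n62616263..."  # hypothetical, truncated payload
    indented = "  " + body.replace("\n", "\n  ")     # indent for the literal block
    snippet = f"v: !vault |\n{indented}\n"

    y = YAML(typ="rt")
    node = y.load(snippet)["v"]   # round-trip scalar carrying the !vault tag
    y.dump({"become_password": node}, sys.stdout)    # re-emits as `become_password: !vault |`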
@@ -11,8 +11,8 @@ sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
 from module_utils.entity_name_utils import get_entity_name

 # Paths to the group-vars files
-PORTS_FILE = './group_vars/all/09_ports.yml'
-NETWORKS_FILE = './group_vars/all/10_networks.yml'
+PORTS_FILE = './group_vars/all/10_ports.yml'
+NETWORKS_FILE = './group_vars/all/09_networks.yml'
 ROLE_TEMPLATE_DIR = './templates/roles/web-app'
 ROLES_DIR = './roles'

@@ -198,6 +198,7 @@ def main():
         "MODE_CLEANUP": args.cleanup,
         "MODE_LOGS": args.logs,
         "MODE_DEBUG": args.debug,
+        "MODE_ASSERT": not args.skip_validation,
         "host_type": args.host_type
     }

@@ -228,7 +228,7 @@ def parse_meta_dependencies(role_dir: str) -> List[str]:
 def sanitize_run_once_var(role_name: str) -> str:
     """
     Generate run_once variable name from role name.
-    Example: 'sys-srv-web-inj-logout' -> 'run_once_sys_srv_web_inj_logout'
+    Example: 'sys-front-inj-logout' -> 'run_once_sys_front_inj_logout'
     """
     return "run_once_" + role_name.replace("-", "_")
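The renamed example in the docstring is easy to verify; the transformation is just dash-to-underscore with a fixed prefix:

    def sanitize_run_once_var(role_name: str) -> str:
        return "run_once_" + role_name.replace("-", "_")

    assert sanitize_run_once_var("sys-front-inj-logout") == "run_once_sys_front_inj_logout"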
@@ -15,8 +15,8 @@ Follow these guides to install and configure Infinito.Nexus:
 - **Networking & VPN** - Configure `WireGuard`, `OpenVPN`, and `Nginx Reverse Proxy`.

 ## Managing & Updating Infinito.Nexus 🔄
-- Regularly update services using `update-docker`, `update-pacman`, or `update-apt`.
-- Monitor system health with `sys-hlth-btrfs`, `sys-hlth-webserver`, and `sys-hlth-docker-container`.
-- Automate system maintenance with `sys-lock`, `sys-cln-bkps-service`, and `sys-rpr-docker-hard`.
+- Regularly update services using `update-pacman`, or `update-apt`.
+- Monitor system health with `sys-ctl-hlth-btrfs`, `sys-ctl-hlth-webserver`, and `sys-ctl-hlth-docker-container`.
+- Automate system maintenance with `sys-lock`, `sys-ctl-cln-bkps`, and `sys-ctl-rpr-docker-hard`.

 For more details, refer to the specific guides above.
@@ -1,86 +0,0 @@
-from ansible.errors import AnsibleFilterError
-
-class FilterModule(object):
-    def filters(self):
-        return {'alias_domains_map': self.alias_domains_map}
-
-    def alias_domains_map(self, apps, PRIMARY_DOMAIN):
-        """
-        Build a map of application IDs to their alias domains.
-
-        - If no `domains` key → []
-        - If `domains` exists but is an empty dict → return the original cfg
-        - Explicit `aliases` are used (default appended if missing)
-        - If only `canonical` defined and it doesn't include default, default is added
-        - Invalid types raise AnsibleFilterError
-        """
-        def parse_entry(domains_cfg, key, app_id):
-            if key not in domains_cfg:
-                return None
-            entry = domains_cfg[key]
-            if isinstance(entry, dict):
-                values = list(entry.values())
-            elif isinstance(entry, list):
-                values = entry
-            else:
-                raise AnsibleFilterError(
-                    f"Unexpected type for 'domains.{key}' in application '{app_id}': {type(entry).__name__}"
-                )
-            for d in values:
-                if not isinstance(d, str) or not d.strip():
-                    raise AnsibleFilterError(
-                        f"Invalid domain entry in '{key}' for application '{app_id}': {d!r}"
-                    )
-            return values
-
-        def default_domain(app_id, primary):
-            return f"{app_id}.{primary}"
-
-        # 1) Precompute canonical domains per app (fallback to default)
-        canonical_map = {}
-        for app_id, cfg in apps.items():
-            domains_cfg = cfg.get('server',{}).get('domains',{})
-            entry = domains_cfg.get('canonical')
-            if entry is None:
-                canonical_map[app_id] = [default_domain(app_id, PRIMARY_DOMAIN)]
-            elif isinstance(entry, dict):
-                canonical_map[app_id] = list(entry.values())
-            elif isinstance(entry, list):
-                canonical_map[app_id] = list(entry)
-            else:
-                raise AnsibleFilterError(
-                    f"Unexpected type for 'server.domains.canonical' in application '{app_id}': {type(entry).__name__}"
-                )
-
-        # 2) Build alias list per app
-        result = {}
-        for app_id, cfg in apps.items():
-            domains_cfg = cfg.get('server',{}).get('domains')
-
-            # no domains key → no aliases
-            if domains_cfg is None:
-                result[app_id] = []
-                continue
-
-            # empty domains dict → return the original cfg
-            if isinstance(domains_cfg, dict) and not domains_cfg:
-                result[app_id] = cfg
-                continue
-
-            # otherwise, compute aliases
-            aliases = parse_entry(domains_cfg, 'aliases', app_id) or []
-            default = default_domain(app_id, PRIMARY_DOMAIN)
-            has_aliases = 'aliases' in domains_cfg
-            has_canon = 'canonical' in domains_cfg
-
-            if has_aliases:
-                if default not in aliases:
-                    aliases.append(default)
-            elif has_canon:
-                canon = canonical_map.get(app_id, [])
-                if default not in canon and default not in aliases:
-                    aliases.append(default)
-
-            result[app_id] = aliases
-
-        return result
@@ -4,45 +4,81 @@ import os

 sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
 from module_utils.entity_name_utils import get_entity_name
+from module_utils.role_dependency_resolver import RoleDependencyResolver
+from typing import Iterable


 class FilterModule(object):
     def filters(self):
         return {'canonical_domains_map': self.canonical_domains_map}

-    def canonical_domains_map(self, apps, PRIMARY_DOMAIN):
+    def canonical_domains_map(
+        self,
+        apps,
+        PRIMARY_DOMAIN,
+        *,
+        recursive: bool = False,
+        roles_base_dir: str | None = None,
+        seed: Iterable[str] | None = None,
+    ):
         """
-        Maps applications to their canonical domains, checking for conflicts
-        and ensuring all domains are valid and unique across applications.
+        Build { app_id: [canonical domains...] }.
+
+        Recursion only follows include_role, import_role and meta/main.yml dependencies.
+        'run_after' is deliberately ignored here.
         """
+        if not isinstance(apps, dict):
+            raise AnsibleFilterError(f"'apps' must be a dict, got {type(apps).__name__}")
+
+        app_keys = set(apps.keys())
+        seed_keys = set(seed) if seed is not None else app_keys
+
+        if recursive:
+            roles_base_dir = roles_base_dir or os.path.join(os.getcwd(), "roles")
+            if not os.path.isdir(roles_base_dir):
+                raise AnsibleFilterError(
+                    f"roles_base_dir '{roles_base_dir}' not found or not a directory."
+                )
+
+            resolver = RoleDependencyResolver(roles_base_dir)
+            discovered_roles = resolver.resolve_transitively(
+                start_roles=seed_keys,
+                resolve_include_role=True,
+                resolve_import_role=True,
+                resolve_dependencies=True,
+                resolve_run_after=False,
+                max_depth=None,
+            )
+            # all discovered roles that actually have config entries in `apps`
+            target_apps = discovered_roles & app_keys
+        else:
+            target_apps = seed_keys
+
         result = {}
         seen_domains = {}

-        for app_id, cfg in apps.items():
-            if app_id.startswith((
-                "web-",
-                "svc-db-"  # Database services can also be exposed to the internet. It is just listening to the port, but the domain is used for port mapping
-            )):
-                if not isinstance(cfg, dict):
-                    raise AnsibleFilterError(
-                        f"Invalid configuration for application '{app_id}': "
-                        f"expected a dict, got {cfg!r}"
-                    )
+        for app_id in sorted(target_apps):
+            cfg = apps.get(app_id)
+            if cfg is None:
+                continue
+            if not str(app_id).startswith(("web-", "svc-db-")):
+                continue
+            if not isinstance(cfg, dict):
+                raise AnsibleFilterError(
+                    f"Invalid configuration for application '{app_id}': expected dict, got {cfg!r}"
+                )

-                domains_cfg = cfg.get('server',{}).get('domains',{})
-                if not domains_cfg or 'canonical' not in domains_cfg:
-                    self._add_default_domain(app_id, PRIMARY_DOMAIN, seen_domains, result)
-                    continue
+            domains_cfg = cfg.get('server', {}).get('domains', {})
+            if not domains_cfg or 'canonical' not in domains_cfg:
+                self._add_default_domain(app_id, PRIMARY_DOMAIN, seen_domains, result)
+                continue

-                canonical_domains = domains_cfg['canonical']
-                self._process_canonical_domains(app_id, canonical_domains, seen_domains, result)
+            canonical_domains = domains_cfg['canonical']
+            self._process_canonical_domains(app_id, canonical_domains, seen_domains, result)

         return result

     def _add_default_domain(self, app_id, PRIMARY_DOMAIN, seen_domains, result):
-        """
-        Add the default domain for an application if no canonical domains are defined.
-        Ensures the domain is unique across applications.
-        """
         entity_name = get_entity_name(app_id)
         default_domain = f"{entity_name}.{PRIMARY_DOMAIN}"
         if default_domain in seen_domains:
@@ -54,40 +90,21 @@ class FilterModule(object):
             result[app_id] = [default_domain]

     def _process_canonical_domains(self, app_id, canonical_domains, seen_domains, result):
-        """
-        Process the canonical domains for an application, handling both lists and dicts,
-        and ensuring each domain is unique.
-        """
         if isinstance(canonical_domains, dict):
-            self._process_canonical_domains_dict(app_id, canonical_domains, seen_domains, result)
+            for _, domain in canonical_domains.items():
+                self._validate_and_check_domain(app_id, domain, seen_domains)
+            result[app_id] = canonical_domains.copy()
         elif isinstance(canonical_domains, list):
-            self._process_canonical_domains_list(app_id, canonical_domains, seen_domains, result)
+            for domain in canonical_domains:
+                self._validate_and_check_domain(app_id, domain, seen_domains)
+            result[app_id] = list(canonical_domains)
         else:
             raise AnsibleFilterError(
                 f"Unexpected type for 'server.domains.canonical' in application '{app_id}': "
                 f"{type(canonical_domains).__name__}"
             )

-    def _process_canonical_domains_dict(self, app_id, domains_dict, seen_domains, result):
-        """
-        Process a dictionary of canonical domains for an application.
-        """
-        for name, domain in domains_dict.items():
-            self._validate_and_check_domain(app_id, domain, seen_domains)
-        result[app_id] = domains_dict.copy()
-
-    def _process_canonical_domains_list(self, app_id, domains_list, seen_domains, result):
-        """
-        Process a list of canonical domains for an application.
-        """
-        for domain in domains_list:
-            self._validate_and_check_domain(app_id, domain, seen_domains)
-        result[app_id] = list(domains_list)
-
     def _validate_and_check_domain(self, app_id, domain, seen_domains):
-        """
-        Validate the domain and check if it has already been assigned to another application.
-        """
         if not isinstance(domain, str) or not domain.strip():
             raise AnsibleFilterError(
                 f"Invalid domain entry in 'canonical' for application '{app_id}': {domain!r}"
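A hedged usage sketch, run inside the plugin module's context (the app id and domains below are invented): the non-recursive path simply validates and maps the seed apps.

    fm = FilterModule()
    apps = {
        "web-app-nextcloud": {"server": {"domains": {"canonical": ["cloud.example.com"]}}},
        "util-helper": {},   # skipped: prefix is neither "web-" nor "svc-db-"
    }
    print(fm.canonical_domains_map(apps, "example.com"))
    # → {'web-app-nextcloud': ['cloud.example.com']}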
@@ -1,10 +1,14 @@
 from ansible.errors import AnsibleFilterError
 import hashlib
 import base64
-import sys, os
+import sys
+import os
+
+# Ensure module_utils is importable when this filter runs from Ansible
 sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
 from module_utils.config_utils import get_app_conf
+from module_utils.get_url import get_url


 class FilterModule(object):
     """
@@ -16,10 +20,14 @@ class FilterModule(object):
             'build_csp_header': self.build_csp_header,
         }

+    # -------------------------------
+    # Helpers
+    # -------------------------------
+
     @staticmethod
     def is_feature_enabled(applications: dict, feature: str, application_id: str) -> bool:
         """
-        Return True if applications[application_id].features[feature] is truthy.
+        Returns True if applications[application_id].features[feature] is truthy.
         """
         return get_app_conf(
             applications,
@@ -31,6 +39,10 @@ class FilterModule(object):

     @staticmethod
     def get_csp_whitelist(applications, application_id, directive):
+        """
+        Returns a list of additional whitelist entries for a given directive.
+        Accepts both scalar and list in config; always returns a list.
+        """
         wl = get_app_conf(
             applications,
             application_id,
@@ -47,28 +59,37 @@ class FilterModule(object):
     @staticmethod
     def get_csp_flags(applications, application_id, directive):
         """
-        Dynamically extract all CSP flags for a given directive and return them as tokens,
-        e.g., "'unsafe-eval'", "'unsafe-inline'", etc.
+        Returns CSP flag tokens (e.g., "'unsafe-eval'", "'unsafe-inline'") for a directive,
+        merging sane defaults with app config.
+        Default: 'unsafe-inline' is enabled for style-src and style-src-elem.
         """
-        flags = get_app_conf(
+        # Defaults that apply to all apps
+        default_flags = {}
+        if directive in ('style-src', 'style-src-elem'):
+            default_flags = {'unsafe-inline': True}
+
+        configured = get_app_conf(
             applications,
             application_id,
             'server.csp.flags.' + directive,
             False,
             {}
         )
-        tokens = []

-        for flag_name, enabled in flags.items():
+        # Merge defaults with configured flags (configured overrides defaults)
+        merged = {**default_flags, **configured}
+
+        tokens = []
+        for flag_name, enabled in merged.items():
             if enabled:
                 tokens.append(f"'{flag_name}'")

         return tokens

     @staticmethod
     def get_csp_inline_content(applications, application_id, directive):
         """
-        Return inline script/style snippets to hash for a given CSP directive.
+        Returns inline script/style snippets to hash for a given directive.
+        Accepts both scalar and list in config; always returns a list.
         """
         snippets = get_app_conf(
             applications,
@@ -86,7 +107,7 @@ class FilterModule(object):
     @staticmethod
     def get_csp_hash(content):
         """
-        Compute the SHA256 hash of the given inline content and return
+        Computes the SHA256 hash of the given inline content and returns
         a CSP token like "'sha256-<base64>'".
         """
         try:
@@ -96,6 +117,10 @@ class FilterModule(object):
         except Exception as exc:
             raise AnsibleFilterError(f"get_csp_hash failed: {exc}")

+    # -------------------------------
+    # Main builder
+    # -------------------------------
+
     def build_csp_header(
         self,
         applications,
@@ -105,82 +130,80 @@ class FilterModule(object):
         matomo_feature_name='matomo'
     ):
         """
-        Build the Content-Security-Policy header value dynamically based on application settings.
-        Inline hashes are read from applications[application_id].csp.hashes
+        Builds the Content-Security-Policy header value dynamically based on application settings.
+        - Flags (e.g., 'unsafe-eval', 'unsafe-inline') are read from server.csp.flags.<directive>,
+          with sane defaults applied in get_csp_flags (always 'unsafe-inline' for style-src and style-src-elem).
+        - Inline hashes are read from server.csp.hashes.<directive>.
+        - Whitelists are read from server.csp.whitelist.<directive>.
+        - Inline hashes are added only if the final tokens do NOT include 'unsafe-inline'.
         """
         try:
             directives = [
-                'default-src',
-                'connect-src',
-                'frame-ancestors',
-                'frame-src',
-                'script-src',
-                'script-src-elem',
-                'style-src',
-                'font-src',
-                'worker-src',
-                'manifest-src',
-                'media-src',
+                'default-src',         # Fallback source list for content types not explicitly listed
+                'connect-src',         # Allowed URLs for XHR, WebSockets, EventSource, fetch()
+                'frame-ancestors',     # Who may embed this page
+                'frame-src',           # Sources for nested browsing contexts (e.g., <iframe>)
+                'script-src',          # Sources for script execution
+                'script-src-elem',     # Sources for <script> elements
+                'style-src',           # Sources for inline styles and <style>/<link> elements
+                'style-src-elem',      # Sources for <style> and <link rel="stylesheet">
+                'font-src',            # Sources for fonts
+                'worker-src',          # Sources for workers
+                'manifest-src',        # Sources for web app manifests
+                'media-src',           # Sources for audio and video
             ]

             parts = []

             for directive in directives:
                 tokens = ["'self'"]

-                # unsafe-eval / unsafe-inline flags
+                # 1) Load flags (includes defaults from get_csp_flags)
                 flags = self.get_csp_flags(applications, application_id, directive)
                 tokens += flags

-                # Matomo integration
-                if (
-                    self.is_feature_enabled(applications, matomo_feature_name, application_id)
-                    and directive in ['script-src-elem', 'connect-src']
-                ):
-                    matomo_domain = domains.get('web-app-matomo')[0]
-                    if matomo_domain:
-                        tokens.append(f"{web_protocol}://{matomo_domain}")
+                # 2) Allow fetching from internal CDN by default for selected directives
+                if directive in ['script-src-elem', 'connect-src', 'style-src-elem']:
+                    tokens.append(get_url(domains, 'web-svc-cdn', web_protocol))

-                # ReCaptcha integration: allow loading scripts from Google if feature enabled
+                # 3) Matomo integration if feature is enabled
+                if directive in ['script-src-elem', 'connect-src']:
+                    if self.is_feature_enabled(applications, matomo_feature_name, application_id):
+                        tokens.append(get_url(domains, 'web-app-matomo', web_protocol))
+
+                # 4) ReCaptcha integration (scripts + frames) if feature is enabled
                 if self.is_feature_enabled(applications, 'recaptcha', application_id):
-                    if directive in ['script-src-elem',"frame-src"]:
+                    if directive in ['script-src-elem', 'frame-src']:
                         tokens.append('https://www.gstatic.com')
                         tokens.append('https://www.google.com')

-                # Allow the loading of js from the cdn
-                if directive == 'script-src-elem':
-                    if self.is_feature_enabled(applications, 'logout', application_id) or self.is_feature_enabled(applications, 'desktop', application_id):
-                        domain = domains.get('web-svc-cdn')[0]
-                        tokens.append(f"{domain}")
-
+                # 5) Frame ancestors handling (desktop + logout support)
                 if directive == 'frame-ancestors':
-                    # Enable loading via ancestors
                     if self.is_feature_enabled(applications, 'desktop', application_id):
-                        domain = domains.get('web-app-port-ui')[0]
-                        sld_tld = ".".join(domain.split(".")[-2:]) # yields "example.com"
-                        tokens.append(f"{sld_tld}") # yields "*.example.com"
+                        # Allow being embedded by the desktop app domain (and potentially its parent)
+                        domain = domains.get('web-app-desktop')[0]
+                        sld_tld = ".".join(domain.split(".")[-2:])  # e.g., example.com
+                        tokens.append(f"{sld_tld}")
                     if self.is_feature_enabled(applications, 'logout', application_id):
+                        # Allow embedding via logout proxy and Keycloak app
+                        tokens.append(get_url(domains, 'web-svc-logout', web_protocol))
+                        tokens.append(get_url(domains, 'web-app-keycloak', web_protocol))

-                        # Allow logout via infinito logout proxy
-                        domain = domains.get('web-svc-logout')[0]
-                        tokens.append(f"{domain}")
-
-                        # Allow logout via keycloak app
-                        domain = domains.get('web-app-keycloak')[0]
-                        tokens.append(f"{domain}")
-
-                # whitelist
+                # 6) Custom whitelist entries
                 tokens += self.get_csp_whitelist(applications, application_id, directive)

-                # only add hashes if 'unsafe-inline' is NOT in flags
-                if "'unsafe-inline'" not in flags:
+                # 7) Add inline content hashes ONLY if final tokens do NOT include 'unsafe-inline'
+                # (Check tokens, not flags, to include defaults and later modifications.)
+                if "'unsafe-inline'" not in tokens:
                     for snippet in self.get_csp_inline_content(applications, application_id, directive):
                         tokens.append(self.get_csp_hash(snippet))

+                # Append directive
                 parts.append(f"{directive} {' '.join(tokens)};")

-            # static img-src
+            # 8) Static img-src directive (kept permissive for data/blob and any host)
             parts.append("img-src * data: blob:;")

             return ' '.join(parts)

         except Exception as exc:
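For reference, the token format produced by get_csp_hash can be reproduced standalone (the content string is arbitrary):

    import hashlib
    import base64

    def csp_hash(content: str) -> str:
        digest = hashlib.sha256(content.encode("utf-8")).digest()
        return f"'sha256-{base64.b64encode(digest).decode()}'"

    print(csp_hash("console.log('hello')"))  # token usable inside a script-src directive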
@@ -1,49 +0,0 @@ (deleted file)
import os
import re
import yaml
from ansible.errors import AnsibleFilterError


def get_application_id(role_name):
    """
    Jinja2/Ansible filter: given a role name, load its vars/main.yml
    and return the application_id value.
    """
    # Construct path: assumes current working directory is project root
    vars_file = os.path.join(os.getcwd(), 'roles', role_name, 'vars', 'main.yml')

    if not os.path.isfile(vars_file):
        raise AnsibleFilterError(f"Vars file not found for role '{role_name}': {vars_file}")

    try:
        # Read entire file content to avoid lazy stream issues
        with open(vars_file, 'r', encoding='utf-8') as f:
            content = f.read()
        data = yaml.safe_load(content)
    except Exception as e:
        raise AnsibleFilterError(f"Error reading YAML from {vars_file}: {e}")

    # Ensure parsed data is a mapping
    if not isinstance(data, dict):
        raise AnsibleFilterError(
            f"Error reading YAML from {vars_file}: expected mapping, got {type(data).__name__}"
        )

    # Detect malformed YAML: no valid identifier-like keys
    valid_key_pattern = re.compile(r'^[A-Za-z_][A-Za-z0-9_]*$')
    if data and not any(valid_key_pattern.match(k) for k in data.keys()):
        raise AnsibleFilterError(f"Error reading YAML from {vars_file}: invalid top-level keys")

    if 'application_id' not in data:
        raise AnsibleFilterError(f"Key 'application_id' not found in {vars_file}")

    return data['application_id']


class FilterModule(object):
    """
    Ansible filter plugin entry point.
    """
    def filters(self):
        return {
            'get_application_id': get_application_id,
        }
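For reference, the removed filter was typically invoked from Jinja like this (role name hypothetical):

# {{ 'web-app-keycloak' | get_application_id }}
# -> reads roles/web-app-keycloak/vars/main.yml and returns its application_id value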
filter_plugins/get_category_entries.py (new file, 31 lines)
@@ -0,0 +1,31 @@
# Custom Ansible filter to get all role names under "roles/" with a given prefix.

import os

def get_category_entries(prefix, roles_path="roles"):
    """
    Returns a list of role names under the given roles_path
    that start with the specified prefix.

    :param prefix: String prefix to match role names.
    :param roles_path: Path to the roles directory (default: 'roles').
    :return: List of matching role names.
    """
    if not os.path.isdir(roles_path):
        return []

    roles = []
    for entry in os.listdir(roles_path):
        full_path = os.path.join(roles_path, entry)
        if os.path.isdir(full_path) and entry.startswith(prefix):
            roles.append(entry)

    return sorted(roles)

class FilterModule(object):
    """ Custom filters for Ansible """

    def filters(self):
        return {
            "get_category_entries": get_category_entries
        }
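A usage sketch, assuming two matching role directories exist under roles/ (the directory names are hypothetical):

# With roles/sys-ctl-cln-backups/ and roles/sys-ctl-cln-disc-space/ on disk:
print(get_category_entries('sys-ctl-cln-'))
# ['sys-ctl-cln-backups', 'sys-ctl-cln-disc-space']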
filter_plugins/get_service_name.py (new file, 37 lines)
@@ -0,0 +1,37 @@
"""
Custom Ansible filter to build a systemctl unit name (always lowercase).

Rules:
- If `systemctl_id` ends with '@': drop the '@' and return
  "{systemctl_id_without_at}.{software_name}@{suffix_handling}".
- Else: return "{systemctl_id}.{software_name}{suffix_handling}".

Suffix handling:
- Default "" → automatically pick:
    - ".service" if no '@' in systemctl_id
    - ".timer"   if '@' in systemctl_id
- Explicit False → no suffix at all
- Any string → ".{suffix}" (lowercased)
"""

def get_service_name(systemctl_id, software_name, suffix=""):
    sid = str(systemctl_id).strip().lower()
    software_name = str(software_name).strip().lower()

    # Determine suffix
    if suffix is False:
        sfx = ""  # no suffix at all
    elif suffix == "" or suffix is None:
        sfx = ".service"
    else:
        sfx = str(suffix).strip().lower()

    if sid.endswith("@"):
        base = sid[:-1]  # drop the trailing '@'
        return f"{base}.{software_name}@{sfx}"
    else:
        return f"{sid}.{software_name}{sfx}"

class FilterModule(object):
    def filters(self):
        return {"get_service_name": get_service_name}
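Worked examples of the implemented behavior. Note that the code defaults the suffix to ".service" even for '@' ids, whereas the docstring describes a ".timer" default for that case:

print(get_service_name('sys-ctl-bkp-docker-2-loc', 'Infinito.Nexus'))
# 'sys-ctl-bkp-docker-2-loc.infinito.nexus.service'
print(get_service_name('sys-ctl-alm-compose@', 'Infinito.Nexus', False))
# 'sys-ctl-alm-compose.infinito.nexus@'  (suffix=False; '%n.service' is appended by the caller in group_vars)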
filter_plugins/get_service_script_path.py (new file, 24 lines)
@@ -0,0 +1,24 @@
# filter_plugins/get_service_script_path.py
# Custom Ansible filter to generate service script paths.

def get_service_script_path(systemctl_id, script_type):
    """
    Build the path to a service script based on systemctl_id and type.

    :param systemctl_id: The identifier of the system service.
    :param script_type: The script type/extension (e.g., sh, py, yml).
    :return: The full path string.
    """
    if not systemctl_id or not script_type:
        raise ValueError("Both systemctl_id and script_type are required")

    return f"/opt/scripts/systemctl/{systemctl_id}/script.{script_type}"


class FilterModule(object):
    """ Custom filters for Ansible """

    def filters(self):
        return {
            "get_service_script_path": get_service_script_path
        }
@@ -1,122 +0,0 @@ (deleted file)
import os
import yaml
import re
from ansible.errors import AnsibleFilterError

# in-memory cache: application_id → (parsed_yaml, is_nested)
_cfg_cache = {}

def load_configuration(application_id, key):
    if not isinstance(key, str):
        raise AnsibleFilterError("Key must be a dotted-string, e.g. 'features.matomo'")

    # locate roles/
    here = os.path.dirname(__file__)
    root = os.path.abspath(os.path.join(here, '..'))
    roles_dir = os.path.join(root, 'roles')
    if not os.path.isdir(roles_dir):
        raise AnsibleFilterError(f"Roles directory not found at {roles_dir}")

    # first time? load & cache
    if application_id not in _cfg_cache:
        config_path = None

        # 1) primary: vars/main.yml declares it
        for role in os.listdir(roles_dir):
            mv = os.path.join(roles_dir, role, 'vars', 'main.yml')
            if os.path.exists(mv):
                try:
                    md = yaml.safe_load(open(mv)) or {}
                except Exception:
                    md = {}
                if md.get('application_id') == application_id:
                    cf = os.path.join(roles_dir, role, "config", "main.yml")
                    if not os.path.exists(cf):
                        raise AnsibleFilterError(
                            f"Role '{role}' declares '{application_id}' but missing config/main.yml"
                        )
                    config_path = cf
                    break

        # 2) fallback nested
        if config_path is None:
            for role in os.listdir(roles_dir):
                cf = os.path.join(roles_dir, role, "config", "main.yml")
                if not os.path.exists(cf):
                    continue
                try:
                    dd = yaml.safe_load(open(cf)) or {}
                except Exception:
                    dd = {}
                if isinstance(dd, dict) and application_id in dd:
                    config_path = cf
                    break

        # 3) fallback flat
        if config_path is None:
            for role in os.listdir(roles_dir):
                cf = os.path.join(roles_dir, role, "config", "main.yml")
                if not os.path.exists(cf):
                    continue
                try:
                    dd = yaml.safe_load(open(cf)) or {}
                except Exception:
                    dd = {}
                # flat style: dict with all non-dict values
                if isinstance(dd, dict) and not any(isinstance(v, dict) for v in dd.values()):
                    config_path = cf
                    break

        if config_path is None:
            return None

        # parse once
        try:
            parsed = yaml.safe_load(open(config_path)) or {}
        except Exception as e:
            raise AnsibleFilterError(f"Error loading config/main.yml at {config_path}: {e}")

        # detect nested vs flat
        is_nested = isinstance(parsed, dict) and (application_id in parsed)
        _cfg_cache[application_id] = (parsed, is_nested)

    parsed, is_nested = _cfg_cache[application_id]

    # pick base entry
    entry = parsed[application_id] if is_nested else parsed

    # resolve dotted key
    key_parts = key.split('.')
    for part in key_parts:
        # Check if part has an index (e.g., domains.canonical[0])
        match = re.match(r'([^\[]+)\[([0-9]+)\]', part)
        if match:
            part, index = match.groups()
            index = int(index)
            if isinstance(entry, dict) and part in entry:
                entry = entry[part]
                # Check if entry is a list and access the index
                if isinstance(entry, list) and 0 <= index < len(entry):
                    entry = entry[index]
                else:
                    raise AnsibleFilterError(
                        f"Index '{index}' out of range for key '{part}' in application '{application_id}'"
                    )
            else:
                raise AnsibleFilterError(
                    f"Key '{part}' not found under application '{application_id}'"
                )
        else:
            if isinstance(entry, dict) and part in entry:
                entry = entry[part]
            else:
                raise AnsibleFilterError(
                    f"Key '{part}' not found under application '{application_id}'"
                )

    return entry


class FilterModule(object):
    def filters(self):
        return {'load_configuration': load_configuration}
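A sketch of the dotted-key resolution the removed filter performed; the application id and key are hypothetical:

# load_configuration('web-app-nextcloud', 'domains.canonical[0]')
# 1. find the role whose vars/main.yml declares application_id: web-app-nextcloud
# 2. parse and cache its config/main.yml
# 3. walk entry['domains']['canonical'] and return element [0]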
@@ -1,55 +0,0 @@ (deleted file)
from jinja2 import Undefined


def safe_placeholders(template: str, mapping: dict = None) -> str:
    """
    Format a template like "{url}/logo.png".
    If mapping is provided (not None) and ANY placeholder is missing or maps
    to None/empty string, the function will raise KeyError.
    If mapping is None, missing placeholders or invalid templates return an empty string.
    Numerical zero or False are considered valid values.
    Any other formatting errors return an empty string.
    """
    # Non-string templates yield empty
    if not isinstance(template, str):
        return ''

    class SafeDict(dict):
        def __getitem__(self, key):
            val = super().get(key, None)
            # Treat None or empty string as missing
            if val is None or (isinstance(val, str) and val == ''):
                raise KeyError(key)
            return val
        def __missing__(self, key):
            raise KeyError(key)

    silent = mapping is None
    data = mapping or {}
    try:
        return template.format_map(SafeDict(data))
    except KeyError:
        if silent:
            return ''
        raise
    except Exception:
        return ''

def safe_var(value):
    """
    Ansible filter: returns the value unchanged unless it's Undefined or None,
    in which case returns an empty string.
    Catches all exceptions and yields ''.
    """
    try:
        if isinstance(value, Undefined) or value is None:
            return ''
        return value
    except Exception:
        return ''

class FilterModule(object):
    def filters(self):
        return {
            'safe_var': safe_var,
            'safe_placeholders': safe_placeholders,
        }
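Behavior sketch of the two removed filters, with a hypothetical URL value:

safe_placeholders("{url}/logo.png", {"url": "https://example.com"})
# -> 'https://example.com/logo.png'
safe_placeholders("{url}/logo.png")        # mapping is None: silent mode
# -> ''
safe_placeholders("{url}/logo.png", {})    # mapping given but key missing
# -> raises KeyError('url')
safe_var(None)
# -> ''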
@@ -1,28 +0,0 @@ (deleted file)
"""
Ansible filter plugin that joins a base string and a tail path safely.
If the base is falsy (None, empty, etc.), returns an empty string.
"""

def safe_join(base, tail):
    """
    Safely join base and tail into a path or URL.

    - base: the base string. If falsy, returns ''.
    - tail: the string to append. Leading/trailing slashes are handled.
    - On any exception, returns ''.
    """
    try:
        if not base:
            return ''
        base_str = str(base).rstrip('/')
        tail_str = str(tail).lstrip('/')
        return f"{base_str}/{tail_str}"
    except Exception:
        return ''


class FilterModule(object):
    def filters(self):
        return {
            'safe_join': safe_join,
        }
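Behavior sketch (example values hypothetical):

safe_join("https://example.com/", "/assets/logo.png")
# -> 'https://example.com/assets/logo.png'
safe_join(None, "logo.png")
# -> ''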
filter_plugins/timeout_start_sec_for_domains.py (new file, 67 lines)
@@ -0,0 +1,67 @@
# filter_plugins/timeout_start_sec_for_domains.py (only the core changed)
from ansible.errors import AnsibleFilterError

class FilterModule(object):
    def filters(self):
        return {
            "timeout_start_sec_for_domains": self.timeout_start_sec_for_domains,
        }

    def timeout_start_sec_for_domains(
        self,
        domains_dict,
        include_www=True,
        per_domain_seconds=25,
        overhead_seconds=30,
        min_seconds=120,
        max_seconds=3600,
    ):
        """
        Args:
            domains_dict (dict | list[str] | str): Either the domain mapping dict
                (values can be str | list[str] | dict[str,str]) or an already
                flattened list of domains, or a single domain string.
            include_www (bool): If true, add 'www.<domain>' for non-www entries.
            ...
        """
        try:
            # Local flattener for dict inputs (like your generate_all_domains source)
            def _flatten_from_dict(domains_map):
                flat = []
                for v in (domains_map or {}).values():
                    if isinstance(v, str):
                        flat.append(v)
                    elif isinstance(v, list):
                        flat.extend(v)
                    elif isinstance(v, dict):
                        flat.extend(v.values())
                return flat

            # Accept dict | list | str
            if isinstance(domains_dict, dict):
                flat = _flatten_from_dict(domains_dict)
            elif isinstance(domains_dict, list):
                flat = list(domains_dict)
            elif isinstance(domains_dict, str):
                flat = [domains_dict]
            else:
                raise AnsibleFilterError(
                    "Expected 'domains_dict' to be dict | list | str."
                )

            if include_www:
                base_unique = sorted(set(flat))
                www_variants = [f"www.{d}" for d in base_unique if not str(d).lower().startswith("www.")]
                flat.extend(www_variants)

            unique_domains = sorted(set(flat))
            count = len(unique_domains)

            raw = overhead_seconds + per_domain_seconds * count
            clamped = max(min_seconds, min(max_seconds, int(raw)))
            return clamped

        except AnsibleFilterError:
            raise
        except Exception as exc:
            raise AnsibleFilterError(f"timeout_start_sec_for_domains failed: {exc}")
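A quick check of the clamping arithmetic with the default parameters (domain names hypothetical):

f = FilterModule().filters()["timeout_start_sec_for_domains"]
# 10 base domains + 10 www variants = 20 unique -> raw = 30 + 25 * 20 = 530, within [120, 3600]
print(f([f"site{i}.example.com" for i in range(10)]))   # 530
# empty input: raw = 30, clamped up to min_seconds
print(f([]))                                            # 120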
filter_plugins/url_join.py (new file, 146 lines)
@@ -0,0 +1,146 @@
"""
Ansible filter plugin that safely joins URL components from a list.
- Requires a valid '<scheme>://' in the first element (any RFC-3986-ish scheme)
- Preserves the double slash after the scheme, collapses other duplicate slashes
- Supports query parts introduced by elements starting with '?' or '&'
  * first query element uses '?', subsequent use '&' (regardless of given prefix)
  * each query element must be exactly one 'key=value' pair
  * query elements may only appear after path elements; once query starts, no more path parts
- Raises specific AnsibleFilterError messages for common misuse
"""

import re
from ansible.errors import AnsibleFilterError

_SCHEME_RE = re.compile(r'^([a-zA-Z][a-zA-Z0-9+.\-]*://)(.*)$')
_QUERY_PAIR_RE = re.compile(r'^[^&=?#]+=[^&?#]*$')  # key=value (no '&', no extra '?' or '#')

def _to_str_or_error(obj, index):
    """Cast to str, raising a specific AnsibleFilterError with index context."""
    try:
        return str(obj)
    except Exception as e:
        raise AnsibleFilterError(
            f"url_join: unable to convert part at index {index} to string: {e}"
        )

def url_join(parts):
    """
    Join a list of URL parts, URL-aware (scheme, path, query).

    Args:
        parts (list|tuple): URL segments. First element MUST include '<scheme>://'.
            Path elements are plain strings.
            Query elements must start with '?' or '&' and contain exactly one 'key=value'.

    Returns:
        str: Joined URL.

    Raises:
        AnsibleFilterError: with specific, descriptive messages.
    """
    # --- basic input validation ---
    if parts is None:
        raise AnsibleFilterError("url_join: parts must be a non-empty list; got None")
    if not isinstance(parts, (list, tuple)):
        raise AnsibleFilterError(
            f"url_join: parts must be a list/tuple; got {type(parts).__name__}"
        )
    if len(parts) == 0:
        raise AnsibleFilterError("url_join: parts must be a non-empty list")

    # --- first element must carry a scheme ---
    first_raw = parts[0]
    if first_raw is None:
        raise AnsibleFilterError(
            "url_join: first element must include a scheme like 'https://'; got None"
        )

    first_str = _to_str_or_error(first_raw, 0)
    m = _SCHEME_RE.match(first_str)
    if not m:
        raise AnsibleFilterError(
            "url_join: first element must start with '<scheme>://', e.g. 'https://example.com'; "
            f"got '{first_str}'"
        )

    scheme = m.group(1)                    # e.g., 'https://', 'ftp://', 'myapp+v1://'
    after_scheme = m.group(2).lstrip('/')  # strip only leading slashes right after scheme

    # --- iterate parts: collect path parts until first query part; then only query parts allowed ---
    path_parts = []
    query_pairs = []
    in_query = False

    for i, p in enumerate(parts):
        if p is None:
            # skip None silently (consistent with path_join-ish behavior)
            continue

        s = _to_str_or_error(p, i)

        # disallow additional scheme in later parts
        if i > 0 and "://" in s:
            raise AnsibleFilterError(
                f"url_join: only the first element may contain a scheme; part at index {i} "
                f"looks like a URL with scheme ('{s}')."
            )

        # first element: replace with remainder after scheme and continue
        if i == 0:
            s = after_scheme

        # check if this is a query element (starts with ? or &)
        if s.startswith('?') or s.startswith('&'):
            in_query = True
            raw_pair = s[1:]  # strip the leading ? or &
            if raw_pair == '':
                raise AnsibleFilterError(
                    f"url_join: query element at index {i} is empty; expected '?key=value' or '&key=value'"
                )
            # Disallow multiple pairs in a single element; enforce exactly one key=value
            if '&' in raw_pair:
                raise AnsibleFilterError(
                    f"url_join: query element at index {i} must contain exactly one 'key=value' pair "
                    f"without '&'; got '{s}'"
                )
            if not _QUERY_PAIR_RE.match(raw_pair):
                raise AnsibleFilterError(
                    f"url_join: query element at index {i} must match 'key=value' (no extra '?', '&', '#'); got '{s}'"
                )
            query_pairs.append(raw_pair)
        else:
            # non-query element
            if in_query:
                # once query started, no more path parts allowed
                raise AnsibleFilterError(
                    f"url_join: path element found at index {i} after query parameters started; "
                    f"query parts must come last"
                )
            # normal path part: strip slashes to avoid duplicate '/'
            path_parts.append(s.strip('/'))

    # normalize path: remove empty chunks
    path_parts = [p for p in path_parts if p != '']

    # --- build result ---
    # path portion
    if path_parts:
        joined_path = "/".join(path_parts)
        base = scheme + joined_path
    else:
        # no path beyond scheme
        base = scheme

    # query portion
    if query_pairs:
        base = base + "?" + "&".join(query_pairs)

    return base


class FilterModule(object):
    def filters(self):
        return {
            'url_join': url_join,
        }
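Behavior sketch (example URLs hypothetical):

url_join(['https://example.com', 'api', 'v1', '?page=1', '&limit=10'])
# -> 'https://example.com/api/v1?page=1&limit=10'
url_join(['example.com', 'api'])
# -> AnsibleFilterError: first element must start with '<scheme>://'
url_join(['https://example.com', '?page'])
# -> AnsibleFilterError: query element must match 'key=value'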
filter_plugins/volume_path.py (new file, 21 lines)
@@ -0,0 +1,21 @@
from ansible.errors import AnsibleFilterError

def docker_volume_path(volume_name: str) -> str:
    """
    Returns the absolute filesystem path of a Docker volume.

    Example:
        "akaunting_data" -> "/var/lib/docker/volumes/akaunting_data/_data/"
    """
    if not volume_name or not isinstance(volume_name, str):
        raise AnsibleFilterError(f"Invalid volume name: {volume_name}")

    return f"/var/lib/docker/volumes/{volume_name}/_data/"

class FilterModule(object):
    """Docker volume path filters."""

    def filters(self):
        return {
            "docker_volume_path": docker_volume_path,
        }
@@ -1,6 +1,8 @@
 SOFTWARE_NAME: "Infinito.Nexus"  # Name of the software
 
+# Deployment
 ENVIRONMENT: "production"        # Possible values: production, development
+DEPLOYMENT_MODE: "single"        # Use single if you deploy on one server. Use cluster if you set up in cluster mode.
 
 # If true, sensitive credentials will be masked or hidden from all Ansible task logs
 # Recommended to set to true
@@ -20,27 +22,16 @@ HOST_TIME_FORMAT: "HH:mm"
 HOST_THOUSAND_SEPARATOR: "."
 HOST_DECIMAL_MARK: ","
 
-# Deployment mode
-DEPLOYMENT_MODE: "single"    # Use single if you deploy on one server. Use cluster if you set up in cluster mode.
-
 # Web
 WEB_PROTOCOL: "https"        # Web protocol type. Use https or http. If you run locally you need to change it to http
 WEB_PORT: "{{ 443 if WEB_PROTOCOL == 'https' else 80 }}"  # Default port web applications will listen to
 
+# Websocket
+WEBSOCKET_PROTOCOL: "{{ 'wss' if WEB_PROTOCOL == 'https' else 'ws' }}"
+
 # Domain
 PRIMARY_DOMAIN: "localhost"  # Primary Domain of the server
 
-# Server Tact Variables
-
-## Hours in which the server is "awake" (100% working). The rest of the time is reserved for maintenance.
-HOURS_SERVER_AWAKE: "0..23"
-
-## Random delay for systemd timers to avoid peak loads.
-RANDOMIZED_DELAY_SEC: "5min"
-
-# Runtime Variables for Process Control
-ACTIVATE_ALL_TIMERS: false   # Activates all timers, independent of whether the handlers had been triggered
-
 DNS_PROVIDER: cloudflare     # The DNS Provider/Registrar for the domain
 HOSTING_PROVIDER: hetzner    # Provider which hosts the server
@@ -52,18 +43,15 @@ CERTBOT_CREDENTIALS_FILE: "{{ CERTBOT_CREDENTIALS_DIR }}/{{ CERT
 CERTBOT_DNS_PROPAGATION_WAIT_SECONDS: 300  # How long should the script wait for DNS propagation before continuing
 CERTBOT_FLAVOR: san                        # Possible options: san (recommended, with a dns flavor like cloudflare or hetzner), wildcard (doesn't function with www redirect), dedicated
 
-# Path where Certbot stores challenge webroot files
-LETSENCRYPT_WEBROOT_PATH: "/var/lib/letsencrypt/"
-
-# Base directory containing Certbot configuration, account data, and archives
-LETSENCRYPT_BASE_PATH: "/etc/letsencrypt/"
-
-# Symlink directory for the current active certificate and private key
-LETSENCRYPT_LIVE_PATH: "{{ LETSENCRYPT_BASE_PATH }}live/"
-
-## Docker Role Specific Parameters
-DOCKER_RESTART_POLICY: "unless-stopped"
-DOCKER_VARS_FILE: "{{ playbook_dir }}/roles/docker-compose/vars/docker-compose.yml"
+# Letsencrypt
+LETSENCRYPT_WEBROOT_PATH: "/var/lib/letsencrypt/"          # Path where Certbot stores challenge webroot files
+LETSENCRYPT_BASE_PATH: "/etc/letsencrypt/"                 # Base directory containing Certbot configuration, account data, and archives
+LETSENCRYPT_LIVE_PATH: "{{ LETSENCRYPT_BASE_PATH }}live/"  # Symlink directory for the current active certificate and private key
+
+## Docker
+DOCKER_RESTART_POLICY: "unless-stopped"                    # Default restart parameter for docker containers
+DOCKER_VARS_FILE: "{{ playbook_dir }}/roles/docker-compose/vars/docker-compose.yml"  # File containing docker compose variables used by other services
+DOCKER_WHITELISTET_ANON_VOLUMES: []                        # Volumes which should be ignored during docker anonymous health check
 
 # Async Configuration
 ASYNC_ENABLED: "{{ not MODE_DEBUG | bool }}"               # Activate async, deactivated for debugging
@@ -88,6 +76,9 @@ _applications_nextcloud_oidc_flavor: >-
   )
   }}
 
-# Systemctl
-SYS_TIMER_SUFFIX: ".{{ SOFTWARE_NAME | lower }}.timer"
-SYS_SERVICE_SUFFIX: ".{{ SOFTWARE_NAME | lower }}.service"
+# Role-based access control
+# @See https://en.wikipedia.org/wiki/Role-based_access_control
+RBAC:
+  GROUP:
+    NAME: "/roles"   # Name of the group which holds the RBAC roles
+    CLAIM: "groups"  # Name of the claim containing the RBAC groups
@@ -1,9 +1,10 @@
 # Mode
 
 # The following modes can be combined with each other
 MODE_TEST: false                  # Executes test routines instead of productive routines
 MODE_UPDATE: true                 # Executes updates
-MODE_BACKUP: true                 # Activates the backup before the update procedure
-MODE_CLEANUP: true                # Cleanup unused files and configurations
 MODE_DEBUG: false                 # This enables debugging in ansible and in the apps. You SHOULD NOT enable this on production servers
 MODE_RESET: false                 # Cleans up all Infinito.Nexus files. It's necessary to run the whole playbook and not partial roles when using this function.
+MODE_BACKUP: "{{ MODE_UPDATE }}"  # Activates the backup before the update procedure
+MODE_CLEANUP: "{{ MODE_DEBUG }}"  # Cleanup unused files and configurations
+MODE_ASSERT: "{{ MODE_DEBUG }}"   # Executes validation tasks during the run.
group_vars/all/02_email.yml (new file, 8 lines)
@@ -0,0 +1,8 @@
# Email Configuration
DEFAULT_SYSTEM_EMAIL:
  DOMAIN: "{{ PRIMARY_DOMAIN }}"
  HOST: "mail.{{ PRIMARY_DOMAIN }}"
  PORT: 465
  TLS: true          # true for TLS and false for SSL
  START_TLS: false
  SMTP: true
@@ -1,9 +0,0 @@ (deleted file)
# Email Configuration
default_system_email:
  domain: "{{ PRIMARY_DOMAIN }}"
  host: "mail.{{ PRIMARY_DOMAIN }}"
  port: 465
  tls: true          # true for TLS and false for SSL
  start_tls: false
  smtp: true
  # password:        # Needs to be defined in inventory file
@@ -1,38 +0,0 @@ (deleted file)
# System maintenance Services

## Timeouts to wait for other services to stop
system_maintenance_lock_timeout_cleanup_services: "15min"
system_maintenance_lock_timeout_storage_optimizer: "10min"
system_maintenance_lock_timeout_backup_services: "1h"
system_maintenance_lock_timeout_heal_docker: "30min"
system_maintenance_lock_timeout_update_docker: "2min"
system_maintenance_lock_timeout_restart_docker: "{{system_maintenance_lock_timeout_update_docker}}"

## Services

### Defined Services for Backup Tasks
system_maintenance_backup_services:
  - "sys-bkp-docker-2-loc"
  - "svc-bkp-rmt-2-loc"
  - "svc-bkp-loc-2-usb"
  - "sys-bkp-docker-2-loc-everything"

### Defined Services for System Cleanup
system_maintenance_cleanup_services:
  - "sys-cln-backups"
  - "sys-cln-disc-space"
  - "sys-cln-faild-bkps"

### Services that Manipulate the System
system_maintenance_manipulation_services:
  - "sys-rpr-docker-soft"
  - "update-docker"
  - "svc-opt-ssd-hdd"
  - "sys-rpr-docker-hard"

## Total System Maintenance Services
system_maintenance_services: "{{ system_maintenance_backup_services + system_maintenance_cleanup_services + system_maintenance_manipulation_services }}"

### Define Variables for Docker Volume Health services
whitelisted_anonymous_docker_volumes: []
group_vars/all/06_paths.yml (new file, 9 lines)
@@ -0,0 +1,9 @@

# Path Variables for Key Directories and Scripts
PATH_ADMINISTRATOR_HOME: "/home/administrator/"
PATH_ADMINISTRATOR_SCRIPTS: "/opt/scripts/"
PATH_SYSTEMCTL_SCRIPTS: "{{ [ PATH_ADMINISTRATOR_SCRIPTS, 'systemctl' ] | path_join }}"
PATH_DOCKER_COMPOSE_INSTANCES: "/opt/docker/"
PATH_SYSTEM_LOCK_SCRIPT: "/opt/scripts/sys-lock.py"
PATH_SYSTEM_SERVICE_DIR: "/etc/systemd/system"
PATH_DOCKER_COMPOSE_PULL_LOCK_DIR: "/run/ansible/compose-pull/"
@@ -1,6 +0,0 @@ (deleted file)

# Path Variables for Key Directories and Scripts
PATH_ADMINISTRATOR_HOME: "/home/administrator/"
PATH_ADMINISTRATOR_SCRIPTS: "/opt/scripts/"
PATH_DOCKER_COMPOSE_INSTANCES: "/opt/docker/"
PATH_SYSTEM_LOCK_SCRIPT: "/opt/scripts/sys-lock.py"
group_vars/all/07_services.yml (new file, 51 lines)
@@ -0,0 +1,51 @@

# Services

## Meta
SYS_SERVICE_SUFFIX: ".{{ SOFTWARE_NAME | lower }}.service"

## Names
SYS_SERVICE_CLEANUP_BACKUPS_FAILED:    "{{ 'sys-ctl-cln-faild-bkps' | get_service_name(SOFTWARE_NAME) }}"
SYS_SERVICE_CLEANUP_ANONYMOUS_VOLUMES: "{{ 'sys-ctl-cln-anon-volumes' | get_service_name(SOFTWARE_NAME) }}"
SYS_SERVICE_OPTIMIZE_DRIVE:            "{{ 'svc-opt-ssd-hdd' | get_service_name(SOFTWARE_NAME) }}"
SYS_SERVICE_BACKUP_RMT_2_LOC:          "{{ 'svc-bkp-rmt-2-loc' | get_service_name(SOFTWARE_NAME) }}"
SYS_SERVICE_BACKUP_DOCKER_2_LOC:       "{{ 'sys-ctl-bkp-docker-2-loc' | get_service_name(SOFTWARE_NAME) }}"
SYS_SERVICE_REPAIR_DOCKER_SOFT:        "{{ 'sys-ctl-rpr-docker-soft' | get_service_name(SOFTWARE_NAME) }}"
SYS_SERVICE_REPAIR_DOCKER_HARD:        "{{ 'sys-ctl-rpr-docker-hard' | get_service_name(SOFTWARE_NAME) }}"

## On Failure
SYS_SERVICE_ON_FAILURE_COMPOSE: "{{ ('sys-ctl-alm-compose@') | get_service_name(SOFTWARE_NAME, False) }}%n.service"

## Groups
SYS_SERVICE_GROUP_BACKUPS: >
  {{ (('sys-ctl-bkp-' | get_category_entries) + ('svc-bkp-' | get_category_entries))
     | map('regex_replace', '$', SYS_SERVICE_SUFFIX) | list | sort }}

SYS_SERVICE_GROUP_CLEANUP: >
  {{ ('sys-ctl-cln-' | get_category_entries)
     | map('regex_replace', '$', SYS_SERVICE_SUFFIX) | list | sort }}

SYS_SERVICE_GROUP_REPAIR: >
  {{ ('sys-ctl-rpr-' | get_category_entries)
     | map('regex_replace', '$', SYS_SERVICE_SUFFIX) | list | sort }}

SYS_SERVICE_GROUP_OPTIMIZATION: >
  {{ ('svc-opt-' | get_category_entries)
     | map('regex_replace', '$', SYS_SERVICE_SUFFIX) | list | sort }}

SYS_SERVICE_GROUP_MAINTANANCE: >
  {{ ('svc-mtn-' | get_category_entries)
     | map('regex_replace', '$', SYS_SERVICE_SUFFIX) | list | sort }}

## Collection of services to manipulate the system
SYS_SERVICE_GROUP_MANIPULATION: >
  {{
    (
      SYS_SERVICE_GROUP_BACKUPS +
      SYS_SERVICE_GROUP_CLEANUP +
      SYS_SERVICE_GROUP_REPAIR +
      SYS_SERVICE_GROUP_OPTIMIZATION +
      SYS_SERVICE_GROUP_MAINTANANCE
    ) | sort
  }}
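A sketch of what one of these group expressions evaluates to, assuming two backup roles exist on disk and SOFTWARE_NAME is "Infinito.Nexus" (role names hypothetical):

entries = ['svc-bkp-rmt-2-loc', 'sys-ctl-bkp-docker-2-loc']  # get_category_entries results (assumed)
suffix = '.infinito.nexus.service'                           # SYS_SERVICE_SUFFIX rendered
print(sorted(e + suffix for e in entries))
# ['svc-bkp-rmt-2-loc.infinito.nexus.service', 'sys-ctl-bkp-docker-2-loc.infinito.nexus.service']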
@@ -1,29 +0,0 @@ (deleted file)

## Schedule for Health Checks
on_calendar_health_btrfs: "*-*-* 00:00:00"                    # Check the btrfs for errors once per day
on_calendar_health_journalctl: "*-*-* 00:00:00"               # Check the journalctl for errors once per day
on_calendar_health_disc_space: "*-*-* 06,12,18,00:00:00"      # Check four times per day if there is sufficient disc space
on_calendar_health_docker_container: "*-*-* {{ HOURS_SERVER_AWAKE }}:00:00"  # Check once per hour if the docker containers are healthy
on_calendar_health_docker_volumes: "*-*-* {{ HOURS_SERVER_AWAKE }}:15:00"    # Check once per hour if the docker volumes are healthy
on_calendar_health_csp_crawler: "*-*-* {{ HOURS_SERVER_AWAKE }}:30:00"       # Check once per hour if all CSPs are fulfilled
on_calendar_health_nginx: "*-*-* {{ HOURS_SERVER_AWAKE }}:45:00"             # Check once per hour if all webservices are available
on_calendar_health_msmtp: "*-*-* 00:00:00"                    # Check the SMTP server once per day

## Schedule for Cleanup Tasks
on_calendar_cleanup_backups: "*-*-* 00,06,12,18:30:00"        # Cleanup backups every 6 hours, MUST be called before disc space cleanup
on_calendar_cleanup_disc_space: "*-*-* 07,13,19,01:30:00"     # Cleanup disc space every 6 hours
on_calendar_cleanup_certs: "*-*-* 12,00:45:00"                # Deletes and revokes unused certs

## Schedule for Backup Tasks
on_calendar_backup_docker_to_local: "*-*-* 03:30:00"
on_calendar_backup_remote_to_local: "*-*-* 21:30:00"

## Schedule for Maintenance Tasks
on_calendar_heal_docker: "*-*-* {{ HOURS_SERVER_AWAKE }}:30:00"   # Heal unhealthy docker instances once per hour
on_calendar_renew_lets_encrypt_certificates: "*-*-* 12,00:30:00"  # Renew Mailu certificates twice per day
on_calendar_deploy_certificates: "*-*-* 13,01:30:00"              # Deploy letsencrypt certificates twice per day to docker containers
on_calendar_msi_keyboard_color: "*-*-* *:*:00"                    # Change the keyboard color every minute
on_calendar_cleanup_failed_docker: "*-*-* 12:00:00"               # Clean up failed docker backups every noon
on_calendar_btrfs_auto_balancer: "Sat *-*-01..07 00:00:00"        # Execute the btrfs auto balancer every first Saturday of a month
on_calendar_restart_docker: "Sun *-*-* 08:00:00"                  # Restart docker instances every Sunday at 8:00 AM
on_calendar_nextcloud: "22"                                       # Do nextcloud maintenance between 22:00 and 02:00
group_vars/all/08_schedule.yml (new file, 52 lines)
@@ -0,0 +1,52 @@

# Service Timers

## Meta
SYS_TIMER_ALL_ENABLED: "{{ MODE_DEBUG }}"  # Runtime variable for process control - activates all timers, independent of whether the handlers had been triggered

## Server Tact Variables

HOURS_SERVER_AWAKE: "0..23"   # Hours in which the server is "awake" (100% working). The rest of the time is reserved for maintenance
RANDOMIZED_DELAY_SEC: "5min"  # Random delay for systemd timers to avoid peak loads.

## Timeouts for all services
SYS_TIMEOUT_DOCKER_RPR_HARD: "10min"
SYS_TIMEOUT_DOCKER_RPR_SOFT: "{{ SYS_TIMEOUT_DOCKER_RPR_HARD }}"
SYS_TIMEOUT_CLEANUP_SERVICES: "15min"
SYS_TIMEOUT_DOCKER_UPDATE: "20min"
SYS_TIMEOUT_STORAGE_OPTIMIZER: "{{ SYS_TIMEOUT_DOCKER_UPDATE }}"
SYS_TIMEOUT_BACKUP_SERVICES: "60min"

## On Calendar

### Schedule for health checks
SYS_SCHEDULE_HEALTH_BTRFS: "*-*-* 00:00:00"               # Check the btrfs for errors once per day
SYS_SCHEDULE_HEALTH_JOURNALCTL: "*-*-* 00:00:00"          # Check the journalctl for errors once per day
SYS_SCHEDULE_HEALTH_DISC_SPACE: "*-*-* 06,12,18,00:00:00" # Check four times per day if there is sufficient disc space
SYS_SCHEDULE_HEALTH_DOCKER_CONTAINER: "*-*-* {{ HOURS_SERVER_AWAKE }}:00:00"  # Check once per hour if the docker containers are healthy
SYS_SCHEDULE_HEALTH_DOCKER_VOLUMES: "*-*-* {{ HOURS_SERVER_AWAKE }}:15:00"    # Check once per hour if the docker volumes are healthy
SYS_SCHEDULE_HEALTH_CSP_CRAWLER: "*-*-* {{ HOURS_SERVER_AWAKE }}:30:00"       # Check once per hour if all CSPs are fulfilled
SYS_SCHEDULE_HEALTH_NGINX: "*-*-* {{ HOURS_SERVER_AWAKE }}:45:00"             # Check once per hour if all webservices are available
SYS_SCHEDULE_HEALTH_MSMTP: "*-*-* 00:00:00"               # Check the SMTP server once per day

### Schedule for cleanup tasks
SYS_SCHEDULE_CLEANUP_BACKUPS: "*-*-* 00,06,12,18:30:00"     # Cleanup backups every 6 hours, MUST be called before disc space cleanup
SYS_SCHEDULE_CLEANUP_DISC_SPACE: "*-*-* 07,13,19,01:30:00"  # Cleanup disc space every 6 hours
SYS_SCHEDULE_CLEANUP_CERTS: "*-*-* 12,00:45:00"             # Deletes and revokes unused certs
SYS_SCHEDULE_CLEANUP_FAILED_BACKUPS: "*-*-* 12:00:00"       # Clean up failed docker backups every noon

### Schedule for repair services
SYS_SCHEDULE_REPAIR_BTRFS_AUTO_BALANCER: "Sat *-*-01..07 00:00:00"  # Execute the btrfs auto balancer every first Saturday of a month
SYS_SCHEDULE_REPAIR_DOCKER_HARD: "Sun *-*-* 08:00:00"               # Restart docker instances every Sunday at 8:00 AM

### Schedule for backup tasks
SYS_SCHEDULE_BACKUP_DOCKER_TO_LOCAL: "*-*-* 03:30:00"
SYS_SCHEDULE_BACKUP_REMOTE_TO_LOCAL: "*-*-* 21:30:00"

### Schedule for Maintenance Tasks
SYS_SCHEDULE_MAINTANANCE_LETSENCRYPT_RENEW: "*-*-* 12,00:30:00"   # Renew Mailu certificates twice per day
SYS_SCHEDULE_MAINTANANCE_LETSENCRYPT_DEPLOY: "*-*-* 13,01:30:00"  # Deploy letsencrypt certificates twice per day to docker containers
SYS_SCHEDULE_MAINTANANCE_NEXTCLOUD: "22"                          # Do nextcloud maintenance between 22:00 and 02:00

### Animation
SYS_SCHEDULE_ANIMATION_KEYBOARD_COLOR: "*-*-* *:*:00"  # Change the keyboard color every minute
@@ -10,7 +10,7 @@ defaults_networks:
     # /28 Networks, 14 Usable Ip Addresses
     web-app-akaunting:
      subnet: 192.168.101.0/28
-    web-app-attendize:
+    web-app-confluence:
      subnet: 192.168.101.16/28
     web-app-baserow:
      subnet: 192.168.101.32/28
@@ -34,8 +34,8 @@ defaults_networks:
      subnet: 192.168.101.176/28
     web-app-listmonk:
      subnet: 192.168.101.192/28
-    # Free:
-    #  subnet: 192.168.101.208/28
+    web-app-jira:
+     subnet: 192.168.101.208/28
     web-app-matomo:
      subnet: 192.168.101.224/28
     web-app-mastodon:
@@ -48,7 +48,7 @@ defaults_networks:
      subnet: 192.168.102.16/28
     web-app-moodle:
      subnet: 192.168.102.32/28
-    web-app-mybb:
+    web-app-bookwyrm:
      subnet: 192.168.102.48/28
     web-app-nextcloud:
      subnet: 192.168.102.64/28
@@ -84,11 +84,11 @@ defaults_networks:
      subnet: 192.168.103.64/28
     web-app-syncope:
      subnet: 192.168.103.80/28
-    web-app-collabora:
+    web-svc-collabora:
      subnet: 192.168.103.96/28
     web-svc-simpleicons:
      subnet: 192.168.103.112/28
-    web-app-libretranslate:
+    web-svc-libretranslate:
      subnet: 192.168.103.128/28
     web-app-pretix:
      subnet: 192.168.103.144/28
@@ -96,6 +96,12 @@ defaults_networks:
      subnet: 192.168.103.160/28
     web-svc-logout:
      subnet: 192.168.103.176/28
+    web-app-chess:
+     subnet: 192.168.103.192/28
+    web-app-magento:
+     subnet: 192.168.103.208/28
+    web-app-bridgy-fed:
+     subnet: 192.168.103.224/28
 
     # /24 Networks / 254 Usable Clients
     web-app-bigbluebutton:
@@ -2,12 +2,12 @@ ports:
   # Ports which are exposed to localhost
   localhost:
     database:
      svc-db-postgres: 5432
      svc-db-mariadb: 3306
    # https://developer.mozilla.org/de/docs/Web/API/WebSockets_API
    websocket:
      web-app-mastodon: 4001
      web-app-espocrm: 4002
    oauth2_proxy:
      web-app-phpmyadmin: 4181
      web-app-lam: 4182
@@ -26,7 +26,7 @@ ports:
      web-app-gitea: 8002
      web-app-wordpress: 8003
      web-app-mediawiki: 8004
-     web-app-mybb: 8005
+     web-app-confluence: 8005
      web-app-yourls: 8006
      web-app-mailu: 8007
      web-app-elk: 8008
@@ -36,7 +36,7 @@ ports:
      web-app-funkwhale: 8012
      web-app-roulette-wheel: 8013
      web-app-joomla: 8014
-     web-app-attendize: 8015
+     web-app-jira: 8015
      web-app-pgadmin: 8016
      web-app-baserow: 8017
      web-app-matomo: 8018
@@ -50,7 +50,7 @@ ports:
      web-app-moodle: 8026
      web-app-taiga: 8027
      web-app-friendica: 8028
-     web-app-port-ui: 8029
+     web-app-desktop: 8029
      web-app-bluesky_api: 8030
      web-app-bluesky_web: 8031
      web-app-keycloak: 8032
@@ -63,13 +63,18 @@ ports:
      web-app-navigator: 8039
      web-app-espocrm: 8040
      web-app-syncope: 8041
-     web-app-collabora: 8042
+     web-svc-collabora: 8042
      web-app-mobilizon: 8043
      web-svc-simpleicons: 8044
-     web-app-libretranslate: 8045
+     web-svc-libretranslate: 8045
      web-app-pretix: 8046
      web-app-mig: 8047
      web-svc-logout: 8048
+     web-app-bookwyrm: 8049
+     web-app-chess: 8050
+     web-app-bluesky_view: 8051
+     web-app-magento: 8052
+     web-app-bridgy-fed: 8053
      web-app-bigbluebutton: 48087  # This port is predefined by bbb. @todo Try to change this to a 8XXX port
   public:
     # The following ports should be changed to 22 on the subdomain via stream mapping
@@ -80,9 +85,10 @@ ports:
      svc-db-openldap: 636
    stun:
      web-app-bigbluebutton: 3478  # Not sure if it's placed correctly here or if it should be moved to the localhost section
-     web-app-nextcloud: 3479
+     # Occupied by BBB: 3479
+     web-app-nextcloud: 3480
    turn:
      web-app-bigbluebutton: 5349  # Not sure if it's placed correctly here or if it should be moved to the localhost section
      web-app-nextcloud: 5350      # Not used yet
    federation:
      web-app-matrix_synapse: 8448
@@ -7,38 +7,43 @@
 #############################################
 # @see https://en.wikipedia.org/wiki/OpenID_Connect
 
-## Helper Variables:
+# Helper Variables:
 _oidc_client_realm: "{{ OIDC.CLIENT.REALM if OIDC.CLIENT is defined and OIDC.CLIENT.REALM is defined else SOFTWARE_NAME | lower }}"
 _oidc_url: "{{
     ( OIDC.URL
       if (OIDC is defined and OIDC.URL is defined)
-      else WEB_PROTOCOL ~ '://' ~ (domains | get_domain('web-app-keycloak'))
+      else domains | get_url('web-app-keycloak', WEB_PROTOCOL)
     ).rstrip('/')
   }}"
 _oidc_client_issuer_url: "{{ _oidc_url ~ '/realms/' ~ _oidc_client_realm }}"
 _oidc_client_id: "{{ OIDC.CLIENT.ID if OIDC.CLIENT is defined and OIDC.CLIENT.ID is defined else SOFTWARE_NAME | lower }}"
+_oidc_account_url: "{{ _oidc_client_issuer_url ~ '/account' }}"
+_oidc_protocol_oidc: "{{ _oidc_client_issuer_url ~ '/protocol/openid-connect' }}"
 
+# Definition
 defaults_oidc:
   URL: "{{ _oidc_url }}"
   CLIENT:
     ID: "{{ _oidc_client_id }}"        # Client identifier, typically matching your primary domain
-    # secret:                          # Client secret for authenticating with the OIDC provider (set in the inventory file). Recommended: greater than 32 characters
+    # SECRET:                          # Client secret for authenticating with the OIDC provider (set in the inventory file). Recommended: greater than 32 characters
     REALM: "{{ _oidc_client_realm }}"  # The realm to which the client belongs in the OIDC provider
   ISSUER_URL: "{{ _oidc_client_issuer_url }}"  # Base URL of the OIDC provider (issuer)
   DISCOVERY_DOCUMENT: "{{ _oidc_client_issuer_url ~ '/.well-known/openid-configuration' }}"  # URL for fetching the provider's configuration details
-  AUTHORIZE_URL: "{{ _oidc_client_issuer_url ~ '/protocol/openid-connect/auth' }}"      # Endpoint to start the authorization process
-  TOKEN_URL: "{{ _oidc_client_issuer_url ~ '/protocol/openid-connect/token' }}"         # Endpoint to exchange authorization codes for tokens
-  USER_INFO_URL: "{{ _oidc_client_issuer_url ~ '/protocol/openid-connect/userinfo' }}"  # Endpoint to retrieve user information
-  LOGOUT_URL: "{{ _oidc_client_issuer_url ~ '/protocol/openid-connect/logout' }}"       # Endpoint to log out the user
-  CHANGE_CREDENTIALS: "{{ _oidc_client_issuer_url ~ '/account/account-security/signing-in' }}"  # URL for managing or changing user credentials
-  CERTS: "{{ _oidc_client_issuer_url ~ '/protocol/openid-connect/certs' }}"             # JSON Web Key Set (JWKS)
+  AUTHORIZE_URL: "{{ _oidc_protocol_oidc ~ '/auth' }}"      # Endpoint to start the authorization process
+  TOKEN_URL: "{{ _oidc_protocol_oidc ~ '/token' }}"         # Endpoint to exchange authorization codes for tokens
+  USER_INFO_URL: "{{ _oidc_protocol_oidc ~ '/userinfo' }}"  # Endpoint to retrieve user information
+  LOGOUT_URL: "{{ _oidc_protocol_oidc ~ '/logout' }}"       # Endpoint to log out the user
+  CERTS: "{{ _oidc_protocol_oidc ~ '/certs' }}"             # JSON Web Key Set (JWKS)
+  ACCOUNT:
+    URL: "{{ _oidc_account_url }}"                                     # Entry point for the user settings console
+    PROFILE_URL: "{{ _oidc_account_url ~ '/#/personal-info' }}"        # Section for managing personal information
+    SECURITY_URL: "{{ _oidc_account_url ~ '/#/security/signingin' }}"  # Section for managing login and security settings
+    CHANGE_CREDENTIALS: "{{ _oidc_account_url ~ '/account-security/signing-in' }}"  # URL for managing or changing user credentials
   RESET_CREDENTIALS: "{{ _oidc_client_issuer_url ~ '/login-actions/reset-credentials?client_id=' ~ _oidc_client_id }}"  # Password reset url
   BUTTON_TEXT: "SSO Login ({{ PRIMARY_DOMAIN | upper }})"   # Default button text
   ATTRIBUTES:
     # Attribute to identify the user
     USERNAME: "preferred_username"
     GIVEN_NAME: "givenName"
     FAMILY_NAME: "surname"
     EMAIL: "email"
+  CLAIMS:
+    GROUPS: "groups"
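How the helper variables compose into the endpoint URLs above, with a hypothetical Keycloak domain and realm:

url = "https://keycloak.example.com"            # _oidc_url (assumed value)
issuer = f"{url}/realms/infinito"               # _oidc_client_issuer_url
protocol = f"{issuer}/protocol/openid-connect"  # _oidc_protocol_oidc
account = f"{issuer}/account"                   # _oidc_account_url
print(f"{protocol}/auth")            # AUTHORIZE_URL
print(f"{protocol}/token")           # TOKEN_URL
print(f"{account}/#/personal-info")  # ACCOUNT.PROFILE_URL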
@@ -14,22 +14,22 @@ _ldap_domain: "{{ PRIMARY_DOMAIN }}"  # LDAP is just listening to
 _ldap_user_id: "uid"
 _ldap_filters_users_all: "(|(objectclass=inetOrgPerson))"
 
-ldap:
+LDAP:
   # Distinguished Names (DN)
-  dn:
+  DN:
     # -------------------------------------------------------------------------
     # Base DN / Suffix
     # This is the top-level naming context for your directory, used as the
     # default search base for most operations (e.g. adding users, groups).
     # Example: “dc=example,dc=com”
-    root: "{{ LDAP_DN_BASE }}"
-    administrator:
+    ROOT: "{{ LDAP_DN_BASE }}"
+    ADMINISTRATOR:
       # -------------------------------------------------------------------------
       # Data-Tree Administrator Bind DN
       # The DN used to authenticate for regular directory operations under
       # the data tree (adding users, modifying attributes, creating OUs, etc.).
       # Typically: “cn=admin,dc=example,dc=com”
-      data: "cn={{ applications['svc-db-openldap'].users.administrator.username }},{{ LDAP_DN_BASE }}"
+      DATA: "cn={{ applications['svc-db-openldap'].users.administrator.username }},{{ LDAP_DN_BASE }}"
 
       # -------------------------------------------------------------------------
       # Config-Tree Administrator Bind DN
@@ -37,9 +37,9 @@ ldap:
       # need to load or modify schema, overlays, modules, or other server-
       # level settings.
       # Typically: “cn=admin,cn=config”
-      configuration: "cn={{ applications['svc-db-openldap'].users.administrator.username }},cn=config"
+      CONFIGURATION: "cn={{ applications['svc-db-openldap'].users.administrator.username }},cn=config"
 
-  ou:
+  OU:
     # -------------------------------------------------------------------------
     # Organizational Units (OUs)
     # Pre-created containers in the directory tree to logically separate entries:
@@ -47,9 +47,9 @@ ldap:
     # – groups: Contains organizational or business groups (e.g., departments, teams).
     # – roles:  Contains application-specific RBAC roles
     #           (e.g., "cn=app1-user", "cn=yourls-admin").
-    users: "ou=users,{{ LDAP_DN_BASE }}"
-    groups: "ou=groups,{{ LDAP_DN_BASE }}"
-    roles: "ou=roles,{{ LDAP_DN_BASE }}"
+    USERS: "ou=users,{{ LDAP_DN_BASE }}"
+    GROUPS: "ou=groups,{{ LDAP_DN_BASE }}"
+    ROLES: "ou=roles,{{ LDAP_DN_BASE }}"
 
   # -------------------------------------------------------------------------
   # Additional Notes
@@ -59,17 +59,17 @@ ldap:
   # for ordinary user/group operations, and vice versa.
 
   # Password to access dn.bind
-  bind_credential: "{{ applications | get_app_conf('svc-db-openldap', 'credentials.administrator_database_password') }}"
-  server:
-    domain: "{{ _ldap_name if _ldap_docker_network_enabled else _ldap_domain }}"  # Mapping for public or locale access
-    port: "{{ _ldap_server_port }}"
-    uri: "{{ _ldap_protocol }}://{{ _ldap_name if _ldap_docker_network_enabled else _ldap_domain }}:{{ _ldap_server_port }}"
-    security: ""  # TLS, SSL - Leave empty for none
-  network:
-    local: "{{ _ldap_docker_network_enabled }}"  # Uses the application configuration to define if local network should be available or not
-  user:
-    objects:
-      structural:
+  BIND_CREDENTIAL: "{{ applications | get_app_conf('svc-db-openldap', 'credentials.administrator_database_password') }}"
+  SERVER:
+    DOMAIN: "{{ _ldap_name if _ldap_docker_network_enabled else _ldap_domain }}"  # Mapping for public or locale access
+    PORT: "{{ _ldap_server_port }}"
+    URI: "{{ _ldap_protocol }}://{{ _ldap_name if _ldap_docker_network_enabled else _ldap_domain }}:{{ _ldap_server_port }}"
+    SECURITY: ""  # TLS, SSL - Leave empty for none
+  NETWORK:
+    LOCAL: "{{ _ldap_docker_network_enabled }}"  # Uses the application configuration to define if local network should be available or not
+  USER:
+    OBJECTS:
+      STRUCTURAL:
         - person        # Structural Classes define the core identity of an entry:
                         #   • Specify mandatory attributes (e.g. sn, cn)
                         #   • Each entry must have exactly one structural class
@@ -77,26 +77,26 @@ ldap:
                         #     (e.g. mail, employeeNumber)
         - posixAccount  # Provides UNIX account attributes (uidNumber, gidNumber,
                         #   homeDirectory)
-      auxiliary:
-        nextloud_user: "nextcloudUser"   # Auxiliary Classes attach optional attributes without
+      AUXILIARY:
+        NEXTCLOUD_USER: "nextcloudUser"  # Auxiliary Classes attach optional attributes without
                                          #   changing the entry’s structural role. Here they add
                                          #   nextcloudQuota and nextcloudEnabled for Nextcloud.
-        ssh_public_key: "ldapPublicKey"  # Allows storing SSH public keys for services like Gitea.
-    attributes:
+        SSH_PUBLIC_KEY: "ldapPublicKey"  # Allows storing SSH public keys for services like Gitea.
+    ATTRIBUTES:
       # Attribute to identify the user
-      id: "{{ _ldap_user_id }}"
-      mail: "mail"
-      fullname: "cn"
-      firstname: "givenname"
-      surname: "sn"
-      ssh_public_key: "sshPublicKey"
-      nextcloud_quota: "nextcloudQuota"
-  filters:
-    users:
-      login: "(&{{ _ldap_filters_users_all }}({{_ldap_user_id}}=%{{_ldap_user_id}}))"
-      all: "{{ _ldap_filters_users_all }}"
-  rbac:
-    flavors:
+      ID: "{{ _ldap_user_id }}"
+      MAIL: "mail"
+      FULLNAME: "cn"
+      FIRSTNAME: "givenname"
+      SURNAME: "sn"
+      SSH_PUBLIC_KEY: "sshPublicKey"
+      NEXTCLOUD_QUOTA: "nextcloudQuota"
+  FILTERS:
+    USERS:
+      LOGIN: "(&{{ _ldap_filters_users_all }}({{_ldap_user_id}}=%{{_ldap_user_id}}))"
+      ALL: "{{ _ldap_filters_users_all }}"
+  RBAC:
+    FLAVORS:
       # Valid values posixGroup, groupOfNames
       - groupOfNames
       # - posixGroup
|
# - posixGroup
|
||||||
|
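The `FILTERS` block above composes the login filter by wrapping the "all users" filter around an equality test on the user-id attribute. A minimal sketch of the rendered result, with illustrative (assumed) values for `_ldap_user_id` and `_ldap_filters_users_all` — neither value is taken from this diff:

```python
# Sketch: how the LOGIN filter renders, with assumed inputs.
ldap_user_id = "uid"                               # assumed attribute name
filters_users_all = "(objectClass=inetOrgPerson)"  # assumed "all users" filter

login_filter = f"(&{filters_users_all}({ldap_user_id}=%{ldap_user_id}))"
print(login_filter)  # -> (&(objectClass=inetOrgPerson)(uid=%uid))
# The %uid token is substituted by the consuming application at login time.
```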
@@ -21,7 +21,7 @@ defaults_service_provider:
       if 'web-app-bluesky' in group_names else '' }}
   email: "{{ users.contact.username ~ '@' ~ PRIMARY_DOMAIN if 'web-app-mailu' in group_names else '' }}"
   mastodon: "{{ '@' ~ users.contact.username ~ '@' ~ domains | get_domain('web-app-mastodon') if 'web-app-mastodon' in group_names else '' }}"
-  matrix: "{{ '@' ~ users.contact.username ~ ':' ~ domains['web-app-matrix'].synapse if 'web-app-matrix' in group_names else '' }}"
+  matrix: "{{ '@' ~ users.contact.username ~ ':' ~ applications | get_app_conf('web-app-matrix', 'server_name') if 'web-app-matrix' in group_names else '' }}"
   peertube: "{{ '@' ~ users.contact.username ~ '@' ~ domains | get_domain('web-app-peertube') if 'web-app-peertube' in group_names else '' }}"
   pixelfed: "{{ '@' ~ users.contact.username ~ '@' ~ domains | get_domain('web-app-pixelfed') if 'web-app-pixelfed' in group_names else '' }}"
   phone: "+0 000 000 404"
@@ -1,6 +1,6 @@
-backups_folder_path: "/Backups/"          # Path to the backups folder
+BACKUPS_FOLDER_PATH: "/Backups/"          # Path to the backups folder

 # Storage Space-Related Configurations
-size_percent_maximum_backup: 75           # Maximum storage space in percent for backups
+SIZE_PERCENT_MAXIMUM_BACKUP: 75           # Maximum storage space in percent for backups
-size_percent_cleanup_disc_space: 85       # Threshold for triggering cleanup actions
+SIZE_PERCENT_CLEANUP_DISC_SPACE: 85       # Threshold for triggering cleanup actions
-size_percent_disc_space_warning: 90       # Warning threshold in percent for free disk space
+SIZE_PERCENT_DISC_SPACE_WARNING: 90       # Warning threshold in percent for free disk space
lookup_plugins/local_mtime_qs.py (new file, 53 lines)
@@ -0,0 +1,53 @@
+from __future__ import annotations
+
+from ansible.plugins.lookup import LookupBase
+from ansible.errors import AnsibleError
+import os
+
+
+class LookupModule(LookupBase):
+    """
+    Return a cache-busting string based on the LOCAL file's mtime.
+
+    Usage (single path → string via Jinja):
+      {{ lookup('local_mtime_qs', '/path/to/file.css') }}
+      -> "?version=1712323456"
+
+    Options:
+      param (str): query parameter name (default: "version")
+      mode  (str): "qs" (default) → returns "?<param>=<mtime>"
+                   "epoch"        → returns "<mtime>"
+
+    Multiple paths (returns list, one result per term):
+      {{ lookup('local_mtime_qs', '/a.js', '/b.js', param='v') }}
+    """
+
+    def run(self, terms, variables=None, **kwargs):
+        if not terms:
+            return []
+
+        param = kwargs.get('param', 'version')
+        mode = kwargs.get('mode', 'qs')
+
+        if mode not in ('qs', 'epoch'):
+            raise AnsibleError("local_mtime_qs: 'mode' must be 'qs' or 'epoch'")
+
+        results = []
+        for term in terms:
+            path = os.path.abspath(os.path.expanduser(str(term)))
+
+            # Fail fast if path is missing or not a regular file
+            if not os.path.exists(path):
+                raise AnsibleError(f"local_mtime_qs: file does not exist: {path}")
+            if not os.path.isfile(path):
+                raise AnsibleError(f"local_mtime_qs: not a regular file: {path}")
+
+            try:
+                mtime = int(os.stat(path).st_mtime)
+            except OSError as e:
+                raise AnsibleError(f"local_mtime_qs: cannot stat '{path}': {e}")
+
+            if mode == 'qs':
+                results.append(f"?{param}={mtime}")
+            else:  # mode == 'epoch'
+                results.append(str(mtime))
+
+        return results
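A quick way to sanity-check the plugin outside Ansible is to instantiate it directly; `LookupBase` is constructible without arguments for this purpose. A minimal sketch — the module path and test file are assumptions for illustration:

```python
# Sketch: exercising the lookup plugin outside of Ansible.
from lookup_plugins.local_mtime_qs import LookupModule  # assumed import path

lm = LookupModule()
print(lm.run(["/etc/hostname"]))                # e.g. ['?version=1712323456']
print(lm.run(["/etc/hostname"], mode="epoch"))  # e.g. ['1712323456']
print(lm.run(["/etc/hostname"], param="v"))     # e.g. ['?v=1712323456']
```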
module_utils/role_dependency_resolver.py (new file, 296 lines)
@@ -0,0 +1,296 @@
+import os
+import fnmatch
+import re
+from typing import Dict, Set, Iterable, Tuple, Optional
+
+import yaml
+
+
+class RoleDependencyResolver:
+
+    _RE_PURE_JINJA = re.compile(r"\s*\{\{\s*[^}]+\s*\}\}\s*$")
+
+    def __init__(self, roles_dir: str):
+        self.roles_dir = roles_dir
+
+    # -------------------------- public API --------------------------
+
+    def resolve_transitively(
+        self,
+        start_roles: Iterable[str],
+        *,
+        resolve_include_role: bool = True,
+        resolve_import_role: bool = True,
+        resolve_dependencies: bool = True,
+        resolve_run_after: bool = False,
+        max_depth: Optional[int] = None,
+    ) -> Set[str]:
+        to_visit = list(dict.fromkeys(start_roles))
+        visited: Set[str] = set()
+        depth: Dict[str, int] = {}
+
+        for r in to_visit:
+            depth[r] = 0
+
+        while to_visit:
+            role = to_visit.pop()
+            cur_d = depth.get(role, 0)
+            if role in visited:
+                continue
+            visited.add(role)
+
+            if max_depth is not None and cur_d >= max_depth:
+                continue
+
+            for dep in self.get_role_dependencies(
+                role,
+                resolve_include_role=resolve_include_role,
+                resolve_import_role=resolve_import_role,
+                resolve_dependencies=resolve_dependencies,
+                resolve_run_after=resolve_run_after,
+            ):
+                if dep not in visited:
+                    to_visit.append(dep)
+                    depth[dep] = cur_d + 1
+
+        return visited
+
+    def get_role_dependencies(
+        self,
+        role_name: str,
+        *,
+        resolve_include_role: bool = True,
+        resolve_import_role: bool = True,
+        resolve_dependencies: bool = True,
+        resolve_run_after: bool = False,
+    ) -> Set[str]:
+        role_path = os.path.join(self.roles_dir, role_name)
+        if not os.path.isdir(role_path):
+            return set()
+
+        deps: Set[str] = set()
+
+        if resolve_include_role or resolve_import_role:
+            includes, imports = self._scan_tasks(role_path)
+            if resolve_include_role:
+                deps |= includes
+            if resolve_import_role:
+                deps |= imports
+
+        if resolve_dependencies:
+            deps |= self._extract_meta_dependencies(role_path)
+
+        if resolve_run_after:
+            deps |= self._extract_meta_run_after(role_path)
+
+        return deps
+
+    # -------------------------- scanning helpers --------------------------
+
+    def _scan_tasks(self, role_path: str) -> Tuple[Set[str], Set[str]]:
+        tasks_dir = os.path.join(role_path, "tasks")
+        include_roles: Set[str] = set()
+        import_roles: Set[str] = set()
+
+        if not os.path.isdir(tasks_dir):
+            return include_roles, import_roles
+
+        all_roles = self._list_role_dirs(self.roles_dir)
+
+        candidates = []
+        for root, _, files in os.walk(tasks_dir):
+            for f in files:
+                if f.endswith(".yml") or f.endswith(".yaml"):
+                    candidates.append(os.path.join(root, f))
+
+        for file_path in candidates:
+            try:
+                with open(file_path, "r", encoding="utf-8") as f:
+                    docs = list(yaml.safe_load_all(f))
+            except Exception:
+                inc, imp = self._tolerant_scan_file(file_path, all_roles)
+                include_roles |= inc
+                import_roles |= imp
+                continue
+
+            for doc in docs or []:
+                if not isinstance(doc, list):
+                    continue
+                for task in doc:
+                    if not isinstance(task, dict):
+                        continue
+                    if "include_role" in task:
+                        include_roles |= self._extract_from_task(task, "include_role", all_roles)
+                    if "import_role" in task:
+                        import_roles |= self._extract_from_task(task, "import_role", all_roles)
+
+        return include_roles, import_roles
+
+    def _extract_from_task(self, task: dict, key: str, all_roles: Iterable[str]) -> Set[str]:
+        roles: Set[str] = set()
+        spec = task.get(key)
+        if not isinstance(spec, dict):
+            return roles
+
+        name = spec.get("name")
+        loop_val = self._collect_loop_values(task)
+
+        if loop_val is not None:
+            for item in self._iter_flat(loop_val):
+                cand = self._role_from_loop_item(item, name_template=name)
+                if cand:
+                    roles.add(cand)
+
+            if isinstance(name, str) and name.strip() and not self._is_pure_jinja_var(name):
+                pattern = self._jinja_to_glob(name) if ("{{" in name and "}}" in name) else name
+                self._match_glob_into(pattern, all_roles, roles)
+            return roles
+
+        if isinstance(name, str) and name.strip():
+            if "{{" in name and "}}" in name:
+                if self._is_pure_jinja_var(name):
+                    return roles
+                pattern = self._jinja_to_glob(name)
+                self._match_glob_into(pattern, all_roles, roles)
+            else:
+                roles.add(name.strip())
+
+        return roles
+
+    def _collect_loop_values(self, task: dict):
+        for k in ("loop", "with_items", "with_list", "with_flattened"):
+            if k in task:
+                return task[k]
+        return None
+
+    def _iter_flat(self, value):
+        if isinstance(value, list):
+            for v in value:
+                if isinstance(v, list):
+                    for x in v:
+                        yield x
+                else:
+                    yield v
+
+    def _role_from_loop_item(self, item, name_template=None) -> Optional[str]:
+        tmpl = (name_template or "").strip() if isinstance(name_template, str) else ""
+
+        if isinstance(item, str):
+            if tmpl in ("{{ item }}", "{{item}}") or not tmpl or "item" in tmpl:
+                return item.strip()
+            return None
+
+        if isinstance(item, dict):
+            for k in ("role", "name"):
+                v = item.get(k)
+                if isinstance(v, str) and v.strip():
+                    if tmpl in (f"{{{{ item.{k} }}}}", f"{{{{item.{k}}}}}") or not tmpl or "item" in tmpl:
+                        return v.strip()
+            return None
+
+    def _match_glob_into(self, pattern: str, all_roles: Iterable[str], out: Set[str]):
+        if "*" in pattern or "?" in pattern or "[" in pattern:
+            for r in all_roles:
+                if fnmatch.fnmatch(r, pattern):
+                    out.add(r)
+        else:
+            out.add(pattern)
+
+    # NOTE: the following unittest fragment appears at this point in the diff
+    # view; it exercises the loop/glob handling above. make_role and write are
+    # helpers from the accompanying test suite, not part of this class's API.
+    def test_jinja_mixed_name_glob_matching(self):
+        """
+        include_role:
+          name: "prefix-{{ item }}-suffix"
+        loop: [x, y]
+        Existing roles: prefix-x-suffix, prefix-y-suffix, prefix-z-suffix
+
+        Expectation:
+          - NO raw loop items ('x', 'y') end up as roles
+          - Glob matching resolves to all three concrete roles
+        """
+        make_role(self.roles_dir, "A")
+        for rn in ["prefix-x-suffix", "prefix-y-suffix", "prefix-z-suffix"]:
+            make_role(self.roles_dir, rn)
+
+        write(
+            os.path.join(self.roles_dir, "A", "tasks", "main.yml"),
+            """
+            - name: jinja-mixed glob
+              include_role:
+                name: "prefix-{{ item }}-suffix"
+              loop:
+                - x
+                - y
+            """
+        )
+
+        r = RoleDependencyResolver(self.roles_dir)
+        deps = r.get_role_dependencies("A")
+
+        # ensure no raw loop items leak into the results
+        self.assertNotIn("x", deps)
+        self.assertNotIn("y", deps)
+
+        # only the resolved role names should be present
+        self.assertEqual(
+            deps,
+            {"prefix-x-suffix", "prefix-y-suffix", "prefix-z-suffix"},
+        )
+
+    # -------------------------- meta helpers --------------------------
+
+    def _extract_meta_dependencies(self, role_path: str) -> Set[str]:
+        deps: Set[str] = set()
+        meta_main = os.path.join(role_path, "meta", "main.yml")
+        if not os.path.isfile(meta_main):
+            return deps
+        try:
+            with open(meta_main, "r", encoding="utf-8") as f:
+                meta = yaml.safe_load(f) or {}
+            raw_deps = meta.get("dependencies", [])
+            if isinstance(raw_deps, list):
+                for item in raw_deps:
+                    if isinstance(item, str):
+                        deps.add(item.strip())
+                    elif isinstance(item, dict):
+                        r = item.get("role")
+                        if isinstance(r, str) and r.strip():
+                            deps.add(r.strip())
+        except Exception:
+            pass
+        return deps
+
+    def _extract_meta_run_after(self, role_path: str) -> Set[str]:
+        deps: Set[str] = set()
+        meta_main = os.path.join(role_path, "meta", "main.yml")
+        if not os.path.isfile(meta_main):
+            return deps
+        try:
+            with open(meta_main, "r", encoding="utf-8") as f:
+                meta = yaml.safe_load(f) or {}
+            galaxy_info = meta.get("galaxy_info", {})
+            run_after = galaxy_info.get("run_after", [])
+            if isinstance(run_after, list):
+                for item in run_after:
+                    if isinstance(item, str) and item.strip():
+                        deps.add(item.strip())
+        except Exception:
+            pass
+        return deps
+
+    # -------------------------- small utils --------------------------
+
+    def _list_role_dirs(self, roles_dir: str) -> list[str]:
+        return [
+            d for d in os.listdir(roles_dir)
+            if os.path.isdir(os.path.join(roles_dir, d))
+        ]
+
+    @classmethod
+    def _is_pure_jinja_var(cls, s: str) -> bool:
+        return bool(cls._RE_PURE_JINJA.fullmatch(s or ""))
+
+    @staticmethod
+    def _jinja_to_glob(s: str) -> str:
+        pattern = re.sub(r"\{\{[^}]+\}\}", "*", s or "")
+        pattern = re.sub(r"\*{2,}", "*", pattern)
+        return pattern.strip()
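Taken together, the resolver walks `include_role`/`import_role` statements (including loop and glob forms), `meta/main.yml` dependencies, and optionally `galaxy_info.run_after`. A minimal usage sketch, with an assumed roles directory and role names for illustration:

```python
# Sketch: resolving the transitive role graph (role names are assumptions).
from module_utils.role_dependency_resolver import RoleDependencyResolver

resolver = RoleDependencyResolver("roles")

# Direct dependencies of one role, gathered from tasks and meta/main.yml:
direct = resolver.get_role_dependencies("web-app-nextcloud")

# Everything reachable from a set of start roles, at most two levels deep:
closure = resolver.resolve_transitively(
    ["web-app-nextcloud"],
    resolve_run_after=True,
    max_depth=2,
)
print(sorted(closure))
```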
@@ -1,4 +1,9 @@
 roles:
+  docker:
+    title: "Docker Toolkit"
+    description: "Generic Docker helpers and utilities (compose wrappers, container tooling)."
+    icon: "fas fa-docker"
+    invokable: false
   dev:
     title: "Software Development Utilties"
     invokable: false
@@ -6,41 +11,76 @@ roles:
     title: "System"
     description: "System near components. Will be automaticly called if necessary from other roles."
     invokable: false
-  alm:
-    title: "Alerting"
-    description: "Notification handlers for system events"
-    icon: "fas fa-bell"
+  ctl:
+    title: "Control"
+    description: "Control layer for system lifecycle management—handling cleanup, monitoring, backups, alerting, maintenance, and repair tasks."
+    icon: "fas fa-cogs"
     invokable: false
   cln:
     title: "Cleanup"
     description: "Roles for cleaning up various system resources—old backups, unused certificates, temporary files, Docker volumes, disk caches, deprecated domains, and more."
     icon: "fas fa-trash-alt"
+    invokable: false
+  hlth:
+    title: "Monitoring"
+    description: "Roles for system monitoring and health checks—encompassing bot-style automated checks and core low-level monitors for logs, containers, disk usage, and more."
+    icon: "fas fa-chart-area"
+    invokable: false
+  bkp:
+    title: "Backup & Restore"
+    description: "Backup strategies & restore procedures"
+    icon: "fas fa-hdd"
+    invokable: false
+  alm:
+    title: "Alerting"
+    description: "Notification handlers for system events"
+    icon: "fas fa-bell"
+    invokable: false
+  mtn:
+    title: "Maintenance"
+    description: "Maintenance roles for certificates, system upkeep, and recurring operational tasks."
+    icon: "fas fa-tools"
+    invokable: false
+  rpr:
+    title: "Repair"
+    description: "Repair and recovery roles—handling hard/soft recovery of Docker, Btrfs balancers, and other low-level system fixes."
+    icon: "fas fa-wrench"
+    invokable: false
+  dns:
+    title: "DNS Automation"
+    description: "DNS providers, records, and rDNS management (Cloudflare, Hetzner, etc.)."
+    icon: "fas fa-network-wired"
     invokable: false
-  hlth:
-    title: "Monitoring"
-    description: "Roles for system monitoring and health checks—encompassing bot-style automated checks and core low-level monitors for logs, containers, disk usage, and more."
-    icon: "fas fa-chart-area"
+  stk:
+    title: "Stack"
+    description: "Stack levels to setup the server"
+    icon: "fas fa-bars-staggered"
     invokable: false
-  bkp:
-    title: "Backup & Restore"
-    description: "Backup strategies & restore procedures"
-    icon: "fas fa-hdd"
+  front:
+    title: "System Frontend Helpers"
+    description: "Frontend helpers for reverse-proxied apps (injection, shared assets, CDN plumbing)."
+    icon: "fas fa-wand-magic-sparkles"
     invokable: false
+  inj:
+    title: "Injection"
+    description: "Composable HTML injection roles (CSS, JS, logout interceptor, analytics, desktop iframe) for Nginx/OpenResty via sub_filter/Lua with CDN-backed assets."
+    icon: "fas fa-filter"
+    invokable: false
   update:
     title: "Updates & Package Management"
     description: "OS & package updates"
     icon: "fas fa-sync"
     invokable: true
+  pkgmgr:
+    title: "Package Manager Helpers"
+    description: "Helpers for package managers and unified install flows."
+    icon: "fas fa-box-open"
+    invokable: false
   drv:
     title: "Drivers"
     description: "Roles for installing and configuring hardware drivers—covering printers, graphics, input devices, and other peripheral support."
     icon: "fas fa-microchip"
     invokable: true
-# core:
-#   title: "Core & System"
-#   description: "Fundamental system configuration"
-#   icon: "fas fa-cogs"
-#   invokable: true
   gen:
     title: "Generic"
     description: "Helper roles & installers (git, locales, timer, etc.)"
@@ -66,20 +106,10 @@ roles:
     description: "Utility roles for server-side configuration and management—covering corporate identity provisioning, network helpers, and other service-oriented toolkits."
     icon: "fas fa-cogs"
     invokable: true
-  srv:
-    title: "Server"
-    description: "General server roles for provisioning and managing server infrastructure—covering web servers, proxy servers, network services, and other backend components."
-    icon: "fas fa-server"
-    invokable: false
-  web:
-    title: "Webserver"
-    description: "Web-server roles for installing and configuring Nginx (core, TLS, injection filters, composer modules)."
-    icon: "fas fa-server"
-    invokable: false
-  proxy:
-    title: "Proxy Server"
-    description: "Proxy-server roles for virtual-host orchestration and reverse-proxy setups."
-    icon: "fas fa-project-diagram"
+  dev:
+    title: "Developer Utilities"
+    description: "Developer-centric server utilities and admin toolkits."
+    icon: "fas fa-code"
     invokable: false
   web:
     title: "Web Infrastructure"
@@ -99,11 +129,6 @@ roles:
     title: "Webserver Optimation"
     description: "Tools which help to optimize webservers"
     invokable: true
-  net:
-    title: "Network"
-    description: "Network setup (DNS, Let's Encrypt HTTP, WireGuard, etc.)"
-    icon: "fas fa-globe"
-    invokable: true
   svc:
     title: "Services"
     description: "Infrastructure services like databases"
@@ -123,7 +148,11 @@ roles:
     description: "Reverse‑proxy roles for routing and load‑balancing traffic to backend services"
     icon: "fas fa-project-diagram"
     invokable: true
+  net:
+    title: "Network"
+    description: "Network setup (DNS, Let's Encrypt HTTP, WireGuard, etc.)"
+    icon: "fas fa-globe"
+    invokable: true
   user:
     title: "Users & Access"
     description: "User accounts & access control"
@@ -1,11 +0,0 @@
-# Database Docker with Web Proxy
-
-This role builds on `cmp-db-docker` by adding a reverse-proxy frontend for HTTP access to your database service.
-
-## Features
-
-- **Database Composition**
-  Leverages the `cmp-db-docker` role to stand up your containerized database (PostgreSQL, MariaDB, etc.) with backups and user management.
-
-- **Reverse Proxy**
-  Includes the `srv-proxy-6-6-domain` role to configure a proxy (e.g. nginx) for routing HTTP(S) traffic to your database UI or management endpoint.
@@ -1 +0,0 @@
-DATABASE_VARS_FILE: "{{ playbook_dir }}/roles/cmp-rdbms/vars/database.yml"
@@ -1 +0,0 @@
-{% include 'roles/cmp-rdbms/templates/services/' + database_type + '.yml.j2' %}
@@ -1,20 +0,0 @@
-# Helper variables
-_dbtype: "{{ (database_type | d('') | trim) }}"
-_database_id: "{{ ('svc-db-' ~ _dbtype) if _dbtype else '' }}"
-_database_central_name: "{{ (applications | get_app_conf(_database_id, 'docker.services.' ~ _dbtype ~ '.name', False, '')) if _dbtype else '' }}"
-_database_consumer_id: "{{ database_application_id | d(application_id) }}"
-_database_consumer_entity_name: "{{ _database_consumer_id | get_entity_name }}"
-_database_central_enabled: "{{ (applications | get_app_conf(_database_consumer_id, 'features.central_database', False)) if _dbtype else False }}"
-
-# Definition
-
-database_name: "{{ _database_consumer_entity_name }}"
-database_instance: "{{ _database_central_name if _database_central_enabled else database_name }}" # This could lead to bugs at dedicated database @todo cleanup
-database_host: "{{ _database_central_name if _database_central_enabled else 'database' }}" # This could lead to bugs at dedicated database @todo cleanup
-database_username: "{{ _database_consumer_entity_name }}"
-database_password: "{{ applications | get_app_conf(_database_consumer_id, 'credentials.database_password', true) }}"
-database_port: "{{ (ports.localhost.database[_database_id] | d('')) if _dbtype else '' }}"
-database_env: "{{ docker_compose.directories.env }}{{ database_type }}.env"
-database_url_jdbc: "jdbc:{{ database_type if database_type == 'mariadb' else 'postgresql' }}://{{ database_host }}:{{ database_port }}/{{ database_name }}"
-database_url_full: "{{ database_type }}://{{ database_username }}:{{ database_password }}@{{ database_host }}:{{ database_port }}/{{ database_name }}"
-database_volume: "{{ _database_consumer_entity_name ~ '_' if not _database_central_enabled }}{{ database_host }}"
@@ -19,3 +19,5 @@
   template:
     src: caffeine.desktop.j2
     dest: "{{auto_start_directory}}caffeine.desktop"
+
+- include_tasks: utils/run_once.yml
@@ -1,4 +1,3 @@
 - block:
   - include_tasks: 01_core.yml
-  - include_tasks: utils/run_once.yml
   when: run_once_desk_gnome_caffeine is not defined
@@ -9,4 +9,4 @@
   community.general.pacman:
     name: "libreoffice-{{ applications['desk-libreoffice'].flavor }}-{{ item }}"
     state: present
-  loop: "{{libreoffice_languages}}"
+  loop: "{{ libreoffice_languages }}"
@@ -49,3 +49,5 @@
     create: yes
     mode: "0644"
   become: false
+
+- include_tasks: utils/run_once.yml
@@ -1,4 +1,3 @@
 - block:
   - include_tasks: 01_core.yml
-  - include_tasks: utils/run_once.yml
   when: run_once_desk_ssh is not defined
@@ -1,4 +0,0 @@
----
-- name: reload virtualbox kernel modules
-  become: true
-  command: vboxreload
@@ -1,8 +1,14 @@
 ---
 - name: Setup locale.gen
-  template: src=locale.gen dest=/etc/locale.gen
+  template:
+    src: locale.gen.j2
+    dest: /etc/locale.gen

 - name: Setup locale.conf
-  template: src=locale.conf dest=/etc/locale.conf
+  template:
+    src: locale.conf.j2
+    dest: /etc/locale.conf

 - name: Generate locales
   shell: locale-gen
   become: true
@@ -1,2 +0,0 @@
-LANG=en_US.UTF-8
-LANGUAGE=en_US.UTF-8
roles/dev-locales/templates/locale.conf.j2 (new file, 2 lines)
@@ -0,0 +1,2 @@
+LANG={{ HOST_LL_CC }}.UTF-8
+LANGUAGE={{ HOST_LL_CC }}.UTF-8
@@ -20,7 +20,7 @@ To offer a centralized, extensible system for managing containerized application
 - **Reset Logic:** Cleans previous Compose project files and data when `MODE_RESET` is enabled.
 - **Handlers for Runtime Control:** Automatically builds, sets up, or restarts containers based on handlers.
 - **Template-ready Service Files:** Predefined service base and health check templates.
-- **Integration Support:** Compatible with `srv-proxy-7-4-core` and other Infinito.Nexus service roles.
+- **Integration Support:** Compatible with `sys-svc-proxy` and other Infinito.Nexus service roles.

 ## Administration Tips
@@ -10,13 +10,45 @@
     - docker compose up
     - docker compose restart
     - docker compose just up
+  when: MODE_ASSERT | bool

+- name: docker compose pull
+  shell: |
+    set -euo pipefail
+    lock="{{ [ PATH_DOCKER_COMPOSE_PULL_LOCK_DIR, (docker_compose.directories.instance | hash('sha1')) ~ '.lock' ] | path_join }}"
+    if [ ! -e "$lock" ]; then
+      mkdir -p "$(dirname "$lock")"
+      if docker compose config | grep -qE '^[[:space:]]+build:'; then
+        docker compose build --pull
+      fi
+      if docker compose pull --help 2>/dev/null | grep -q -- '--ignore-buildable'; then
+        docker compose pull --ignore-buildable
+      else
+        docker compose pull || true
+      fi
+      : > "$lock"
+      echo "pulled"
+    fi
+  args:
+    chdir: "{{ docker_compose.directories.instance }}"
+    executable: /bin/bash
+  register: compose_pull
+  changed_when: "'pulled' in compose_pull.stdout"
+  environment:
+    COMPOSE_HTTP_TIMEOUT: 600
+    DOCKER_CLIENT_TIMEOUT: 600
+  when: MODE_UPDATE | bool
+  listen:
+    - docker compose up
+    - docker compose restart
+    - docker compose just up

 - name: Build docker compose
   shell: |
     set -euo pipefail
     docker compose build || {
       echo "Retrying without cache and pulling bases...";
-      docker compose build --no-cache --pull;
+      docker compose build --no-cache{{ ' --pull' if MODE_UPDATE | bool else ''}};
     }
   args:
     chdir: "{{ docker_compose.directories.instance }}"
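The pull handler keys its one-shot lock file on a SHA-1 of the Compose instance directory, so each project pulls at most once per run. A sketch of the lock-path construction, mirroring the Jinja `hash('sha1')` and `path_join` filters with assumed directory values:

```python
# Sketch: how the per-project pull lock path is derived (paths are assumptions).
import hashlib
import os

PATH_DOCKER_COMPOSE_PULL_LOCK_DIR = "/run/ansible/compose-pull"  # assumed
instance_dir = "/opt/compose/web-app-nextcloud/"                 # assumed

digest = hashlib.sha1(instance_dir.encode("utf-8")).hexdigest()
lock = os.path.join(PATH_DOCKER_COMPOSE_PULL_LOCK_DIR, digest + ".lock")
print(lock)  # -> /run/ansible/compose-pull/<sha1>.lock
```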
@@ -1,3 +1,8 @@
+- name: Remove all docker compose pull locks
+  file:
+    path: "{{ PATH_DOCKER_COMPOSE_PULL_LOCK_DIR }}"
+    state: absent
+
 - name: "Load docker container role"
   include_role:
     name: docker-container
@@ -5,7 +5,9 @@
   loop:
     - "{{ application_id | abs_role_path_by_application_id }}/templates/Dockerfile.j2"
    - "{{ application_id | abs_role_path_by_application_id }}/files/Dockerfile"
-  notify: docker compose up
+  notify:
+    - docker compose up
+    - docker compose build
   register: create_dockerfile_result
   failed_when:
    - create_dockerfile_result is failed
@@ -2,7 +2,7 @@
 services:
 {# Load Database #}
 {% if applications | is_docker_service_enabled(application_id, 'database') %}
-{% include 'roles/cmp-rdbms/templates/services/main.yml.j2' %}
+{% include 'roles/sys-svc-rdbms/templates/services/main.yml.j2' %}
 {% endif %}
 {# Load Redis #}
 {% if applications | is_docker_service_enabled(application_id, 'redis') or applications | get_app_conf(application_id, 'features.oauth2', False) %}
@@ -1,2 +1,2 @@
 DOCKER_COMPOSE_VARIABLE_FILE: "{{ role_path }}/vars/docker-compose.yml"
 DOCKER_COMPOSE_DOWN_ALL_PACKAGE: "docodol"
@@ -3,6 +3,10 @@
     - "CMD"
     - "curl"
     - "-f"
+{% if container_hostname is defined %}
+    - "-H"
+    - "Host: {{ container_hostname }}"
+{% endif %}
     - "http://127.0.0.1{{ (":" ~ container_port) if container_port is defined else '' }}/{{ container_healthcheck | default('') }}"
   interval: 1m
   timeout: 10s
roles/docker-container/templates/healthcheck/nc.yml.j2 (new file, 7 lines)
@@ -0,0 +1,7 @@
+healthcheck:
+  test: ["CMD-SHELL", "nc -z localhost {{ container_port }} || exit 1"]
+  interval: 30s
+  timeout: 3s
+  retries: 3
+  start_period: 10s
+{{ "\n" }}
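To see what the new template emits, it can be rendered standalone with Jinja2; the port value below is an arbitrary example:

```python
# Sketch: rendering the nc-based healthcheck template outside Ansible.
from jinja2 import Template

TPL = """healthcheck:
  test: ["CMD-SHELL", "nc -z localhost {{ container_port }} || exit 1"]
  interval: 30s
  timeout: 3s
  retries: 3
  start_period: 10s
"""

print(Template(TPL).render(container_port=5432))  # 5432 is an example port
```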
@@ -1,38 +0,0 @@
-- include_role:
-    name: '{{ item }}'
-  loop:
-    - dev-yay
-    - sys-alm-compose
-
-- name: Install MSI packages
-  kewlfft.aur.aur:
-    use: yay
-    name:
-      - msi-perkeyrgb
-
-- name: Copy keyboard_color.sh script
-  copy:
-    src: keyboard_color.py
-    dest: /opt/keyboard_color.py
-    mode: "0755"
-
-- name: Copy keyboard-color{{ SYS_SERVICE_SUFFIX }} file
-  template:
-    src: keyboard-color.service.j2
-    dest: /etc/systemd/system/keyboard-color{{ SYS_SERVICE_SUFFIX }}
-    mode: 0644
-
-- name: Reload systemd daemon
-  systemd:
-    daemon_reload: yes
-
-- name: "set 'service_name' to '{{ role_name }}'"
-  set_fact:
-    service_name: "{{ role_name }}"
-
-- name: "include role for sys-timer for {{ service_name }}"
-  include_role:
-    name: sys-timer
-  vars:
-    on_calendar: "{{on_calendar_msi_keyboard_color}}"
-    persistent: "true"
@@ -1,5 +0,0 @@
-- block:
-  - include_tasks: 01_core.yml
-  - set_fact:
-      run_once_drv_msi_keyboard_color: true
-  when: run_once_drv_msi_keyboard_color is not defined
@@ -1,7 +0,0 @@
-[Unit]
-Description=Keyboard Color Service
-OnFailure=sys-alm-compose.{{ SOFTWARE_NAME }}@%n.service
-
-[Service]
-Type=oneshot
-ExecStart=/bin/python /opt/keyboard_color.py {{ vendor_and_product_id }}
@@ -1 +0,0 @@
-application_id: net-wireguard-core
@@ -1 +0,0 @@
-application_id: net-wireguard-firewalled
@@ -1,6 +0,0 @@
-- name: "restart set-mtu service"
-  systemd:
-    name: set-mtu{{ SYS_SERVICE_SUFFIX }}
-    state: restarted
-    enabled: yes
-    daemon_reload: yes
@@ -1,11 +0,0 @@
-- name: create set-mtu service
-  template:
-    src: set-mtu.service.j2
-    dest: /etc/systemd/system/set-mtu{{ SYS_SERVICE_SUFFIX }}
-  notify: restart set-mtu service
-
-- name: create set-mtu.sh
-  template:
-    src: set-mtu.sh.j2
-    dest: /usr/local/bin/set-mtu.sh
-  notify: restart set-mtu service
@@ -1,10 +0,0 @@
-[Unit]
-Description=set MTU
-Before=wg-quick@wg0{{ SYS_SERVICE_SUFFIX }}
-
-[Service]
-Type=oneshot
-ExecStart=bash /usr/local/bin/set-mtu.sh
-
-[Install]
-RequiredBy=wg-quick@wg0{{ SYS_SERVICE_SUFFIX }}
@@ -1 +0,0 @@
-application_id: net-wireguard-plain
@@ -12,32 +12,27 @@
     name: github.com
     key: "{{ lookup('pipe', 'ssh-keyscan -t ed25519 github.com | grep -v \"^#\"') }}"
   become: true
+  no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"

 - name: Create installation directory for Kevin's Package Manager
   file:
-    path: "{{ pkgmgr_install_path }}"
+    path: "{{ PKGMGR_INSTALL_PATH }}"
     state: directory
     mode: '0755'
   become: true

 - name: Clone Kevin's Package Manager repository
   git:
-    repo: "{{ pkgmgr_repo_url }}"
+    repo: "{{ PKGMGR_REPO_URL }}"
-    dest: "{{ pkgmgr_install_path }}"
+    dest: "{{ PKGMGR_INSTALL_PATH }}"
     version: "HEAD"
     force: yes
   become: true

-- name: Ensure main.py is executable
-  file:
-    path: "{{ pkgmgr_install_path }}/main.py"
-    mode: '0755'
-  become: true
-
 - name: create config.yaml
   template:
     src: config.yaml.j2
-    dest: "{{pkgmgr_config_path}}"
+    dest: "{{ PKGMGR_CONFIG_PATH }}"
   become: true

 - name: Run the Package Manager install command to create an alias for Kevins package manager
@@ -45,6 +40,10 @@
     source ~/.venvs/pkgmgr/bin/activate
     make setup
   args:
-    chdir: "{{ pkgmgr_install_path }}"
+    chdir: "{{ PKGMGR_INSTALL_PATH }}"
     executable: /bin/bash
   become: true

+- name: "Update all repositories with pkgmgr"
+  command: "pkgmgr pull --all"
+  when: MODE_UPDATE | bool
@@ -1,3 +1,3 @@
 directories:
-  repositories: "{{repositories_directory}}"
+  repositories: "{{ PKGMGR_REPOSITORIES_DIR }}"
-  binaries: "{{binaries_directory}}"
+  binaries: "{{ PKGMGR_BINARIES_DIR }}"
@@ -2,16 +2,16 @@
 # Variables for Kevin's Package Manager installation

 # The Git repository URL for Kevin's Package Manager
-pkgmgr_repo_url: "https://github.com/kevinveenbirkenbach/package-manager.git"
+PKGMGR_REPO_URL: "https://github.com/kevinveenbirkenbach/package-manager.git"

-# Directory which contains all Repositories managed by Kevin's Package Manager
-repositories_directory: "/opt/Repositories/"
-
-# The directory where the repository will be cloned
-pkgmgr_install_path: "{{repositories_directory}}github.com/kevinveenbirkenbach/package-manager"
-
-# File containing the configuration
-pkgmgr_config_path: "{{pkgmgr_install_path}}/config/config.yaml"
-
 # The directory where executable aliases will be installed (ensure it's in your PATH)
-binaries_directory: "/usr/local/bin"
+PKGMGR_BINARIES_DIR: "/usr/local/bin"

+# Directory which contains all Repositories managed by Kevin's Package Manager
+PKGMGR_REPOSITORIES_DIR: "/opt/Repositories/"
+
+# The directory where the repository will be cloned
+PKGMGR_INSTALL_PATH: "{{ [ PKGMGR_REPOSITORIES_DIR, 'github.com/kevinveenbirkenbach/package-manager' ] | path_join }}"
+
+# File containing the configuration
+PKGMGR_CONFIG_PATH: "{{ [ PKGMGR_INSTALL_PATH, 'config/config.yaml' ] | path_join }}"
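The switch to `path_join` removes the fragile string concatenation of the old `pkgmgr_install_path`, which silently depended on `repositories_directory` keeping its trailing slash. The filter behaves like `os.path.join`, as this sketch shows:

```python
# Sketch: why path_join is safer than string concatenation here.
import os

PKGMGR_REPOSITORIES_DIR = "/opt/Repositories/"

# Old style: breaks silently if the trailing slash is ever removed.
old = PKGMGR_REPOSITORIES_DIR + "github.com/kevinveenbirkenbach/package-manager"

# New style: os.path.join mirrors Ansible's path_join filter.
new = os.path.join(PKGMGR_REPOSITORIES_DIR, "github.com/kevinveenbirkenbach/package-manager")

assert old == new == "/opt/Repositories/github.com/kevinveenbirkenbach/package-manager"
```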
@@ -1,5 +0,0 @@
-# default vhost flavour
-vhost_flavour: "basic" # valid: basic | ws_generic
-
-# build the full template path from the flavour
-vhost_template_src: "roles/srv-proxy-7-4-core/templates/vhost/{{ vhost_flavour }}.conf.j2"
@@ -1 +0,0 @@
-configuration_destination: "{{ NGINX.DIRECTORIES.HTTP.SERVERS }}{{ domain }}.conf"
@@ -1,31 +0,0 @@
-- block:
-  - name: Install certbundle
-    include_role:
-      name: pkgmgr-install
-    vars:
-      package_name: certbundle
-
-  - name: Generate SAN certificate with certbundle
-    command: >-
-      certbundle
-      --domains "{{ current_play_domains_all | join(',') }}"
-      --certbot-email "{{ users.administrator.email }}"
-      --certbot-acme-challenge-method "{{ CERTBOT_ACME_CHALLENGE_METHOD }}"
-      --chunk-size 100
-      {% if CERTBOT_ACME_CHALLENGE_METHOD != 'webroot' %}
-      --certbot-credentials-file "{{ CERTBOT_CREDENTIALS_FILE }}"
-      --certbot-dns-propagation-seconds "{{ CERTBOT_DNS_PROPAGATION_WAIT_SECONDS }}"
-      {% else %}
-      --letsencrypt-webroot-path "{{ LETSENCRYPT_WEBROOT_PATH }}"
-      {% endif %}
-      {{ '--mode-test' if MODE_TEST | bool else '' }}
-    register: certbundle_result
-    changed_when: "'Certificate not yet due for renewal' not in certbundle_result.stdout"
-    failed_when: >
-      certbundle_result.rc != 0
-      and 'too many certificates' not in certbundle_result.stderr
-
-  - name: run the san tasks once
-    set_fact:
-      run_once_san_certs: true
-  when: run_once_san_certs is not defined
@@ -1,6 +0,0 @@
-- name: "reload certbot service"
-  systemd:
-    name: srv-web-6-6-tls-renew{{ SYS_SERVICE_SUFFIX }}
-    state: reloaded
-    enabled: yes
-    daemon_reload: yes
@@ -1,30 +0,0 @@
-- name: Include dependencies
-  include_role:
-    name: '{{ item }}'
-  loop:
-    - sys-svc-certbot
-    - srv-web-7-4-core
-    - sys-alm-compose
-
-- name: install certbot
-  community.general.pacman:
-    name:
-      - certbot-nginx
-    state: present
-
-- name: configure srv-web-6-6-tls-renew service
-  template:
-    src: srv-web-6-6-tls-renew.service.j2
-    dest: /etc/systemd/system/srv-web-6-6-tls-renew{{ SYS_SERVICE_SUFFIX }}
-  notify: reload certbot service
-
-- name: "set 'service_name' to '{{ role_name }}'"
-  set_fact:
-    service_name: "{{ role_name }}"
-
-- name: "include role for sys-timer for {{ service_name }}"
-  include_role:
-    name: sys-timer
-  vars:
-    on_calendar: "{{on_calendar_renew_lets_encrypt_certificates}}"
-    persistent: "true"
@@ -1,9 +0,0 @@
-# run_once_srv_web_7_6_composer: deactivated
-
-- name: "include role sys-srv-web-inj-compose for '{{ domain }}'"
-  include_role:
-    name: sys-srv-web-inj-compose
-
-- name: "include role srv-web-6-6-tls-core for '{{ domain }}'"
-  include_role:
-    name: srv-web-6-6-tls-core
|
|||||||
- name: "reload svc-bkp-loc-2-usb service"
|
|
||||||
systemd:
|
|
||||||
name: svc-bkp-loc-2-usb{{ SYS_SERVICE_SUFFIX }}
|
|
||||||
state: reloaded
|
|
||||||
daemon_reload: yes
|
|
@@ -3,7 +3,7 @@
     include_role:
       name: '{{ item }}'
     loop:
-      - sys-cln-bkps-service
+      - sys-ctl-cln-bkps
       - sys-lock
   - include_tasks: utils/run_once.yml
   when: run_once_svc_bkp_loc_2_usb is not defined
@@ -11,9 +11,9 @@
 - name: Fail if any backup_to_usb variable is empty
   assert:
     that:
-      - backup_to_usb_mount != ""
+      - BACKUP_TO_USB_MOUNT != ""
-      - backup_to_usb_target != ""
+      - BACKUP_TO_USB_target != ""
-      - backup_to_usb_source != ""
+      - BACKUP_TO_USB_SOURCE != ""
     fail_msg: |
       One or more of the configuration variables are empty!
       Please set:
@@ -21,20 +21,7 @@
       - target
       - source
       to non‑empty values in your configuration file.
+  when: MODE_ASSERT | bool

-- name: Copy backup script to the scripts directory
-  copy:
-    src: svc-bkp-loc-2-usb.py
-    dest: "{{ backup_to_usb_script_path }}"
-    owner: root
-    group: root
-    mode: '0755'
-
-- name: Copy systemd service to systemd directory
-  template:
-    src: svc-bkp-loc-2-usb.service.j2
-    dest: /etc/systemd/system/svc-bkp-loc-2-usb{{ SYS_SERVICE_SUFFIX }}
-    owner: root
-    group: root
-    mode: '0644'
-  notify: reload svc-bkp-loc-2-usb service
+- include_role:
+    name: sys-service
@@ -1,12 +0,0 @@
-[Unit]
-Description=Backup to USB when mounted to {{ backup_to_usb_mount }}
-Wants={{systemctl_mount_service_name}}
-OnFailure=sys-alm-compose.{{ SOFTWARE_NAME }}@%n.service
-
-[Service]
-Type=oneshot
-ExecStart=/bin/python {{ backup_to_usb_script_path }} {{backup_to_usb_source}} {{backup_to_usb_destination}}
-ExecStartPost=/bin/systemctl start sys-cln-backups{{ SYS_SERVICE_SUFFIX }}
-
-[Install]
-WantedBy=multi-user.target
roles/svc-bkp-loc-2-usb/templates/systemctl.service.j2 (new file, 11 lines)
@@ -0,0 +1,11 @@
+[Unit]
+Description=Backup to USB when mounted to {{ BACKUP_TO_USB_MOUNT }}
+Wants={{ BACKUPS_SERVICE_MNT_NAME }}
+OnFailure={{ SYS_SERVICE_ON_FAILURE_COMPOSE }}
+
+[Service]
+Type=oneshot
+ExecStart={{ system_service_script_exec }} {{ BACKUP_TO_USB_SOURCE }} {{ BACKUP_TO_USB_DESTINATION }}
+
+[Install]
+WantedBy=multi-user.target
@@ -1,10 +1,9 @@
 application_id: "svc-bkp-loc-2-usb"
+system_service_id: "{{ application_id }}"

-backup_to_usb_script_path: "/usr/local/sbin/svc-bkp-loc-2-usb.py"
-backup_to_usb_destination: '{{ backup_to_usb_mount}}{{ backup_to_usb_targed }}'
-backups_folder_path: '{{ backup_to_usb_destination }}'
-systemctl_mount_service_name: '{{ backup_to_usb_mount | trim(''/'') | replace(''/'',''-'') }}.mount'
+BACKUP_TO_USB_DESTINATION: '{{ BACKUP_TO_USB_MOUNT}}{{ BACKUP_TO_USB_TARGET }}'
+BACKUPS_SERVICE_MNT_NAME: '{{ BACKUP_TO_USB_MOUNT | trim(''/'') | replace(''/'',''-'') }}.mount'

-backup_to_usb_mount: "{{ applications | get_app_conf(application_id, 'mount') }}"
+BACKUP_TO_USB_MOUNT: "{{ applications | get_app_conf(application_id, 'mount') }}"
-backup_to_usb_targed: "{{ applications | get_app_conf(application_id, 'target') }}"
+BACKUP_TO_USB_TARGET: "{{ applications | get_app_conf(application_id, 'target') }}"
-backup_to_usb_source: "{{ applications | get_app_conf(application_id, 'source') }}"
+BACKUP_TO_USB_SOURCE: "{{ applications | get_app_conf(application_id, 'source') }}"
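`BACKUPS_SERVICE_MNT_NAME` derives the systemd mount-unit name from the mount point by trimming slashes and replacing the inner ones with dashes. A sketch with an assumed mount path — note that this simple replacement does not perform full `systemd-escape` escaping, so it matches systemd's naming only for paths without literal dashes:

```python
# Sketch: deriving the .mount unit name from the mount point (path is assumed).
BACKUP_TO_USB_MOUNT = "/mnt/usb/backup/"  # assumed example mount point

unit = BACKUP_TO_USB_MOUNT.strip("/").replace("/", "-") + ".mount"
print(unit)  # -> mnt-usb-backup.mount, systemd's unit name for /mnt/usb/backup
```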
@@ -9,17 +9,17 @@ To track what the service is doing, execute one of the following commands:
 #### Using systemctl

 ```bash
-watch -n2 "systemctl status sys-bkp-rmt-2-loc{{ SYS_SERVICE_SUFFIX }}"
+watch -n2 "systemctl status {{ 'sys-bkp-rmt-2-loc' | get_service_name(SOFTWARE_NAME) }}"
 ```

 #### Using journalctl

 ```bash
-journalctl -fu sys-bkp-rmt-2-loc{{ SYS_SERVICE_SUFFIX }}
+journalctl -fu {{ 'sys-bkp-rmt-2-loc' | get_service_name(SOFTWARE_NAME) }}
 ```

 ### Viewing History

 ```bash
-sudo journalctl -u sys-bkp-rmt-2-loc{{ SYS_SERVICE_SUFFIX }}
+sudo journalctl -u {{ 'sys-bkp-rmt-2-loc' | get_service_name(SOFTWARE_NAME) }}
 ```
@@ -17,7 +17,7 @@ Backup Remote to Local is a robust solution for retrieving backup data from remo
 - **Remote Backup Retrieval:** Pulls backups from a remote server using secure SSH connections.
 - **Incremental Backup with rsync:** Uses rsync with options for archive, backup, and hard linking to efficiently manage changes.
 - **Retry Logic:** Implements a retry mechanism to handle transient network issues or remote errors.
-- **Integration with Other Roles:** Works alongside roles like sys-bkp-directory-validator, sys-cln-faild-bkps, sys-timer, sys-bkp-provider, and sys-lock.
+- **Integration with Other Roles:** Works alongside roles like sys-svc-directory-validator, sys-ctl-cln-faild-bkps, sys-timer, sys-bkp-provider, and sys-lock.
 - **Administrative Debugging:** Detailed debug instructions and administrative tasks are provided in a separate file.

 ## Other Resources
@@ -1,4 +0,0 @@
-- name: "reload svc-bkp-rmt-2-loc service"
-  systemd:
-    name: svc-bkp-rmt-2-loc{{ SYS_SERVICE_SUFFIX }}
-    daemon_reload: yes
Some files were not shown because too many files have changed in this diff.