Mirror of https://github.com/kevinveenbirkenbach/computer-playbook.git
Synced 2025-09-08 11:17:17 +02:00

Compare commits: 178 commits, 8608d89653...master
Commit SHAs (newest first):
445c94788e aac9704e8b a57a5f8828 90843726de d25da76117 d48a1b3c0a 2839d2e1a4 00c99e58e9
904040589e 9f3d300bca 9e253a2d09 49120b0dcf b6f91ab9d3 77e8e7ed7e 32bc17e0c3 e294637cb6
577767bed6 e77f8da510 4738b263ec 0a588023a7 d2fa90774b 0e72dcbe36 4f8ce598a9 3769e66d8d
33a5fadf67 699a6b6f1e 61c29eee60 d5204fb5c2 751615b1a4 e2993d2912 24b6647bfb d2dc2eab5f
a1130e33d7 df122905eb d093a22d61 5e550ce3a3 0ada12e3ca 1a5ce4a7fa a9abb3ce5d 71ceb339fc
61bba3d2ef 0bde4295c7 8059f272d5 7c814e6e83 d760c042c2 6cac8085a8 3a83f3d14e 61d852c508
188b098503 bc56940e55 5dfc2efb5a 7f9dc65b37 163a925096 a8c88634b5 ce3fe1cd51 7ca8b7c71d
110381e80c b02d88adc0 b7065837df c98a2378c4 4ae3cee36c b834f0c95c 9f734dff17 6fa4d00547
7254667186 aaedaab3da 7791bd8c04 34b3f3b0ad 94fe58b5da 9feb766e6f 231fd567b3 3f8e7c1733
3bfab9ef8e f1870c07be d0cec9a7d4 1dbd714a56 3a17b2979e bb0530c2ac aa2eb53776 5f66c1a622
b3dfb8bf22 db642c1c39 2fccebbd1f c23fbd8ec4 2999d9af77 2809ffb9f0 cb12114ce8 ba99e558f7
2aed0f97d2 f36c7831b1 009bee531b 4c7bb6d9db 092869b29a f4ea6c6c0f 3ed84717a7 1cfc2b7e23
01b9648650 65d3b3040d 28f7ac5aba 19926b0c57 3a79d9d630 983287a84a dd9a9b6d84 23a2e081bf
4cbd848026 d67f660152 5c6349321b af1ee64246 d96bfc64a6 6ea8301364 92f5bf6481 58c17bf043
6c2d5c52c8 b919f39e35 9f2cfe65af fe399c3967 ef801aa498 18f3b1042f dece6228a4 cb66fb2978
b9da6908ec 8baec17562 1401779a9d 707a3fc1d0 d595d46e2e 73d5651eea 12a267827d c6cd6430bb
67b2ebf001 ebb6660473 f62d09d8f1 de159db918 e2c2cf4bcf 6e1e1ad5c5 06baa4b03a 73e7fbdc8a
bae2bc21ec a8f4dea9d2 5aaf2d28dc 5287bb4d74 5446a1497e 19889a8cfc d9980c0d8f 35206aaafd
942e8c9c12 97f4045c68 c182ecf516 ce033c370a a0477ad54c 35c3681f55 af97e71976 19a51fd718
b916173422 9756a0f75f e417bc19bd 7ad14673e1 eb781dbf8b 6016da6f1f 8b2f0ac47b 9d6d64e11d
f1a2967a37 95a2172fff dc3f4e05a8 e33944cda2 efa68cc1e0 79e702a3ab 9180182d5b 535094d15d
658003f5b9 3ff783df17 3df511aee9 c27d16322b 7a6e273ea4 384beae7c1 ad7e61e8b1 fa46523433
f4a380d802 42d6c1799b
@@ -1,14 +1,29 @@
 #!/usr/bin/env python3
+"""
+Selectively add & vault NEW credentials in your inventory, preserving comments
+and formatting. Existing values are left untouched unless --force is used.
+
+Usage example:
+  infinito create credentials \
+    --role-path roles/web-app-akaunting \
+    --inventory-file host_vars/echoserver.yml \
+    --vault-password-file .pass/echoserver.txt \
+    --set credentials.database_password=mysecret
+"""
+
 import argparse
 import subprocess
 import sys
 from pathlib import Path
-import yaml
-from typing import Dict, Any
-from module_utils.manager.inventory import InventoryManager
-from module_utils.handler.vault import VaultHandler, VaultScalar
-from module_utils.handler.yaml import YamlHandler
-from yaml.dumper import SafeDumper
+from typing import Dict, Any, Union
+
+from ruamel.yaml import YAML
+from ruamel.yaml.comments import CommentedMap
+
+from module_utils.manager.inventory import InventoryManager
+from module_utils.handler.vault import VaultHandler  # uses your existing handler
+
+
+# ---------- helpers ----------
+
 def ask_for_confirmation(key: str) -> bool:
     """Prompt the user for confirmation to overwrite an existing value."""
@@ -18,35 +33,117 @@ def ask_for_confirmation(key: str) -> bool:
     return confirmation == 'y'
 
 
-def main():
+def ensure_map(node: CommentedMap, key: str) -> CommentedMap:
+    """
+    Ensure node[key] exists and is a mapping (CommentedMap) for round-trip safety.
+    """
+    if key not in node or not isinstance(node.get(key), CommentedMap):
+        node[key] = CommentedMap()
+    return node[key]
+
+
+def _is_ruamel_vault(val: Any) -> bool:
+    """Detect if a ruamel scalar already carries the !vault tag."""
+    try:
+        return getattr(val, 'tag', None) == '!vault'
+    except Exception:
+        return False
+
+
+def _is_vault_encrypted(val: Any) -> bool:
+    """
+    Detect if value is already a vault string or a ruamel !vault scalar.
+    Accept both '$ANSIBLE_VAULT' and '!vault' markers.
+    """
+    if _is_ruamel_vault(val):
+        return True
+    if isinstance(val, str) and ("$ANSIBLE_VAULT" in val or "!vault" in val):
+        return True
+    return False
+
+
+def _vault_body(text: str) -> str:
+    """
+    Return only the vault body starting from the first line that contains
+    '$ANSIBLE_VAULT'. If not found, return the original text.
+    Also strips any leading '!vault |' header if present.
+    """
+    lines = text.splitlines()
+    for i, ln in enumerate(lines):
+        if "$ANSIBLE_VAULT" in ln:
+            return "\n".join(lines[i:])
+    return text
+
+
+def _make_vault_scalar_from_text(text: str) -> Any:
+    """
+    Build a ruamel object representing a literal block scalar tagged with !vault
+    by parsing a tiny YAML snippet. This avoids depending on yaml_set_tag().
+    """
+    body = _vault_body(text)
+    indented = "  " + body.replace("\n", "\n  ")  # proper block scalar indentation
+    snippet = f"v: !vault |\n{indented}\n"
+    y = YAML(typ="rt")
+    return y.load(snippet)["v"]
+
+
+def to_vault_block(vault_handler: VaultHandler, value: Union[str, Any], label: str) -> Any:
+    """
+    Return a ruamel scalar tagged as !vault. If the input value is already
+    vault-encrypted (string contains $ANSIBLE_VAULT or is a !vault scalar), reuse/wrap.
+    Otherwise, encrypt plaintext via ansible-vault.
+    """
+    # Already a ruamel !vault scalar → reuse
+    if _is_ruamel_vault(value):
+        return value
+
+    # Already an encrypted string (may include '!vault |' or just the header)
+    if isinstance(value, str) and ("$ANSIBLE_VAULT" in value or "!vault" in value):
+        return _make_vault_scalar_from_text(value)
+
+    # Plaintext → encrypt now
+    snippet = vault_handler.encrypt_string(str(value), label)
+    return _make_vault_scalar_from_text(snippet)
+
+
+def parse_overrides(pairs: list[str]) -> Dict[str, str]:
+    """
+    Parse --set key=value pairs into a dict.
+    Supports both 'credentials.key=val' and 'key=val' (short) forms.
+    """
+    out: Dict[str, str] = {}
+    for pair in pairs:
+        k, v = pair.split("=", 1)
+        out[k.strip()] = v.strip()
+    return out
+
+
+# ---------- main ----------
+
+def main() -> int:
     parser = argparse.ArgumentParser(
-        description="Selectively vault credentials + become-password in your inventory."
+        description="Selectively add & vault NEW credentials in your inventory, preserving comments/formatting."
     )
-    parser.add_argument("--role-path", required=True, help="Path to your role")
-    parser.add_argument("--inventory-file", required=True, help="Host vars file to update")
-    parser.add_argument("--vault-password-file", required=True, help="Vault password file")
+    parser.add_argument(
+        "--role-path", required=True, help="Path to your role"
+    )
+    parser.add_argument(
+        "--inventory-file", required=True, help="Host vars file to update"
+    )
+    parser.add_argument(
+        "--vault-password-file", required=True, help="Vault password file"
+    )
     parser.add_argument(
-        "--set", nargs="*", default=[], help="Override values key.subkey=VALUE"
+        "--set", nargs="*", default=[],
+        help="Override values key[.subkey]=VALUE (applied to NEW keys; with --force also to existing)"
     )
     parser.add_argument(
         "-f", "--force", action="store_true",
-        help="Force overwrite without confirmation"
+        help="Allow overrides to replace existing values (will ask per key unless combined with --yes)"
+    )
+    parser.add_argument(
+        "-y", "--yes", action="store_true",
+        help="Non-interactive: assume 'yes' for all overwrite confirmations when --force is used"
     )
     args = parser.parse_args()
 
-    # Parse overrides
-    overrides = {
-        k.strip(): v.strip()
-        for pair in args.set for k, v in [pair.split("=", 1)]
-    }
+    overrides = parse_overrides(args.set)
 
-    # Initialize inventory manager
+    # Initialize inventory manager (provides schema + app_id + vault)
     manager = InventoryManager(
         role_path=Path(args.role_path),
         inventory_path=Path(args.inventory_file),
@@ -54,62 +151,90 @@ def main():
         overrides=overrides
     )
 
-    # Load existing credentials to preserve
-    existing_apps = manager.inventory.get("applications", {})
-    existing_creds = {}
-    if manager.app_id in existing_apps:
-        existing_creds = existing_apps[manager.app_id].get("credentials", {}).copy()
+    # 1) Load existing inventory with ruamel (round-trip)
+    yaml_rt = YAML(typ="rt")
+    yaml_rt.preserve_quotes = True
 
-    # Apply schema (may generate defaults)
-    updated_inventory = manager.apply_schema()
+    with open(args.inventory_file, "r", encoding="utf-8") as f:
+        data = yaml_rt.load(f)  # CommentedMap or None
+    if data is None:
+        data = CommentedMap()
 
-    # Restore existing database_password if present
-    apps = updated_inventory.setdefault("applications", {})
-    app_block = apps.setdefault(manager.app_id, {})
-    creds = app_block.setdefault("credentials", {})
-    if "database_password" in existing_creds:
-        creds["database_password"] = existing_creds["database_password"]
+    # 2) Get schema-applied structure (defaults etc.) for *non-destructive* merge
+    schema_inventory: Dict[str, Any] = manager.apply_schema()
 
-    # Store original plaintext values
-    original_plain = {key: str(val) for key, val in creds.items()}
+    # 3) Ensure structural path exists
+    apps = ensure_map(data, "applications")
+    app_block = ensure_map(apps, manager.app_id)
+    creds = ensure_map(app_block, "credentials")
 
-    for key, raw_val in list(creds.items()):
-        # Skip if already vaulted
-        if isinstance(raw_val, VaultScalar) or str(raw_val).lstrip().startswith("$ANSIBLE_VAULT"):
-            continue
+    # 4) Determine defaults we could add
+    schema_apps = schema_inventory.get("applications", {})
+    schema_app_block = schema_apps.get(manager.app_id, {})
+    schema_creds = schema_app_block.get("credentials", {}) if isinstance(schema_app_block, dict) else {}
+
+    # 5) Add ONLY missing credential keys
+    newly_added_keys = set()
+    for key, default_val in schema_creds.items():
+        if key in creds:
+            # existing → do not touch (preserve plaintext/vault/formatting/comments)
+            continue
 
-        # Determine plaintext
-        plain = original_plain.get(key, "")
-        if key in overrides and (args.force or ask_for_confirmation(key)):
-            plain = overrides[key]
+        # Value to use for the new key
+        # Priority: --set exact key → default from schema → empty string
+        ov = overrides.get(f"credentials.{key}", None)
+        if ov is None:
+            ov = overrides.get(key, None)
 
-        # Encrypt the plaintext
-        encrypted = manager.vault_handler.encrypt_string(plain, key)
-        lines = encrypted.splitlines()
-        indent = len(lines[1]) - len(lines[1].lstrip())
-        body = "\n".join(line[indent:] for line in lines[1:])
-        creds[key] = VaultScalar(body)
-
-    # Vault top-level become password if present
-    if "ansible_become_password" in updated_inventory:
-        val = str(updated_inventory["ansible_become_password"])
-        if val.lstrip().startswith("$ANSIBLE_VAULT"):
-            updated_inventory["ansible_become_password"] = VaultScalar(val)
+        if ov is not None:
+            value_for_new_key: Union[str, Any] = ov
         else:
-            snippet = manager.vault_handler.encrypt_string(
-                val, "ansible_become_password"
-            )
-            lines = snippet.splitlines()
-            indent = len(lines[1]) - len(lines[1].lstrip())
-            body = "\n".join(line[indent:] for line in lines[1:])
-            updated_inventory["ansible_become_password"] = VaultScalar(body)
+            if _is_vault_encrypted(default_val):
+                # Schema already provides a vault value → take it as-is
+                creds[key] = to_vault_block(manager.vault_handler, default_val, key)
+                newly_added_keys.add(key)
+                continue
+            value_for_new_key = "" if default_val is None else str(default_val)
 
-    # Write back to file
+        # Insert as !vault literal (encrypt if needed)
+        creds[key] = to_vault_block(manager.vault_handler, value_for_new_key, key)
+        newly_added_keys.add(key)
+
+    # 6) ansible_become_password: only add if missing;
+    #    never rewrite an existing one unless --force (+ confirm/--yes) and override provided.
+    if "ansible_become_password" not in data:
+        val = overrides.get("ansible_become_password", None)
+        if val is not None:
+            data["ansible_become_password"] = to_vault_block(
+                manager.vault_handler, val, "ansible_become_password"
+            )
+    else:
+        if args.force and "ansible_become_password" in overrides:
+            do_overwrite = args.yes or ask_for_confirmation("ansible_become_password")
+            if do_overwrite:
+                data["ansible_become_password"] = to_vault_block(
+                    manager.vault_handler, overrides["ansible_become_password"], "ansible_become_password"
+                )
+
+    # 7) Overrides for existing credential keys (only with --force)
+    if args.force:
+        for ov_key, ov_val in overrides.items():
+            # Accept both 'credentials.key' and bare 'key'
+            key = ov_key.split(".", 1)[1] if ov_key.startswith("credentials.") else ov_key
+            if key in creds:
+                # If we just added it in this run, don't ask again or rewrap
+                if key in newly_added_keys:
+                    continue
+                if args.yes or ask_for_confirmation(key):
+                    creds[key] = to_vault_block(manager.vault_handler, ov_val, key)
+
+    # 8) Write back with ruamel (preserve formatting & comments)
     with open(args.inventory_file, "w", encoding="utf-8") as f:
-        yaml.dump(updated_inventory, f, sort_keys=False, Dumper=SafeDumper)
+        yaml_rt.dump(data, f)
 
-    print(f"✅ Inventory selectively vaulted → {args.inventory_file}")
+    print(f"✅ Added new credentials without touching existing formatting/comments → {args.inventory_file}")
+    return 0
 
 
 if __name__ == "__main__":
-    main()
+    sys.exit(main())
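For reference, a minimal standalone sketch of the `!vault` wrapping trick that `_make_vault_scalar_from_text` relies on — parsing a tiny YAML snippet so ruamel builds the tagged literal block. The vault body below is a placeholder, not real ciphertext; only `ruamel.yaml` is assumed:

import io
from ruamel.yaml import YAML

def make_vault_scalar(body: str):
    # Indent the body two spaces so it parses as a literal block scalar.
    indented = "  " + body.replace("\n", "\n  ")
    return YAML(typ="rt").load(f"v: !vault |\n{indented}\n")["v"]

body = "$ANSIBLE_VAULT;1.1;AES256\n62356137..."  # placeholder, not real ciphertext
doc = {"credentials": {"database_password": make_vault_scalar(body)}}
buf = io.StringIO()
YAML(typ="rt").dump(doc, buf)
print(buf.getvalue())  # database_password is emitted as a '!vault |' literal block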
@@ -11,7 +11,7 @@ sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
 from module_utils.entity_name_utils import get_entity_name
 
 # Paths to the group-vars files
-PORTS_FILE = './group_vars/all/09_ports.yml'
+PORTS_FILE = './group_vars/all/10_ports.yml'
 NETWORKS_FILE = './group_vars/all/09_networks.yml'
 ROLE_TEMPLATE_DIR = './templates/roles/web-app'
 ROLES_DIR = './roles'
@@ -198,6 +198,7 @@ def main():
         "MODE_CLEANUP": args.cleanup,
         "MODE_LOGS": args.logs,
         "MODE_DEBUG": args.debug,
+        "MODE_ASSERT": not args.skip_validation,
         "host_type": args.host_type
     }
@@ -228,7 +228,7 @@ def parse_meta_dependencies(role_dir: str) -> List[str]:
 def sanitize_run_once_var(role_name: str) -> str:
     """
     Generate run_once variable name from role name.
-    Example: 'sys-srv-web-inj-logout' -> 'run_once_sys_srv_web_inj_logout'
+    Example: 'sys-front-inj-logout' -> 'run_once_sys_front_inj_logout'
     """
     return "run_once_" + role_name.replace("-", "_")
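Since the helper is pure string manipulation, it can be checked directly (assuming the function is in scope):

print(sanitize_run_once_var("sys-front-inj-logout"))
# -> run_once_sys_front_inj_logout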
@@ -15,7 +15,7 @@ Follow these guides to install and configure Infinito.Nexus:
 - **Networking & VPN** - Configure `WireGuard`, `OpenVPN`, and `Nginx Reverse Proxy`.
 
 ## Managing & Updating Infinito.Nexus 🔄
-- Regularly update services using `update-docker`, `update-pacman`, or `update-apt`.
+- Regularly update services using `update-pacman` or `update-apt`.
 - Monitor system health with `sys-ctl-hlth-btrfs`, `sys-ctl-hlth-webserver`, and `sys-ctl-hlth-docker-container`.
 - Automate system maintenance with `sys-lock`, `sys-ctl-cln-bkps`, and `sys-ctl-rpr-docker-hard`.
@@ -1,86 +0,0 @@
-from ansible.errors import AnsibleFilterError
-
-class FilterModule(object):
-    def filters(self):
-        return {'alias_domains_map': self.alias_domains_map}
-
-    def alias_domains_map(self, apps, PRIMARY_DOMAIN):
-        """
-        Build a map of application IDs to their alias domains.
-
-        - If no `domains` key → []
-        - If `domains` exists but is an empty dict → return the original cfg
-        - Explicit `aliases` are used (default appended if missing)
-        - If only `canonical` defined and it doesn't include default, default is added
-        - Invalid types raise AnsibleFilterError
-        """
-        def parse_entry(domains_cfg, key, app_id):
-            if key not in domains_cfg:
-                return None
-            entry = domains_cfg[key]
-            if isinstance(entry, dict):
-                values = list(entry.values())
-            elif isinstance(entry, list):
-                values = entry
-            else:
-                raise AnsibleFilterError(
-                    f"Unexpected type for 'domains.{key}' in application '{app_id}': {type(entry).__name__}"
-                )
-            for d in values:
-                if not isinstance(d, str) or not d.strip():
-                    raise AnsibleFilterError(
-                        f"Invalid domain entry in '{key}' for application '{app_id}': {d!r}"
-                    )
-            return values
-
-        def default_domain(app_id, primary):
-            return f"{app_id}.{primary}"
-
-        # 1) Precompute canonical domains per app (fallback to default)
-        canonical_map = {}
-        for app_id, cfg in apps.items():
-            domains_cfg = cfg.get('server', {}).get('domains', {})
-            entry = domains_cfg.get('canonical')
-            if entry is None:
-                canonical_map[app_id] = [default_domain(app_id, PRIMARY_DOMAIN)]
-            elif isinstance(entry, dict):
-                canonical_map[app_id] = list(entry.values())
-            elif isinstance(entry, list):
-                canonical_map[app_id] = list(entry)
-            else:
-                raise AnsibleFilterError(
-                    f"Unexpected type for 'server.domains.canonical' in application '{app_id}': {type(entry).__name__}"
-                )
-
-        # 2) Build alias list per app
-        result = {}
-        for app_id, cfg in apps.items():
-            domains_cfg = cfg.get('server', {}).get('domains')
-
-            # no domains key → no aliases
-            if domains_cfg is None:
-                result[app_id] = []
-                continue
-
-            # empty domains dict → return the original cfg
-            if isinstance(domains_cfg, dict) and not domains_cfg:
-                result[app_id] = cfg
-                continue
-
-            # otherwise, compute aliases
-            aliases = parse_entry(domains_cfg, 'aliases', app_id) or []
-            default = default_domain(app_id, PRIMARY_DOMAIN)
-            has_aliases = 'aliases' in domains_cfg
-            has_canon = 'canonical' in domains_cfg
-
-            if has_aliases:
-                if default not in aliases:
-                    aliases.append(default)
-            elif has_canon:
-                canon = canonical_map.get(app_id, [])
-                if default not in canon and default not in aliases:
-                    aliases.append(default)
-
-            result[app_id] = aliases
-
-        return result
@@ -1,10 +1,14 @@
 from ansible.errors import AnsibleFilterError
 import hashlib
 import base64
-import sys, os
+import sys
+import os
 
+# Ensure module_utils is importable when this filter runs from Ansible
 sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
 from module_utils.config_utils import get_app_conf
+from module_utils.get_url import get_url
+
 
 class FilterModule(object):
     """
@@ -16,10 +20,14 @@ class FilterModule(object):
             'build_csp_header': self.build_csp_header,
         }
 
+    # -------------------------------
+    # Helpers
+    # -------------------------------
+
     @staticmethod
     def is_feature_enabled(applications: dict, feature: str, application_id: str) -> bool:
         """
-        Return True if applications[application_id].features[feature] is truthy.
+        Returns True if applications[application_id].features[feature] is truthy.
         """
         return get_app_conf(
             applications,
@@ -31,6 +39,10 @@ class FilterModule(object):
 
     @staticmethod
     def get_csp_whitelist(applications, application_id, directive):
+        """
+        Returns a list of additional whitelist entries for a given directive.
+        Accepts both scalar and list in config; always returns a list.
+        """
         wl = get_app_conf(
             applications,
             application_id,
@@ -47,28 +59,37 @@ class FilterModule(object):
     @staticmethod
     def get_csp_flags(applications, application_id, directive):
         """
-        Dynamically extract all CSP flags for a given directive and return them as tokens,
-        e.g., "'unsafe-eval'", "'unsafe-inline'", etc.
+        Returns CSP flag tokens (e.g., "'unsafe-eval'", "'unsafe-inline'") for a directive,
+        merging sane defaults with app config.
+        Default: 'unsafe-inline' is enabled for style-src and style-src-elem.
         """
-        flags = get_app_conf(
+        # Defaults that apply to all apps
+        default_flags = {}
+        if directive in ('style-src', 'style-src-elem'):
+            default_flags = {'unsafe-inline': True}
+
+        configured = get_app_conf(
             applications,
             application_id,
             'server.csp.flags.' + directive,
             False,
             {}
         )
-        tokens = []
-
-        for flag_name, enabled in flags.items():
+
+        # Merge defaults with configured flags (configured overrides defaults)
+        merged = {**default_flags, **configured}
+
+        tokens = []
+        for flag_name, enabled in merged.items():
             if enabled:
                 tokens.append(f"'{flag_name}'")
 
         return tokens
 
     @staticmethod
     def get_csp_inline_content(applications, application_id, directive):
         """
-        Return inline script/style snippets to hash for a given CSP directive.
+        Returns inline script/style snippets to hash for a given directive.
+        Accepts both scalar and list in config; always returns a list.
         """
         snippets = get_app_conf(
             applications,
@@ -86,7 +107,7 @@ class FilterModule(object):
     @staticmethod
     def get_csp_hash(content):
         """
-        Compute the SHA256 hash of the given inline content and return
+        Computes the SHA256 hash of the given inline content and returns
         a CSP token like "'sha256-<base64>'".
         """
         try:
@@ -96,6 +117,10 @@ class FilterModule(object):
         except Exception as exc:
             raise AnsibleFilterError(f"get_csp_hash failed: {exc}")
 
+    # -------------------------------
+    # Main builder
+    # -------------------------------
+
     def build_csp_header(
         self,
         applications,
@@ -105,82 +130,80 @@ class FilterModule(object):
         matomo_feature_name='matomo'
     ):
         """
-        Build the Content-Security-Policy header value dynamically based on application settings.
-        Inline hashes are read from applications[application_id].csp.hashes
+        Builds the Content-Security-Policy header value dynamically based on application settings.
+        - Flags (e.g., 'unsafe-eval', 'unsafe-inline') are read from server.csp.flags.<directive>,
+          with sane defaults applied in get_csp_flags (always 'unsafe-inline' for style-src and style-src-elem).
+        - Inline hashes are read from server.csp.hashes.<directive>.
+        - Whitelists are read from server.csp.whitelist.<directive>.
+        - Inline hashes are added only if the final tokens do NOT include 'unsafe-inline'.
         """
         try:
             directives = [
-                'default-src',
-                'connect-src',
-                'frame-ancestors',
-                'frame-src',
-                'script-src',
-                'script-src-elem',
-                'style-src',
-                'font-src',
-                'worker-src',
-                'manifest-src',
-                'media-src',
+                'default-src',       # Fallback source list for content types not explicitly listed
+                'connect-src',       # Allowed URLs for XHR, WebSockets, EventSource, fetch()
+                'frame-ancestors',   # Who may embed this page
+                'frame-src',         # Sources for nested browsing contexts (e.g., <iframe>)
+                'script-src',        # Sources for script execution
+                'script-src-elem',   # Sources for <script> elements
+                'style-src',         # Sources for inline styles and <style>/<link> elements
+                'style-src-elem',    # Sources for <style> and <link rel="stylesheet">
+                'font-src',          # Sources for fonts
+                'worker-src',        # Sources for workers
+                'manifest-src',      # Sources for web app manifests
+                'media-src',         # Sources for audio and video
             ]
 
             parts = []
 
             for directive in directives:
                 tokens = ["'self'"]
 
-                # unsafe-eval / unsafe-inline flags
+                # 1) Load flags (includes defaults from get_csp_flags)
                 flags = self.get_csp_flags(applications, application_id, directive)
                 tokens += flags
 
-                # Matomo integration
-                if (
-                    self.is_feature_enabled(applications, matomo_feature_name, application_id)
-                    and directive in ['script-src-elem', 'connect-src']
-                ):
-                    matomo_domain = domains.get('web-app-matomo')[0]
-                    if matomo_domain:
-                        tokens.append(f"{web_protocol}://{matomo_domain}")
+                # 2) Allow fetching from internal CDN by default for selected directives
+                if directive in ['script-src-elem', 'connect-src', 'style-src-elem']:
+                    tokens.append(get_url(domains, 'web-svc-cdn', web_protocol))
 
-                # ReCaptcha integration: allow loading scripts from Google if feature enabled
+                # 3) Matomo integration if feature is enabled
+                if directive in ['script-src-elem', 'connect-src']:
+                    if self.is_feature_enabled(applications, matomo_feature_name, application_id):
+                        tokens.append(get_url(domains, 'web-app-matomo', web_protocol))
+
+                # 4) ReCaptcha integration (scripts + frames) if feature is enabled
                 if self.is_feature_enabled(applications, 'recaptcha', application_id):
-                    if directive in ['script-src-elem',"frame-src"]:
+                    if directive in ['script-src-elem', 'frame-src']:
                         tokens.append('https://www.gstatic.com')
                         tokens.append('https://www.google.com')
 
-                # Allow the loading of js from the cdn
-                if directive == 'script-src-elem':
-                    if self.is_feature_enabled(applications, 'logout', application_id) or self.is_feature_enabled(applications, 'desktop', application_id):
-                        domain = domains.get('web-svc-cdn')[0]
-                        tokens.append(f"{domain}")
-
+                # 5) Frame ancestors handling (desktop + logout support)
                 if directive == 'frame-ancestors':
-                    # Enable loading via ancestors
                     if self.is_feature_enabled(applications, 'desktop', application_id):
-                        domain = domains.get('web-app-port-ui')[0]
-                        sld_tld = ".".join(domain.split(".")[-2:])  # yields "example.com"
-                        tokens.append(f"{sld_tld}")                 # yields "*.example.com"
+                        # Allow being embedded by the desktop app domain (and potentially its parent)
+                        domain = domains.get('web-app-desktop')[0]
+                        sld_tld = ".".join(domain.split(".")[-2:])  # e.g., example.com
+                        tokens.append(f"{sld_tld}")
                     if self.is_feature_enabled(applications, 'logout', application_id):
+                        # Allow embedding via logout proxy and Keycloak app
+                        tokens.append(get_url(domains, 'web-svc-logout', web_protocol))
+                        tokens.append(get_url(domains, 'web-app-keycloak', web_protocol))
 
-                        # Allow logout via infinito logout proxy
-                        domain = domains.get('web-svc-logout')[0]
-                        tokens.append(f"{domain}")
-
-                        # Allow logout via keycloak app
-                        domain = domains.get('web-app-keycloak')[0]
-                        tokens.append(f"{domain}")
-
-                # whitelist
+                # 6) Custom whitelist entries
                 tokens += self.get_csp_whitelist(applications, application_id, directive)
 
-                # only add hashes if 'unsafe-inline' is NOT in flags
-                if "'unsafe-inline'" not in flags:
+                # 7) Add inline content hashes ONLY if final tokens do NOT include 'unsafe-inline'
+                #    (Check tokens, not flags, to include defaults and later modifications.)
+                if "'unsafe-inline'" not in tokens:
                     for snippet in self.get_csp_inline_content(applications, application_id, directive):
                         tokens.append(self.get_csp_hash(snippet))
 
                 # Append directive
                 parts.append(f"{directive} {' '.join(tokens)};")
 
-            # static img-src
+            # 8) Static img-src directive (kept permissive for data/blob and any host)
            parts.append("img-src * data: blob:;")

            return ' '.join(parts)

        except Exception as exc:
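A standalone sketch of the hash-token contract behind `get_csp_hash`, using only the standard library; per the diff above, such tokens are appended only when 'unsafe-inline' is absent from the directive's final token list:

import base64
import hashlib

def csp_hash(content: str) -> str:
    # SHA256 over the exact inline snippet, base64-encoded, quoted as a CSP source token.
    digest = hashlib.sha256(content.encode("utf-8")).digest()
    return f"'sha256-{base64.b64encode(digest).decode()}'"

print(csp_hash("console.log('hi');"))  # e.g. 'sha256-3kDO...=' (value depends on the snippet)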
@@ -1,49 +0,0 @@
-import os
-import re
-import yaml
-from ansible.errors import AnsibleFilterError
-
-
-def get_application_id(role_name):
-    """
-    Jinja2/Ansible filter: given a role name, load its vars/main.yml and return the application_id value.
-    """
-    # Construct path: assumes current working directory is project root
-    vars_file = os.path.join(os.getcwd(), 'roles', role_name, 'vars', 'main.yml')
-
-    if not os.path.isfile(vars_file):
-        raise AnsibleFilterError(f"Vars file not found for role '{role_name}': {vars_file}")
-
-    try:
-        # Read entire file content to avoid lazy stream issues
-        with open(vars_file, 'r', encoding='utf-8') as f:
-            content = f.read()
-        data = yaml.safe_load(content)
-    except Exception as e:
-        raise AnsibleFilterError(f"Error reading YAML from {vars_file}: {e}")
-
-    # Ensure parsed data is a mapping
-    if not isinstance(data, dict):
-        raise AnsibleFilterError(
-            f"Error reading YAML from {vars_file}: expected mapping, got {type(data).__name__}"
-        )
-
-    # Detect malformed YAML: no valid identifier-like keys
-    valid_key_pattern = re.compile(r'^[A-Za-z_][A-Za-z0-9_]*$')
-    if data and not any(valid_key_pattern.match(k) for k in data.keys()):
-        raise AnsibleFilterError(f"Error reading YAML from {vars_file}: invalid top-level keys")
-
-    if 'application_id' not in data:
-        raise AnsibleFilterError(f"Key 'application_id' not found in {vars_file}")
-
-    return data['application_id']
-
-
-class FilterModule(object):
-    """
-    Ansible filter plugin entry point.
-    """
-    def filters(self):
-        return {
-            'get_application_id': get_application_id,
-        }
@@ -1,122 +0,0 @@
-import os
-import yaml
-import re
-from ansible.errors import AnsibleFilterError
-
-# in-memory cache: application_id → (parsed_yaml, is_nested)
-_cfg_cache = {}
-
-def load_configuration(application_id, key):
-    if not isinstance(key, str):
-        raise AnsibleFilterError("Key must be a dotted-string, e.g. 'features.matomo'")
-
-    # locate roles/
-    here = os.path.dirname(__file__)
-    root = os.path.abspath(os.path.join(here, '..'))
-    roles_dir = os.path.join(root, 'roles')
-    if not os.path.isdir(roles_dir):
-        raise AnsibleFilterError(f"Roles directory not found at {roles_dir}")
-
-    # first time? load & cache
-    if application_id not in _cfg_cache:
-        config_path = None
-
-        # 1) primary: vars/main.yml declares it
-        for role in os.listdir(roles_dir):
-            mv = os.path.join(roles_dir, role, 'vars', 'main.yml')
-            if os.path.exists(mv):
-                try:
-                    md = yaml.safe_load(open(mv)) or {}
-                except Exception:
-                    md = {}
-                if md.get('application_id') == application_id:
-                    cf = os.path.join(roles_dir, role, "config", "main.yml")
-                    if not os.path.exists(cf):
-                        raise AnsibleFilterError(
-                            f"Role '{role}' declares '{application_id}' but missing config/main.yml"
-                        )
-                    config_path = cf
-                    break
-
-        # 2) fallback nested
-        if config_path is None:
-            for role in os.listdir(roles_dir):
-                cf = os.path.join(roles_dir, role, "config", "main.yml")
-                if not os.path.exists(cf):
-                    continue
-                try:
-                    dd = yaml.safe_load(open(cf)) or {}
-                except Exception:
-                    dd = {}
-                if isinstance(dd, dict) and application_id in dd:
-                    config_path = cf
-                    break
-
-        # 3) fallback flat
-        if config_path is None:
-            for role in os.listdir(roles_dir):
-                cf = os.path.join(roles_dir, role, "config", "main.yml")
-                if not os.path.exists(cf):
-                    continue
-                try:
-                    dd = yaml.safe_load(open(cf)) or {}
-                except Exception:
-                    dd = {}
-                # flat style: dict with all non-dict values
-                if isinstance(dd, dict) and not any(isinstance(v, dict) for v in dd.values()):
-                    config_path = cf
-                    break
-
-        if config_path is None:
-            return None
-
-        # parse once
-        try:
-            parsed = yaml.safe_load(open(config_path)) or {}
-        except Exception as e:
-            raise AnsibleFilterError(f"Error loading config/main.yml at {config_path}: {e}")
-
-        # detect nested vs flat
-        is_nested = isinstance(parsed, dict) and (application_id in parsed)
-        _cfg_cache[application_id] = (parsed, is_nested)
-
-    parsed, is_nested = _cfg_cache[application_id]
-
-    # pick base entry
-    entry = parsed[application_id] if is_nested else parsed
-
-    # resolve dotted key
-    key_parts = key.split('.')
-    for part in key_parts:
-        # Check if part has an index (e.g., domains.canonical[0])
-        match = re.match(r'([^\[]+)\[([0-9]+)\]', part)
-        if match:
-            part, index = match.groups()
-            index = int(index)
-            if isinstance(entry, dict) and part in entry:
-                entry = entry[part]
-                # Check if entry is a list and access the index
-                if isinstance(entry, list) and 0 <= index < len(entry):
-                    entry = entry[index]
-                else:
-                    raise AnsibleFilterError(
-                        f"Index '{index}' out of range for key '{part}' in application '{application_id}'"
-                    )
-            else:
-                raise AnsibleFilterError(
-                    f"Key '{part}' not found under application '{application_id}'"
-                )
-        else:
-            if isinstance(entry, dict) and part in entry:
-                entry = entry[part]
-            else:
-                raise AnsibleFilterError(
-                    f"Key '{part}' not found under application '{application_id}'"
-                )
-
-    return entry
-
-
-class FilterModule(object):
-    def filters(self):
-        return {'load_configuration': load_configuration}
@@ -1,55 +0,0 @@
-from jinja2 import Undefined
-
-
-def safe_placeholders(template: str, mapping: dict = None) -> str:
-    """
-    Format a template like "{url}/logo.png".
-    If mapping is provided (not None) and ANY placeholder is missing or maps to None/empty string, the function will raise KeyError.
-    If mapping is None, missing placeholders or invalid templates return empty string.
-    Numerical zero or False are considered valid values.
-    Any other formatting errors return an empty string.
-    """
-    # Non-string templates yield empty
-    if not isinstance(template, str):
-        return ''
-
-    class SafeDict(dict):
-        def __getitem__(self, key):
-            val = super().get(key, None)
-            # Treat None or empty string as missing
-            if val is None or (isinstance(val, str) and val == ''):
-                raise KeyError(key)
-            return val
-        def __missing__(self, key):
-            raise KeyError(key)
-
-    silent = mapping is None
-    data = mapping or {}
-    try:
-        return template.format_map(SafeDict(data))
-    except KeyError:
-        if silent:
-            return ''
-        raise
-    except Exception:
-        return ''
-
-def safe_var(value):
-    """
-    Ansible filter: returns the value unchanged unless it's Undefined or None,
-    in which case returns an empty string.
-    Catches all exceptions and yields ''.
-    """
-    try:
-        if isinstance(value, Undefined) or value is None:
-            return ''
-        return value
-    except Exception:
-        return ''
-
-class FilterModule(object):
-    def filters(self):
-        return {
-            'safe_var': safe_var,
-            'safe_placeholders': safe_placeholders,
-        }
@@ -1,28 +0,0 @@
-"""
-Ansible filter plugin that joins a base string and a tail path safely.
-If the base is falsy (None, empty, etc.), returns an empty string.
-"""
-
-def safe_join(base, tail):
-    """
-    Safely join base and tail into a path or URL.
-
-    - base: the base string. If falsy, returns ''.
-    - tail: the string to append. Leading/trailing slashes are handled.
-    - On any exception, returns ''.
-    """
-    try:
-        if not base:
-            return ''
-        base_str = str(base).rstrip('/')
-        tail_str = str(tail).lstrip('/')
-        return f"{base_str}/{tail_str}"
-    except Exception:
-        return ''
-
-
-class FilterModule(object):
-    def filters(self):
-        return {
-            'safe_join': safe_join,
-        }
filter_plugins/timeout_start_sec_for_domains.py (new file, 67 lines)
@@ -0,0 +1,67 @@
+# filter_plugins/timeout_start_sec_for_domains.py (only the core changed)
+from ansible.errors import AnsibleFilterError
+
+class FilterModule(object):
+    def filters(self):
+        return {
+            "timeout_start_sec_for_domains": self.timeout_start_sec_for_domains,
+        }
+
+    def timeout_start_sec_for_domains(
+        self,
+        domains_dict,
+        include_www=True,
+        per_domain_seconds=25,
+        overhead_seconds=30,
+        min_seconds=120,
+        max_seconds=3600,
+    ):
+        """
+        Args:
+            domains_dict (dict | list[str] | str): Either the domain mapping dict
+                (values can be str | list[str] | dict[str,str]) or an already
+                flattened list of domains, or a single domain string.
+            include_www (bool): If true, add 'www.<domain>' for non-www entries.
+            ...
+        """
+        try:
+            # Local flattener for dict inputs (like your generate_all_domains source)
+            def _flatten_from_dict(domains_map):
+                flat = []
+                for v in (domains_map or {}).values():
+                    if isinstance(v, str):
+                        flat.append(v)
+                    elif isinstance(v, list):
+                        flat.extend(v)
+                    elif isinstance(v, dict):
+                        flat.extend(v.values())
+                return flat
+
+            # Accept dict | list | str
+            if isinstance(domains_dict, dict):
+                flat = _flatten_from_dict(domains_dict)
+            elif isinstance(domains_dict, list):
+                flat = list(domains_dict)
+            elif isinstance(domains_dict, str):
+                flat = [domains_dict]
+            else:
+                raise AnsibleFilterError(
+                    "Expected 'domains_dict' to be dict | list | str."
+                )
+
+            if include_www:
+                base_unique = sorted(set(flat))
+                www_variants = [f"www.{d}" for d in base_unique if not str(d).lower().startswith("www.")]
+                flat.extend(www_variants)
+
+            unique_domains = sorted(set(flat))
+            count = len(unique_domains)
+
+            raw = overhead_seconds + per_domain_seconds * count
+            clamped = max(min_seconds, min(max_seconds, int(raw)))
+            return clamped
+
+        except AnsibleFilterError:
+            raise
+        except Exception as exc:
+            raise AnsibleFilterError(f"timeout_start_sec_for_domains failed: {exc}")
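A quick sanity check of the timeout formula, mirroring the filter's flatten → add-www → dedupe → clamp steps (the domain names are illustrative):

domains = {
    "web-app-nextcloud": "cloud.example.com",
    "web-app-mastodon": ["social.example.com", "mstdn.example.com"],
}
flat = []
for v in domains.values():
    flat.extend([v] if isinstance(v, str) else v)
flat += [f"www.{d}" for d in sorted(set(flat)) if not d.startswith("www.")]
count = len(set(flat))           # 6 unique domains
raw = 30 + 25 * count            # overhead_seconds + per_domain_seconds * count
print(max(120, min(3600, raw)))  # -> 180, inside the [120, 3600] clamp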
filter_plugins/url_join.py (new file, 146 lines)
@@ -0,0 +1,146 @@
+"""
+Ansible filter plugin that safely joins URL components from a list.
+- Requires a valid '<scheme>://' in the first element (any RFC-3986-ish scheme)
+- Preserves the double slash after the scheme, collapses other duplicate slashes
+- Supports query parts introduced by elements starting with '?' or '&'
+  * first query element uses '?', subsequent use '&' (regardless of given prefix)
+  * each query element must be exactly one 'key=value' pair
+  * query elements may only appear after path elements; once query starts, no more path parts
+- Raises specific AnsibleFilterError messages for common misuse
+"""
+
+import re
+from ansible.errors import AnsibleFilterError
+
+_SCHEME_RE = re.compile(r'^([a-zA-Z][a-zA-Z0-9+.\-]*://)(.*)$')
+_QUERY_PAIR_RE = re.compile(r'^[^&=?#]+=[^&?#]*$')  # key=value (no '&', no extra '?' or '#')
+
+
+def _to_str_or_error(obj, index):
+    """Cast to str, raising a specific AnsibleFilterError with index context."""
+    try:
+        return str(obj)
+    except Exception as e:
+        raise AnsibleFilterError(
+            f"url_join: unable to convert part at index {index} to string: {e}"
+        )
+
+
+def url_join(parts):
+    """
+    Join a list of URL parts, URL-aware (scheme, path, query).
+
+    Args:
+        parts (list|tuple): URL segments. First element MUST include '<scheme>://'.
+                            Path elements are plain strings.
+                            Query elements must start with '?' or '&' and contain exactly one 'key=value'.
+
+    Returns:
+        str: Joined URL.
+
+    Raises:
+        AnsibleFilterError: with specific, descriptive messages.
+    """
+    # --- basic input validation ---
+    if parts is None:
+        raise AnsibleFilterError("url_join: parts must be a non-empty list; got None")
+    if not isinstance(parts, (list, tuple)):
+        raise AnsibleFilterError(
+            f"url_join: parts must be a list/tuple; got {type(parts).__name__}"
+        )
+    if len(parts) == 0:
+        raise AnsibleFilterError("url_join: parts must be a non-empty list")
+
+    # --- first element must carry a scheme ---
+    first_raw = parts[0]
+    if first_raw is None:
+        raise AnsibleFilterError(
+            "url_join: first element must include a scheme like 'https://'; got None"
+        )
+
+    first_str = _to_str_or_error(first_raw, 0)
+    m = _SCHEME_RE.match(first_str)
+    if not m:
+        raise AnsibleFilterError(
+            "url_join: first element must start with '<scheme>://', e.g. 'https://example.com'; "
+            f"got '{first_str}'"
+        )
+
+    scheme = m.group(1)                    # e.g., 'https://', 'ftp://', 'myapp+v1://'
+    after_scheme = m.group(2).lstrip('/')  # strip only leading slashes right after scheme
+
+    # --- iterate parts: collect path parts until first query part; then only query parts allowed ---
+    path_parts = []
+    query_pairs = []
+    in_query = False
+
+    for i, p in enumerate(parts):
+        if p is None:
+            # skip None silently (consistent with path_join-ish behavior)
+            continue
+
+        s = _to_str_or_error(p, i)
+
+        # disallow additional scheme in later parts
+        if i > 0 and "://" in s:
+            raise AnsibleFilterError(
+                f"url_join: only the first element may contain a scheme; part at index {i} "
+                f"looks like a URL with scheme ('{s}')."
+            )
+
+        # first element: replace with remainder after scheme and continue
+        if i == 0:
+            s = after_scheme
+
+        # check if this is a query element (starts with ? or &)
+        if s.startswith('?') or s.startswith('&'):
+            in_query = True
+            raw_pair = s[1:]  # strip the leading ? or &
+            if raw_pair == '':
+                raise AnsibleFilterError(
+                    f"url_join: query element at index {i} is empty; expected '?key=value' or '&key=value'"
+                )
+            # Disallow multiple pairs in a single element; enforce exactly one key=value
+            if '&' in raw_pair:
+                raise AnsibleFilterError(
+                    f"url_join: query element at index {i} must contain exactly one 'key=value' pair "
+                    f"without '&'; got '{s}'"
+                )
+            if not _QUERY_PAIR_RE.match(raw_pair):
+                raise AnsibleFilterError(
+                    f"url_join: query element at index {i} must match 'key=value' (no extra '?', '&', '#'); got '{s}'"
+                )
+            query_pairs.append(raw_pair)
+        else:
+            # non-query element
+            if in_query:
+                # once query started, no more path parts allowed
+                raise AnsibleFilterError(
+                    f"url_join: path element found at index {i} after query parameters started; "
+                    f"query parts must come last"
+                )
+            # normal path part: strip slashes to avoid duplicate '/'
+            path_parts.append(s.strip('/'))
+
+    # normalize path: remove empty chunks
+    path_parts = [p for p in path_parts if p != '']
+
+    # --- build result ---
+    # path portion
+    if path_parts:
+        joined_path = "/".join(path_parts)
+        base = scheme + joined_path
+    else:
+        # no path beyond scheme
+        base = scheme
+
+    # query portion
+    if query_pairs:
+        base = base + "?" + "&".join(query_pairs)
+
+    return base
+
+
+class FilterModule(object):
+    def filters(self):
+        return {
+            'url_join': url_join,
+        }
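Because `url_join` is a plain function, its contract is easy to exercise directly; the URL is illustrative:

print(url_join(["https://example.com/", "/api", "v1", "?page=2", "&sort=asc"]))
# -> https://example.com/api/v1?page=2&sort=asc

# Misuse fails loudly, e.g. a missing scheme in the first element:
# url_join(["example.com", "api"])  # AnsibleFilterError: first element must start with '<scheme>://'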
filter_plugins/volume_path.py (new file, 21 lines)
@@ -0,0 +1,21 @@
+from ansible.errors import AnsibleFilterError
+
+def docker_volume_path(volume_name: str) -> str:
+    """
+    Returns the absolute filesystem path of a Docker volume.
+
+    Example:
+        "akaunting_data" -> "/var/lib/docker/volumes/akaunting_data/_data/"
+    """
+    if not volume_name or not isinstance(volume_name, str):
+        raise AnsibleFilterError(f"Invalid volume name: {volume_name}")
+
+    return f"/var/lib/docker/volumes/{volume_name}/_data/"
+
+class FilterModule(object):
+    """Docker volume path filters."""
+
+    def filters(self):
+        return {
+            "docker_volume_path": docker_volume_path,
+        }
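Likewise for `docker_volume_path`, which is plain string formatting:

print(docker_volume_path("akaunting_data"))
# -> /var/lib/docker/volumes/akaunting_data/_data/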
@@ -26,6 +26,9 @@ HOST_DECIMAL_MARK: ","
 WEB_PROTOCOL: "https" # Web protocol type. Use https or http. If you run locally you need to change it to http
 WEB_PORT: "{{ 443 if WEB_PROTOCOL == 'https' else 80 }}" # Default port web applications will listen to
 
+# Websocket
+WEBSOCKET_PROTOCOL: "{{ 'wss' if WEB_PROTOCOL == 'https' else 'ws' }}"
+
 # Domain
 PRIMARY_DOMAIN: "localhost" # Primary Domain of the server
@@ -3,8 +3,8 @@
 # The following modes can be combined with each other
 MODE_TEST: false # Executes test routines instead of productive routines
 MODE_UPDATE: true # Executes updates
-MODE_BACKUP: true # Activates the backup before the update procedure
-MODE_CLEANUP: true # Cleanup unused files and configurations
 MODE_DEBUG: false # This enables debugging in ansible and in the apps. You SHOULD NOT enable this on production servers
 MODE_RESET: false # Cleans up all Infinito.Nexus files. It's necessary to run the whole playbook and not partial roles when using this function.
-MODE_ASSERT: false # Executes validation tasks during the run.
+MODE_BACKUP: "{{ MODE_UPDATE }}" # Activates the backup before the update procedure
+MODE_CLEANUP: "{{ MODE_DEBUG }}" # Cleanup unused files and configurations
+MODE_ASSERT: "{{ MODE_DEBUG }}" # Executes validation tasks during the run.
@@ -6,11 +6,12 @@ SYS_SERVICE_SUFFIX: ".{{ SOFTWARE_NAME | lower }}.service"
 
 ## Names
 SYS_SERVICE_CLEANUP_BACKUPS_FAILED: "{{ 'sys-ctl-cln-faild-bkps' | get_service_name(SOFTWARE_NAME) }}"
+SYS_SERVICE_CLEANUP_ANONYMOUS_VOLUMES: "{{ 'sys-ctl-cln-anon-volumes' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_OPTIMIZE_DRIVE: "{{ 'svc-opt-ssd-hdd' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_BACKUP_RMT_2_LOC: "{{ 'svc-bkp-rmt-2-loc' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_BACKUP_DOCKER_2_LOC: "{{ 'sys-ctl-bkp-docker-2-loc' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_REPAIR_DOCKER_SOFT: "{{ 'sys-ctl-rpr-docker-soft' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_REPAIR_DOCKER_HARD: "{{ 'sys-ctl-rpr-docker-hard' | get_service_name(SOFTWARE_NAME) }}"
-SYS_SERVICE_UPDATE_DOCKER: "{{ 'update-docker' | get_service_name(SOFTWARE_NAME) }}"
 
 ## On Failure
 SYS_SERVICE_ON_FAILURE_COMPOSE: "{{ ('sys-ctl-alm-compose@') | get_service_name(SOFTWARE_NAME, False) }}%n.service"
@@ -18,32 +19,33 @@ SYS_SERVICE_ON_FAILURE_COMPOSE: "{{ ('sys-ctl-alm-compose@') | get_service_name(SOFTWARE_NAME, False) }}%n.service"
 ## Groups
 SYS_SERVICE_GROUP_BACKUPS: >
   {{ (('sys-ctl-bkp-' | get_category_entries) + ('svc-bkp-' | get_category_entries))
-      | map('regex_replace', '$', SYS_SERVICE_SUFFIX) | list }}
+      | map('regex_replace', '$', SYS_SERVICE_SUFFIX) | list | sort }}
 
 SYS_SERVICE_GROUP_CLEANUP: >
   {{ ('sys-ctl-cln-' | get_category_entries)
-      | map('regex_replace', '$', SYS_SERVICE_SUFFIX) | list }}
+      | map('regex_replace', '$', SYS_SERVICE_SUFFIX) | list | sort }}
 
 SYS_SERVICE_GROUP_REPAIR: >
   {{ ('sys-ctl-rpr-' | get_category_entries)
-      | map('regex_replace', '$', SYS_SERVICE_SUFFIX) | list }}
+      | map('regex_replace', '$', SYS_SERVICE_SUFFIX) | list | sort }}
 
 SYS_SERVICE_GROUP_OPTIMIZATION: >
   {{ ('svc-opt-' | get_category_entries)
-      | map('regex_replace', '$', SYS_SERVICE_SUFFIX) | list }}
+      | map('regex_replace', '$', SYS_SERVICE_SUFFIX) | list | sort }}
 
 SYS_SERVICE_GROUP_MAINTANANCE: >
   {{ ('svc-mtn-' | get_category_entries)
-      | map('regex_replace', '$', SYS_SERVICE_SUFFIX) | list }}
+      | map('regex_replace', '$', SYS_SERVICE_SUFFIX) | list | sort }}
 
 ## Collection of services to manipulate the system
 SYS_SERVICE_GROUP_MANIPULATION: >
   {{
     (
       SYS_SERVICE_GROUP_BACKUPS +
      SYS_SERVICE_GROUP_CLEANUP +
      SYS_SERVICE_GROUP_REPAIR +
      SYS_SERVICE_GROUP_OPTIMIZATION +
-      SYS_SERVICE_GROUP_MAINTANANCE +
-      [ SYS_SERVICE_UPDATE_DOCKER ]
+      SYS_SERVICE_GROUP_MAINTANANCE
     ) | sort
   }}
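The Jinja pipeline above (`map('regex_replace', '$', SYS_SERVICE_SUFFIX) | list | sort`) appends the service suffix to every category entry and sorts for a stable ordering. A rough Python equivalent — the entries and suffix value are hypothetical:

entries = ["sys-ctl-cln-faild-bkps", "sys-ctl-cln-anon-volumes"]  # hypothetical category entries
suffix = ".infinito.service"                                      # hypothetical suffix value
print(sorted(e + suffix for e in entries))
# ['sys-ctl-cln-anon-volumes.infinito.service', 'sys-ctl-cln-faild-bkps.infinito.service']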
@@ -2,7 +2,7 @@
 # Service Timers
 
 ## Meta
-SYS_TIMER_ALL_ENABLED: "{{ not MODE_DEBUG }}" # Runtime Variables for Process Control - Activates all timers, independent of whether the handlers have been triggered
+SYS_TIMER_ALL_ENABLED: "{{ MODE_DEBUG }}" # Runtime Variables for Process Control - Activates all timers, independent of whether the handlers have been triggered
 
 ## Server Tact Variables
 
@@ -10,12 +10,12 @@ HOURS_SERVER_AWAKE: "0..23" # Hours in which the server is "awake" (10
 RANDOMIZED_DELAY_SEC: "5min" # Random delay for systemd timers to avoid peak loads.
 
 ## Timeouts for all services
+SYS_TIMEOUT_DOCKER_RPR_HARD: "10min"
+SYS_TIMEOUT_DOCKER_RPR_SOFT: "{{ SYS_TIMEOUT_DOCKER_RPR_HARD }}"
 SYS_TIMEOUT_CLEANUP_SERVICES: "15min"
-SYS_TIMEOUT_STORAGE_OPTIMIZER: "10min"
-SYS_TIMEOUT_BACKUP_SERVICES: "1h"
-SYS_TIMEOUT_HEAL_DOCKER: "30min"
-SYS_TIMEOUT_UPDATE_DOCKER: "2min"
-SYS_TIMEOUT_RESTART_DOCKER: "{{ SYS_TIMEOUT_UPDATE_DOCKER }}"
+SYS_TIMEOUT_DOCKER_UPDATE: "20min"
+SYS_TIMEOUT_STORAGE_OPTIMIZER: "{{ SYS_TIMEOUT_DOCKER_UPDATE }}"
+SYS_TIMEOUT_BACKUP_SERVICES: "60min"
 
 ## On Calendar
 
@@ -37,7 +37,6 @@ SYS_SCHEDULE_CLEANUP_FAILED_BACKUPS: "*-*-* 12:00:00"
 
 ### Schedule for repair services
 SYS_SCHEDULE_REPAIR_BTRFS_AUTO_BALANCER: "Sat *-*-01..07 00:00:00" # Execute btrfs auto balancer every first Saturday of a month
 SYS_SCHEDULE_REPAIR_DOCKER_SOFT: "*-*-* {{ HOURS_SERVER_AWAKE }}:30:00" # Heal unhealthy docker instances once per hour
-SYS_SCHEDULE_REPAIR_DOCKER_HARD: "Sun *-*-* 08:00:00" # Restart docker instances every Sunday at 8:00 AM
 
 ### Schedule for backup tasks
@@ -10,7 +10,7 @@ defaults_networks:
     # /28 Networks, 14 Usable IP Addresses
     web-app-akaunting:
      subnet: 192.168.101.0/28
-    web-app-attendize:
+    web-app-confluence:
      subnet: 192.168.101.16/28
     web-app-baserow:
      subnet: 192.168.101.32/28
@@ -34,8 +34,8 @@ defaults_networks:
      subnet: 192.168.101.176/28
     web-app-listmonk:
      subnet: 192.168.101.192/28
-    # Free:
-    #  subnet: 192.168.101.208/28
+    web-app-jira:
+      subnet: 192.168.101.208/28
     web-app-matomo:
      subnet: 192.168.101.224/28
     web-app-mastodon:
@@ -48,7 +48,7 @@ defaults_networks:
      subnet: 192.168.102.16/28
     web-app-moodle:
      subnet: 192.168.102.32/28
-    web-app-mybb:
+    web-app-bookwyrm:
      subnet: 192.168.102.48/28
     web-app-nextcloud:
      subnet: 192.168.102.64/28
@@ -96,6 +96,12 @@ defaults_networks:
      subnet: 192.168.103.160/28
     web-svc-logout:
      subnet: 192.168.103.176/28
+    web-app-chess:
+      subnet: 192.168.103.192/28
+    web-app-magento:
+      subnet: 192.168.103.208/28
+    web-app-bridgy-fed:
+      subnet: 192.168.103.224/28
 
     # /24 Networks / 254 Usable Clients
     web-app-bigbluebutton:
@@ -26,7 +26,7 @@ ports:
     web-app-gitea: 8002
     web-app-wordpress: 8003
     web-app-mediawiki: 8004
-    web-app-mybb: 8005
+    web-app-confluence: 8005
     web-app-yourls: 8006
     web-app-mailu: 8007
     web-app-elk: 8008
@@ -36,7 +36,7 @@ ports:
     web-app-funkwhale: 8012
     web-app-roulette-wheel: 8013
     web-app-joomla: 8014
-    web-app-attendize: 8015
+    web-app-jira: 8015
     web-app-pgadmin: 8016
     web-app-baserow: 8017
     web-app-matomo: 8018
@@ -50,7 +50,7 @@ ports:
     web-app-moodle: 8026
     web-app-taiga: 8027
     web-app-friendica: 8028
-    web-app-port-ui: 8029
+    web-app-desktop: 8029
     web-app-bluesky_api: 8030
     web-app-bluesky_web: 8031
     web-app-keycloak: 8032
@@ -70,6 +70,11 @@ ports:
     web-app-pretix: 8046
     web-app-mig: 8047
     web-svc-logout: 8048
+    web-app-bookwyrm: 8049
+    web-app-chess: 8050
+    web-app-bluesky_view: 8051
+    web-app-magento: 8052
+    web-app-bridgy-fed: 8053
     web-app-bigbluebutton: 48087 # This port is predefined by bbb. @todo Try to change this to a 8XXX port
   public:
     # The following ports should be changed to 22 on the subdomain via stream mapping
@@ -80,7 +85,8 @@ ports:
     svc-db-openldap: 636
   stun:
     web-app-bigbluebutton: 3478 # Not sure if it's placed correctly here or if it should be moved to the localhost section
-    web-app-nextcloud: 3479
+    # Occupied by BBB: 3479
+    web-app-nextcloud: 3480
   turn:
     web-app-bigbluebutton: 5349 # Not sure if it's placed correctly here or if it should be moved to the localhost section
     web-app-nextcloud: 5350 # Not used yet
@@ -7,31 +7,38 @@
  #############################################
  # @see https://en.wikipedia.org/wiki/OpenID_Connect

- ## Helper Variables:
+ # Helper Variables:
  _oidc_client_realm: "{{ OIDC.CLIENT.REALM if OIDC.CLIENT is defined and OIDC.CLIENT.REALM is defined else SOFTWARE_NAME | lower }}"
  _oidc_url: "{{
      ( OIDC.URL
        if (OIDC is defined and OIDC.URL is defined)
-       else WEB_PROTOCOL ~ '://' ~ (domains | get_domain('web-app-keycloak'))
+       else domains | get_url('web-app-keycloak', WEB_PROTOCOL)
      ).rstrip('/')
  }}"
  _oidc_client_issuer_url: "{{ _oidc_url ~ '/realms/' ~ _oidc_client_realm }}"
  _oidc_client_id: "{{ OIDC.CLIENT.ID if OIDC.CLIENT is defined and OIDC.CLIENT.ID is defined else SOFTWARE_NAME | lower }}"
+ _oidc_account_url: "{{ _oidc_client_issuer_url ~ '/account' }}"
+ _oidc_protocol_oidc: "{{ _oidc_client_issuer_url ~ '/protocol/openid-connect' }}"

  # Definition
  defaults_oidc:
    URL: "{{ _oidc_url }}"
    CLIENT:
      ID: "{{ _oidc_client_id }}"  # Client identifier, typically matching your primary domain
-     # secret: # Client secret for authenticating with the OIDC provider (set in the inventory file). Recommended: more than 32 characters
+     # SECRET: # Client secret for authenticating with the OIDC provider (set in the inventory file). Recommended: more than 32 characters
      REALM: "{{ _oidc_client_realm }}"  # The realm to which the client belongs in the OIDC provider
      ISSUER_URL: "{{ _oidc_client_issuer_url }}"  # Base URL of the OIDC provider (issuer)
      DISCOVERY_DOCUMENT: "{{ _oidc_client_issuer_url ~ '/.well-known/openid-configuration' }}"  # URL for fetching the provider's configuration details
-     AUTHORIZE_URL: "{{ _oidc_client_issuer_url ~ '/protocol/openid-connect/auth' }}"  # Endpoint to start the authorization process
-     TOKEN_URL: "{{ _oidc_client_issuer_url ~ '/protocol/openid-connect/token' }}"  # Endpoint to exchange authorization codes for tokens
-     USER_INFO_URL: "{{ _oidc_client_issuer_url ~ '/protocol/openid-connect/userinfo' }}"  # Endpoint to retrieve user information
-     LOGOUT_URL: "{{ _oidc_client_issuer_url ~ '/protocol/openid-connect/logout' }}"  # Endpoint to log out the user
-     CHANGE_CREDENTIALS: "{{ _oidc_client_issuer_url ~ '/account/account-security/signing-in' }}"  # URL for managing or changing user credentials
-     CERTS: "{{ _oidc_client_issuer_url ~ '/protocol/openid-connect/certs' }}"  # JSON Web Key Set (JWKS)
+     AUTHORIZE_URL: "{{ _oidc_protocol_oidc ~ '/auth' }}"  # Endpoint to start the authorization process
+     TOKEN_URL: "{{ _oidc_protocol_oidc ~ '/token' }}"  # Endpoint to exchange authorization codes for tokens
+     USER_INFO_URL: "{{ _oidc_protocol_oidc ~ '/userinfo' }}"  # Endpoint to retrieve user information
+     LOGOUT_URL: "{{ _oidc_protocol_oidc ~ '/logout' }}"  # Endpoint to log out the user
+     CERTS: "{{ _oidc_protocol_oidc ~ '/certs' }}"  # JSON Web Key Set (JWKS)
+   ACCOUNT:
+     URL: "{{ _oidc_account_url }}"  # Entry point for the user settings console
+     PROFILE_URL: "{{ _oidc_account_url ~ '/#/personal-info' }}"  # Section for managing personal information
+     SECURITY_URL: "{{ _oidc_account_url ~ '/#/security/signingin' }}"  # Section for managing login and security settings
+     CHANGE_CREDENTIALS: "{{ _oidc_account_url ~ '/account-security/signing-in' }}"  # URL for managing or changing user credentials
+     RESET_CREDENTIALS: "{{ _oidc_client_issuer_url ~ '/login-actions/reset-credentials?client_id=' ~ _oidc_client_id }}"  # Password reset URL
    BUTTON_TEXT: "SSO Login ({{ PRIMARY_DOMAIN | upper }})"  # Default button text
    ATTRIBUTES:
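For orientation, a sketch of what the derived endpoints resolve to; the realm and domain below are illustrative assumptions (realm "example", Keycloak reachable at auth.example.com), not values from the diff:

    ISSUER_URL:    "https://auth.example.com/realms/example"
    AUTHORIZE_URL: "https://auth.example.com/realms/example/protocol/openid-connect/auth"
    TOKEN_URL:     "https://auth.example.com/realms/example/protocol/openid-connect/token"
    CERTS:         "https://auth.example.com/realms/example/protocol/openid-connect/certs"
    ACCOUNT.URL:   "https://auth.example.com/realms/example/account"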
@@ -14,22 +14,22 @@ _ldap_domain: "{{ PRIMARY_DOMAIN }}" # LDAP is just listening to
  _ldap_user_id: "uid"
  _ldap_filters_users_all: "(|(objectclass=inetOrgPerson))"

- ldap:
+ LDAP:
    # Distinguished Names (DN)
-   dn:
+   DN:
      # -------------------------------------------------------------------------
      # Base DN / Suffix
      # This is the top-level naming context for your directory, used as the
      # default search base for most operations (e.g. adding users, groups).
      # Example: “dc=example,dc=com”
-     root: "{{ LDAP_DN_BASE }}"
-     administrator:
+     ROOT: "{{ LDAP_DN_BASE }}"
+     ADMINISTRATOR:
        # -------------------------------------------------------------------------
        # Data-Tree Administrator Bind DN
        # The DN used to authenticate for regular directory operations under
        # the data tree (adding users, modifying attributes, creating OUs, etc.).
        # Typically: “cn=admin,dc=example,dc=com”
-       data: "cn={{ applications['svc-db-openldap'].users.administrator.username }},{{ LDAP_DN_BASE }}"
+       DATA: "cn={{ applications['svc-db-openldap'].users.administrator.username }},{{ LDAP_DN_BASE }}"

        # -------------------------------------------------------------------------
        # Config-Tree Administrator Bind DN
@@ -37,9 +37,9 @@ ldap:
        # need to load or modify schema, overlays, modules, or other server-
        # level settings.
        # Typically: “cn=admin,cn=config”
-       configuration: "cn={{ applications['svc-db-openldap'].users.administrator.username }},cn=config"
+       CONFIGURATION: "cn={{ applications['svc-db-openldap'].users.administrator.username }},cn=config"

-   ou:
+   OU:
      # -------------------------------------------------------------------------
      # Organizational Units (OUs)
      # Pre-created containers in the directory tree to logically separate entries:
@@ -47,9 +47,9 @@ ldap:
      # – groups: Contains organizational or business groups (e.g., departments, teams).
      # – roles:  Contains application-specific RBAC roles
      #           (e.g., "cn=app1-user", "cn=yourls-admin").
-     users: "ou=users,{{ LDAP_DN_BASE }}"
-     groups: "ou=groups,{{ LDAP_DN_BASE }}"
-     roles: "ou=roles,{{ LDAP_DN_BASE }}"
+     USERS: "ou=users,{{ LDAP_DN_BASE }}"
+     GROUPS: "ou=groups,{{ LDAP_DN_BASE }}"
+     ROLES: "ou=roles,{{ LDAP_DN_BASE }}"

      # -------------------------------------------------------------------------
      # Additional Notes
@@ -59,17 +59,17 @@ ldap:
    # for ordinary user/group operations, and vice versa.

    # Password to access dn.bind
-   bind_credential: "{{ applications | get_app_conf('svc-db-openldap', 'credentials.administrator_database_password') }}"
-   server:
-     domain: "{{ _ldap_name if _ldap_docker_network_enabled else _ldap_domain }}"  # Mapping for public or local access
-     port: "{{ _ldap_server_port }}"
-     uri: "{{ _ldap_protocol }}://{{ _ldap_name if _ldap_docker_network_enabled else _ldap_domain }}:{{ _ldap_server_port }}"
-     security: ""  # TLS, SSL - leave empty for none
-     network:
-       local: "{{ _ldap_docker_network_enabled }}"  # Uses the application configuration to define whether the local network should be available
-   user:
-     objects:
-       structural:
+   BIND_CREDENTIAL: "{{ applications | get_app_conf('svc-db-openldap', 'credentials.administrator_database_password') }}"
+   SERVER:
+     DOMAIN: "{{ _ldap_name if _ldap_docker_network_enabled else _ldap_domain }}"  # Mapping for public or local access
+     PORT: "{{ _ldap_server_port }}"
+     URI: "{{ _ldap_protocol }}://{{ _ldap_name if _ldap_docker_network_enabled else _ldap_domain }}:{{ _ldap_server_port }}"
+     SECURITY: ""  # TLS, SSL - leave empty for none
+     NETWORK:
+       LOCAL: "{{ _ldap_docker_network_enabled }}"  # Uses the application configuration to define whether the local network should be available
+   USER:
+     OBJECTS:
+       STRUCTURAL:
          - person        # Structural classes define the core identity of an entry:
                          #  • Specify mandatory attributes (e.g. sn, cn)
                          #  • Each entry must have exactly one structural class
@@ -77,26 +77,26 @@ ldap:
                          #    (e.g. mail, employeeNumber)
          - posixAccount  # Provides UNIX account attributes (uidNumber, gidNumber, homeDirectory)
-       auxiliary:
-         nextloud_user: "nextcloudUser"    # Auxiliary classes attach optional attributes without
+       AUXILIARY:
+         NEXTCLOUD_USER: "nextcloudUser"   # Auxiliary classes attach optional attributes without
                                            # changing the entry’s structural role. Here they add
                                            # nextcloudQuota and nextcloudEnabled for Nextcloud.
-         ssh_public_key: "ldapPublicKey"   # Allows storing SSH public keys for services like Gitea.
-     attributes:
+         SSH_PUBLIC_KEY: "ldapPublicKey"   # Allows storing SSH public keys for services like Gitea.
+     ATTRIBUTES:
        # Attribute used to identify the user
-       id: "{{ _ldap_user_id }}"
-       mail: "mail"
-       fullname: "cn"
-       firstname: "givenname"
-       surname: "sn"
-       ssh_public_key: "sshPublicKey"
-       nextcloud_quota: "nextcloudQuota"
-   filters:
-     users:
-       login: "(&{{ _ldap_filters_users_all }}({{ _ldap_user_id }}=%{{ _ldap_user_id }}))"
-       all: "{{ _ldap_filters_users_all }}"
-   rbac:
-     flavors:
+       ID: "{{ _ldap_user_id }}"
+       MAIL: "mail"
+       FULLNAME: "cn"
+       FIRSTNAME: "givenname"
+       SURNAME: "sn"
+       SSH_PUBLIC_KEY: "sshPublicKey"
+       NEXTCLOUD_QUOTA: "nextcloudQuota"
+   FILTERS:
+     USERS:
+       LOGIN: "(&{{ _ldap_filters_users_all }}({{ _ldap_user_id }}=%{{ _ldap_user_id }}))"
+       ALL: "{{ _ldap_filters_users_all }}"
+   RBAC:
+     FLAVORS:
      # Valid values: posixGroup, groupOfNames
      - groupOfNames
      # - posixGroup
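A quick sketch of how the renamed keys resolve, assuming LDAP_DN_BASE is "dc=example,dc=com" and the administrator username is "admin" (both hypothetical values for illustration):

    LDAP:
      DN:
        ROOT: "dc=example,dc=com"
        ADMINISTRATOR:
          DATA:          "cn=admin,dc=example,dc=com"
          CONFIGURATION: "cn=admin,cn=config"
        OU:
          USERS:  "ou=users,dc=example,dc=com"
          GROUPS: "ou=groups,dc=example,dc=com"
          ROLES:  "ou=roles,dc=example,dc=com"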
@@ -21,7 +21,7 @@ defaults_service_provider:
      if 'web-app-bluesky' in group_names else '' }}
    email: "{{ users.contact.username ~ '@' ~ PRIMARY_DOMAIN if 'web-app-mailu' in group_names else '' }}"
    mastodon: "{{ '@' ~ users.contact.username ~ '@' ~ domains | get_domain('web-app-mastodon') if 'web-app-mastodon' in group_names else '' }}"
-   matrix: "{{ '@' ~ users.contact.username ~ ':' ~ domains['web-app-matrix'].synapse if 'web-app-matrix' in group_names else '' }}"
+   matrix: "{{ '@' ~ users.contact.username ~ ':' ~ applications | get_app_conf('web-app-matrix', 'server_name') if 'web-app-matrix' in group_names else '' }}"
    peertube: "{{ '@' ~ users.contact.username ~ '@' ~ domains | get_domain('web-app-peertube') if 'web-app-peertube' in group_names else '' }}"
    pixelfed: "{{ '@' ~ users.contact.username ~ '@' ~ domains | get_domain('web-app-pixelfed') if 'web-app-pixelfed' in group_names else '' }}"
    phone: "+0 000 000 404"
lookup_plugins/local_mtime_qs.py (new file, 53 lines)
@@ -0,0 +1,53 @@
+ from __future__ import annotations
+ from ansible.plugins.lookup import LookupBase
+ from ansible.errors import AnsibleError
+ import os
+
+ class LookupModule(LookupBase):
+     """
+     Return a cache-busting string based on the LOCAL file's mtime.
+
+     Usage (single path → string via Jinja):
+         {{ lookup('local_mtime_qs', '/path/to/file.css') }}
+         -> "?version=1712323456"
+
+     Options:
+         param (str): query parameter name (default: "version")
+         mode (str):  "qs" (default) → returns "?<param>=<mtime>"
+                      "epoch"        → returns "<mtime>"
+
+     Multiple paths (returns a list, one result per term):
+         {{ lookup('local_mtime_qs', '/a.js', '/b.js', param='v') }}
+     """
+
+     def run(self, terms, variables=None, **kwargs):
+         if not terms:
+             return []
+
+         param = kwargs.get('param', 'version')
+         mode = kwargs.get('mode', 'qs')
+
+         if mode not in ('qs', 'epoch'):
+             raise AnsibleError("local_mtime_qs: 'mode' must be 'qs' or 'epoch'")
+
+         results = []
+         for term in terms:
+             path = os.path.abspath(os.path.expanduser(str(term)))
+
+             # Fail fast if the path is missing or not a regular file
+             if not os.path.exists(path):
+                 raise AnsibleError(f"local_mtime_qs: file does not exist: {path}")
+             if not os.path.isfile(path):
+                 raise AnsibleError(f"local_mtime_qs: not a regular file: {path}")
+
+             try:
+                 mtime = int(os.stat(path).st_mtime)
+             except OSError as e:
+                 raise AnsibleError(f"local_mtime_qs: cannot stat '{path}': {e}")
+
+             if mode == 'qs':
+                 results.append(f"?{param}={mtime}")
+             else:  # mode == 'epoch'
+                 results.append(str(mtime))
+
+         return results
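A minimal usage sketch for this lookup inside a task; the file path and variable name are illustrative, not part of the diff:

    - name: Build a cache-busted stylesheet URL
      set_fact:
        css_href: "/assets/style.css{{ lookup('local_mtime_qs', playbook_dir ~ '/files/style.css') }}"
      # css_href renders to e.g. "/assets/style.css?version=1712323456"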
@@ -1,9 +1,4 @@
  roles:
-   cmp:
-     title: "Compositions"
-     description: "Composition of other roles."
-     icon: "fas fa-sitemap"
-     invokable: false
    docker:
      title: "Docker Toolkit"
      description: "Generic Docker helpers and utilities (compose wrappers, container tooling)."
@@ -56,6 +51,21 @@ roles:
      description: "DNS providers, records, and rDNS management (Cloudflare, Hetzner, etc.)."
      icon: "fas fa-network-wired"
      invokable: false
+   stk:
+     title: "Stack"
+     description: "Stack levels to set up the server"
+     icon: "fas fa-bars-staggered"
+     invokable: false
+   front:
+     title: "System Frontend Helpers"
+     description: "Frontend helpers for reverse-proxied apps (injection, shared assets, CDN plumbing)."
+     icon: "fas fa-wand-magic-sparkles"
+     invokable: false
+   inj:
+     title: "Injection"
+     description: "Composable HTML injection roles (CSS, JS, logout interceptor, analytics, desktop iframe) for Nginx/OpenResty via sub_filter/Lua with CDN-backed assets."
+     icon: "fas fa-filter"
+     invokable: false
    update:
      title: "Updates & Package Management"
      description: "OS & package updates"
@@ -101,21 +111,6 @@ roles:
      description: "Developer-centric server utilities and admin toolkits."
      icon: "fas fa-code"
      invokable: false
-   srv:
-     title: "Server"
-     description: "General server roles for provisioning and managing server infrastructure—covering web servers, proxy servers, network services, and other backend components."
-     icon: "fas fa-server"
-     invokable: false
-   web:
-     title: "Webserver"
-     description: "Web-server roles for installing and configuring Nginx (core, TLS, injection filters, composer modules)."
-     icon: "fas fa-server"
-     invokable: false
-   proxy:
-     title: "Proxy Server"
-     description: "Proxy-server roles for virtual-host orchestration and reverse-proxy setups."
-     icon: "fas fa-project-diagram"
-     invokable: false
    web:
      title: "Web Infrastructure"
      description: "Roles for managing web infrastructure—covering static content services and deployable web applications."
@@ -1,11 +0,0 @@
- # Database Docker with Web Proxy
-
- This role builds on `cmp-db-docker` by adding a reverse-proxy frontend for HTTP access to your database service.
-
- ## Features
-
- - **Database Composition**
-   Leverages the `cmp-db-docker` role to stand up your containerized database (PostgreSQL, MariaDB, etc.) with backups and user management.
-
- - **Reverse Proxy**
-   Includes the `srv-domain-provision` role to configure a proxy (e.g. nginx) for routing HTTP(S) traffic to your database UI or management endpoint.
@@ -1 +0,0 @@
- DATABASE_VARS_FILE: "{{ playbook_dir }}/roles/cmp-rdbms/vars/database.yml"
@@ -1 +0,0 @@
- {% include 'roles/cmp-rdbms/templates/services/' + database_type + '.yml.j2' %}
@@ -1,20 +0,0 @@
- # Helper variables
- _dbtype:                        "{{ (database_type | d('') | trim) }}"
- _database_id:                   "{{ ('svc-db-' ~ _dbtype) if _dbtype else '' }}"
- _database_central_name:         "{{ (applications | get_app_conf(_database_id, 'docker.services.' ~ _dbtype ~ '.name', False, '')) if _dbtype else '' }}"
- _database_consumer_id:          "{{ database_application_id | d(application_id) }}"
- _database_consumer_entity_name: "{{ _database_consumer_id | get_entity_name }}"
- _database_central_enabled:      "{{ (applications | get_app_conf(_database_consumer_id, 'features.central_database', False)) if _dbtype else False }}"
-
- # Definition
-
- database_name:     "{{ _database_consumer_entity_name }}"
- database_instance: "{{ _database_central_name if _database_central_enabled else database_name }}"  # This could lead to bugs at dedicated database @todo cleanup
- database_host:     "{{ _database_central_name if _database_central_enabled else 'database' }}"     # This could lead to bugs at dedicated database @todo cleanup
- database_username: "{{ _database_consumer_entity_name }}"
- database_password: "{{ applications | get_app_conf(_database_consumer_id, 'credentials.database_password', true) }}"
- database_port:     "{{ (ports.localhost.database[_database_id] | d('')) if _dbtype else '' }}"
- database_env:      "{{ docker_compose.directories.env }}{{ database_type }}.env"
- database_url_jdbc: "jdbc:{{ database_type if database_type == 'mariadb' else 'postgresql' }}://{{ database_host }}:{{ database_port }}/{{ database_name }}"
- database_url_full: "{{ database_type }}://{{ database_username }}:{{ database_password }}@{{ database_host }}:{{ database_port }}/{{ database_name }}"
- database_volume:   "{{ _database_consumer_entity_name ~ '_' if not _database_central_enabled }}{{ database_host }}"
@@ -19,3 +19,5 @@
    template:
      src: caffeine.desktop.j2
      dest: "{{ auto_start_directory }}caffeine.desktop"
+
+ - include_tasks: utils/run_once.yml
@@ -1,4 +1,3 @@
  - block:
      - include_tasks: 01_core.yml
-     - include_tasks: utils/run_once.yml
    when: run_once_desk_gnome_caffeine is not defined
@@ -9,4 +9,4 @@
    community.general.pacman:
      name: "libreoffice-{{ applications['desk-libreoffice'].flavor }}-{{ item }}"
      state: present
- loop: "{{libreoffice_languages}}"
+ loop: "{{ libreoffice_languages }}"
@@ -49,3 +49,5 @@
      create: yes
      mode: "0644"
    become: false
+
+ - include_tasks: utils/run_once.yml
@@ -1,4 +1,3 @@
  - block:
      - include_tasks: 01_core.yml
-     - include_tasks: utils/run_once.yml
    when: run_once_desk_ssh is not defined
@@ -1,4 +0,0 @@
- ---
- - name: reload virtualbox kernel modules
-   become: true
-   command: vboxreload
@@ -1,8 +1,14 @@
  ---
  - name: Setup locale.gen
-   template: src=locale.gen dest=/etc/locale.gen
+   template:
+     src: locale.gen.j2
+     dest: /etc/locale.gen

  - name: Setup locale.conf
-   template: src=locale.conf dest=/etc/locale.conf
+   template:
+     src: locale.conf.j2
+     dest: /etc/locale.conf

  - name: Generate locales
    shell: locale-gen
    become: true
@@ -1,2 +0,0 @@
- LANG=en_US.UTF-8
- LANGUAGE=en_US.UTF-8

roles/dev-locales/templates/locale.conf.j2 (new file, 2 lines)
@@ -0,0 +1,2 @@
+ LANG={{ HOST_LL_CC }}.UTF-8
+ LANGUAGE={{ HOST_LL_CC }}.UTF-8
@@ -20,7 +20,7 @@ To offer a centralized, extensible system for managing containerized applications
  - **Reset Logic:** Cleans previous Compose project files and data when `MODE_RESET` is enabled.
  - **Handlers for Runtime Control:** Automatically builds, sets up, or restarts containers based on handlers.
  - **Template-ready Service Files:** Predefined service base and health check templates.
- - **Integration Support:** Compatible with `srv-proxy-core` and other Infinito.Nexus service roles.
+ - **Integration Support:** Compatible with `sys-svc-proxy` and other Infinito.Nexus service roles.

  ## Administration Tips
@@ -10,14 +10,22 @@
        - docker compose up
        - docker compose restart
+       - docker compose just up
      when: MODE_ASSERT | bool

  - name: docker compose pull
    shell: |
      set -euo pipefail
-     lock="{{ [ PATH_DOCKER_COMPOSE_PULL_LOCK_DIR | docker_compose.directories.instance ] path_join | hash('sha1') }}"
+     lock="{{ [ PATH_DOCKER_COMPOSE_PULL_LOCK_DIR, (docker_compose.directories.instance | hash('sha1')) ~ '.lock' ] | path_join }}"
      if [ ! -e "$lock" ]; then
        mkdir -p "$(dirname "$lock")"
-       docker compose pull
+       if docker compose config | grep -qE '^[[:space:]]+build:'; then
+         docker compose build --pull
+       fi
+       if docker compose pull --help 2>/dev/null | grep -q -- '--ignore-buildable'; then
+         docker compose pull --ignore-buildable
+       else
+         docker compose pull || true
+       fi
        : > "$lock"
        echo "pulled"
      fi
@@ -40,7 +48,7 @@
      set -euo pipefail
      docker compose build || {
        echo "Retrying without cache and pulling bases...";
-       docker compose build --no-cache --pull;
+       docker compose build --no-cache{{ ' --pull' if MODE_UPDATE | bool else '' }};
      }
    args:
      chdir: "{{ docker_compose.directories.instance }}"
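The corrected lock expression hashes only the instance directory and appends ".lock". Assuming PATH_DOCKER_COMPOSE_PULL_LOCK_DIR is "/run/infinito/pull-locks" (a hypothetical value), it would render roughly as:

    # {{ [ PATH_DOCKER_COMPOSE_PULL_LOCK_DIR, (docker_compose.directories.instance | hash('sha1')) ~ '.lock' ] | path_join }}
    # -> /run/infinito/pull-locks/2fd4e1c67a2d28fced849ee1bb76e7391b93eb12.lock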
@@ -5,7 +5,9 @@
    loop:
      - "{{ application_id | abs_role_path_by_application_id }}/templates/Dockerfile.j2"
      - "{{ application_id | abs_role_path_by_application_id }}/files/Dockerfile"
-   notify: docker compose up
+   notify:
+     - docker compose up
+     - docker compose build
    register: create_dockerfile_result
    failed_when:
      - create_dockerfile_result is failed
@@ -2,7 +2,7 @@
  services:
  {# Load Database #}
  {% if applications | is_docker_service_enabled(application_id, 'database') %}
- {% include 'roles/cmp-rdbms/templates/services/main.yml.j2' %}
+ {% include 'roles/sys-svc-rdbms/templates/services/main.yml.j2' %}
  {% endif %}
  {# Load Redis #}
  {% if applications | is_docker_service_enabled(application_id, 'redis') or applications | get_app_conf(application_id, 'features.oauth2', False) %}
@@ -3,6 +3,10 @@
        - "CMD"
        - "curl"
        - "-f"
+ {% if container_hostname is defined %}
+       - "-H"
+       - "Host: {{ container_hostname }}"
+ {% endif %}
        - "http://127.0.0.1{{ (":" ~ container_port) if container_port is defined else '' }}/{{ container_healthcheck | default('') }}"
      interval: 1m
      timeout: 10s
roles/docker-container/templates/healthcheck/nc.yml.j2 (new file, 7 lines)
@@ -0,0 +1,7 @@
+     healthcheck:
+       test: ["CMD-SHELL", "nc -z localhost {{ container_port }} || exit 1"]
+       interval: 30s
+       timeout: 3s
+       retries: 3
+       start_period: 10s
+ {{ "\n" }}
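Rendered output of the new template for an assumed container_port of 5432 (illustrative value only):

    healthcheck:
      test: ["CMD-SHELL", "nc -z localhost 5432 || exit 1"]
      interval: 30s
      timeout: 3s
      retries: 3
      start_period: 10s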
@@ -16,29 +16,23 @@
  - name: Create installation directory for Kevin's Package Manager
    file:
-     path: "{{ pkgmgr_install_path }}"
+     path: "{{ PKGMGR_INSTALL_PATH }}"
      state: directory
      mode: '0755'
    become: true

  - name: Clone Kevin's Package Manager repository
    git:
-     repo: "{{ pkgmgr_repo_url }}"
-     dest: "{{ pkgmgr_install_path }}"
+     repo: "{{ PKGMGR_REPO_URL }}"
+     dest: "{{ PKGMGR_INSTALL_PATH }}"
      version: "HEAD"
      force: yes
    become: true

- - name: Ensure main.py is executable
-   file:
-     path: "{{ pkgmgr_install_path }}/main.py"
-     mode: '0755'
-   become: true
-
  - name: create config.yaml
    template:
      src: config.yaml.j2
-     dest: "{{ pkgmgr_config_path }}"
+     dest: "{{ PKGMGR_CONFIG_PATH }}"
    become: true

  - name: Run the Package Manager install command to create an alias for Kevin's package manager
@@ -46,6 +40,10 @@
      source ~/.venvs/pkgmgr/bin/activate
      make setup
    args:
-     chdir: "{{ pkgmgr_install_path }}"
+     chdir: "{{ PKGMGR_INSTALL_PATH }}"
      executable: /bin/bash
    become: true
+
+ - name: "Update all repositories with pkgmgr"
+   command: "pkgmgr pull --all"
+   when: MODE_UPDATE | bool
@@ -1,3 +1,3 @@
  directories:
-   repositories: "{{repositories_directory}}"
-   binaries: "{{binaries_directory}}"
+   repositories: "{{ PKGMGR_REPOSITORIES_DIR }}"
+   binaries: "{{ PKGMGR_BINARIES_DIR }}"
@@ -2,16 +2,16 @@
  # Variables for Kevin's Package Manager installation

  # The Git repository URL for Kevin's Package Manager
- pkgmgr_repo_url: "https://github.com/kevinveenbirkenbach/package-manager.git"
-
- # Directory which contains all repositories managed by Kevin's Package Manager
- repositories_directory: "/opt/Repositories/"
-
- # The directory where the repository will be cloned
- pkgmgr_install_path: "{{repositories_directory}}github.com/kevinveenbirkenbach/package-manager"
-
- # File containing the configuration
- pkgmgr_config_path: "{{pkgmgr_install_path}}/config/config.yaml"
+ PKGMGR_REPO_URL: "https://github.com/kevinveenbirkenbach/package-manager.git"

  # The directory where executable aliases will be installed (ensure it's in your PATH)
- binaries_directory: "/usr/local/bin"
+ PKGMGR_BINARIES_DIR: "/usr/local/bin"

+ # Directory which contains all repositories managed by Kevin's Package Manager
+ PKGMGR_REPOSITORIES_DIR: "/opt/Repositories/"
+
+ # The directory where the repository will be cloned
+ PKGMGR_INSTALL_PATH: "{{ [ PKGMGR_REPOSITORIES_DIR, 'github.com/kevinveenbirkenbach/package-manager' ] | path_join }}"
+
+ # File containing the configuration
+ PKGMGR_CONFIG_PATH: "{{ [ PKGMGR_INSTALL_PATH, 'config/config.yaml' ] | path_join }}"
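With the defaults above, the two path_join expressions resolve to the following values (no assumptions beyond what the diff shows):

    PKGMGR_INSTALL_PATH: "/opt/Repositories/github.com/kevinveenbirkenbach/package-manager"
    PKGMGR_CONFIG_PATH:  "/opt/Repositories/github.com/kevinveenbirkenbach/package-manager/config/config.yaml"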
@@ -1,9 +0,0 @@
- # run_once_srv_composer: deactivated
-
- - name: "include role sys-srv-web-inj-compose for '{{ domain }}'"
-   include_role:
-     name: sys-srv-web-inj-compose
-
- - name: "include role srv-tls-core for '{{ domain }}'"
-   include_role:
-     name: srv-tls-core
@@ -1,5 +0,0 @@
- # default vhost flavour
- vhost_flavour: "basic" # valid: basic | ws_generic
-
- # build the full template path from the flavour
- vhost_template_src: "roles/srv-proxy-core/templates/vhost/{{ vhost_flavour }}.conf.j2"
@@ -1 +0,0 @@
- configuration_destination: "{{ NGINX.DIRECTORIES.HTTP.SERVERS }}{{ domain }}.conf"
@@ -1,4 +0,0 @@
- - block:
-     - include_tasks: 01_core.yml
-     - include_tasks: utils/run_once.yml
-   when: run_once_srv_letsencrypt is not defined
@@ -1,31 +0,0 @@
- - block:
-     - name: Install certbundle
-       include_role:
-         name: pkgmgr-install
-       vars:
-         package_name: certbundle
-
-     - name: Generate SAN certificate with certbundle
-       command: >-
-         certbundle
-         --domains "{{ current_play_domains_all | join(',') }}"
-         --certbot-email "{{ users.administrator.email }}"
-         --certbot-acme-challenge-method "{{ CERTBOT_ACME_CHALLENGE_METHOD }}"
-         --chunk-size 100
-         {% if CERTBOT_ACME_CHALLENGE_METHOD != 'webroot' %}
-         --certbot-credentials-file "{{ CERTBOT_CREDENTIALS_FILE }}"
-         --certbot-dns-propagation-seconds "{{ CERTBOT_DNS_PROPAGATION_WAIT_SECONDS }}"
-         {% else %}
-         --letsencrypt-webroot-path "{{ LETSENCRYPT_WEBROOT_PATH }}"
-         {% endif %}
-         {{ '--mode-test' if MODE_TEST | bool else '' }}
-       register: certbundle_result
-       changed_when: "'Certificate not yet due for renewal' not in certbundle_result.stdout"
-       failed_when: >
-         certbundle_result.rc != 0
-         and 'too many certificates' not in certbundle_result.stderr
-
-     - name: run the san tasks once
-       set_fact:
-         run_once_san_certs: true
-   when: run_once_san_certs is not defined
@@ -1,6 +0,0 @@
-
- - name: "reload svc-bkp-loc-2-usb service"
-   systemd:
-     name: "{{ 'svc-bkp-loc-2-usb' | get_service_name(SOFTWARE_NAME) }}"
-     state: reloaded
-     daemon_reload: yes
@@ -4,5 +4,5 @@ OnFailure={{ SYS_SERVICE_ON_FAILURE_COMPOSE }} {{ SYS_SERVICE_CLEANUP_BACKUPS_FA

  [Service]
  Type=oneshot
- ExecStartPre=/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(' ') }} --ignore {{ SYS_SERVICE_GROUP_BACKUPS | join(' ') }} --timeout "{{ SYS_TIMEOUT_BACKUP_SERVICES }}"
+ ExecStartPre=/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(' ') }} --ignore {{ SYS_SERVICE_BACKUP_RMT_2_LOC }} --timeout "{{ SYS_TIMEOUT_BACKUP_SERVICES }}"
  ExecStart={{ system_service_script_exec }}
@@ -1,77 +0,0 @@
- def build_ldap_nested_group_entries(applications, users, ldap):
-     """
-     Builds structured LDAP role entries using the global `ldap` configuration.
-     Supports objectClasses: posixGroup (adds gidNumber, memberUid), groupOfNames (adds member).
-     Now nests roles under an application-level OU: application-id/role.
-     """
-
-     result = {}
-
-     # Base DN components
-     role_dn_base = ldap["dn"]["ou"]["roles"]
-     user_dn_base = ldap["dn"]["ou"]["users"]
-     ldap_user_attr = ldap["user"]["attributes"]["id"]
-
-     # Supported objectClass flavors
-     flavors = ldap.get("rbac", {}).get("flavors", [])
-
-     for application_id, app_config in applications.items():
-         # Compute the DN for the application-level OU
-         app_ou_dn = f"ou={application_id},{role_dn_base}"
-
-         ou_entry = {
-             "dn": app_ou_dn,
-             "objectClass": ["top", "organizationalUnit"],
-             "ou": application_id,
-             "description": f"Roles for application {application_id}"
-         }
-         result[app_ou_dn] = ou_entry
-
-         # Standard roles with an extra 'administrator'
-         base_roles = app_config.get("rbac", {}).get("roles", {})
-         roles = {
-             **base_roles,
-             "administrator": {
-                 "description": "Has full administrative access: manage themes, plugins, settings, and users"
-             }
-         }
-
-         group_id = app_config.get("group_id")
-
-         for role_name, role_conf in roles.items():
-             # Build CN under the application OU
-             cn = role_name
-             dn = f"cn={cn},{app_ou_dn}"
-
-             entry = {
-                 "dn": dn,
-                 "cn": cn,
-                 "description": role_conf.get("description", ""),
-                 "objectClass": ["top"] + flavors,
-             }
-
-             member_dns = []
-             member_uids = []
-             for username, user_conf in users.items():
-                 if role_name in user_conf.get("roles", []):
-                     member_dns.append(f"{ldap_user_attr}={username},{user_dn_base}")
-                     member_uids.append(username)
-
-             if "posixGroup" in flavors:
-                 entry["gidNumber"] = group_id
-                 if member_uids:
-                     entry["memberUid"] = member_uids
-
-             if "groupOfNames" in flavors and member_dns:
-                 entry["member"] = member_dns
-
-             result[dn] = entry
-
-     return result
-
-
- class FilterModule(object):
-     def filters(self):
-         return {
-             "build_ldap_nested_group_entries": build_ldap_nested_group_entries
-         }
@@ -16,10 +16,10 @@ def build_ldap_role_entries(applications, users, ldap):
      }

      group_id = application_config.get("group_id")
-     user_dn_base = ldap["dn"]["ou"]["users"]
-     ldap_user_attr = ldap["user"]["attributes"]["id"]
-     role_dn_base = ldap["dn"]["ou"]["roles"]
-     flavors = ldap.get("rbac", {}).get("flavors", [])
+     user_dn_base = ldap["DN"]["OU"]["USERS"]
+     ldap_user_attr = ldap["USER"]["ATTRIBUTES"]["ID"]
+     role_dn_base = ldap["DN"]["OU"]["ROLES"]
+     flavors = ldap.get("RBAC").get("FLAVORS")

      for role_name, role_conf in roles.items():
          group_cn = f"{application_id}-{role_name}"
@@ -1,55 +0,0 @@
- - name: Load memberof module from file in OpenLDAP container
-   shell: >
-     docker exec -i {{ openldap_name }} ldapmodify -Y EXTERNAL -H ldapi:/// -f {{ openldap_ldif_docker_path }}configuration/01_member_of_configuration.ldif
-   listen:
-     - "Import configuration LDIF files"
-     - "Import all LDIF files"
-   # @todo Remove the following ignore_errors when setting up a new server
-   # Just here because debugging would take too much time
-   ignore_errors: true
-
- - name: Refint Module Activation for OpenLDAP
-   shell: >
-     docker exec -i {{ openldap_name }} ldapadd -Y EXTERNAL -H ldapi:/// -f {{ openldap_ldif_docker_path }}configuration/02_member_of_configuration.ldif
-   listen:
-     - "Import configuration LDIF files"
-     - "Import all LDIF files"
-   register: ldapadd_result
-   failed_when: ldapadd_result.rc not in [0, 68]
-   # @todo Remove the following ignore_errors when setting up a new server
-   # Just here because debugging would take too much time
-   ignore_errors: true
-
- - name: "Import schemas"
-   shell: >
-     docker exec -i {{ openldap_name }} ldapadd -Y EXTERNAL -H ldapi:/// -f "{{ openldap_ldif_docker_path }}schema/{{ item | basename | regex_replace('\.j2$', '') }}"
-   register: ldapadd_result
-   changed_when: "'adding new entry' in ldapadd_result.stdout"
-   failed_when: ldapadd_result.rc not in [0, 80]
-   listen:
-     - "Import schema LDIF files"
-     - "Import all LDIF files"
-   loop: "{{ lookup('fileglob', role_path ~ '/templates/ldif/schema/*.j2', wantlist=True) }}"
-
- - name: Refint Overlay Configuration for OpenLDAP
-   shell: >
-     docker exec -i {{ openldap_name }} ldapmodify -Y EXTERNAL -H ldapi:/// -f {{ openldap_ldif_docker_path }}configuration/03_member_of_configuration.ldif
-   listen:
-     - "Import configuration LDIF files"
-     - "Import all LDIF files"
-   register: ldapadd_result
-   failed_when: ldapadd_result.rc not in [0, 68]
-   # @todo Remove the following ignore_errors when setting up a new server
-   # Just here because debugging would take too much time
-   ignore_errors: true
-
- - name: "Import users, groups, etc. to LDAP"
-   shell: >
-     docker exec -i {{ openldap_name }} ldapadd -x -D "{{ ldap.dn.administrator.data }}" -w "{{ ldap.bind_credential }}" -c -f "{{ openldap_ldif_docker_path }}groups/{{ item | basename | regex_replace('\.j2$', '') }}"
-   register: ldapadd_result
-   changed_when: "'adding new entry' in ldapadd_result.stdout"
-   failed_when: ldapadd_result.rc not in [0, 20, 68, 65]
-   listen:
-     - "Import groups LDIF files"
-     - "Import all LDIF files"
-   loop: "{{ query('fileglob', role_path ~ '/templates/ldif/groups/*.j2') | sort }}"
@@ -28,7 +28,7 @@
  - name: "Generate hash for Database Admin password"
    shell: |
      docker exec {{ openldap_name }} \
-       slappasswd -s "{{ ldap.bind_credential }}"
+       slappasswd -s "{{ LDAP.BIND_CREDENTIAL }}"
    register: database_admin_pw_hash

  - name: "Reset Database Admin password in LDAP (olcRootPW)"
@@ -3,11 +3,11 @@
  ###############################################################################
  - name: Ensure LDAP users exist
    community.general.ldap_entry:
-     dn: "{{ ldap.user.attributes.id }}={{ item.key }},{{ ldap.dn.ou.users }}"
+     dn: "{{ LDAP.USER.ATTRIBUTES.ID }}={{ item.key }},{{ LDAP.DN.OU.USERS }}"
      server_uri: "{{ openldap_server_uri }}"
-     bind_dn: "{{ ldap.dn.administrator.data }}"
-     bind_pw: "{{ ldap.bind_credential }}"
-     objectClass: "{{ ldap.user.objects.structural }}"
+     bind_dn: "{{ LDAP.DN.ADMINISTRATOR.DATA }}"
+     bind_pw: "{{ LDAP.BIND_CREDENTIAL }}"
+     objectClass: "{{ LDAP.USER.OBJECTS.STRUCTURAL }}"
      attributes:
        uid: "{{ item.value.username }}"
        sn: "{{ item.value.sn | default(item.key) }}"
@@ -29,12 +29,12 @@
  ###############################################################################
  - name: Ensure required objectClass values and mail address are present
    community.general.ldap_attrs:
-     dn: "{{ ldap.user.attributes.id }}={{ item.key }},{{ ldap.dn.ou.users }}"
+     dn: "{{ LDAP.USER.ATTRIBUTES.ID }}={{ item.key }},{{ LDAP.DN.OU.USERS }}"
      server_uri: "{{ openldap_server_uri }}"
-     bind_dn: "{{ ldap.dn.administrator.data }}"
-     bind_pw: "{{ ldap.bind_credential }}"
+     bind_dn: "{{ LDAP.DN.ADMINISTRATOR.DATA }}"
+     bind_pw: "{{ LDAP.BIND_CREDENTIAL }}"
      attributes:
-       objectClass: "{{ ldap.user.objects.structural }}"
+       objectClass: "{{ LDAP.USER.OBJECTS.STRUCTURAL }}"
        mail: "{{ item.value.email }}"
      state: exact
    async: "{{ ASYNC_TIME if ASYNC_ENABLED | bool else omit }}"
@@ -45,10 +45,10 @@

  - name: "Ensure container for application roles exists"
    community.general.ldap_entry:
-     dn: "{{ ldap.dn.ou.roles }}"
+     dn: "{{ LDAP.DN.OU.ROLES }}"
      server_uri: "{{ openldap_server_uri }}"
-     bind_dn: "{{ ldap.dn.administrator.data }}"
-     bind_pw: "{{ ldap.bind_credential }}"
+     bind_dn: "{{ LDAP.DN.ADMINISTRATOR.DATA }}"
+     bind_pw: "{{ LDAP.BIND_CREDENTIAL }}"
      objectClass: organizationalUnit
      attributes:
        ou: roles
@@ -1,22 +1,22 @@
  - name: Gather all users with their current objectClass list
    community.general.ldap_search:
      server_uri: "{{ openldap_server_uri }}"
-     bind_dn: "{{ ldap.dn.administrator.data }}"
-     bind_pw: "{{ ldap.bind_credential }}"
-     dn: "{{ ldap.dn.ou.users }}"
+     bind_dn: "{{ LDAP.DN.ADMINISTRATOR.DATA }}"
+     bind_pw: "{{ LDAP.BIND_CREDENTIAL }}"
+     dn: "{{ LDAP.DN.OU.USERS }}"
      scope: subordinate
-     filter: "{{ ldap.filters.users.all }}"
+     filter: "{{ LDAP.FILTERS.USERS.ALL }}"
      attrs:
        - dn
        - objectClass
-       - "{{ ldap.user.attributes.id }}"
+       - "{{ LDAP.USER.ATTRIBUTES.ID }}"
    register: ldap_users_with_classes

  - name: Add only missing auxiliary classes
    community.general.ldap_attrs:
      server_uri: "{{ openldap_server_uri }}"
-     bind_dn: "{{ ldap.dn.administrator.data }}"
-     bind_pw: "{{ ldap.bind_credential }}"
+     bind_dn: "{{ LDAP.DN.ADMINISTRATOR.DATA }}"
+     bind_pw: "{{ LDAP.BIND_CREDENTIAL }}"
      dn: "{{ item.dn }}"
      attributes:
        objectClass: "{{ missing_auxiliary }}"
@@ -28,7 +28,7 @@
    label: "{{ item.dn }}"
    vars:
      missing_auxiliary: >-
-       {{ (ldap.user.objects.auxiliary.values() | list)
+       {{ (LDAP.USER.OBJECTS.AUXILIARY.values() | list)
          | difference(item.objectClass | default([]))
        }}
    when: missing_auxiliary | length > 0
@@ -7,7 +7,7 @@
  - name: Create {{ domains | get_domain(application_id) }}.conf if LDAP is exposed to internet
    template:
      src: "nginx.stream.conf.j2"
-     dest: "{{NGINX.DIRECTORIES.STREAMS}}{{ domains | get_domain(application_id) }}.conf"
+     dest: "{{ NGINX.DIRECTORIES.STREAMS }}{{ domains | get_domain(application_id) }}.conf"
    notify: restart openresty
    when: applications | get_app_conf(application_id, 'network.public', True) | bool
@@ -37,7 +37,7 @@
  - name: "Reset LDAP Credentials"
    include_tasks: 01_credentials.yml
    when:
-     - applications | get_app_conf(application_id, 'network.local', True)
+     - applications | get_app_conf(application_id, 'network.local')
      - applications | get_app_conf(application_id, 'provisioning.credentials', True)

  - name: "create directory {{ openldap_ldif_host_path }}{{ item }}"
@@ -8,9 +8,9 @@
    vars:
      schema_name: "nextcloud"
      attribute_defs:
-       - "( 1.3.6.1.4.1.99999.1 NAME '{{ ldap.user.attributes.nextcloud_quota }}' DESC 'Quota for Nextcloud' EQUALITY integerMatch ORDERING integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE )"
+       - "( 1.3.6.1.4.1.99999.1 NAME '{{ LDAP.USER.ATTRIBUTES.NEXTCLOUD_QUOTA }}' DESC 'Quota for Nextcloud' EQUALITY integerMatch ORDERING integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE )"
      objectclass_defs:
-       - "( 1.3.6.1.4.1.99999.2 NAME 'nextcloudUser' DESC 'Auxiliary class for Nextcloud attributes' AUXILIARY MAY ( {{ ldap.user.attributes.nextcloud_quota }} ) )"
+       - "( 1.3.6.1.4.1.99999.2 NAME '{{ LDAP.USER.OBJECTS.AUXILIARY.NEXTCLOUD_USER }}' DESC 'Auxiliary class for Nextcloud attributes' AUXILIARY MAY ( {{ LDAP.USER.ATTRIBUTES.NEXTCLOUD_QUOTA }} ) )"
    command: >
      ldapsm
      -s {{ openldap_server_uri }}
@@ -8,16 +8,16 @@
    vars:
      schema_name: "openssh-lpk"
      attribute_defs:
-       - "( 1.3.6.1.4.1.24552.1.1 NAME '{{ ldap.user.attributes.ssh_public_key }}' DESC 'OpenSSH Public Key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 )"
+       - "( 1.3.6.1.4.1.24552.1.1 NAME '{{ LDAP.USER.ATTRIBUTES.SSH_PUBLIC_KEY }}' DESC 'OpenSSH Public Key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 )"
        - "( 1.3.6.1.4.1.24552.1.2 NAME 'sshFingerprint' DESC 'OpenSSH Public Key Fingerprint' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 )"
      objectclass_defs:
        - >-
          ( 1.3.6.1.4.1.24552.2.1
-           NAME '{{ ldap.user.objects.auxiliary.ssh_public_key }}'
+           NAME '{{ LDAP.USER.OBJECTS.AUXILIARY.SSH_PUBLIC_KEY }}'
            DESC 'Auxiliary class for OpenSSH public keys'
            SUP top
            AUXILIARY
-           MAY ( {{ ldap.user.attributes.ssh_public_key }} $ sshFingerprint ) )
+           MAY ( {{ LDAP.USER.ATTRIBUTES.SSH_PUBLIC_KEY }} $ sshFingerprint ) )
    command: >
      ldapsm
@@ -10,12 +10,12 @@
  {% endif %}
      volumes:
        - 'data:/bitnami/openldap'
-       - '{{openldap_ldif_host_path}}:{{openldap_ldif_docker_path}}:ro'
+       - '{{openldap_ldif_host_path}}:{{ openldap_ldif_docker_path }}:ro'
      healthcheck:
        test: >
          bash -c '
          ldapsearch -x -H ldap://localhost:{{ openldap_docker_port_open }} \
-           -D "{{ ldap.dn.administrator.data }}" -w "{{ ldap.bind_credential }}" -b "{{ ldap.dn.root }}" > /dev/null \
+           -D "{{ LDAP.DN.ADMINISTRATOR.DATA }}" -w "{{ LDAP.BIND_CREDENTIAL }}" -b "{{ LDAP.DN.ROOT }}" > /dev/null \
          && ldapsearch -Y EXTERNAL -H ldapi:/// \
            -b cn=config "(&(objectClass=olcOverlayConfig)(olcOverlay=memberof))" \
          | grep "olcOverlay:" | grep -q "memberof"
@@ -4,15 +4,15 @@
  # GENERAL
  ## Admin (Data)
  LDAP_ADMIN_USERNAME= {{ applications | get_app_conf(application_id, 'users.administrator.username') }} # LDAP database admin user.
- LDAP_ADMIN_PASSWORD= {{ldap.bind_credential}} # LDAP database admin password.
+ LDAP_ADMIN_PASSWORD= {{ LDAP.BIND_CREDENTIAL }} # LDAP database admin password.

  ## Users
  LDAP_USERS= ' ' # Comma separated list of LDAP users to create in the default LDAP tree. Default: user01,user02
  LDAP_PASSWORDS= ' ' # Comma separated list of passwords to use for LDAP users. Default: bitnami1,bitnami2
- LDAP_ROOT= {{ldap.dn.root}} # LDAP baseDN (or suffix) of the LDAP tree. Default: dc=example,dc=org
+ LDAP_ROOT= {{ LDAP.DN.ROOT }} # LDAP baseDN (or suffix) of the LDAP tree. Default: dc=example,dc=org

  ## Admin (Config)
- LDAP_ADMIN_DN= {{ldap.dn.administrator.data}}
+ LDAP_ADMIN_DN= {{ LDAP.DN.ADMINISTRATOR.DATA }}
  LDAP_CONFIG_ADMIN_ENABLED= yes
  LDAP_CONFIG_ADMIN_USERNAME= {{ applications | get_app_conf(application_id, 'users.administrator.username') }}
  LDAP_CONFIG_ADMIN_PASSWORD= {{ applications | get_app_conf(application_id, 'credentials.administrator_password') }}
@@ -2,5 +2,5 @@ server {
    listen {{ ports.public.ldaps['svc-db-openldap'] }} ssl;
    proxy_pass 127.0.0.1:{{ ports.localhost.ldap['svc-db-openldap'] }};

- {% include 'roles/srv-letsencrypt/templates/ssl_credentials.j2' %}
+ {% include 'roles/sys-svc-letsencrypt/templates/ssl_credentials.j2' %}
  }
@@ -4,7 +4,7 @@ application_id: "svc-db-openldap"
  openldap_docker_port_secure: 636
  openldap_docker_port_open: 389
  openldap_server_uri: "ldap://127.0.0.1:{{ ports.localhost.ldap[application_id] }}"
- openldap_bind_dn: "{{ ldap.dn.administrator.configuration }}"
+ openldap_bind_dn: "{{ LDAP.DN.ADMINISTRATOR.CONFIGURATION }}"
  openldap_bind_pw: "{{ applications | get_app_conf(application_id, 'credentials.administrator_password', True) }}"

  # LDIF Variables
@@ -21,4 +21,4 @@ openldap_version: "{{ applications | get_app_conf(application_id,
  openldap_volume: "{{ applications | get_app_conf(application_id, 'docker.volumes.data', True) }}"
  openldap_network: "{{ applications | get_app_conf(application_id, 'docker.network', True) }}"

- openldap_network_expose_local: "{{ applications | get_app_conf(application_id, 'network.public', True) | bool or applications | get_app_conf(application_id, 'network.local', True) | bool }}"
+ openldap_network_expose_local: "{{ applications | get_app_conf(application_id, 'network.public', True) | bool or applications | get_app_conf(application_id, 'network.local') | bool }}"
@@ -0,0 +1,44 @@
+ import os
+ import yaml
+ from ansible.errors import AnsibleFilterError
+
+ def _iter_role_vars_files(roles_dir):
+     if not os.path.isdir(roles_dir):
+         raise AnsibleFilterError(f"roles_dir not found: {roles_dir}")
+     for name in os.listdir(roles_dir):
+         role_path = os.path.join(roles_dir, name)
+         if not os.path.isdir(role_path):
+             continue
+         vars_main = os.path.join(role_path, "vars", "main.yml")
+         if os.path.isfile(vars_main):
+             yield vars_main
+
+ def _is_postgres_role(vars_file):
+     try:
+         with open(vars_file, "r", encoding="utf-8") as f:
+             data = yaml.safe_load(f) or {}
+         # only count roles with an explicit database_type: postgres in vars
+         return str(data.get("database_type", "")).strip().lower() == "postgres"
+     except Exception:
+         # ignore unreadable/broken YAML files quietly
+         return False
+
+ def split_postgres_connections(total_connections, roles_dir="roles"):
+     """
+     Return an integer average: total_connections / number of roles with database_type: postgres.
+     Uses max(count, 1) to avoid division by zero.
+     """
+     try:
+         total = int(total_connections)
+     except Exception:
+         raise AnsibleFilterError(f"total_connections must be int-like, got: {total_connections!r}")
+
+     count = sum(1 for vf in _iter_role_vars_files(roles_dir) if _is_postgres_role(vf))
+     denom = max(count, 1)
+     return max(1, total // denom)
+
+ class FilterModule(object):
+     def filters(self):
+         return {
+             "split_postgres_connections": split_postgres_connections
+         }
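A worked example of the filter's arithmetic; the role count here is hypothetical:

    # With POSTGRES_MAX_CONNECTIONS = 400 and 8 roles declaring database_type: postgres,
    # the filter returns 400 // 8 = 50:
    POSTGRES_ALLOWED_AVG_CONNECTIONS: "{{ 400 | split_postgres_connections(playbook_dir ~ '/roles') }}"  # -> 50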
@@ -1,3 +1,7 @@
+ - name: Compute average allowed connections per Postgres app (once)
+   set_fact:
+     POSTGRES_ALLOWED_AVG_CONNECTIONS: "{{ (POSTGRES_MAX_CONNECTIONS | split_postgres_connections(playbook_dir ~ '/roles')) | int }}"
+   run_once: true
+
  - name: Include dependency 'sys-svc-docker'
    include_role:
@@ -6,6 +6,20 @@
      build:
        context: .
        dockerfile: Dockerfile
+     pull_policy: never
+     command:
+       - "postgres"
+       - "-c"
+       - "max_connections={{ POSTGRES_MAX_CONNECTIONS }}"
+       - "-c"
+       - "superuser_reserved_connections={{ POSTGRES_SUPERUSER_RESERVED_CONNECTIONS }}"
+       - "-c"
+       - "shared_buffers={{ POSTGRES_SHARED_BUFFERS }}"
+       - "-c"
+       - "work_mem={{ POSTGRES_WORK_MEM }}"
+       - "-c"
+       - "maintenance_work_mem={{ POSTGRES_MAINTENANCE_WORK_MEM }}"

  {% include 'roles/docker-container/templates/base.yml.j2' %}
  {% if POSTGRES_EXPOSE_LOCAL %}
      ports:
@@ -8,13 +8,13 @@ docker_compose_flush_handlers: true
  database_type: "{{ application_id | get_entity_name }}"

  ## Postgres
- POSTGRES_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.data', True) }}"
- POSTGRES_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.name', True) }}"
- POSTGRES_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.image', True) }}"
+ POSTGRES_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.data') }}"
+ POSTGRES_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.name') }}"
+ POSTGRES_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.image') }}"
  POSTGRES_SUBNET: "{{ networks.local['svc-db-postgres'].subnet }}"
- POSTGRES_NETWORK_NAME: "{{ applications | get_app_conf(application_id, 'docker.network', True) }}"
- POSTGRES_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.version', True) }}"
- POSTGRES_PASSWORD: "{{ applications | get_app_conf(application_id, 'credentials.POSTGRES_PASSWORD', True) }}"
+ POSTGRES_NETWORK_NAME: "{{ applications | get_app_conf(application_id, 'docker.network') }}"
+ POSTGRES_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.version') }}"
+ POSTGRES_PASSWORD: "{{ applications | get_app_conf(application_id, 'credentials.POSTGRES_PASSWORD') }}"
  POSTGRES_PORT: "{{ database_port | default(ports.localhost.database[application_id]) }}"
  POSTGRES_INIT: "{{ database_username is defined and database_password is defined and database_name is defined }}"
  POSTGRES_EXPOSE_LOCAL: True  # Exposes the DB to localhost; almost always necessary
@@ -22,4 +22,16 @@ POSTGRES_CUSTOM_IMAGE_NAME: "postgres_custom"
  POSTGRES_LOCAL_HOST: "127.0.0.1"
  POSTGRES_VECTOR_ENABLED: True  # Required by Discourse; probably in a later step it makes sense to define this as a configuration option in config/main.yml
  POSTGRES_RETRIES: 5
+
+ ## Performance
+ POSTGRES_TOTAL_RAM_MB: "{{ ansible_memtotal_mb | int }}"
+ POSTGRES_VCPUS: "{{ ansible_processor_vcpus | int }}"
+ POSTGRES_MAX_CONNECTIONS: "{{ [ ((POSTGRES_VCPUS | int) * 30 + 50), 400 ] | min }}"
+ POSTGRES_SUPERUSER_RESERVED_CONNECTIONS: 3
+ POSTGRES_SHARED_BUFFERS_MB: "{{ ((POSTGRES_TOTAL_RAM_MB | int) * 25) // 100 }}"
+ POSTGRES_SHARED_BUFFERS: "{{ POSTGRES_SHARED_BUFFERS_MB ~ 'MB' }}"
+ POSTGRES_WORK_MEM_MB: "{{ [ ( (POSTGRES_TOTAL_RAM_MB | int) // ( [ (POSTGRES_MAX_CONNECTIONS | int), 1 ] | max ) // 2 ), 1 ] | max }}"
+ POSTGRES_WORK_MEM: "{{ POSTGRES_WORK_MEM_MB ~ 'MB' }}"
+ POSTGRES_MAINTENANCE_WORK_MEM_MB: "{{ [ (((POSTGRES_TOTAL_RAM_MB | int) * 5) // 100), 64 ] | max }}"
+ POSTGRES_MAINTENANCE_WORK_MEM: "{{ POSTGRES_MAINTENANCE_WORK_MEM_MB ~ 'MB' }}"
+ POSTGRES_DELAY: 2
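A worked sizing example under an assumed host with 8 vCPUs and 16384 MB of RAM (hypothetical hardware, integer division as in the expressions above):

    # POSTGRES_MAX_CONNECTIONS:          min(8 * 30 + 50, 400)       = 290
    # POSTGRES_SHARED_BUFFERS_MB:        16384 * 25 // 100           = 4096  -> "4096MB"
    # POSTGRES_WORK_MEM_MB:              max(16384 // 290 // 2, 1)   = 28    -> "28MB"
    # POSTGRES_MAINTENANCE_WORK_MEM_MB:  max(16384 * 5 // 100, 64)   = 819   -> "819MB"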
@@ -18,10 +18,10 @@
      group: root
    notify: reload sysctl configuration

- - name: create /etc/wireguard/wg0.{{ SOFTWARE_NAME | lower }}.conf
+ - name: "deploy {{ WG0_CONF_DEST }}"
    copy:
-     src: "{{ inventory_dir }}/files/{{ inventory_hostname }}/etc/wireguard/wg0.conf"
-     dest: /etc/wireguard/wg0.{{ SOFTWARE_NAME | lower }}.conf
+     src: "{{ [ inventory_dir, 'files', inventory_hostname, 'etc/wireguard/wg0.conf' ] | path_join }}"
+     dest: "{{ WG0_CONF_DEST }}"
      owner: root
      group: root
    notify: restart wireguard
@@ -1 +1,3 @@
  application_id: svc-net-wireguard-core
+
+ WG0_CONF_DEST: "/etc/wireguard/wg0.conf"
@@ -1,5 +1,5 @@
  - include_role:
      name: sys-service
    vars:
-     system_service_tpl_exec_start_pre: '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(" ") }} --ignore {{ SYS_SERVICE_OPTIMIZE_DRIVE }} {{ SYS_SERVICE_BACKUP_RMT_2_LOC }} --timeout "{{ SYS_TIMEOUT_STORAGE_OPTIMIZER }}"'
+     system_service_tpl_exec_start_pre: '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(" ") }} --ignore {{ SYS_SERVICE_OPTIMIZE_DRIVE }} {{ SYS_SERVICE_BACKUP_RMT_2_LOC }} {{ SYS_SERVICE_GROUP_CLEANUP | join(" ") }} --timeout "{{ SYS_TIMEOUT_STORAGE_OPTIMIZER }}"'
      system_service_tpl_exec_start: '{{ system_service_script_exec }} --mass-storage-path {{ OPT_DRIVE_MASS_STORAGE_PATH }} --rapid-storage-path {{ OPT_DRIVE_RAPID_STORAGE_PATH }}'
@@ -8,4 +8,3 @@ database_type: ""
  OPENRESTY_IMAGE: "openresty/openresty"
  OPENRESTY_VERSION: "alpine"
  OPENRESTY_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.openresty.name', True) }}"
-
@@ -6,5 +6,7 @@

  - include_role:
      name: sys-service
    vars:
+     # If the email notifier fails, trigger the Telegram notifier,
+     # passing the failing unit's name as the instance text.
      system_service_tpl_on_failure: "{{ ('sys-ctl-alm-telegram@') | get_service_name(SOFTWARE_NAME, False) }}%n.service"
@@ -12,6 +12,10 @@

  - include_role:
      name: sys-service
+   vars:
+     # If the Telegram notifier fails, trigger the Email notifier,
+     # passing the failing unit's name as the instance text.
+     system_service_tpl_on_failure: "{{ ('sys-ctl-alm-email@') | get_service_name(SOFTWARE_NAME, False) }}%n.service"

  - name: install curl
    community.general.pacman:
@@ -15,4 +15,4 @@ fi
  /usr/bin/curl -s -X POST \
    "https://api.telegram.org/bot{{ telegram_bot_token }}/sendMessage" \
    -d chat_id="{{ telegram_chat_id }}" \
-   --data-urlencode text="service ${friendly} on ${host} failed"
+   --data-urlencode text="service ${friendly//\//-} on ${host} failed"
@@ -1,36 +0,0 @@
- def dict_to_cli_args(data):
-     """
-     Convert a dictionary into a CLI argument string.
-     Example:
-         {
-             "backup-dir": "/mnt/backups",
-             "shutdown": True,
-             "ignore-volumes": ["redis", "memcached"]
-         }
-     becomes:
-         --backup-dir=/mnt/backups --shutdown --ignore-volumes="redis memcached"
-     """
-     if not isinstance(data, dict):
-         raise TypeError("Expected a dictionary for CLI argument conversion")
-
-     args = []
-
-     for key, value in data.items():
-         cli_key = f"--{key}"
-
-         if isinstance(value, bool):
-             if value:
-                 args.append(cli_key)
-         elif isinstance(value, list):
-             items = " ".join(map(str, value))
-             args.append(f'{cli_key}="{items}"')
-         elif value is not None:
-             args.append(f'{cli_key}={value}')
-
-     return " ".join(args)
-
-
- class FilterModule(object):
-     def filters(self):
-         return {
-             'dict_to_cli_args': dict_to_cli_args
-         }
@@ -4,5 +4,5 @@ OnFailure={{ SYS_SERVICE_ON_FAILURE_COMPOSE }} {{ SYS_SERVICE_CLEANUP_BACKUPS_FA

  [Service]
  Type=oneshot
- ExecStartPre=/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(' ') }} --ignore {{ SYS_SERVICE_GROUP_BACKUPS | reject('equalto', role_name ~ '-everything') | join(' ') }} --timeout "{{ SYS_TIMEOUT_BACKUP_SERVICES }}"
+ ExecStartPre=/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(' ') }} --ignore {{ SYS_SERVICE_BACKUP_DOCKER_2_LOC }} --timeout "{{ SYS_TIMEOUT_BACKUP_SERVICES }}"
  ExecStart=/bin/sh -c '{{ BKP_DOCKER_2_LOC_EXEC }}'
@@ -12,13 +12,13 @@ BKP_DOCKER_2_LOC_DB_ENABLED: "{{ database_type | default('') | bool }}"

  # Gather mapped values as lists
  BKP_DOCKER_2_LOC_DB_ROUTINE: >-
-   {{ applications | find_dock_val_by_bkp_entr('database_routine', 'name') | list }}
+   {{ applications | find_dock_val_by_bkp_entr('database_routine', 'name') | list | sort }}

  BKP_DOCKER_2_LOC_NO_STOP_REQUIRED: >-
-   {{ applications | find_dock_val_by_bkp_entr('no_stop_required', 'image') | list }}
+   {{ applications | find_dock_val_by_bkp_entr('no_stop_required', 'image') | list | sort }}

  BKP_DOCKER_2_LOC_DISABLED: >-
-   {{ applications | find_dock_val_by_bkp_entr('disabled', 'image') | list }}
+   {{ applications | find_dock_val_by_bkp_entr('disabled', 'image') | list | sort }}

  # CLI argument strings (only set if list not empty)
  BKP_DOCKER_2_LOC_DB_ROUTINE_CLI: >-
roles/sys-ctl-cln-anon-volumes/tasks/main.yml (new file, 18 lines)
@@ -0,0 +1,18 @@
+ - block:
+     - name: "pkgmgr install"
+       include_role:
+         name: pkgmgr-install
+       vars:
+         package_name: dockreap
+
+     - include_role:
+         name: sys-service
+       vars:
+         system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
+         system_service_tpl_exec_start: dockreap --no-confirmation
+         system_service_tpl_exec_start_pre: ""  # Anonymous volumes can always be removed; it isn't necessary to wait for any service to stop.
+         system_service_copy_files: false
+
+     - include_tasks: utils/run_once.yml
+   when:
+     - run_once_sys_ctl_cln_anon_volumes is not defined

roles/sys-ctl-cln-anon-volumes/vars/main.yml (new file, 1 line)
@@ -0,0 +1 @@
+ system_service_id: sys-ctl-cln-anon-volumes