Mirror of https://github.com/kevinveenbirkenbach/computer-playbook.git
Synced 2025-09-08 19:27:18 +02:00

Compare commits: 983287a84a...master (101 commits)
Commits in range:
445c94788e, aac9704e8b, a57a5f8828, 90843726de, d25da76117, d48a1b3c0a, 2839d2e1a4, 00c99e58e9, 904040589e, 9f3d300bca, 9e253a2d09, 49120b0dcf, b6f91ab9d3, 77e8e7ed7e, 32bc17e0c3, e294637cb6, 577767bed6, e77f8da510, 4738b263ec, 0a588023a7, d2fa90774b, 0e72dcbe36, 4f8ce598a9, 3769e66d8d, 33a5fadf67, 699a6b6f1e, 61c29eee60, d5204fb5c2, 751615b1a4, e2993d2912, 24b6647bfb, d2dc2eab5f, a1130e33d7, df122905eb, d093a22d61, 5e550ce3a3, 0ada12e3ca, 1a5ce4a7fa, a9abb3ce5d, 71ceb339fc, 61bba3d2ef, 0bde4295c7, 8059f272d5, 7c814e6e83, d760c042c2, 6cac8085a8, 3a83f3d14e, 61d852c508, 188b098503, bc56940e55, 5dfc2efb5a, 7f9dc65b37, 163a925096, a8c88634b5, ce3fe1cd51, 7ca8b7c71d, 110381e80c, b02d88adc0, b7065837df, c98a2378c4, 4ae3cee36c, b834f0c95c, 9f734dff17, 6fa4d00547, 7254667186, aaedaab3da, 7791bd8c04, 34b3f3b0ad, 94fe58b5da, 9feb766e6f, 231fd567b3, 3f8e7c1733, 3bfab9ef8e, f1870c07be, d0cec9a7d4, 1dbd714a56, 3a17b2979e, bb0530c2ac, aa2eb53776, 5f66c1a622, b3dfb8bf22, db642c1c39, 2fccebbd1f, c23fbd8ec4, 2999d9af77, 2809ffb9f0, cb12114ce8, ba99e558f7, 2aed0f97d2, f36c7831b1, 009bee531b, 4c7bb6d9db, 092869b29a, f4ea6c6c0f, 3ed84717a7, 1cfc2b7e23, 01b9648650, 65d3b3040d, 28f7ac5aba, 19926b0c57, 3a79d9d630
@@ -11,7 +11,7 @@ sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
 from module_utils.entity_name_utils import get_entity_name
 
 # Paths to the group-vars files
-PORTS_FILE = './group_vars/all/09_ports.yml'
+PORTS_FILE = './group_vars/all/10_ports.yml'
 NETWORKS_FILE = './group_vars/all/09_networks.yml'
 ROLE_TEMPLATE_DIR = './templates/roles/web-app'
 ROLES_DIR = './roles'
@@ -228,7 +228,7 @@ def parse_meta_dependencies(role_dir: str) -> List[str]:
 def sanitize_run_once_var(role_name: str) -> str:
     """
     Generate run_once variable name from role name.
-    Example: 'sys-srv-web-inj-logout' -> 'run_once_sys_srv_web_inj_logout'
+    Example: 'sys-front-inj-logout' -> 'run_once_sys_front_inj_logout'
     """
     return "run_once_" + role_name.replace("-", "_")
@@ -15,7 +15,7 @@ Follow these guides to install and configure Infinito.Nexus:
 - **Networking & VPN** - Configure `WireGuard`, `OpenVPN`, and `Nginx Reverse Proxy`.
 
 ## Managing & Updating Infinito.Nexus 🔄
-- Regularly update services using `update-docker`, `update-pacman`, or `update-apt`.
+- Regularly update services using `update-pacman` or `update-apt`.
 - Monitor system health with `sys-ctl-hlth-btrfs`, `sys-ctl-hlth-webserver`, and `sys-ctl-hlth-docker-container`.
 - Automate system maintenance with `sys-lock`, `sys-ctl-cln-bkps`, and `sys-ctl-rpr-docker-hard`.
@@ -1,86 +0,0 @@
-from ansible.errors import AnsibleFilterError
-
-class FilterModule(object):
-    def filters(self):
-        return {'alias_domains_map': self.alias_domains_map}
-
-    def alias_domains_map(self, apps, PRIMARY_DOMAIN):
-        """
-        Build a map of application IDs to their alias domains.
-
-        - If no `domains` key → []
-        - If `domains` exists but is an empty dict → return the original cfg
-        - Explicit `aliases` are used (default appended if missing)
-        - If only `canonical` defined and it doesn't include default, default is added
-        - Invalid types raise AnsibleFilterError
-        """
-        def parse_entry(domains_cfg, key, app_id):
-            if key not in domains_cfg:
-                return None
-            entry = domains_cfg[key]
-            if isinstance(entry, dict):
-                values = list(entry.values())
-            elif isinstance(entry, list):
-                values = entry
-            else:
-                raise AnsibleFilterError(
-                    f"Unexpected type for 'domains.{key}' in application '{app_id}': {type(entry).__name__}"
-                )
-            for d in values:
-                if not isinstance(d, str) or not d.strip():
-                    raise AnsibleFilterError(
-                        f"Invalid domain entry in '{key}' for application '{app_id}': {d!r}"
-                    )
-            return values
-
-        def default_domain(app_id, primary):
-            return f"{app_id}.{primary}"
-
-        # 1) Precompute canonical domains per app (fallback to default)
-        canonical_map = {}
-        for app_id, cfg in apps.items():
-            domains_cfg = cfg.get('server', {}).get('domains', {})
-            entry = domains_cfg.get('canonical')
-            if entry is None:
-                canonical_map[app_id] = [default_domain(app_id, PRIMARY_DOMAIN)]
-            elif isinstance(entry, dict):
-                canonical_map[app_id] = list(entry.values())
-            elif isinstance(entry, list):
-                canonical_map[app_id] = list(entry)
-            else:
-                raise AnsibleFilterError(
-                    f"Unexpected type for 'server.domains.canonical' in application '{app_id}': {type(entry).__name__}"
-                )
-
-        # 2) Build alias list per app
-        result = {}
-        for app_id, cfg in apps.items():
-            domains_cfg = cfg.get('server', {}).get('domains')
-
-            # no domains key → no aliases
-            if domains_cfg is None:
-                result[app_id] = []
-                continue
-
-            # empty domains dict → return the original cfg
-            if isinstance(domains_cfg, dict) and not domains_cfg:
-                result[app_id] = cfg
-                continue
-
-            # otherwise, compute aliases
-            aliases = parse_entry(domains_cfg, 'aliases', app_id) or []
-            default = default_domain(app_id, PRIMARY_DOMAIN)
-            has_aliases = 'aliases' in domains_cfg
-            has_canon = 'canonical' in domains_cfg
-
-            if has_aliases:
-                if default not in aliases:
-                    aliases.append(default)
-            elif has_canon:
-                canon = canonical_map.get(app_id, [])
-                if default not in canon and default not in aliases:
-                    aliases.append(default)
-
-            result[app_id] = aliases
-
-        return result
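The removed filter's alias rules are easiest to see on a small input. A minimal sketch, assuming a hypothetical two-app map (app IDs and domains invented for illustration), using the FilterModule class deleted above:

apps = {
    'web-app-matomo': {'server': {'domains': {'aliases': ['stats.example.com']}}},
    'web-app-chess': {},
}
fm = FilterModule()
print(fm.alias_domains_map(apps, 'example.com'))
# {'web-app-matomo': ['stats.example.com', 'web-app-matomo.example.com'],
#  'web-app-chess': []}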
@@ -1,10 +1,14 @@
 from ansible.errors import AnsibleFilterError
 import hashlib
 import base64
-import sys, os
+import sys
+import os
+
+# Ensure module_utils is importable when this filter runs from Ansible
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
 from module_utils.config_utils import get_app_conf
 from module_utils.get_url import get_url
 
 
 class FilterModule(object):
     """
@@ -16,10 +20,14 @@ class FilterModule(object):
             'build_csp_header': self.build_csp_header,
         }
 
+    # -------------------------------
+    # Helpers
+    # -------------------------------
+
     @staticmethod
     def is_feature_enabled(applications: dict, feature: str, application_id: str) -> bool:
         """
-        Return True if applications[application_id].features[feature] is truthy.
+        Returns True if applications[application_id].features[feature] is truthy.
         """
         return get_app_conf(
             applications,
@@ -31,6 +39,10 @@ class FilterModule(object):
 
     @staticmethod
     def get_csp_whitelist(applications, application_id, directive):
+        """
+        Returns a list of additional whitelist entries for a given directive.
+        Accepts both scalar and list in config; always returns a list.
+        """
        wl = get_app_conf(
             applications,
             application_id,
@@ -47,28 +59,37 @@ class FilterModule(object):
     @staticmethod
     def get_csp_flags(applications, application_id, directive):
         """
-        Dynamically extract all CSP flags for a given directive and return them as tokens,
-        e.g., "'unsafe-eval'", "'unsafe-inline'", etc.
+        Returns CSP flag tokens (e.g., "'unsafe-eval'", "'unsafe-inline'") for a directive,
+        merging sane defaults with app config.
+        Default: 'unsafe-inline' is enabled for style-src and style-src-elem.
         """
-        flags = get_app_conf(
+        # Defaults that apply to all apps
+        default_flags = {}
+        if directive in ('style-src', 'style-src-elem'):
+            default_flags = {'unsafe-inline': True}
+
+        configured = get_app_conf(
             applications,
             application_id,
             'server.csp.flags.' + directive,
             False,
             {}
         )
-        tokens = []
-
-        for flag_name, enabled in flags.items():
+
+        # Merge defaults with configured flags (configured overrides defaults)
+        merged = {**default_flags, **configured}
+
+        tokens = []
+        for flag_name, enabled in merged.items():
             if enabled:
                 tokens.append(f"'{flag_name}'")
 
         return tokens
 
     @staticmethod
     def get_csp_inline_content(applications, application_id, directive):
         """
-        Return inline script/style snippets to hash for a given CSP directive.
+        Returns inline script/style snippets to hash for a given directive.
+        Accepts both scalar and list in config; always returns a list.
         """
         snippets = get_app_conf(
             applications,
@@ -86,7 +107,7 @@ class FilterModule(object):
     @staticmethod
     def get_csp_hash(content):
         """
-        Compute the SHA256 hash of the given inline content and return
+        Computes the SHA256 hash of the given inline content and returns
         a CSP token like "'sha256-<base64>'".
         """
         try:
@@ -96,6 +117,10 @@ class FilterModule(object):
         except Exception as exc:
             raise AnsibleFilterError(f"get_csp_hash failed: {exc}")
 
+    # -------------------------------
+    # Main builder
+    # -------------------------------
+
     def build_csp_header(
         self,
         applications,
@@ -105,80 +130,80 @@ class FilterModule(object):
         matomo_feature_name='matomo'
     ):
         """
-        Build the Content-Security-Policy header value dynamically based on application settings.
-        Inline hashes are read from applications[application_id].csp.hashes
+        Builds the Content-Security-Policy header value dynamically based on application settings.
+        - Flags (e.g., 'unsafe-eval', 'unsafe-inline') are read from server.csp.flags.<directive>,
+          with sane defaults applied in get_csp_flags (always 'unsafe-inline' for style-src and style-src-elem).
+        - Inline hashes are read from server.csp.hashes.<directive>.
+        - Whitelists are read from server.csp.whitelist.<directive>.
+        - Inline hashes are added only if the final tokens do NOT include 'unsafe-inline'.
         """
         try:
             directives = [
-                'default-src',
-                'connect-src',
-                'frame-ancestors',
-                'frame-src',
-                'script-src',
-                'script-src-elem',
-                'style-src',
-                'font-src',
-                'worker-src',
-                'manifest-src',
-                'media-src',
+                'default-src',      # Fallback source list for content types not explicitly listed
+                'connect-src',      # Allowed URLs for XHR, WebSockets, EventSource, fetch()
+                'frame-ancestors',  # Who may embed this page
+                'frame-src',        # Sources for nested browsing contexts (e.g., <iframe>)
+                'script-src',       # Sources for script execution
+                'script-src-elem',  # Sources for <script> elements
+                'style-src',        # Sources for inline styles and <style>/<link> elements
+                'style-src-elem',   # Sources for <style> and <link rel="stylesheet">
+                'font-src',         # Sources for fonts
+                'worker-src',       # Sources for workers
+                'manifest-src',     # Sources for web app manifests
+                'media-src',        # Sources for audio and video
             ]
 
             parts = []
 
             for directive in directives:
                 tokens = ["'self'"]
 
-                # unsafe-eval / unsafe-inline flags
+                # 1) Load flags (includes defaults from get_csp_flags)
                 flags = self.get_csp_flags(applications, application_id, directive)
                 tokens += flags
 
-                if directive in ['script-src-elem', 'connect-src']:
-                    # Matomo integration
-                    if self.is_feature_enabled(applications, matomo_feature_name, application_id):
-                        matomo_domain = domains.get('web-app-matomo')[0]
-                        if matomo_domain:
-                            tokens.append(f"{web_protocol}://{matomo_domain}")
-
-                    # Allow the loading of js from the cdn
-                    if self.is_feature_enabled(applications, 'logout', application_id) or self.is_feature_enabled(applications, 'desktop', application_id):
-                        domain = domains.get('web-svc-cdn')[0]
-                        tokens.append(f"{web_protocol}://{domain}")
+                # 2) Allow fetching from internal CDN by default for selected directives
+                if directive in ['script-src-elem', 'connect-src', 'style-src-elem']:
+                    tokens.append(get_url(domains, 'web-svc-cdn', web_protocol))
 
-                # ReCaptcha integration: allow loading scripts from Google if feature enabled
+                # 3) Matomo integration if feature is enabled
+                if directive in ['script-src-elem', 'connect-src']:
+                    if self.is_feature_enabled(applications, matomo_feature_name, application_id):
+                        tokens.append(get_url(domains, 'web-app-matomo', web_protocol))
+
+                # 4) ReCaptcha integration (scripts + frames) if feature is enabled
                 if self.is_feature_enabled(applications, 'recaptcha', application_id):
-                    if directive in ['script-src-elem',"frame-src"]:
+                    if directive in ['script-src-elem', 'frame-src']:
                         tokens.append('https://www.gstatic.com')
                         tokens.append('https://www.google.com')
 
+                # 5) Frame ancestors handling (desktop + logout support)
                 if directive == 'frame-ancestors':
-                    # Enable loading via ancestors
                     if self.is_feature_enabled(applications, 'desktop', application_id):
+                        # Allow being embedded by the desktop app domain (and potentially its parent)
                         domain = domains.get('web-app-desktop')[0]
-                        sld_tld = ".".join(domain.split(".")[-2:])  # yields "example.com"
-                        tokens.append(f"{sld_tld}")                 # yields "*.example.com"
-
+                        sld_tld = ".".join(domain.split(".")[-2:])  # e.g., example.com
+                        tokens.append(f"{sld_tld}")
                     if self.is_feature_enabled(applications, 'logout', application_id):
-
-                        # Allow logout via infinito logout proxy
-                        domain = domains.get('web-svc-logout')[0]
-                        tokens.append(f"{web_protocol}://{domain}")
-
-                        # Allow logout via keycloak app
-                        domain = domains.get('web-app-keycloak')[0]
-                        tokens.append(f"{web_protocol}://{domain}")
-
-                # whitelist
+                        # Allow embedding via logout proxy and Keycloak app
+                        tokens.append(get_url(domains, 'web-svc-logout', web_protocol))
+                        tokens.append(get_url(domains, 'web-app-keycloak', web_protocol))
+
+                # 6) Custom whitelist entries
                 tokens += self.get_csp_whitelist(applications, application_id, directive)
 
-                # only add hashes if 'unsafe-inline' is NOT in flags
-                if "'unsafe-inline'" not in flags:
+                # 7) Add inline content hashes ONLY if final tokens do NOT include 'unsafe-inline'
+                #    (Check tokens, not flags, to include defaults and later modifications.)
+                if "'unsafe-inline'" not in tokens:
                    for snippet in self.get_csp_inline_content(applications, application_id, directive):
                         tokens.append(self.get_csp_hash(snippet))
 
                 # Append directive
                 parts.append(f"{directive} {' '.join(tokens)};")
 
-            # static img-src
+            # 8) Static img-src directive (kept permissive for data/blob and any host)
             parts.append("img-src * data: blob:;")
 
             return ' '.join(parts)
 
         except Exception as exc:
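The default-merging in get_csp_flags is the core behavioral change here. A minimal sketch of the merge rule, assuming plain dicts and leaving get_app_conf out; the configured values are invented for illustration:

# style-src default; a hypothetical app config disables it and enables eval instead
default_flags = {'unsafe-inline': True}
configured = {'unsafe-inline': False, 'unsafe-eval': True}
merged = {**default_flags, **configured}  # configured wins on conflicts
print([f"'{name}'" for name, enabled in merged.items() if enabled])
# ["'unsafe-eval'"]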
@@ -1,49 +0,0 @@
-import os
-import re
-import yaml
-from ansible.errors import AnsibleFilterError
-
-
-def get_application_id(role_name):
-    """
-    Jinja2/Ansible filter: given a role name, load its vars/main.yml and return the application_id value.
-    """
-    # Construct path: assumes current working directory is project root
-    vars_file = os.path.join(os.getcwd(), 'roles', role_name, 'vars', 'main.yml')
-
-    if not os.path.isfile(vars_file):
-        raise AnsibleFilterError(f"Vars file not found for role '{role_name}': {vars_file}")
-
-    try:
-        # Read entire file content to avoid lazy stream issues
-        with open(vars_file, 'r', encoding='utf-8') as f:
-            content = f.read()
-        data = yaml.safe_load(content)
-    except Exception as e:
-        raise AnsibleFilterError(f"Error reading YAML from {vars_file}: {e}")
-
-    # Ensure parsed data is a mapping
-    if not isinstance(data, dict):
-        raise AnsibleFilterError(
-            f"Error reading YAML from {vars_file}: expected mapping, got {type(data).__name__}"
-        )
-
-    # Detect malformed YAML: no valid identifier-like keys
-    valid_key_pattern = re.compile(r'^[A-Za-z_][A-Za-z0-9_]*$')
-    if data and not any(valid_key_pattern.match(k) for k in data.keys()):
-        raise AnsibleFilterError(f"Error reading YAML from {vars_file}: invalid top-level keys")
-
-    if 'application_id' not in data:
-        raise AnsibleFilterError(f"Key 'application_id' not found in {vars_file}")
-
-    return data['application_id']
-
-
-class FilterModule(object):
-    """
-    Ansible filter plugin entry point.
-    """
-    def filters(self):
-        return {
-            'get_application_id': get_application_id,
-        }
@@ -1,122 +0,0 @@
-import os
-import yaml
-import re
-from ansible.errors import AnsibleFilterError
-
-# in-memory cache: application_id → (parsed_yaml, is_nested)
-_cfg_cache = {}
-
-def load_configuration(application_id, key):
-    if not isinstance(key, str):
-        raise AnsibleFilterError("Key must be a dotted-string, e.g. 'features.matomo'")
-
-    # locate roles/
-    here = os.path.dirname(__file__)
-    root = os.path.abspath(os.path.join(here, '..'))
-    roles_dir = os.path.join(root, 'roles')
-    if not os.path.isdir(roles_dir):
-        raise AnsibleFilterError(f"Roles directory not found at {roles_dir}")
-
-    # first time? load & cache
-    if application_id not in _cfg_cache:
-        config_path = None
-
-        # 1) primary: vars/main.yml declares it
-        for role in os.listdir(roles_dir):
-            mv = os.path.join(roles_dir, role, 'vars', 'main.yml')
-            if os.path.exists(mv):
-                try:
-                    md = yaml.safe_load(open(mv)) or {}
-                except Exception:
-                    md = {}
-                if md.get('application_id') == application_id:
-                    cf = os.path.join(roles_dir, role, "config", "main.yml")
-                    if not os.path.exists(cf):
-                        raise AnsibleFilterError(
-                            f"Role '{role}' declares '{application_id}' but missing config/main.yml"
-                        )
-                    config_path = cf
-                    break
-
-        # 2) fallback nested
-        if config_path is None:
-            for role in os.listdir(roles_dir):
-                cf = os.path.join(roles_dir, role, "config", "main.yml")
-                if not os.path.exists(cf):
-                    continue
-                try:
-                    dd = yaml.safe_load(open(cf)) or {}
-                except Exception:
-                    dd = {}
-                if isinstance(dd, dict) and application_id in dd:
-                    config_path = cf
-                    break
-
-        # 3) fallback flat
-        if config_path is None:
-            for role in os.listdir(roles_dir):
-                cf = os.path.join(roles_dir, role, "config", "main.yml")
-                if not os.path.exists(cf):
-                    continue
-                try:
-                    dd = yaml.safe_load(open(cf)) or {}
-                except Exception:
-                    dd = {}
-                # flat style: dict with all non-dict values
-                if isinstance(dd, dict) and not any(isinstance(v, dict) for v in dd.values()):
-                    config_path = cf
-                    break
-
-        if config_path is None:
-            return None
-
-        # parse once
-        try:
-            parsed = yaml.safe_load(open(config_path)) or {}
-        except Exception as e:
-            raise AnsibleFilterError(f"Error loading config/main.yml at {config_path}: {e}")
-
-        # detect nested vs flat
-        is_nested = isinstance(parsed, dict) and (application_id in parsed)
-        _cfg_cache[application_id] = (parsed, is_nested)
-
-    parsed, is_nested = _cfg_cache[application_id]
-
-    # pick base entry
-    entry = parsed[application_id] if is_nested else parsed
-
-    # resolve dotted key
-    key_parts = key.split('.')
-    for part in key_parts:
-        # Check if part has an index (e.g., domains.canonical[0])
-        match = re.match(r'([^\[]+)\[([0-9]+)\]', part)
-        if match:
-            part, index = match.groups()
-            index = int(index)
-            if isinstance(entry, dict) and part in entry:
-                entry = entry[part]
-                # Check if entry is a list and access the index
-                if isinstance(entry, list) and 0 <= index < len(entry):
-                    entry = entry[index]
-                else:
-                    raise AnsibleFilterError(
-                        f"Index '{index}' out of range for key '{part}' in application '{application_id}'"
-                    )
-            else:
-                raise AnsibleFilterError(
-                    f"Key '{part}' not found under application '{application_id}'"
-                )
-        else:
-            if isinstance(entry, dict) and part in entry:
-                entry = entry[part]
-            else:
-                raise AnsibleFilterError(
-                    f"Key '{part}' not found under application '{application_id}'"
-                )
-
-    return entry
-
-
-class FilterModule(object):
-    def filters(self):
-        return {'load_configuration': load_configuration}
@@ -1,55 +0,0 @@
-from jinja2 import Undefined
-
-
-def safe_placeholders(template: str, mapping: dict = None) -> str:
-    """
-    Format a template like "{url}/logo.png".
-    If mapping is provided (not None) and ANY placeholder is missing or maps to None/empty string, the function will raise KeyError.
-    If mapping is None, missing placeholders or invalid templates return empty string.
-    Numerical zero or False are considered valid values.
-    Any other formatting errors return an empty string.
-    """
-    # Non-string templates yield empty
-    if not isinstance(template, str):
-        return ''
-
-    class SafeDict(dict):
-        def __getitem__(self, key):
-            val = super().get(key, None)
-            # Treat None or empty string as missing
-            if val is None or (isinstance(val, str) and val == ''):
-                raise KeyError(key)
-            return val
-        def __missing__(self, key):
-            raise KeyError(key)
-
-    silent = mapping is None
-    data = mapping or {}
-    try:
-        return template.format_map(SafeDict(data))
-    except KeyError:
-        if silent:
-            return ''
-        raise
-    except Exception:
-        return ''
-
-def safe_var(value):
-    """
-    Ansible filter: returns the value unchanged unless it's Undefined or None,
-    in which case returns an empty string.
-    Catches all exceptions and yields ''.
-    """
-    try:
-        if isinstance(value, Undefined) or value is None:
-            return ''
-        return value
-    except Exception:
-        return ''
-
-class FilterModule(object):
-    def filters(self):
-        return {
-            'safe_var': safe_var,
-            'safe_placeholders': safe_placeholders,
-        }
@@ -1,28 +0,0 @@
-"""
-Ansible filter plugin that joins a base string and a tail path safely.
-If the base is falsy (None, empty, etc.), returns an empty string.
-"""
-
-def safe_join(base, tail):
-    """
-    Safely join base and tail into a path or URL.
-
-    - base: the base string. If falsy, returns ''.
-    - tail: the string to append. Leading/trailing slashes are handled.
-    - On any exception, returns ''.
-    """
-    try:
-        if not base:
-            return ''
-        base_str = str(base).rstrip('/')
-        tail_str = str(tail).lstrip('/')
-        return f"{base_str}/{tail_str}"
-    except Exception:
-        return ''
-
-
-class FilterModule(object):
-    def filters(self):
-        return {
-            'safe_join': safe_join,
-        }
filter_plugins/timeout_start_sec_for_domains.py (new file, 67 lines)
@@ -0,0 +1,67 @@
+# filter_plugins/timeout_start_sec_for_domains.py (only the core changed)
+from ansible.errors import AnsibleFilterError
+
+class FilterModule(object):
+    def filters(self):
+        return {
+            "timeout_start_sec_for_domains": self.timeout_start_sec_for_domains,
+        }
+
+    def timeout_start_sec_for_domains(
+        self,
+        domains_dict,
+        include_www=True,
+        per_domain_seconds=25,
+        overhead_seconds=30,
+        min_seconds=120,
+        max_seconds=3600,
+    ):
+        """
+        Args:
+            domains_dict (dict | list[str] | str): Either the domain mapping dict
+                (values can be str | list[str] | dict[str,str]) or an already
+                flattened list of domains, or a single domain string.
+            include_www (bool): If true, add 'www.<domain>' for non-www entries.
+            ...
+        """
+        try:
+            # Local flattener for dict inputs (like your generate_all_domains source)
+            def _flatten_from_dict(domains_map):
+                flat = []
+                for v in (domains_map or {}).values():
+                    if isinstance(v, str):
+                        flat.append(v)
+                    elif isinstance(v, list):
+                        flat.extend(v)
+                    elif isinstance(v, dict):
+                        flat.extend(v.values())
+                return flat
+
+            # Accept dict | list | str
+            if isinstance(domains_dict, dict):
+                flat = _flatten_from_dict(domains_dict)
+            elif isinstance(domains_dict, list):
+                flat = list(domains_dict)
+            elif isinstance(domains_dict, str):
+                flat = [domains_dict]
+            else:
+                raise AnsibleFilterError(
+                    "Expected 'domains_dict' to be dict | list | str."
+                )
+
+            if include_www:
+                base_unique = sorted(set(flat))
+                www_variants = [f"www.{d}" for d in base_unique if not str(d).lower().startswith("www.")]
+                flat.extend(www_variants)
+
+            unique_domains = sorted(set(flat))
+            count = len(unique_domains)
+
+            raw = overhead_seconds + per_domain_seconds * count
+            clamped = max(min_seconds, min(max_seconds, int(raw)))
+            return clamped
+
+        except AnsibleFilterError:
+            raise
+        except Exception as exc:
+            raise AnsibleFilterError(f"timeout_start_sec_for_domains failed: {exc}")
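A usage sketch of the new filter with invented domains: three base names plus their www variants give six unique domains, so 30 + 25 * 6 = 180 seconds, inside the 120..3600 clamp:

fm = FilterModule()
domains = {
    'web-app-nextcloud': 'cloud.example.com',
    'web-app-matomo': ['stats.example.com'],
    'web-svc-cdn': {'canonical': 'cdn.example.com'},
}
print(fm.timeout_start_sec_for_domains(domains))  # 180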
filter_plugins/url_join.py (new file, 146 lines)
@@ -0,0 +1,146 @@
+"""
+Ansible filter plugin that safely joins URL components from a list.
+- Requires a valid '<scheme>://' in the first element (any RFC-3986-ish scheme)
+- Preserves the double slash after the scheme, collapses other duplicate slashes
+- Supports query parts introduced by elements starting with '?' or '&'
+  * first query element uses '?', subsequent use '&' (regardless of given prefix)
+  * each query element must be exactly one 'key=value' pair
+  * query elements may only appear after path elements; once query starts, no more path parts
+- Raises specific AnsibleFilterError messages for common misuse
+"""
+
+import re
+from ansible.errors import AnsibleFilterError
+
+_SCHEME_RE = re.compile(r'^([a-zA-Z][a-zA-Z0-9+.\-]*://)(.*)$')
+_QUERY_PAIR_RE = re.compile(r'^[^&=?#]+=[^&?#]*$')  # key=value (no '&', no extra '?' or '#')
+
+def _to_str_or_error(obj, index):
+    """Cast to str, raising a specific AnsibleFilterError with index context."""
+    try:
+        return str(obj)
+    except Exception as e:
+        raise AnsibleFilterError(
+            f"url_join: unable to convert part at index {index} to string: {e}"
+        )
+
+def url_join(parts):
+    """
+    Join a list of URL parts, URL-aware (scheme, path, query).
+
+    Args:
+        parts (list|tuple): URL segments. First element MUST include '<scheme>://'.
+            Path elements are plain strings.
+            Query elements must start with '?' or '&' and contain exactly one 'key=value'.
+
+    Returns:
+        str: Joined URL.
+
+    Raises:
+        AnsibleFilterError: with specific, descriptive messages.
+    """
+    # --- basic input validation ---
+    if parts is None:
+        raise AnsibleFilterError("url_join: parts must be a non-empty list; got None")
+    if not isinstance(parts, (list, tuple)):
+        raise AnsibleFilterError(
+            f"url_join: parts must be a list/tuple; got {type(parts).__name__}"
+        )
+    if len(parts) == 0:
+        raise AnsibleFilterError("url_join: parts must be a non-empty list")
+
+    # --- first element must carry a scheme ---
+    first_raw = parts[0]
+    if first_raw is None:
+        raise AnsibleFilterError(
+            "url_join: first element must include a scheme like 'https://'; got None"
+        )
+
+    first_str = _to_str_or_error(first_raw, 0)
+    m = _SCHEME_RE.match(first_str)
+    if not m:
+        raise AnsibleFilterError(
+            "url_join: first element must start with '<scheme>://', e.g. 'https://example.com'; "
+            f"got '{first_str}'"
+        )
+
+    scheme = m.group(1)                    # e.g., 'https://', 'ftp://', 'myapp+v1://'
+    after_scheme = m.group(2).lstrip('/')  # strip only leading slashes right after scheme
+
+    # --- iterate parts: collect path parts until first query part; then only query parts allowed ---
+    path_parts = []
+    query_pairs = []
+    in_query = False
+
+    for i, p in enumerate(parts):
+        if p is None:
+            # skip None silently (consistent with path_join-ish behavior)
+            continue
+
+        s = _to_str_or_error(p, i)
+
+        # disallow additional scheme in later parts
+        if i > 0 and "://" in s:
+            raise AnsibleFilterError(
+                f"url_join: only the first element may contain a scheme; part at index {i} "
+                f"looks like a URL with scheme ('{s}')."
+            )
+
+        # first element: replace with remainder after scheme and continue
+        if i == 0:
+            s = after_scheme
+
+        # check if this is a query element (starts with ? or &)
+        if s.startswith('?') or s.startswith('&'):
+            in_query = True
+            raw_pair = s[1:]  # strip the leading ? or &
+            if raw_pair == '':
+                raise AnsibleFilterError(
+                    f"url_join: query element at index {i} is empty; expected '?key=value' or '&key=value'"
+                )
+            # Disallow multiple pairs in a single element; enforce exactly one key=value
+            if '&' in raw_pair:
+                raise AnsibleFilterError(
+                    f"url_join: query element at index {i} must contain exactly one 'key=value' pair "
+                    f"without '&'; got '{s}'"
+                )
+            if not _QUERY_PAIR_RE.match(raw_pair):
+                raise AnsibleFilterError(
+                    f"url_join: query element at index {i} must match 'key=value' (no extra '?', '&', '#'); got '{s}'"
+                )
+            query_pairs.append(raw_pair)
+        else:
+            # non-query element
+            if in_query:
+                # once query started, no more path parts allowed
+                raise AnsibleFilterError(
+                    f"url_join: path element found at index {i} after query parameters started; "
+                    f"query parts must come last"
+                )
+            # normal path part: strip slashes to avoid duplicate '/'
+            path_parts.append(s.strip('/'))
+
+    # normalize path: remove empty chunks
+    path_parts = [p for p in path_parts if p != '']
+
+    # --- build result ---
+    # path portion
+    if path_parts:
+        joined_path = "/".join(path_parts)
+        base = scheme + joined_path
+    else:
+        # no path beyond scheme
+        base = scheme
+
+    # query portion
+    if query_pairs:
+        base = base + "?" + "&".join(query_pairs)
+
+    return base
+
+
+class FilterModule(object):
+    def filters(self):
+        return {
+            'url_join': url_join,
+        }
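A usage sketch with invented parts: path elements are slash-normalized, the first query element gets '?', and later ones get '&' regardless of their own prefix:

print(url_join(['https://example.com', 'api', 'v1/', '?page=2', '&sort=asc']))
# https://example.com/api/v1?page=2&sort=asc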
filter_plugins/volume_path.py (new file, 21 lines)
@@ -0,0 +1,21 @@
+from ansible.errors import AnsibleFilterError
+
+def docker_volume_path(volume_name: str) -> str:
+    """
+    Returns the absolute filesystem path of a Docker volume.
+
+    Example:
+        "akaunting_data" -> "/var/lib/docker/volumes/akaunting_data/_data/"
+    """
+    if not volume_name or not isinstance(volume_name, str):
+        raise AnsibleFilterError(f"Invalid volume name: {volume_name}")
+
+    return f"/var/lib/docker/volumes/{volume_name}/_data/"
+
+class FilterModule(object):
+    """Docker volume path filters."""
+
+    def filters(self):
+        return {
+            "docker_volume_path": docker_volume_path,
+        }
@@ -12,7 +12,6 @@ SYS_SERVICE_BACKUP_RMT_2_LOC: "{{ 'svc-bkp-rmt-2-loc' | get_se
 SYS_SERVICE_BACKUP_DOCKER_2_LOC: "{{ 'sys-ctl-bkp-docker-2-loc' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_REPAIR_DOCKER_SOFT:  "{{ 'sys-ctl-rpr-docker-soft' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_REPAIR_DOCKER_HARD:  "{{ 'sys-ctl-rpr-docker-hard' | get_service_name(SOFTWARE_NAME) }}"
-SYS_SERVICE_UPDATE_DOCKER:       "{{ 'update-docker' | get_service_name(SOFTWARE_NAME) }}"
 
 ## On Failure
 SYS_SERVICE_ON_FAILURE_COMPOSE: "{{ ('sys-ctl-alm-compose@') | get_service_name(SOFTWARE_NAME, False) }}%n.service"
@@ -46,8 +45,7 @@ SYS_SERVICE_GROUP_MANIPULATION: >
     SYS_SERVICE_GROUP_CLEANUP +
     SYS_SERVICE_GROUP_REPAIR +
     SYS_SERVICE_GROUP_OPTIMIZATION +
-    SYS_SERVICE_GROUP_MAINTANANCE +
-    [ SYS_SERVICE_UPDATE_DOCKER ]
+    SYS_SERVICE_GROUP_MAINTANANCE
     ) | sort
   }}
@@ -2,20 +2,20 @@
 # Service Timers
 
 ## Meta
-SYS_TIMER_ALL_ENABLED: "{{ MODE_DEBUG }}" # Runtime Variables for Process Control - Activates all timers, independent of whether the handlers have been triggered
+SYS_TIMER_ALL_ENABLED:  "{{ MODE_DEBUG }}"  # Runtime Variables for Process Control - Activates all timers, independent of whether the handlers have been triggered
 
 ## Server Tact Variables
 
-HOURS_SERVER_AWAKE: "0..23" # Hours in which the server is "awake" (100% working). Rest of the time is reserved for maintenance
-RANDOMIZED_DELAY_SEC: "5min" # Random delay for systemd timers to avoid peak loads.
+HOURS_SERVER_AWAKE:     "0..23"  # Hours in which the server is "awake" (100% working). Rest of the time is reserved for maintenance
+RANDOMIZED_DELAY_SEC:   "5min"   # Random delay for systemd timers to avoid peak loads.
 
 ## Timeouts for all services
-SYS_TIMEOUT_CLEANUP_SERVICES: "15min"
-SYS_TIMEOUT_STORAGE_OPTIMIZER: "10min"
-SYS_TIMEOUT_BACKUP_SERVICES: "1h"
-SYS_TIMEOUT_HEAL_DOCKER: "30min"
-SYS_TIMEOUT_UPDATE_DOCKER: "2min"
-SYS_TIMEOUT_RESTART_DOCKER: "{{ SYS_TIMEOUT_UPDATE_DOCKER }}"
+SYS_TIMEOUT_DOCKER_RPR_HARD:   "10min"
+SYS_TIMEOUT_DOCKER_RPR_SOFT:   "{{ SYS_TIMEOUT_DOCKER_RPR_HARD }}"
+SYS_TIMEOUT_CLEANUP_SERVICES:  "15min"
+SYS_TIMEOUT_DOCKER_UPDATE:     "20min"
+SYS_TIMEOUT_STORAGE_OPTIMIZER: "{{ SYS_TIMEOUT_DOCKER_UPDATE }}"
+SYS_TIMEOUT_BACKUP_SERVICES:   "60min"
 
 ## On Calendar
 
@@ -37,7 +37,6 @@ SYS_SCHEDULE_CLEANUP_FAILED_BACKUPS: "*-*-* 12:00:00"
 
 ### Schedule for repair services
 SYS_SCHEDULE_REPAIR_BTRFS_AUTO_BALANCER: "Sat *-*-01..07 00:00:00"  # Execute btrfs auto balancer every first Saturday of a month
-SYS_SCHEDULE_REPAIR_DOCKER_SOFT:         "*-*-* {{ HOURS_SERVER_AWAKE }}:30:00"  # Heal unhealthy docker instances once per hour
 SYS_SCHEDULE_REPAIR_DOCKER_HARD:         "Sun *-*-* 08:00:00"  # Restart docker instances every Sunday at 8:00 AM
 
 ### Schedule for backup tasks
@@ -10,7 +10,7 @@ defaults_networks:
     # /28 Networks, 14 Usable Ip Addresses
     web-app-akaunting:
       subnet: 192.168.101.0/28
-    web-app-attendize:
+    web-app-confluence:
       subnet: 192.168.101.16/28
     web-app-baserow:
       subnet: 192.168.101.32/28
@@ -34,8 +34,8 @@ defaults_networks:
       subnet: 192.168.101.176/28
     web-app-listmonk:
       subnet: 192.168.101.192/28
-    # Free:
-    #  subnet: 192.168.101.208/28
+    web-app-jira:
+      subnet: 192.168.101.208/28
     web-app-matomo:
       subnet: 192.168.101.224/28
     web-app-mastodon:
@@ -48,8 +48,8 @@ defaults_networks:
       subnet: 192.168.102.16/28
     web-app-moodle:
      subnet: 192.168.102.32/28
-    # Free:
-    #  subnet: 192.168.102.48/28
+    web-app-bookwyrm:
+      subnet: 192.168.102.48/28
     web-app-nextcloud:
       subnet: 192.168.102.64/28
     web-app-openproject:
@@ -96,6 +96,12 @@ defaults_networks:
       subnet: 192.168.103.160/28
     web-svc-logout:
       subnet: 192.168.103.176/28
+    web-app-chess:
+      subnet: 192.168.103.192/28
+    web-app-magento:
+      subnet: 192.168.103.208/28
+    web-app-bridgy-fed:
+      subnet: 192.168.103.224/28
 
     # /24 Networks / 254 Usable Clients
     web-app-bigbluebutton:
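A quick check of the "/28 Networks, 14 Usable Ip Addresses" comment: a /28 spans 2**(32-28) = 16 addresses, minus the network and broadcast addresses:

print(2 ** (32 - 28) - 2)  # 14 usable hosts per /28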
@@ -2,12 +2,12 @@ ports:
   # Ports which are exposed to localhost
   localhost:
     database:
-      svc-db-postgres: 5432
-      svc-db-mariadb: 3306
+      svc-db-postgres:        5432
+      svc-db-mariadb:         3306
     # https://developer.mozilla.org/de/docs/Web/API/WebSockets_API
     websocket:
-      web-app-mastodon: 4001
-      web-app-espocrm: 4002
+      web-app-mastodon:       4001
+      web-app-espocrm:        4002
     oauth2_proxy:
       web-app-phpmyadmin: 4181
       web-app-lam: 4182
@@ -26,7 +26,7 @@ ports:
      web-app-gitea: 8002
       web-app-wordpress: 8003
       web-app-mediawiki: 8004
-      # Free: 8005
+      web-app-confluence: 8005
       web-app-yourls: 8006
       web-app-mailu: 8007
       web-app-elk: 8008
@@ -36,7 +36,7 @@ ports:
       web-app-funkwhale: 8012
       web-app-roulette-wheel: 8013
       web-app-joomla: 8014
-      web-app-attendize: 8015
+      web-app-jira: 8015
       web-app-pgadmin: 8016
       web-app-baserow: 8017
       web-app-matomo: 8018
@@ -70,6 +70,11 @@ ports:
       web-app-pretix: 8046
       web-app-mig: 8047
       web-svc-logout: 8048
+      web-app-bookwyrm: 8049
+      web-app-chess: 8050
+      web-app-bluesky_view: 8051
+      web-app-magento: 8052
+      web-app-bridgy-fed: 8053
       web-app-bigbluebutton: 48087  # This port is predefined by bbb. @todo Try to change this to a 8XXX port
   public:
     # The following ports should be changed to 22 on the subdomain via stream mapping
@@ -80,9 +85,10 @@ ports:
      svc-db-openldap: 636
     stun:
       web-app-bigbluebutton: 3478  # Not sure if it's right placed here or if it should be moved to localhost section
-      web-app-nextcloud: 3479
+      # Occupied by BBB: 3479
+      web-app-nextcloud: 3480
     turn:
       web-app-bigbluebutton: 5349  # Not sure if it's right placed here or if it should be moved to localhost section
-      web-app-nextcloud: 5350 # Not used yet
+      web-app-nextcloud: 5350  # Not used yet
     federation:
       web-app-matrix_synapse: 8448
@@ -25,7 +25,7 @@ defaults_oidc:
     URL: "{{ _oidc_url }}"
     CLIENT:
       ID: "{{ _oidc_client_id }}"  # Client identifier, typically matching your primary domain
-      # secret: # Client secret for authenticating with the OIDC provider (set in the inventory file). Recommend greater than 32 characters
+      # SECRET: # Client secret for authenticating with the OIDC provider (set in the inventory file). Recommend greater than 32 characters
       REALM: "{{ _oidc_client_realm }}"  # The realm to which the client belongs in the OIDC provider
       ISSUER_URL: "{{ _oidc_client_issuer_url }}"  # Base URL of the OIDC provider (issuer)
      DISCOVERY_DOCUMENT: "{{ _oidc_client_issuer_url ~ '/.well-known/openid-configuration' }}"  # URL for fetching the provider's configuration details
lookup_plugins/local_mtime_qs.py (new file, 53 lines)
@@ -0,0 +1,53 @@
+from __future__ import annotations
+from ansible.plugins.lookup import LookupBase
+from ansible.errors import AnsibleError
+import os
+
+class LookupModule(LookupBase):
+    """
+    Return a cache-busting string based on the LOCAL file's mtime.
+
+    Usage (single path → string via Jinja):
+        {{ lookup('local_mtime_qs', '/path/to/file.css') }}
+        -> "?version=1712323456"
+
+    Options:
+        param (str): query parameter name (default: "version")
+        mode (str): "qs"    (default) → returns "?<param>=<mtime>"
+                    "epoch"           → returns "<mtime>"
+
+    Multiple paths (returns list, one result per term):
+        {{ lookup('local_mtime_qs', '/a.js', '/b.js', param='v') }}
+    """
+
+    def run(self, terms, variables=None, **kwargs):
+        if not terms:
+            return []
+
+        param = kwargs.get('param', 'version')
+        mode = kwargs.get('mode', 'qs')
+
+        if mode not in ('qs', 'epoch'):
+            raise AnsibleError("local_mtime_qs: 'mode' must be 'qs' or 'epoch'")
+
+        results = []
+        for term in terms:
+            path = os.path.abspath(os.path.expanduser(str(term)))
+
+            # Fail fast if path is missing or not a regular file
+            if not os.path.exists(path):
+                raise AnsibleError(f"local_mtime_qs: file does not exist: {path}")
+            if not os.path.isfile(path):
+                raise AnsibleError(f"local_mtime_qs: not a regular file: {path}")
+
+            try:
+                mtime = int(os.stat(path).st_mtime)
+            except OSError as e:
+                raise AnsibleError(f"local_mtime_qs: cannot stat '{path}': {e}")
+
+            if mode == 'qs':
+                results.append(f"?{param}={mtime}")
+            else:  # mode == 'epoch'
+                results.append(str(mtime))
+
+        return results
@@ -56,6 +56,16 @@ roles:
       description: "Stack levels to setup the server"
      icon: "fas fa-bars-staggered"
       invokable: false
+    front:
+      title: "System Frontend Helpers"
+      description: "Frontend helpers for reverse-proxied apps (injection, shared assets, CDN plumbing)."
+      icon: "fas fa-wand-magic-sparkles"
+      invokable: false
+      inj:
+        title: "Injection"
+        description: "Composable HTML injection roles (CSS, JS, logout interceptor, analytics, desktop iframe) for Nginx/OpenResty via sub_filter/Lua with CDN-backed assets."
+        icon: "fas fa-filter"
+        invokable: false
     update:
       title: "Updates & Package Management"
       description: "OS & package updates"
@@ -101,21 +111,6 @@ roles:
       description: "Developer-centric server utilities and admin toolkits."
       icon: "fas fa-code"
       invokable: false
-    srv:
-      title: "Server"
-      description: "General server roles for provisioning and managing server infrastructure—covering web servers, proxy servers, network services, and other backend components."
-      icon: "fas fa-server"
-      invokable: false
-      web:
-        title: "Webserver"
-        description: "Web-server roles for installing and configuring Nginx (core, TLS, injection filters, composer modules)."
-        icon: "fas fa-server"
-        invokable: false
-      proxy:
-        title: "Proxy Server"
-        description: "Proxy-server roles for virtual-host orchestration and reverse-proxy setups."
-        icon: "fas fa-project-diagram"
-        invokable: false
     web:
       title: "Web Infrastructure"
       description: "Roles for managing web infrastructure—covering static content services and deployable web applications."
@@ -19,3 +19,5 @@
   template:
     src: caffeine.desktop.j2
     dest: "{{auto_start_directory}}caffeine.desktop"
+
+- include_tasks: utils/run_once.yml
@@ -1,4 +1,3 @@
 - block:
     - include_tasks: 01_core.yml
-    - include_tasks: utils/run_once.yml
   when: run_once_desk_gnome_caffeine is not defined
@@ -48,4 +48,6 @@
     state: present
     create: yes
     mode: "0644"
-  become: false
+  become: false
+
+- include_tasks: utils/run_once.yml
@@ -1,4 +1,3 @@
 - block:
     - include_tasks: 01_core.yml
-    - include_tasks: utils/run_once.yml
   when: run_once_desk_ssh is not defined
@@ -1,4 +0,0 @@
----
-- name: reload virtualbox kernel modules
-  become: true
-  command: vboxreload
@@ -1,8 +1,14 @@
 ---
 - name: Setup locale.gen
-  template: src=locale.gen dest=/etc/locale.gen
+  template:
+    src: locale.gen.j2
+    dest: /etc/locale.gen
 
 - name: Setup locale.conf
-  template: src=locale.conf dest=/etc/locale.conf
+  template:
+    src: locale.conf.j2
+    dest: /etc/locale.conf
 
 - name: Generate locales
   shell: locale-gen
   become: true
@@ -1,2 +0,0 @@
-LANG=en_US.UTF-8
-LANGUAGE=en_US.UTF-8
roles/dev-locales/templates/locale.conf.j2 (new file, 2 lines)
@@ -0,0 +1,2 @@
+LANG={{ HOST_LL_CC }}.UTF-8
+LANGUAGE={{ HOST_LL_CC }}.UTF-8
@@ -20,7 +20,7 @@ To offer a centralized, extensible system for managing containerized applications
 - **Reset Logic:** Cleans previous Compose project files and data when `MODE_RESET` is enabled.
 - **Handlers for Runtime Control:** Automatically builds, sets up, or restarts containers based on handlers.
 - **Template-ready Service Files:** Predefined service base and health check templates.
-- **Integration Support:** Compatible with `srv-proxy-core` and other Infinito.Nexus service roles.
+- **Integration Support:** Compatible with `sys-svc-proxy` and other Infinito.Nexus service roles.
 
 ## Administration Tips
@@ -15,10 +15,17 @@
 - name: docker compose pull
   shell: |
     set -euo pipefail
-    lock="{{ [ PATH_DOCKER_COMPOSE_PULL_LOCK_DIR | docker_compose.directories.instance ] | path_join | hash('sha1') }}"
+    lock="{{ [ PATH_DOCKER_COMPOSE_PULL_LOCK_DIR, (docker_compose.directories.instance | hash('sha1')) ~ '.lock' ] | path_join }}"
     if [ ! -e "$lock" ]; then
       mkdir -p "$(dirname "$lock")"
-      docker compose pull
+      if docker compose config | grep -qE '^[[:space:]]+build:'; then
+        docker compose build --pull
+      fi
+      if docker compose pull --help 2>/dev/null | grep -q -- '--ignore-buildable'; then
+        docker compose pull --ignore-buildable
+      else
+        docker compose pull || true
+      fi
       : > "$lock"
       echo "pulled"
     fi
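The corrected lock expression builds one lock file per compose instance directory, named by the SHA-1 of the directory path. A sketch of the same computation in Python, with hypothetical paths and hashlib standing in for Jinja's hash('sha1') filter:

import hashlib, os
lock_dir = "/run/docker-compose-pull"        # hypothetical PATH_DOCKER_COMPOSE_PULL_LOCK_DIR
instance = "/opt/docker/web-app-nextcloud"   # hypothetical docker_compose.directories.instance
lock = os.path.join(lock_dir, hashlib.sha1(instance.encode()).hexdigest() + ".lock")
print(lock)  # /run/docker-compose-pull/<40-hex-sha1>.lock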
@@ -5,7 +5,9 @@
   loop:
     - "{{ application_id | abs_role_path_by_application_id }}/templates/Dockerfile.j2"
     - "{{ application_id | abs_role_path_by_application_id }}/files/Dockerfile"
-  notify: docker compose up
+  notify:
+    - docker compose up
+    - docker compose build
   register: create_dockerfile_result
   failed_when:
     - create_dockerfile_result is failed
@@ -3,6 +3,10 @@
       - "CMD"
       - "curl"
      - "-f"
+{% if container_hostname is defined %}
+      - "-H"
+      - "Host: {{ container_hostname }}"
+{% endif %}
       - "http://127.0.0.1{{ (':' ~ container_port) if container_port is defined else '' }}/{{ container_healthcheck | default('') }}"
     interval: 1m
     timeout: 10s
roles/docker-container/templates/healthcheck/nc.yml.j2 (new file, 7 lines)
@@ -0,0 +1,7 @@
+healthcheck:
+  test: ["CMD-SHELL", "nc -z localhost {{ container_port }} || exit 1"]
+  interval: 30s
+  timeout: 3s
+  retries: 3
+  start_period: 10s
+{{ "\n" }}
@@ -16,29 +16,23 @@
 
 - name: Create installation directory for Kevin's Package Manager
   file:
-    path: "{{ pkgmgr_install_path }}"
+    path: "{{ PKGMGR_INSTALL_PATH }}"
     state: directory
     mode: '0755'
   become: true
 
 - name: Clone Kevin's Package Manager repository
   git:
-    repo: "{{ pkgmgr_repo_url }}"
-    dest: "{{ pkgmgr_install_path }}"
+    repo: "{{ PKGMGR_REPO_URL }}"
+    dest: "{{ PKGMGR_INSTALL_PATH }}"
     version: "HEAD"
     force: yes
   become: true
 
-- name: Ensure main.py is executable
-  file:
-    path: "{{ pkgmgr_install_path }}/main.py"
-    mode: '0755'
-  become: true
-
 - name: create config.yaml
   template:
     src: config.yaml.j2
-    dest: "{{ pkgmgr_config_path }}"
+    dest: "{{ PKGMGR_CONFIG_PATH }}"
   become: true
 
 - name: Run the Package Manager install command to create an alias for Kevins package manager
@@ -46,6 +40,10 @@
     source ~/.venvs/pkgmgr/bin/activate
     make setup
   args:
-    chdir: "{{ pkgmgr_install_path }}"
+    chdir: "{{ PKGMGR_INSTALL_PATH }}"
    executable: /bin/bash
   become: true
+
+- name: "Update all repositories with pkgmgr"
+  command: "pkgmgr pull --all"
+  when: MODE_UPDATE | bool
@@ -1,3 +1,3 @@
 directories:
-  repositories: "{{repositories_directory}}"
-  binaries: "{{binaries_directory}}"
+  repositories: "{{ PKGMGR_REPOSITORIES_DIR }}"
+  binaries: "{{ PKGMGR_BINARIES_DIR }}"
@@ -2,16 +2,16 @@
 # Variables for Kevin's Package Manager installation
 
 # The Git repository URL for Kevin's Package Manager
-pkgmgr_repo_url: "https://github.com/kevinveenbirkenbach/package-manager.git"
-
-# Directory which contains all Repositories managed by Kevin's Package Manager
-repositories_directory: "/opt/Repositories/"
-
-# The directory where the repository will be cloned
-pkgmgr_install_path: "{{repositories_directory}}github.com/kevinveenbirkenbach/package-manager"
-
-# File containing the configuration
-pkgmgr_config_path: "{{pkgmgr_install_path}}/config/config.yaml"
+PKGMGR_REPO_URL: "https://github.com/kevinveenbirkenbach/package-manager.git"
 
 # The directory where executable aliases will be installed (ensure it's in your PATH)
-binaries_directory: "/usr/local/bin"
+PKGMGR_BINARIES_DIR: "/usr/local/bin"
+
+# Directory which contains all Repositories managed by Kevin's Package Manager
+PKGMGR_REPOSITORIES_DIR: "/opt/Repositories/"
+
+# The directory where the repository will be cloned
+PKGMGR_INSTALL_PATH: "{{ [ PKGMGR_REPOSITORIES_DIR, 'github.com/kevinveenbirkenbach/package-manager' ] | path_join }}"
+
+# File containing the configuration
+PKGMGR_CONFIG_PATH: "{{ [ PKGMGR_INSTALL_PATH, 'config/config.yaml' ] | path_join }}"
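A sketch of what the new path_join-based vars resolve to, assuming Ansible's path_join behaves like os.path.join for these inputs:

import os
PKGMGR_REPOSITORIES_DIR = "/opt/Repositories/"
PKGMGR_INSTALL_PATH = os.path.join(PKGMGR_REPOSITORIES_DIR, "github.com/kevinveenbirkenbach/package-manager")
PKGMGR_CONFIG_PATH = os.path.join(PKGMGR_INSTALL_PATH, "config/config.yaml")
print(PKGMGR_INSTALL_PATH)  # /opt/Repositories/github.com/kevinveenbirkenbach/package-manager
print(PKGMGR_CONFIG_PATH)   # .../package-manager/config/config.yaml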
@@ -1,9 +0,0 @@
-# run_once_srv_composer: deactivated
-
-- name: "include role sys-srv-web-inj-compose for '{{ domain }}'"
-  include_role:
-    name: sys-srv-web-inj-compose
-
-- name: "include role srv-tls-core for '{{ domain }}'"
-  include_role:
-    name: srv-tls-core
@@ -1,4 +0,0 @@
-- block:
-    - include_tasks: 01_core.yml
-    - include_tasks: utils/run_once.yml
-  when: run_once_srv_letsencrypt is not defined
@@ -1,6 +0,0 @@
-
-- name: "reload svc-bkp-loc-2-usb service"
-  systemd:
-    name: "{{ 'svc-bkp-loc-2-usb' | get_service_name(SOFTWARE_NAME) }}"
-    state: reloaded
-    daemon_reload: yes
@@ -1,77 +0,0 @@
-def build_ldap_nested_group_entries(applications, users, ldap):
-    """
-    Builds structured LDAP role entries using the global `ldap` configuration.
-    Supports objectClasses: posixGroup (adds gidNumber, memberUid), groupOfNames (adds member).
-    Now nests roles under an application-level OU: application-id/role.
-    """
-
-    result = {}
-
-    # Base DN components
-    role_dn_base = ldap["DN"]["OU"]["ROLES"]
-    user_dn_base = ldap["DN"]["OU"]["USERS"]
-    ldap_user_attr = ldap["USER"]["ATTRIBUTES"]["ID"]
-
-    # Supported objectClass flavors
-    flavors = ldap.get("RBAC").get("FLAVORS")
-
-    for application_id, app_config in applications.items():
-        # Compute the DN for the application-level OU
-        app_ou_dn = f"ou={application_id},{role_dn_base}"
-
-        ou_entry = {
-            "dn": app_ou_dn,
-            "objectClass": ["top", "organizationalUnit"],
-            "ou": application_id,
-            "description": f"Roles for application {application_id}"
-        }
-        result[app_ou_dn] = ou_entry
-
-        # Standard roles with an extra 'administrator'
-        base_roles = app_config.get("rbac", {}).get("roles", {})
-        roles = {
-            **base_roles,
-            "administrator": {
-                "description": "Has full administrative access: manage themes, plugins, settings, and users"
-            }
-        }
-
-        group_id = app_config.get("group_id")
-
-        for role_name, role_conf in roles.items():
-            # Build CN under the application OU
-            cn = role_name
-            dn = f"cn={cn},{app_ou_dn}"
-
-            entry = {
-                "dn": dn,
-                "cn": cn,
-                "description": role_conf.get("description", ""),
-                "objectClass": ["top"] + flavors,
-            }
-
-            member_dns = []
-            member_uids = []
-            for username, user_conf in users.items():
-                if role_name in user_conf.get("roles", []):
-                    member_dns.append(f"{ldap_user_attr}={username},{user_dn_base}")
-                    member_uids.append(username)
-
-            if "posixGroup" in flavors:
-                entry["gidNumber"] = group_id
-                if member_uids:
-                    entry["memberUid"] = member_uids
-
-            if "groupOfNames" in flavors and member_dns:
-                entry["member"] = member_dns
-
-            result[dn] = entry
-
-    return result
-
-
-class FilterModule(object):
-    def filters(self):
-        return {
-            "build_ldap_nested_group_entries": build_ldap_nested_group_entries
-        }
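For reference, the nested DN layout the removed filter produced, with a hypothetical application ID and roles base DN:

app_ou = "ou=web-app-nextcloud,ou=roles,dc=example,dc=org"  # ou=<application-id>,<ROLES base>
print(f"cn=administrator,{app_ou}")
# cn=administrator,ou=web-app-nextcloud,ou=roles,dc=example,dc=org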
@@ -1,55 +0,0 @@
-- name: Load memberof module from file in OpenLDAP container
-  shell: >
-    docker exec -i {{ openldap_name }} ldapmodify -Y EXTERNAL -H ldapi:/// -f {{ openldap_ldif_docker_path }}configuration/01_member_of_configuration.ldif
-  listen:
-    - "Import configuration LDIF files"
-    - "Import all LDIF files"
-  # @todo Remove the following ignore errors when setting up a new server
-  # Just here because debugging would take too much time
-  ignore_errors: true
-
-- name: Refint Module Activation for OpenLDAP
-  shell: >
-    docker exec -i {{ openldap_name }} ldapadd -Y EXTERNAL -H ldapi:/// -f {{ openldap_ldif_docker_path }}configuration/02_member_of_configuration.ldif
-  listen:
-    - "Import configuration LDIF files"
-    - "Import all LDIF files"
-  register: ldapadd_result
-  failed_when: ldapadd_result.rc not in [0, 68]
-  # @todo Remove the following ignore errors when setting up a new server
-  # Just here because debugging would take too much time
-  ignore_errors: true
-
-- name: "Import schemas"
-  shell: >
-    docker exec -i {{ openldap_name }} ldapadd -Y EXTERNAL -H ldapi:/// -f "{{ openldap_ldif_docker_path }}schema/{{ item | basename | regex_replace('\.j2$', '') }}"
-  register: ldapadd_result
-  changed_when: "'adding new entry' in ldapadd_result.stdout"
-  failed_when: ldapadd_result.rc not in [0, 80]
-  listen:
-    - "Import schema LDIF files"
-    - "Import all LDIF files"
-  loop: "{{ lookup('fileglob', role_path ~ '/templates/ldif/schema/*.j2', wantlist=True) }}"
-
-- name: Refint Overlay Configuration for OpenLDAP
-  shell: >
-    docker exec -i {{ openldap_name }} ldapmodify -Y EXTERNAL -H ldapi:/// -f {{ openldap_ldif_docker_path }}configuration/03_member_of_configuration.ldif
-  listen:
-    - "Import configuration LDIF files"
-    - "Import all LDIF files"
-  register: ldapadd_result
-  failed_when: ldapadd_result.rc not in [0, 68]
-  # @todo Remove the following ignore errors when setting up a new server
-  # Just here because debugging would take too much time
-  ignore_errors: true
-
-- name: "Import users, groups, etc. to LDAP"
-  shell: >
-    docker exec -i {{ openldap_name }} ldapadd -x -D "{{ LDAP.DN.ADMINISTRATOR.DATA }}" -w "{{ LDAP.BIND_CREDENTIAL }}" -c -f "{{ openldap_ldif_docker_path }}groups/{{ item | basename | regex_replace('\.j2$', '') }}"
-  register: ldapadd_result
-  changed_when: "'adding new entry' in ldapadd_result.stdout"
-  failed_when: ldapadd_result.rc not in [0, 20, 68, 65]
-  listen:
-    - "Import groups LDIF files"
-    - "Import all LDIF files"
-  loop: "{{ query('fileglob', role_path ~ '/templates/ldif/groups/*.j2') | sort }}"
@@ -37,7 +37,7 @@
- name: "Reset LDAP Credentials"
  include_tasks: 01_credentials.yml
  when:
    - applications | get_app_conf(application_id, 'network.local', True)
    - applications | get_app_conf(application_id, 'network.local')
    - applications | get_app_conf(application_id, 'provisioning.credentials', True)

- name: "create directory {{ openldap_ldif_host_path }}{{ item }}"

@@ -2,5 +2,5 @@ server {
    listen {{ ports.public.ldaps['svc-db-openldap'] }} ssl;
    proxy_pass 127.0.0.1:{{ ports.localhost.ldap['svc-db-openldap'] }};

    {% include 'roles/srv-letsencrypt/templates/ssl_credentials.j2' %}
    {% include 'roles/sys-svc-letsencrypt/templates/ssl_credentials.j2' %}
}

@@ -21,4 +21,4 @@ openldap_version: "{{ applications | get_app_conf(application_id,
openldap_volume: "{{ applications | get_app_conf(application_id, 'docker.volumes.data', True) }}"
openldap_network: "{{ applications | get_app_conf(application_id, 'docker.network', True) }}"

openldap_network_expose_local: "{{ applications | get_app_conf(application_id, 'network.public', True) | bool or applications | get_app_conf(application_id, 'network.local', True) | bool }}"
openldap_network_expose_local: "{{ applications | get_app_conf(application_id, 'network.public', True) | bool or applications | get_app_conf(application_id, 'network.local') | bool }}"
@@ -0,0 +1,44 @@
import os
import yaml
from ansible.errors import AnsibleFilterError


def _iter_role_vars_files(roles_dir):
    if not os.path.isdir(roles_dir):
        raise AnsibleFilterError(f"roles_dir not found: {roles_dir}")
    for name in os.listdir(roles_dir):
        role_path = os.path.join(roles_dir, name)
        if not os.path.isdir(role_path):
            continue
        vars_main = os.path.join(role_path, "vars", "main.yml")
        if os.path.isfile(vars_main):
            yield vars_main


def _is_postgres_role(vars_file):
    try:
        with open(vars_file, "r", encoding="utf-8") as f:
            data = yaml.safe_load(f) or {}
        # only count roles with explicit database_type: postgres in VARS
        return str(data.get("database_type", "")).strip().lower() == "postgres"
    except Exception:
        # ignore unreadable/broken YAML files quietly
        return False


def split_postgres_connections(total_connections, roles_dir="roles"):
    """
    Return an integer average: total_connections / number_of_roles_with_database_type_postgres.
    Uses max(count, 1) to avoid division-by-zero.
    """
    try:
        total = int(total_connections)
    except Exception:
        raise AnsibleFilterError(f"total_connections must be int-like, got: {total_connections!r}")

    count = sum(1 for vf in _iter_role_vars_files(roles_dir) if _is_postgres_role(vf))
    denom = max(count, 1)
    return max(1, total // denom)


class FilterModule(object):
    def filters(self):
        return {
            "split_postgres_connections": split_postgres_connections
        }
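To make the split concrete (numbers hypothetical): if PostgreSQL allows 400 connections and ten roles declare database_type: postgres, each application is budgeted 40 connections.

    # Hypothetical walkthrough of the filter's arithmetic:
    # total=400, count=10  ->  max(1, 400 // max(10, 1)) == 40
    # total=400, count=0   ->  max(1, 400 // max(0, 1)) == 400  (no division by zero)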
@@ -1,3 +1,7 @@
- name: Compute average allowed connections per Postgres app (once)
  set_fact:
    POSTGRES_ALLOWED_AVG_CONNECTIONS: "{{ (POSTGRES_MAX_CONNECTIONS | split_postgres_connections(playbook_dir ~ '/roles')) | int }}"
  run_once: true

- name: Include dependency 'sys-svc-docker'
  include_role:

@@ -7,6 +7,19 @@
      context: .
      dockerfile: Dockerfile
    pull_policy: never
    command:
      - "postgres"
      - "-c"
      - "max_connections={{ POSTGRES_MAX_CONNECTIONS }}"
      - "-c"
      - "superuser_reserved_connections={{ POSTGRES_SUPERUSER_RESERVED_CONNECTIONS }}"
      - "-c"
      - "shared_buffers={{ POSTGRES_SHARED_BUFFERS }}"
      - "-c"
      - "work_mem={{ POSTGRES_WORK_MEM }}"
      - "-c"
      - "maintenance_work_mem={{ POSTGRES_MAINTENANCE_WORK_MEM }}"

{% include 'roles/docker-container/templates/base.yml.j2' %}
{% if POSTGRES_EXPOSE_LOCAL %}
    ports:

@@ -1,25 +1,37 @@
# General
application_id: svc-db-postgres
application_id: svc-db-postgres

# Docker
docker_compose_flush_handlers: true
docker_compose_flush_handlers: true

# Docker Compose
database_type: "{{ application_id | get_entity_name }}"
database_type: "{{ application_id | get_entity_name }}"

## Postgres
POSTGRES_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.data', True) }}"
POSTGRES_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.name', True) }}"
POSTGRES_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.image', True) }}"
POSTGRES_SUBNET: "{{ networks.local['svc-db-postgres'].subnet }}"
POSTGRES_NETWORK_NAME: "{{ applications | get_app_conf(application_id, 'docker.network', True) }}"
POSTGRES_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.version', True) }}"
POSTGRES_PASSWORD: "{{ applications | get_app_conf(application_id, 'credentials.POSTGRES_PASSWORD', True) }}"
POSTGRES_PORT: "{{ database_port | default(ports.localhost.database[ application_id ]) }}"
POSTGRES_INIT: "{{ database_username is defined and database_password is defined and database_name is defined }}"
POSTGRES_EXPOSE_LOCAL: True # Exposes the db to localhost, almost every time necessary
POSTGRES_CUSTOM_IMAGE_NAME: "postgres_custom"
POSTGRES_LOCAL_HOST: "127.0.0.1"
POSTGRES_VECTOR_ENABLED: True # Required by discourse, probably in a later step it makes sense to define this as a configuration option in config/main.yml
POSTGRES_RETRIES: 5
POSTGRES_DELAY: 2
POSTGRES_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.data') }}"
POSTGRES_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.name') }}"
POSTGRES_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.image') }}"
POSTGRES_SUBNET: "{{ networks.local['svc-db-postgres'].subnet }}"
POSTGRES_NETWORK_NAME: "{{ applications | get_app_conf(application_id, 'docker.network') }}"
POSTGRES_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.version') }}"
POSTGRES_PASSWORD: "{{ applications | get_app_conf(application_id, 'credentials.POSTGRES_PASSWORD') }}"
POSTGRES_PORT: "{{ database_port | default(ports.localhost.database[ application_id ]) }}"
POSTGRES_INIT: "{{ database_username is defined and database_password is defined and database_name is defined }}"
POSTGRES_EXPOSE_LOCAL: True # Exposes the db to localhost, almost every time necessary
POSTGRES_CUSTOM_IMAGE_NAME: "postgres_custom"
POSTGRES_LOCAL_HOST: "127.0.0.1"
POSTGRES_VECTOR_ENABLED: True # Required by discourse, probably in a later step it makes sense to define this as a configuration option in config/main.yml
POSTGRES_RETRIES: 5

## Performance
POSTGRES_TOTAL_RAM_MB: "{{ ansible_memtotal_mb | int }}"
POSTGRES_VCPUS: "{{ ansible_processor_vcpus | int }}"
POSTGRES_MAX_CONNECTIONS: "{{ [ ((POSTGRES_VCPUS | int) * 30 + 50), 400 ] | min }}"
POSTGRES_SUPERUSER_RESERVED_CONNECTIONS: 3
POSTGRES_SHARED_BUFFERS_MB: "{{ ((POSTGRES_TOTAL_RAM_MB | int) * 25) // 100 }}"
POSTGRES_SHARED_BUFFERS: "{{ POSTGRES_SHARED_BUFFERS_MB ~ 'MB' }}"
POSTGRES_WORK_MEM_MB: "{{ [ ( (POSTGRES_TOTAL_RAM_MB | int) // ( [ (POSTGRES_MAX_CONNECTIONS | int), 1 ] | max ) // 2 ), 1 ] | max }}"
POSTGRES_WORK_MEM: "{{ POSTGRES_WORK_MEM_MB ~ 'MB' }}"
POSTGRES_MAINTENANCE_WORK_MEM_MB: "{{ [ (((POSTGRES_TOTAL_RAM_MB | int) * 5) // 100), 64 ] | max }}"
POSTGRES_MAINTENANCE_WORK_MEM: "{{ POSTGRES_MAINTENANCE_WORK_MEM_MB ~ 'MB' }}"
POSTGRES_DELAY: 2
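A worked example of these sizing formulas, assuming a hypothetical host with 8192 MB RAM and 4 vCPUs:

    # POSTGRES_MAX_CONNECTIONS         = min(4 * 30 + 50, 400)       = 170
    # POSTGRES_SHARED_BUFFERS_MB       = (8192 * 25) // 100          = 2048  -> "2048MB"
    # POSTGRES_WORK_MEM_MB             = max(8192 // 170 // 2, 1)    = 24    -> "24MB"
    # POSTGRES_MAINTENANCE_WORK_MEM_MB = max((8192 * 5) // 100, 64)  = 409   -> "409MB"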
@@ -18,10 +18,10 @@
    group: root
  notify: reload sysctl configuration

- name: create /etc/wireguard/wg0.{{ SOFTWARE_NAME | lower }}.conf
- name: "deploy {{ WG0_CONF_DEST }}"
  copy:
    src: "{{ inventory_dir }}/files/{{ inventory_hostname }}/etc/wireguard/wg0.conf"
    dest: /etc/wireguard/wg0.{{ SOFTWARE_NAME | lower }}.conf
    owner: root
    group: root
    src: "{{ [inventory_dir, 'files', inventory_hostname, 'etc/wireguard/wg0.conf' ] | path_join }}"
    dest: "{{ WG0_CONF_DEST }}"
    owner: root
    group: root
  notify: restart wireguard
@@ -1 +1,3 @@
application_id: svc-net-wireguard-core

WG0_CONF_DEST: "/etc/wireguard/wg0.conf"
@@ -8,4 +8,3 @@ database_type: ""
OPENRESTY_IMAGE: "openresty/openresty"
OPENRESTY_VERSION: "alpine"
OPENRESTY_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.openresty.name', True) }}"

@@ -1,36 +0,0 @@
def dict_to_cli_args(data):
    """
    Convert a dictionary into a CLI argument string.
    Example:
        {
            "backup-dir": "/mnt/backups",
            "shutdown": True,
            "ignore-volumes": ["redis", "memcached"]
        }
    becomes:
        --backup-dir=/mnt/backups --shutdown --ignore-volumes="redis memcached"
    """
    if not isinstance(data, dict):
        raise TypeError("Expected a dictionary for CLI argument conversion")

    args = []

    for key, value in data.items():
        cli_key = f"--{key}"

        if isinstance(value, bool):
            if value:
                args.append(cli_key)
        elif isinstance(value, list):
            items = " ".join(map(str, value))
            args.append(f'{cli_key}="{items}"')
        elif value is not None:
            args.append(f'{cli_key}={value}')

    return " ".join(args)


class FilterModule(object):
    def filters(self):
        return {
            'dict_to_cli_args': dict_to_cli_args
        }
@@ -1,2 +1,2 @@
system_service_id: sys-ctl-cln-faild-bkps
CLN_FAILED_DOCKER_BACKUPS_PKG: cleanup-failed-docker-backups
system_service_id: sys-ctl-cln-faild-bkps
CLN_FAILED_DOCKER_BACKUPS_PKG: cleanup-failed-docker-backups
@@ -16,4 +16,5 @@
    system_service_on_calendar: "{{ SYS_SCHEDULE_HEALTH_CSP_CRAWLER }}"
    system_service_timer_enabled: true
    system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
    system_service_tpl_timeout_start_sec: 15min
    system_service_tpl_timeout_start_sec: "{{ CURRENT_PLAY_DOMAINS_ALL | timeout_start_sec_for_domains }}"
    system_service_tpl_exec_start: "{{ system_service_script_exec }} --nginx-config-dir={{ NGINX.DIRECTORIES.HTTP.SERVERS }}"
@@ -1,7 +0,0 @@
[Unit]
Description=Check for CSP-blocked resources via Puppeteer
OnFailure={{ SYS_SERVICE_ON_FAILURE_COMPOSE }}

[Service]
Type=oneshot
ExecStart={{ system_service_script_exec }} --nginx-config-dir={{ NGINX.DIRECTORIES.HTTP.SERVERS }}
@@ -3,9 +3,14 @@
    name: sys-ctl-alm-compose
  when: run_once_sys_ctl_alm_compose is not defined

- name: Include dependency 'sys-ctl-rpr-docker-soft'
  include_role:
    name: sys-ctl-rpr-docker-soft
  when: run_once_sys_ctl_rpr_docker_soft is not defined

- include_role:
    name: sys-service
  vars:
    system_service_timer_enabled: true
    system_service_on_calendar: "{{ SYS_SCHEDULE_HEALTH_DOCKER_CONTAINER }}"
    system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
    system_service_timer_enabled: true
    system_service_on_calendar: "{{ SYS_SCHEDULE_HEALTH_DOCKER_CONTAINER }}"
    system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }} {{ SYS_SERVICE_REPAIR_DOCKER_SOFT }}"
@@ -2,7 +2,7 @@
  include_role:
    name: sys-ctl-alm-compose
  when: run_once_sys_ctl_alm_compose is not defined

- include_role:
    name: sys-service
  vars:

@@ -9,4 +9,4 @@
    system_service_on_calendar: "{{ SYS_SCHEDULE_HEALTH_JOURNALCTL }}"
    system_service_timer_enabled: true
    system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
    # system_service_suppress_flush: true # There are almost always errors in the journalctl logs, so suppression is necessary to let the playbook run
    system_service_suppress_flush: true # There are almost always errors in the journalctl logs, so suppression is necessary to let the playbook run
@@ -16,6 +16,7 @@
- include_role:
    name: sys-service
  vars:
    system_service_on_calendar: "{{ SYS_SCHEDULE_HEALTH_NGINX }}"
    system_service_timer_enabled: true
    system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
    system_service_on_calendar: "{{ SYS_SCHEDULE_HEALTH_NGINX }}"
    system_service_timer_enabled: true
    system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
    system_service_tpl_timeout_start_sec: "{{ CURRENT_PLAY_DOMAINS_ALL | timeout_start_sec_for_domains }}"
@@ -43,7 +43,7 @@ for filename in os.listdir(config_path):
    url = f"{{ WEB_PROTOCOL }}://{domain}"

    redirected_domains = [domain['source'] for domain in {{ redirect_domain_mappings }}]
    redirected_domains.append("{{domains | get_domain('web-app-mailu')}}")
    redirected_domains.append("{{domains | get_domain('web-app-mailu') }}")

    expected_statuses = get_expected_statuses(domain, parts, redirected_domains)

@@ -3,7 +3,7 @@
    name: '{{ item }}'
  loop:
    - sys-svc-certbot
    - srv-core
    - sys-svc-webserver
    - sys-ctl-alm-compose

- name: install certbot

@@ -8,7 +8,7 @@
  vars:
    system_service_on_calendar: "{{ SYS_SCHEDULE_REPAIR_DOCKER_HARD }}"
    system_service_timer_enabled: true
    system_service_tpl_exec_start_pre: '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(" ") }} --ignore {{ SYS_SERVICE_REPAIR_DOCKER_HARD }} {{ SYS_SERVICE_GROUP_CLEANUP | join(" ") }} --timeout "{{ SYS_TIMEOUT_RESTART_DOCKER }}"'
    system_service_tpl_exec_start_pre: '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(" ") }} --ignore {{ SYS_SERVICE_REPAIR_DOCKER_HARD }} {{ SYS_SERVICE_GROUP_CLEANUP | join(" ") }} --timeout "{{ SYS_TIMEOUT_DOCKER_RPR_HARD }}"'
    system_service_tpl_exec_start: '{{ system_service_script_exec }} {{ PATH_DOCKER_COMPOSE_INSTANCES }}'
    system_service_tpl_exec_start_post: "/usr/bin/systemctl start {{ SYS_SERVICE_CLEANUP_ANONYMOUS_VOLUMES }}"
    system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
@@ -1,15 +1,26 @@
#!/usr/bin/env python3
"""
Restart Docker-Compose configurations with exited or unhealthy containers.
This version receives the *manipulation services* via argparse (no Jinja).

STRICT mode: Resolve the Compose project exclusively via Docker labels
(com.docker.compose.project and com.docker.compose.project.working_dir).
No container-name fallback. If labels are missing or Docker is unavailable,
the script records an error for that container.

All shell interactions that matter for tests go through print_bash()
so they can be monkeypatched in unit tests.
"""
import subprocess
import time
import os
import argparse
from typing import List
from typing import List, Optional, Tuple


# ---------------------------
# Shell helpers
# ---------------------------

def bash(command: str) -> List[str]:
    print(command)
    process = subprocess.Popen(
@@ -30,31 +41,45 @@ def list_to_string(lst: List[str]) -> str:


def print_bash(command: str) -> List[str]:
    """
    Wrapper around bash() that echoes combined output for easier debugging
    and can be monkeypatched in tests.
    """
    output = bash(command)
    if output:
        print(list_to_string(output))
    return output


def find_docker_compose_file(directory: str) -> str | None:
# ---------------------------
# Filesystem / compose helpers
# ---------------------------

def find_docker_compose_file(directory: str) -> Optional[str]:
    """
    Search for docker-compose.yml beneath a directory.
    """
    for root, _, files in os.walk(directory):
        if "docker-compose.yml" in files:
            return os.path.join(root, "docker-compose.yml")
    return None


def detect_env_file(project_path: str) -> str | None:
def detect_env_file(project_path: str) -> Optional[str]:
    """
    Return the path to a Compose env file if present (.env preferred, fallback to env).
    Return the path to a Compose env file if present (.env preferred, fallback to .env/env).
    """
    candidates = [os.path.join(project_path, ".env"), os.path.join(project_path, ".env", "env")]
    candidates = [
        os.path.join(project_path, ".env"),
        os.path.join(project_path, ".env", "env"),
    ]
    for candidate in candidates:
        if os.path.isfile(candidate):
            return candidate
    return None


def compose_cmd(subcmd: str, project_path: str, project_name: str | None = None) -> str:
def compose_cmd(subcmd: str, project_path: str, project_name: Optional[str] = None) -> str:
    """
    Build a docker-compose command string with optional -p and --env-file if present.
    Example: compose_cmd("restart", "/opt/docker/foo", "foo")
@@ -69,6 +94,10 @@ def compose_cmd(subcmd: str, project_path: str, project_name: str | None = None)
    return " ".join(parts)


# ---------------------------
# Business logic
# ---------------------------

def normalize_services_arg(raw: List[str] | None, raw_str: str | None) -> List[str]:
    """
    Accept either:
@@ -78,7 +107,6 @@ def normalize_services_arg(raw: List[str] | None, raw_str: str | None) -> List[s
    if raw:
        return [s for s in raw if s.strip()]
    if raw_str:
        # split on comma or whitespace
        parts = [p.strip() for chunk in raw_str.split(",") for p in chunk.split()]
        return [p for p in parts if p]
    return []
@@ -87,7 +115,7 @@ def normalize_services_arg(raw: List[str] | None, raw_str: str | None) -> List[s
def wait_while_manipulation_running(
    services: List[str],
    waiting_time: int = 600,
    timeout: int | None = None,
    timeout: Optional[int] = None,
) -> None:
    """
    Wait until none of the given services are active anymore.
@@ -107,7 +135,6 @@ def wait_while_manipulation_running(
            break

        if any_active:
            # Check timeout
            elapsed = time.time() - start
            if timeout and elapsed >= timeout:
                print(f"Timeout ({timeout}s) reached while waiting for services. Continuing anyway.")
@@ -119,7 +146,30 @@ def wait_while_manipulation_running(
            break


def main(base_directory: str, manipulation_services: List[str], timeout: int | None) -> int:
def get_compose_project_info(container: str) -> Tuple[str, str]:
    """
    Resolve project name and working dir from Docker labels.
    STRICT: Raises RuntimeError if labels are missing/unreadable.
    """
    out_project = print_bash(
        f"docker inspect -f '{{{{ index .Config.Labels \"com.docker.compose.project\" }}}}' {container}"
    )
    out_workdir = print_bash(
        f"docker inspect -f '{{{{ index .Config.Labels \"com.docker.compose.project.working_dir\" }}}}' {container}"
    )

    project = out_project[0].strip() if out_project else ""
    workdir = out_workdir[0].strip() if out_workdir else ""

    if not project:
        raise RuntimeError(f"No compose project label found for container {container}")
    if not workdir:
        raise RuntimeError(f"No compose working_dir label found for container {container}")

    return project, workdir


def main(base_directory: str, manipulation_services: List[str], timeout: Optional[int]) -> int:
    errors = 0
    wait_while_manipulation_running(manipulation_services, waiting_time=600, timeout=timeout)
@@ -131,43 +181,50 @@ def main(base_directory: str, manipulation_services: List[str], timeout: int | N
    )
    failed_containers = unhealthy_container_names + exited_container_names

    unfiltered_failed_docker_compose_repositories = [
        container.split("-")[0] for container in failed_containers
    ]
    filtered_failed_docker_compose_repositories = list(
        dict.fromkeys(unfiltered_failed_docker_compose_repositories)
    )
    for container in failed_containers:
        try:
            project, workdir = get_compose_project_info(container)
        except Exception as e:
            print(f"Error reading compose labels for {container}: {e}")
            errors += 1
            continue

    for repo in filtered_failed_docker_compose_repositories:
        compose_file_path = find_docker_compose_file(os.path.join(base_directory, repo))
        compose_file_path = os.path.join(workdir, "docker-compose.yml")
        if not os.path.isfile(compose_file_path):
            # As STRICT: we only trust labels; if file not there, error out.
            print(f"Error: docker-compose.yml not found at {compose_file_path} for container {container}")
            errors += 1
            continue

        if compose_file_path:
            project_path = os.path.dirname(compose_file_path)
            try:
                print("Restarting unhealthy container in:", compose_file_path)
        project_path = os.path.dirname(compose_file_path)
        try:
            # restart with optional --env-file and -p
            print_bash(compose_cmd("restart", project_path, repo))
        except Exception as e:
            if "port is already allocated" in str(e):
                print("Detected port allocation problem. Executing recovery steps...")
                # down (no -p needed), then engine restart, then up -d with -p
            print_bash(compose_cmd("restart", project_path, project))
        except Exception as e:
            if "port is already allocated" in str(e):
                print("Detected port allocation problem. Executing recovery steps...")
                try:
                    print_bash(compose_cmd("down", project_path))
                    print_bash("systemctl restart docker")
                    print_bash(compose_cmd("up -d", project_path, repo))
                else:
                    print("Unhandled exception during restart:", e)
                    print_bash(compose_cmd("up -d", project_path, project))
                except Exception as e2:
                    print("Unhandled exception during recovery:", e2)
                    errors += 1
        else:
            print("Error: Docker Compose file not found for:", repo)
            errors += 1
            else:
                print("Unhandled exception during restart:", e)
                errors += 1

    print("Finished restart procedure.")
    return errors


# ---------------------------
# CLI
# ---------------------------

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Restart Docker-Compose configurations with exited or unhealthy containers."
        description="Restart Docker-Compose configurations with exited or unhealthy containers (STRICT label mode)."
    )
    parser.add_argument(
        "--manipulation",
@@ -184,12 +241,12 @@ if __name__ == "__main__":
        "--timeout",
        type=int,
        default=60,
        help="Maximum time in seconds to wait for manipulation services before continuing.(Default 1min)",
        help="Maximum time in seconds to wait for manipulation services before continuing. (Default 1min)",
    )
    parser.add_argument(
        "base_directory",
        type=str,
        help="Base directory where Docker Compose configurations are located.",
        help="(Unused in STRICT mode) Base directory where Docker Compose configurations are located.",
    )
    args = parser.parse_args()
    services = normalize_services_arg(args.manipulation, args.manipulation_string)
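In STRICT label mode the positional base_directory is retained only for backward compatibility. A hypothetical invocation (script name, service names, and path illustrative) might look like:

    # python3 restart_unhealthy_compose.py \
    #     --manipulation-string "sys-ctl-bkp-docker.service sys-ctl-cln-bkps.service" \
    #     --timeout 120 \
    #     /opt/docker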
@@ -6,9 +6,7 @@
- include_role:
    name: sys-service
  vars:
    system_service_on_calendar: "{{ SYS_SCHEDULE_REPAIR_DOCKER_SOFT }}"
    system_service_timer_enabled: true
    system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
    system_service_tpl_exec_start_pre: "/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(' ') }} --ignore {{ SYS_SERVICE_GROUP_CLEANUP | join(' ') }} {{ SYS_SERVICE_REPAIR_DOCKER_SOFT }} --timeout '{{ SYS_TIMEOUT_HEAL_DOCKER }}'"
    system_service_tpl_exec_start_pre: "/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(' ') }} --ignore {{ SYS_SERVICE_GROUP_CLEANUP | join(' ') }} {{ SYS_SERVICE_REPAIR_DOCKER_SOFT }} --timeout '{{ SYS_TIMEOUT_DOCKER_RPR_SOFT }}'"
    system_service_tpl_exec_start: >
      /bin/sh -c '{{ system_service_script_exec }} --manipulation-string "{{ SYS_SERVICE_GROUP_MANIPULATION | join(" ") }}" {{ PATH_DOCKER_COMPOSE_INSTANCES }}'
@@ -1,4 +1,3 @@
# roles/sys-srv-web-inj-compose/filter_plugins/inj_enabled.py
#
# Usage in tasks:
#   - set_fact:
@@ -2,10 +2,10 @@
Jinja filter: `inj_features(kind)` filters a list of features to only those
that actually provide the corresponding snippet template file.

- kind='head' -> roles/sys-srv-web-inj-<feature>/templates/head_sub.j2
- kind='body' -> roles/sys-srv-web-inj-<feature>/templates/body_sub.j2
- kind='head' -> roles/sys-front-inj-<feature>/templates/head_sub.j2
- kind='body' -> roles/sys-front-inj-<feature>/templates/body_sub.j2

If the feature's role directory (roles/sys-srv-web-inj-<feature>) does not
If the feature's role directory (roles/sys-front-inj-<feature>) does not
exist, this filter raises FileNotFoundError.

Usage in a template:
@@ -15,13 +15,13 @@ Usage in a template:

import os

# This file lives at: roles/sys-srv-web-inj-compose/filter_plugins/inj_snippets.py
# This file lives at: roles/sys-front-inj-all/filter_plugins/inj_snippets.py
_THIS_DIR = os.path.dirname(__file__)
_ROLE_DIR = os.path.abspath(os.path.join(_THIS_DIR, ".."))   # roles/sys-srv-web-inj-compose
_ROLE_DIR = os.path.abspath(os.path.join(_THIS_DIR, ".."))   # roles/sys-front-inj-all
_ROLES_DIR = os.path.abspath(os.path.join(_ROLE_DIR, ".."))  # roles

def _feature_role_dir(feature: str) -> str:
    return os.path.join(_ROLES_DIR, f"sys-srv-web-inj-{feature}")
    return os.path.join(_ROLES_DIR, f"sys-front-inj-{feature}")

def _has_snippet(feature: str, kind: str) -> bool:
    if kind not in ("head", "body"):
@@ -14,7 +14,7 @@ galaxy_info:
    - theming
  repository: "https://s.infinito.nexus/code"
  issue_tracker_url: "https://s.infinito.nexus/issues"
  documentation: "https://s.infinito.nexus/code/tree/main/roles/sys-srv-web-inj-compose"
  documentation: "https://s.infinito.nexus/code/tree/main/roles/sys-front-inj-all"
  min_ansible_version: "2.9"
  platforms:
    - name: Any
@@ -2,39 +2,10 @@
  set_fact:
    inj_enabled: "{{ applications | inj_enabled(application_id, SRV_WEB_INJ_COMP_FEATURES_ALL) }}"

- block:
    - name: Include dependency 'srv-core'
      include_role:
        name: srv-core
      when: run_once_srv_core is not defined
    - include_tasks: utils/run_once.yml
  when: run_once_sys_srv_web_inj_compose is not defined

- name: "Activate Portfolio iFrame notifier for '{{ domain }}'"
- name: "Load CDN Service for '{{ domain }}'"
  include_role:
    name: sys-srv-web-inj-desktop
    public: true # Vars used in templates
  when: inj_enabled.desktop

- name: "Load CDN for '{{ domain }}'"
  include_role:
    name: web-svc-cdn
    public: false
  when:
    - inj_enabled.logout
    - inj_enabled.desktop
    - application_id != 'web-svc-cdn'
    - run_once_web_svc_cdn is not defined

- name: Overwrite CDN handlers with neutral handlers
  ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/utils/load_handlers.yml"
  loop:
    - svc-prx-openresty
    - docker-compose
  loop_control:
    label: "{{ item }}"
  vars:
    handler_role_name: "{{ item }}"
    name: sys-svc-cdn
    public: true # Expose variables so that they can be used in all injection roles

- name: Reinitialize 'inj_enabled' for '{{ domain }}', after modification by CDN
  set_fact:
@@ -42,25 +13,37 @@
    inj_head_features: "{{ SRV_WEB_INJ_COMP_FEATURES_ALL | inj_features('head') }}"
    inj_body_features: "{{ SRV_WEB_INJ_COMP_FEATURES_ALL | inj_features('body') }}"

- name: "Activate Desktop iFrame notifier for '{{ domain }}'"
  include_role:
    name: sys-front-inj-desktop
    public: true # Vars used in templates
  when: inj_enabled.desktop

- name: "Activate Corporate CSS for '{{ domain }}'"
  include_role:
    name: sys-srv-web-inj-css
  when:
    - inj_enabled.css
    - run_once_sys_srv_web_inj_css is not defined
    name: sys-front-inj-css
  when: inj_enabled.css

- name: "Activate Matomo Tracking for '{{ domain }}'"
  include_role:
    name: sys-srv-web-inj-matomo
    name: sys-front-inj-matomo
  when: inj_enabled.matomo

- name: "Activate Javascript for '{{ domain }}'"
  include_role:
    name: sys-srv-web-inj-javascript
    name: sys-front-inj-javascript
  when: inj_enabled.javascript

- name: "Activate logout proxy for '{{ domain }}'"
  include_role:
    name: sys-srv-web-inj-logout
    name: sys-front-inj-logout
    public: true # Vars used in templates
  when: inj_enabled.logout

- block:
    - name: Include dependency 'sys-svc-webserver'
      include_role:
        name: sys-svc-webserver
      when: run_once_sys_svc_webserver is not defined
    - include_tasks: utils/run_once.yml
  when: run_once_sys_front_inj_all is not defined
@@ -3,7 +3,7 @@
{% set kind = list_name | regex_replace('_snippets$','') %}
{% for f in features if inj_enabled.get(f) -%}
{{ list_name }}[#{{ list_name }} + 1] = [=[
{%- include 'roles/sys-srv-web-inj-' ~ f ~ '/templates/' ~ kind ~ '_sub.j2' -%}
{%- include 'roles/sys-front-inj-' ~ f ~ '/templates/' ~ kind ~ '_sub.j2' -%}
]=]
{% endfor -%}
{%- endmacro %}
@@ -1,7 +1,3 @@
{% if inj_enabled.css %}
{% include 'roles/sys-srv-web-inj-css/templates/location.conf.j2' %}
{% endif %}

{% if inj_enabled.logout %}
{% include 'roles/web-svc-logout/templates/logout-proxy.conf.j2' %}
{% endif %}
@@ -2,12 +2,12 @@

## Description

This Ansible role ensures **consistent global theming** across all Nginx-served applications by injecting a unified `global.css` file.
This Ansible role ensures **consistent global theming** across all Nginx-served applications by injecting CSS files.
The role leverages [`colorscheme-generator`](https://github.com/kevinveenbirkenbach/colorscheme-generator/) to generate a dynamic, customizable color palette for light and dark mode, compatible with popular web tools like **Bootstrap**, **Keycloak**, **Nextcloud**, **Taiga**, **Mastodon**, and many more.

## Overview

This role deploys a centralized global stylesheet (`global.css`) that overrides the default theming of web applications served via Nginx. It's optimized to run only once per deployment and generates a **cache-busting version number** based on file modification timestamps.
This role deploys a centralized global stylesheet that overrides the default theming of web applications served via Nginx. It's optimized to run only once per deployment and generates a **cache-busting version number** based on file modification timestamps.
It includes support for **dark mode**, **custom fonts**, and **extensive Bootstrap and UI component overrides**.

## Purpose
@@ -18,7 +18,7 @@ It makes all applications feel like part of the same ecosystem — visually and

## Features

- 🎨 **Dynamic Theming** via [`colorscheme-generator`](https://github.com/kevinveenbirkenbach/colorscheme-generator/)
- 📁 **Unified global.css** deployment for all Nginx applications
- 📁 **Unified CSS Base Configuration** deployment for all Nginx applications
- 🌒 **Dark mode support** out of the box
- 🚫 **No duplication** – tasks run once per deployment
- ⏱️ **Versioning logic** to bust browser cache
roles/sys-front-inj-css/tasks/01_core.yml (new file, 21 lines)
@@ -0,0 +1,21 @@
- name: Include dependency 'sys-svc-webserver'
  include_role:
    name: sys-svc-webserver
  when: run_once_sys_svc_webserver is not defined

- name: Generate color palette with colorscheme-generator
  set_fact:
    color_palette: "{{ lookup('colorscheme', CSS_BASE_COLOR, count=CSS_COUNT, shades=CSS_SHADES) }}"

- name: Generate inverted color palette with colorscheme-generator
  set_fact:
    inverted_color_palette: "{{ lookup('colorscheme', CSS_BASE_COLOR, count=CSS_COUNT, shades=CSS_SHADES, invert_lightness=True) }}"

- name: Deploy default CSS files
  template:
    src: "{{ ['css', item ~ '.j2'] | path_join }}"
    dest: "{{ [cdn_paths_all.shared.css, item] | path_join }}"
    owner: "{{ NGINX.USER }}"
    group: "{{ NGINX.USER }}"
    mode: '0644'
  loop: "{{ CSS_FILES }}"
roles/sys-front-inj-css/tasks/main.yml (new file, 25 lines)
@@ -0,0 +1,25 @@
- block:
    - include_tasks: 01_core.yml
    - include_tasks: utils/run_once.yml
  when: run_once_sys_front_inj_css is not defined

- name: "Resolve optional app style.css source for '{{ application_id }}'"
  vars:
    app_role_dir: "{{ playbook_dir }}/roles/{{ application_id }}"
    _app_style_src: >-
      {{ lookup('first_found', {
           'files': ['templates/style.css.j2','files/style.css'],
           'paths': [app_role_dir]
         }, errors='ignore') | default('', true) }}
  set_fact:
    app_style_src: "{{ _app_style_src }}"
    app_style_present: "{{ _app_style_src | length > 0 }}"

- name: "Deploy per-app '{{ app_style_src }}' to '{{ css_app_dst }}'"
  when: app_style_present
  copy:
    content: "{{ lookup('template', app_style_src) }}"
    dest: "{{ css_app_dst }}"
    owner: "{{ NGINX.USER }}"
    group: "{{ NGINX.USER }}"
    mode: '0644'
roles/sys-front-inj-css/templates/css/bootstrap.css.j2 (new file, 69 lines)
@@ -0,0 +1,69 @@

/* Buttons (Background, Text, Border, and Shadow)
   Now using a button background that is only slightly darker than the overall background */
html[native-dark-active] .btn, .btn {
  background-color: var(--color-01-87);
  background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-70), var(--color-01-91), var(--color-01-95), var(--color-01-95));
  color: var(--color-01-50);
  border-color: var(--color-01-80);
  cursor: pointer;
}

/* Navigation (Background and Text Colors) */
.navbar, .navbar-light, .navbar-dark, .navbar.bg-light {
  background-color: var(--color-01-90);
  /* New gradient based on original background (90 -5, 90, 90 +1, 90 +5) */
  background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-85), var(--color-01-90), var(--color-01-91), var(--color-01-95));
  color: var(--color-01-50);
  border-color: var(--color-01-85);
}

.navbar a {
  color: var(--color-01-40);
}

.navbar a.dropdown-item {
  color: var(--color-01-43);
}

/* Cards / Containers (Background, Border, and Shadow)
   Cards now use a slightly lighter background and a bold, clear shadow */
.card {
  background-color: var(--color-01-90);
  /* New gradient based on original background (90 -5, 90, 90 +1, 90 +5) */
  background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-85), var(--color-01-90), var(--color-01-91), var(--color-01-95));
  border-color: var(--color-01-85);
  color: var(--color-01-12);
}

.card-body {
  color: var(--color-01-40);
}

/* Dropdown Menu and Submenu (Background, Text, and Shadow) */
.navbar .dropdown-menu,
.nav-item .dropdown-menu {
  background-color: var(--color-01-80);
  /* New gradient based on original background (80 -5, 80, 80 +1, 80 +5) */
  background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-75), var(--color-01-80), var(--color-01-81), var(--color-01-85));
  color: var(--color-01-40);
}

.navbar-nav {
  --bs-nav-link-hover-color: var(--color-01-17);
}

.dropdown-item {
  color: var(--color-01-40);
  background-color: var(--color-01-80);
  /* New gradient based on original background (80 -5, 80, 80 +1, 80 +5) */
  background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-75), var(--color-01-80), var(--color-01-81), var(--color-01-85));
}

.dropdown-item:hover,
.dropdown-item:focus {
  background-color: var(--color-01-65);
  /* New gradient based on original background (65 -5, 65, 65 +1, 65 +5) */
  background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-60), var(--color-01-65), var(--color-01-66), var(--color-01-70));
  color: var(--color-01-40);
}
roles/sys-front-inj-css/templates/css/default.css.j2 (new file, 297 lines)
@@ -0,0 +1,297 @@
/***

Global Theming Styles – Color and Shadow Variables

HINT:
- Better to override CSS variables than individual elements.
- Don't use !important. If possible, use a specific selector.

*/

{% if design.font.import_url %}
@import url('{{ design.font.import_url }}');
{% endif %}

/* Auto-generated by colorscheme-generator */

:root {
{% for var_name, color in color_palette.items() %}
  {{ var_name }}: {{ color }};
{% endfor %}
}

@media (prefers-color-scheme: dark) {
  :root {
{% for var_name, color in inverted_color_palette.items() %}
    {{ var_name }}: {{ color }};
{% endfor %}
  }
}

:root, ::after, ::before, ::backdrop {
  /* For Dark Mode Plugin
   * @See https://chromewebstore.google.com/detail/dark-mode/dmghijelimhndkbmpgbldicpogfkceaj
   */
  --native-dark-accent-color: var(--color-01-60);                 /* was #a9a9a9 */
  --native-dark-bg-color: var(--color-01-10);                     /* was #292929 */
  --native-dark-bg-image-color: rgba(var(--color-01-rgb-01), 0.10); /* remains the same, or adjust if needed */
  --native-dark-border-color: var(--color-01-40);                 /* was #555555 */
  --native-dark-box-shadow: 0 0 0 1px rgb(var(--color-01-rgb-99), / 10%);
  --native-dark-cite-color: var(--color-01-70);                   /* was #92de92 – you might adjust if a green tone is needed */
  --native-dark-fill-color: var(--color-01-50);                   /* was #7d7d7d */
  --native-dark-font-color: var(--color-01-95);                   /* was #dcdcdc */
  --native-dark-link-color: var(--color-01-80);                   /* was #8db2e5 */
  --native-dark-visited-link-color: var(--color-01-85);           /* was #c76ed7 */
}

/* Bootstrap Overrides (Color/Shadow Variables Only) */
:root {
  --bs-black: var(--color-01-01);                 /* Original tone: Black (#000) */
  --bs-white: var(--color-01-99);                 /* Original tone: White (#fff) */
  --bs-gray: var(--color-01-50);                  /* Original tone: Gray (#6c757d) */
  --bs-gray-dark: var(--color-01-20);             /* Original tone: Dark Gray (#343a40) */
{% for i in range(1, 10) %}
  {# @see https://chatgpt.com/share/67bcd94e-bb44-800f-bf63-06d1ae0f5096 #}
  {% set gray = i * 100 %}
  {% set color = 100 - i * 10 %}
  --bs-gray-{{ gray }}: var(--color-01-{{ "%02d" % color }});
{% endfor %}
  --bs-primary: var(--color-01-65);               /* Original tone: Blue (#0d6efd) */
  --bs-light: var(--color-01-95);                 /* Original tone: Light (#f8f9fa) */
  --bs-dark: var(--color-01-10);                  /* Original tone: Dark (#212529) */
  --bs-primary-rgb: var(--color-01-rgb-65);       /* Original tone: Blue (13, 110, 253) */
  --bs-secondary-rgb: var(--color-01-rgb-50);     /* Original tone: Grayish (#6c757d / 108, 117, 125) */
  --bs-light-rgb: var(--color-01-rgb-95);         /* Original tone: Light (248, 249, 250) */
  --bs-dark-rgb: var(--color-01-rgb-10);          /* Original tone: Dark (33, 37, 41) */
  --bs-white-rgb: var(--color-01-rgb-99);         /* Original tone: White (255, 255, 255) */
  --bs-black-rgb: var(--color-01-rgb-01);         /* Original tone: Black (0, 0, 0) */
  --bs-body-color-rgb: var(--color-01-rgb-10);    /* Original tone: Dark (#212529 / 33, 37, 41) */
  --bs-body-bg-rgb: var(--color-01-rgb-99);       /* Original tone: White (#fff / 255, 255, 255) */
  --bs-body-color: var(--color-01-10);            /* Original tone: Dark (#212529) */
  --bs-body-bg: var(--color-01-99);               /* Original tone: White (#fff) */
  --bs-border-color: var(--color-01-85);          /* Original tone: Gray (#dee2e6) */
  --bs-link-color: var(--color-01-65);            /* Original tone: Blue (#0d6efd) */
  --bs-link-hover-color: var(--color-01-60);      /* Original tone: Darker Blue (#0a58ca) */
  --bs-code-color: var(--color-01-55);            /* Original tone: Pink (#d63384) */
  --bs-highlight-bg: var(--color-01-93);          /* Original tone: Light Yellow (#fff3cd) */
  --bs-list-group-bg: var(--color-01-40);
  --bs-emphasis-color: var(--color-01-01);        /* Mapped from #000 */
  --bs-emphasis-color-rgb: var(--color-01-rgb-01); /* Mapped from 0, 0, 0 */
  --bs-secondary-color: rgba(var(--color-01-rgb-10), 0.75); /* Mapped from rgba(33, 37, 41, 0.75) */
  --bs-secondary-color-rgb: var(--color-01-rgb-10); /* Mapped from 33, 37, 41 */
  --bs-secondary-bg: var(--color-01-90);          /* Mapped from #e9ecef */
  --bs-secondary-bg-rgb: var(--color-01-rgb-90);  /* Mapped from 233, 236, 239 */
  --bs-tertiary-color: rgba(var(--color-01-rgb-10), 0.5); /* Mapped from rgba(33, 37, 41, 0.5) */
  --bs-tertiary-color-rgb: var(--color-01-rgb-10); /* Mapped from 33, 37, 41 */
  --bs-tertiary-bg: var(--color-01-95);           /* Mapped from #f8f9fa */
  --bs-tertiary-bg-rgb: var(--color-01-rgb-95);   /* Mapped from 248, 249, 250 */
  --bs-link-color-rgb: var(--color-01-rgb-65);    /* Mapped from 13, 110, 253 */
  --bs-link-hover-color-rgb: var(--color-01-rgb-60); /* Mapped from 10, 88, 202 */
  --bs-highlight-color: var(--color-01-10);       /* Mapped from #212529 */
  --bs-border-color-translucent: rgba(var(--color-01-rgb-01), 0.175); /* Mapped from rgba(0, 0, 0, 0.175) */
  --bs-focus-ring-color: rgba(var(--color-01-rgb-65), 0.25); /* Mapped from rgba(13, 110, 253, 0.25) */

  --bs-table-color: var(--bs-emphasis-color);
  --bs-table-bg: var(--color-01-99);              /* White (#fff) */
  --bs-table-border-color: var(--color-01-99);    /* White (#fff) */
  --bs-table-striped-bg: var(--color-01-85);      /* Light Gray (roughly #dee2e6) */
  --bs-table-hover-color: var(--color-01-01);     /* Black (#000) */
  --bs-table-hover-bg: rgba(var(--bs-emphasis-color-rgb), 0.075);
}

/* Global Defaults (Colors Only) */
body, html[native-dark-active] {
  background-color: var(--color-01-93);
  background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-93), var(--color-01-91), var(--color-01-95), var(--color-01-93));
  background-attachment: fixed;
  color: var(--color-01-40);
  font-family: {{ design.font.type }};
}

{# All links (applies to all anchor elements regardless of state) #}
a {
  color: var(--color-01-50);
}

{# Unvisited links (applies only to links that have not been visited) #}
a:link {
  color: var(--color-01-55);
}

{# Visited links (applies only to links that have been visited) #}
a:visited {
  color: var(--color-01-45);
}

{# Hover state (applies when the mouse pointer is over the link) #}
a:hover {
  color: var(--color-01-60);
}

{# Active state (applies during the time the link is being activated, e.g., on click) #}
a:active {
  color: var(--color-01-65);
}

/** Set default buttons transparent **/
html[native-dark-active] button, button {
  background-color: var(--color-01-87);
}

button:hover, .btn:hover {
  filter: brightness(0.9);
}

/* {# Invalid state: when the input value fails validation criteria. Use danger color for error indication. #} */
input:invalid,
textarea:invalid,
select:invalid {
  background-color: var(--color-01-01);
  background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-01), var(--color-01-10));
  /* Use Bootstrap danger color for error messages */
  color: var(--bs-danger);
  border-color: var(--color-01-20);
}

/* {# Valid state: when the input value meets all validation criteria. Use success color for confirmation. #} */
input:valid,
textarea:valid,
select:valid {
  background-color: var(--color-01-80);
  background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-80), var(--color-01-90));
  /* Use Bootstrap success color for confirmation messages */
  color: var(--bs-success);
  border-color: var(--color-01-70);
}

/* {# Required field: applied to elements that must be filled out by the user. Use warning color for emphasis. #} */
input:required,
textarea:required,
select:required {
  background-color: var(--color-01-50);
  background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-50), var(--color-01-60));
  /* Use Bootstrap warning color to indicate a required field */
  color: var(--bs-warning);
  border-color: var(--color-01-70);
}

/* {# Optional field: applied to elements that are not mandatory. Use info color to denote additional information. #} */
input:optional,
textarea:optional,
select:optional {
  background-color: var(--color-01-60);
  background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-60), var(--color-01-70));
  /* Use Bootstrap info color to indicate optional information */
  color: var(--bs-info);
  border-color: var(--color-01-70);
}

/* {# Read-only state: when an element is not editable by the user. #} */
input:read-only,
textarea:read-only,
select:read-only {
  background-color: var(--color-01-80);
  background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-90), var(--color-01-70));
  color: var(--color-01-20);
  border-color: var(--color-01-50);
}

/* {# Read-write state: when an element is editable by the user. #} */
input:read-write,
textarea:read-write,
select:read-write {
  background-color: var(--color-01-70);
  background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-70), var(--color-01-80));
  color: var(--color-01-40);
  border-color: var(--color-01-70);
}

/* {# In-range: for inputs with a defined range, when the value is within the allowed limits. #} */
input:in-range,
textarea:in-range,
select:in-range {
  background-color: var(--color-01-70);
  background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-70), var(--color-01-85));
  color: var(--color-01-40);
  border-color: var(--color-01-70);
}

/* {# Out-of-range: for inputs with a defined range, when the value falls outside the allowed limits. #} */
input:out-of-range,
textarea:out-of-range,
select:out-of-range {
  background-color: var(--color-01-10);
  background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-10), var(--color-01-30));
  color: var(--color-01-10);
  border-color: var(--color-01-50);
}

/* {# Placeholder-shown: when the input field is displaying its placeholder text. #} */
input:placeholder-shown,
textarea:placeholder-shown,
select:placeholder-shown {
  background-color: var(--color-01-82);
  background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-82), var(--color-01-90));
  color: var(--color-01-40);
  border-color: var(--color-01-70);
}

/* {# Focus state: when the element is focused by the user. #} */
input:focus,
textarea:focus,
select:focus {
  background-color: var(--color-01-75);
  background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-75), var(--color-01-85));
  color: var(--color-01-40);
  border-color: var(--color-01-50);
}

/* {# Hover state: when the mouse pointer is over the element. #} */
input:hover,
textarea:hover,
select:hover {
  background-color: var(--color-01-78);
  background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-78), var(--color-01-88));
  color: var(--color-01-40);
  border-color: var(--color-01-65);
}

/* {# Active state: when the element is being activated (e.g., clicked). #} */
input:active,
textarea:active,
select:active {
  background-color: var(--color-01-68);
  background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-68), var(--color-01-78));
  color: var(--color-01-40);
  border-color: var(--color-01-60);
}

/* {# Checked state: specifically for radio buttons and checkboxes when selected. #} */
input:checked {
  background-color: var(--color-01-90);
  background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-90), var(--color-01-99));
  color: var(--color-01-40);
  border-color: var(--color-01-70);
}

option {
  background-color: var(--color-01-82);
  color: var(--color-01-07);
}

/* Tables (Borders and Header Colors) */
th, td {
  border-color: var(--color-01-70);
}

thead {
  background-color: var(--color-01-80);
  /* New gradient based on original background (80 -5, 80, 80 +1, 80 +5) */
  background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-75), var(--color-01-80), var(--color-01-81), var(--color-01-85));
  color: var(--color-01-40);
}

/* Headings (Text Color) */
h1, h2, h3, h4, h5, h6, p {
  color: var(--color-01-10);
}
roles/sys-front-inj-css/templates/head_sub.j2 (new file, 8 lines)
@@ -0,0 +1,8 @@
{% set __css_tpl_dir = [playbook_dir, 'roles', 'sys-front-inj-css', 'templates', 'css'] | path_join %}

{% for css_file in ['default.css','bootstrap.css'] %}
<link rel="stylesheet" href="{{ [ cdn_urls.shared.css, css_file, lookup('local_mtime_qs', [__css_tpl_dir, css_file ~ '.j2'] | path_join)] | url_join }}">
{% endfor %}
{% if app_style_present | bool %}
<link rel="stylesheet" href="{{ [ cdn_urls.role.release.css, 'style.css', lookup('local_mtime_qs', app_style_src)] | url_join }}">
{% endif %}
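The local_mtime_qs lookup used above appends a modification-time query string, so browsers refetch a stylesheet only when its source template actually changes. A minimal sketch of that idea (the real plugin's output format is an assumption here):

    # Hypothetical sketch of an mtime-based cache-busting query string:
    import os

    def mtime_qs(path: str) -> str:
        # e.g. returns '?v=1712345678'; the value changes whenever the file is modified
        return f"?v={int(os.path.getmtime(path))}"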
|
8
roles/sys-front-inj-css/vars/main.yml
Normal file
8
roles/sys-front-inj-css/vars/main.yml
Normal file
@@ -0,0 +1,8 @@
|
||||
# Constants
|
||||
CSS_FILES: ['default.css','bootstrap.css']
|
||||
CSS_BASE_COLOR: "{{ design.css.colors.base }}"
|
||||
CSS_COUNT: 7
|
||||
CSS_SHADES: 100
|
||||
|
||||
# Variables
|
||||
css_app_dst: "{{ [cdn_paths_all.role.release.css, 'style.css'] | path_join }}"
|
7
roles/sys-front-inj-desktop/tasks/01_deploy.yml
Normal file
7
roles/sys-front-inj-desktop/tasks/01_deploy.yml
Normal file
@@ -0,0 +1,7 @@
|
||||
- name: Deploy {{ INJ_DESKTOP_JS_FILE_NAME }}
|
||||
template:
|
||||
src: "{{ INJ_DESKTOP_JS_FILE_NAME }}.j2"
|
||||
dest: "{{ INJ_DESKTOP_JS_FILE_DESTINATION }}"
|
||||
owner: "{{ NGINX.USER }}"
|
||||
group: "{{ NGINX.USER }}"
|
||||
mode: '0644'
|
@@ -1,11 +1,11 @@
|
||||
- block:
|
||||
- name: Include dependency 'srv-core'
|
||||
- name: Include dependency 'sys-svc-webserver'
|
||||
include_role:
|
||||
name: srv-core
|
||||
when: run_once_srv_core is not defined
|
||||
name: sys-svc-webserver
|
||||
when: run_once_sys_svc_webserver is not defined
|
||||
- include_tasks: 01_deploy.yml
|
||||
- include_tasks: utils/run_once.yml
|
||||
when: run_once_sys_srv_web_inj_desktop is not defined
|
||||
when: run_once_sys_front_inj_desktop is not defined
|
||||
|
||||
# --- Build tiny inline initializer (CSP-hashed) ---
|
||||
- name: "Load iFrame init code for '{{ application_id }}'"
|
Some files were not shown because too many files have changed in this diff.