Mirror of https://github.com/kevinveenbirkenbach/computer-playbook.git
Synced 2025-11-18 19:16:27 +00:00

Compare commits: 7405883b48 ... feature/xw (105 commits)
| SHA1 |
| --- |
| 2f46b99e4e | |||
| 295ae7e477 | |||
| c67ccc1df6 | |||
| cb483f60d1 | |||
| 2be73502ca | |||
| 57d5269b07 | |||
| 1eefdea050 | |||
| 561160504e | |||
| 9a4bf91276 | |||
| 468b6e734c | |||
| 83cb94b6ff | |||
| 6857295969 | |||
| 8ab398f679 | |||
| 31133ddd90 | |||
| 783b1e152d | |||
| eca567fefd | |||
| 905f461ee8 | |||
| 9f0b259ba9 | |||
| 06e4323faa | |||
| 3d99226f37 | |||
| 73ba09fbe2 | |||
| 01ea9b76ce | |||
| c22acf202f | |||
| 61e138c1a6 | |||
| 07c8e036ec | |||
| 0b36059cd2 | |||
| d76e384ae3 | |||
| e6f4f3a6a4 | |||
| a80b26ed9e | |||
| 45ec7b0ead | |||
| ec396d130c | |||
| 93c2fbedd7 | |||
| d006f0ba5e | |||
| dd43722e02 | |||
| 05d7ddc491 | |||
| e54436821c | |||
| ed73a37795 | |||
| adff9271fd | |||
| 2f0fb2cb69 | |||
| 6abf2629e0 | |||
| 6a8e0f38d8 | |||
| ae618cbf19 | |||
| c835ca8f2c | |||
| 087175a3c7 | |||
| 3da645f3b8 | |||
| a996e2190f | |||
| 7dccffd52d | |||
| 853f2c3e2d | |||
| b2978a3141 | |||
| 0e0b703ccd | |||
| 0b86b2f057 | |||
| 80e048a274 | |||
| 2610aec293 | |||
| 07db162368 | |||
| a526d1adc4 | |||
| ca95079111 | |||
| e410d66cb4 | |||
| ab48cf522f | |||
| 41c12bdc12 | |||
| aae463b602 | |||
| bb50551533 | |||
| 098099b41e | |||
| 0a7d767252 | |||
| d88599f76c | |||
| 4d9890406e | |||
| 59b652958f | |||
| a327adf8db | |||
| 7a38cb90fb | |||
| 9d6cf03f5b | |||
| 9439ac7f76 | |||
| 23353ac878 | |||
| 8beda2d45d | |||
| 5773409bd7 | |||
| b3ea962338 | |||
| b9fbf92461 | |||
| 6824e444b0 | |||
| 5cdcc18a99 | |||
| e7702948b8 | |||
| 09a4c243d7 | |||
| 1d5a50abf2 | |||
| 0d99c7f297 | |||
| 0a17e54d8c | |||
| bf94338845 | |||
| 5d42b78b3d | |||
| 26a1992d84 | |||
| 2439beb95a | |||
| 251f7b227d | |||
| 3fbb9c38a8 | |||
| 29e8b3a590 | |||
| 27b89d8fb6 | |||
| 55f2d15e93 | |||
| aa19a97ed6 | |||
| c06d1c4d17 | |||
| 66f294537d | |||
| a9097a3ec3 | |||
| fc59c64273 | |||
| dbbb3510f3 | |||
| eb3bf543a4 | |||
| 4f5602c791 | |||
| 75d476267e | |||
| c3e5db7f2e | |||
| dfd2d243b7 | |||
| 78ad2ea4b6 | |||
| c362e160fc | |||
| a044028e03 |

ansible.cfg (12)
@@ -1,5 +1,6 @@
[defaults]
# --- Performance & Behavior ---
pipelining = True
forks = 25
strategy = linear
gathering = smart

@@ -14,19 +15,14 @@ stdout_callback = yaml
callbacks_enabled = profile_tasks,timer

# --- Plugin paths ---
filter_plugins = ./filter_plugins
filter_plugins = ./filter_plugins
lookup_plugins = ./lookup_plugins
module_utils = ./module_utils

[ssh_connection]
# Multiplexing: safer socket path in HOME instead of /tmp
ssh_args = -o ControlMaster=auto -o ControlPersist=20s -o ControlPath=~/.ssh/ansible-%h-%p-%r \
-o ServerAliveInterval=15 -o ServerAliveCountMax=3 -o StrictHostKeyChecking=accept-new \
-o PreferredAuthentications=publickey,password,keyboard-interactive

# Pipelining boosts speed; works fine if sudoers does not enforce "requiretty"
ssh_args = -o ControlMaster=auto -o ControlPersist=20s -o ControlPath=~/.ssh/ansible-%h-%p-%r -o ServerAliveInterval=15 -o ServerAliveCountMax=3 -o StrictHostKeyChecking=accept-new -o PreferredAuthentications=publickey,password,keyboard-interactive
pipelining = True
scp_if_ssh = smart
transfer_method = smart

[persistent_connection]
connect_timeout = 30
@@ -83,6 +83,13 @@ class DefaultsGenerator:
print(f"Error during rendering: {e}", file=sys.stderr)
sys.exit(1)

# Sort applications by application key for stable output
apps = result.get("defaults_applications", {})
if isinstance(apps, dict) and apps:
result["defaults_applications"] = {
k: apps[k] for k in sorted(apps.keys())
}

# Write output
self.output_file.parent.mkdir(parents=True, exist_ok=True)
with self.output_file.open("w", encoding="utf-8") as f:

@@ -220,6 +220,10 @@ def main():
print(f"Error building user entries: {e}", file=sys.stderr)
sys.exit(1)

# Sort users by key for deterministic output
if isinstance(users, dict) and users:
users = OrderedDict(sorted(users.items()))

# Convert OrderedDict into plain dict for YAML
default_users = {'default_users': users}
plain_data = dictify(default_users)
@@ -10,9 +10,23 @@ from module_utils.config_utils import get_app_conf
from module_utils.get_url import get_url

def _dedup_preserve(seq):
"""Return a list with stable order and unique items."""
seen = set()
out = []
for x in seq:
if x not in seen:
seen.add(x)
out.append(x)
return out

class FilterModule(object):
"""
Custom filters for Content Security Policy generation and CSP-related utilities.
Jinja filters for building a robust, CSP3-aware Content-Security-Policy header.
Safari/CSP2 compatibility is ensured by merging the -elem/-attr variants into the base
directives (style-src, script-src). We intentionally do NOT mirror back into -elem/-attr
to allow true CSP3 granularity on modern browsers.
"""

def filters(self):

@@ -61,11 +75,14 @@ class FilterModule(object):
"""
Returns CSP flag tokens (e.g., "'unsafe-eval'", "'unsafe-inline'") for a directive,
merging sane defaults with app config.
Default: 'unsafe-inline' is enabled for style-src and style-src-elem.

Defaults:
- For styles we enable 'unsafe-inline' by default (style-src, style-src-elem, style-src-attr),
because many apps rely on inline styles / style attributes.
- For scripts we do NOT enable 'unsafe-inline' by default.
"""
# Defaults that apply to all apps
default_flags = {}
if directive in ('style-src', 'style-src-elem'):
if directive in ('style-src', 'style-src-elem', 'style-src-attr'):
default_flags = {'unsafe-inline': True}

configured = get_app_conf(

@@ -76,7 +93,6 @@ class FilterModule(object):
{}
)

# Merge defaults with configured flags (configured overrides defaults)
merged = {**default_flags, **configured}

tokens = []

@@ -131,77 +147,148 @@ class FilterModule(object):
):
"""
Builds the Content-Security-Policy header value dynamically based on application settings.
- Flags (e.g., 'unsafe-eval', 'unsafe-inline') are read from server.csp.flags.<directive>,
with sane defaults applied in get_csp_flags (always 'unsafe-inline' for style-src and style-src-elem).
- Inline hashes are read from server.csp.hashes.<directive>.
- Whitelists are read from server.csp.whitelist.<directive>.
- Inline hashes are added only if the final tokens do NOT include 'unsafe-inline'.

Key points:
- CSP3-aware: supports base/elem/attr for styles and scripts.
- Safari/CSP2 fallback: base directives (style-src, script-src) always include
the union of their -elem/-attr variants.
- We do NOT mirror back into -elem/-attr; finer CSP3 rules remain effective
on modern browsers if you choose to use them.
- If the app explicitly disables a token on the *base* (e.g. style-src.unsafe-inline: false),
that token is removed from the merged base even if present in elem/attr.
- Inline hashes are added ONLY if that directive does NOT include 'unsafe-inline'.
- Whitelists/flags/hashes read from:
server.csp.whitelist.<directive>
server.csp.flags.<directive>
server.csp.hashes.<directive>
- “Smart defaults”:
* internal CDN for style/script elem and connect
* Matomo endpoints (if feature enabled) for script-elem/connect
* Simpleicons (if feature enabled) for connect
* reCAPTCHA (if feature enabled) for script-elem/frame-src
* frame-ancestors extended for desktop/logout/keycloak if enabled
"""
try:
directives = [
'default-src', # Fallback source list for content types not explicitly listed
'connect-src', # Allowed URLs for XHR, WebSockets, EventSource, fetch()
'frame-ancestors', # Who may embed this page
'frame-src', # Sources for nested browsing contexts (e.g., <iframe>)
'script-src', # Sources for script execution
'script-src-elem', # Sources for <script> elements
'style-src', # Sources for inline styles and <style>/<link> elements
'style-src-elem', # Sources for <style> and <link rel="stylesheet">
'font-src', # Sources for fonts
'worker-src', # Sources for workers
'manifest-src', # Sources for web app manifests
'media-src', # Sources for audio and video
'default-src',
'connect-src',
'frame-ancestors',
'frame-src',
'script-src',
'script-src-elem',
'script-src-attr',
'style-src',
'style-src-elem',
'style-src-attr',
'font-src',
'worker-src',
'manifest-src',
'media-src',
]

parts = []
tokens_by_dir = {}
explicit_flags_by_dir = {}

for directive in directives:
# Collect explicit flags (to later respect explicit "False" on base during merge)
explicit_flags = get_app_conf(
applications,
application_id,
'server.csp.flags.' + directive,
False,
{}
)
explicit_flags_by_dir[directive] = explicit_flags

tokens = ["'self'"]

# 1) Load flags (includes defaults from get_csp_flags)
# 1) Flags (with sane defaults)
flags = self.get_csp_flags(applications, application_id, directive)
tokens += flags

# 2) Allow fetching from internal CDN by default for selected directives
if directive in ['script-src-elem', 'connect-src', 'style-src-elem']:
# 2) Internal CDN defaults for selected directives
if directive in ('script-src-elem', 'connect-src', 'style-src-elem', 'style-src'):
tokens.append(get_url(domains, 'web-svc-cdn', web_protocol))

# 3) Matomo integration if feature is enabled
if directive in ['script-src-elem', 'connect-src']:
# 3) Matomo (if enabled)
if directive in ('script-src-elem', 'connect-src'):
if self.is_feature_enabled(applications, matomo_feature_name, application_id):
tokens.append(get_url(domains, 'web-app-matomo', web_protocol))

# 4) ReCaptcha integration (scripts + frames) if feature is enabled
# 4) Simpleicons (if enabled) – typically used via connect-src (fetch)
if directive == 'connect-src':
if self.is_feature_enabled(applications, 'simpleicons', application_id):
tokens.append(get_url(domains, 'web-svc-simpleicons', web_protocol))

# 5) reCAPTCHA (if enabled) – scripts + frames
if self.is_feature_enabled(applications, 'recaptcha', application_id):
if directive in ['script-src-elem', 'frame-src']:
if directive in ('script-src-elem', 'frame-src'):
tokens.append('https://www.gstatic.com')
tokens.append('https://www.google.com')

# 5) Frame ancestors handling (desktop + logout support)
# 6) Frame ancestors (desktop + logout)
if directive == 'frame-ancestors':
if self.is_feature_enabled(applications, 'desktop', application_id):
# Allow being embedded by the desktop app domain (and potentially its parent)
# Allow being embedded by the desktop app domain's site
domain = domains.get('web-app-desktop')[0]
sld_tld = ".".join(domain.split(".")[-2:]) # e.g., example.com
tokens.append(f"{sld_tld}")
if self.is_feature_enabled(applications, 'logout', application_id):
# Allow embedding via logout proxy and Keycloak app
tokens.append(get_url(domains, 'web-svc-logout', web_protocol))
tokens.append(get_url(domains, 'web-app-keycloak', web_protocol))

# 6) Custom whitelist entries
# 7) Custom whitelist
tokens += self.get_csp_whitelist(applications, application_id, directive)

# 7) Add inline content hashes ONLY if final tokens do NOT include 'unsafe-inline'
# (Check tokens, not flags, to include defaults and later modifications.)
# 8) Inline hashes (only if this directive does NOT include 'unsafe-inline')
if "'unsafe-inline'" not in tokens:
for snippet in self.get_csp_inline_content(applications, application_id, directive):
tokens.append(self.get_csp_hash(snippet))

# Append directive
parts.append(f"{directive} {' '.join(tokens)};")
tokens_by_dir[directive] = _dedup_preserve(tokens)

# 8) Static img-src directive (kept permissive for data/blob and any host)
# ----------------------------------------------------------
# CSP3 families → ensure CSP2 fallback (Safari-safe)
# Merge style/script families so base contains union of elem/attr.
# Respect explicit disables on the base (e.g. unsafe-inline=False).
# Do NOT mirror back into elem/attr (keep granularity).
# ----------------------------------------------------------
def _strip_if_disabled(unioned_tokens, explicit_flags, name):
"""
Remove a token (e.g. 'unsafe-inline') from the unioned token list
if it is explicitly disabled in the base directive flags.
"""
if isinstance(explicit_flags, dict) and explicit_flags.get(name) is False:
tok = f"'{name}'"
return [t for t in unioned_tokens if t != tok]
return unioned_tokens

def merge_family(base_key, elem_key, attr_key):
base = tokens_by_dir.get(base_key, [])
elem = tokens_by_dir.get(elem_key, [])
attr = tokens_by_dir.get(attr_key, [])
union = _dedup_preserve(base + elem + attr)

# Respect explicit disables on the base
explicit_base = explicit_flags_by_dir.get(base_key, {})
# The most relevant flags for script/style:
for flag_name in ('unsafe-inline', 'unsafe-eval'):
union = _strip_if_disabled(union, explicit_base, flag_name)

tokens_by_dir[base_key] = union # write back only to base

merge_family('style-src', 'style-src-elem', 'style-src-attr')
merge_family('script-src', 'script-src-elem', 'script-src-attr')

# ----------------------------------------------------------
# Assemble header
# ----------------------------------------------------------
parts = []
for directive in directives:
if directive in tokens_by_dir:
parts.append(f"{directive} {' '.join(tokens_by_dir[directive])};")

# Keep permissive img-src for data/blob + any host (as before)
parts.append("img-src * data: blob:;")

return ' '.join(parts)
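The Safari/CSP2 fallback merge above can be hard to follow inside the diff. The following standalone sketch (illustrative only, not part of the commit, with hypothetical token lists) reproduces the behaviour: the base directive receives the union of its -elem/-attr variants, but an explicit disable on the base wins.

```python
def dedup_preserve(seq):
    """Stable-order deduplication, as in the filter plugin."""
    seen, out = set(), []
    for x in seq:
        if x not in seen:
            seen.add(x)
            out.append(x)
    return out

# Hypothetical per-directive tokens before the family merge.
tokens = {
    'style-src':      ["'self'"],
    'style-src-elem': ["'self'", "'unsafe-inline'", "https://cdn.example.org"],
    'style-src-attr': ["'self'", "'unsafe-inline'"],
}
explicit_base_flags = {'unsafe-inline': False}  # app disabled it on the base

# Union elem/attr into the base (CSP2 fallback for Safari) ...
union = dedup_preserve(
    tokens['style-src'] + tokens['style-src-elem'] + tokens['style-src-attr']
)
# ... but respect the explicit disable on the base directive.
if explicit_base_flags.get('unsafe-inline') is False:
    union = [t for t in union if t != "'unsafe-inline'"]

print(union)  # ["'self'", "https://cdn.example.org"]
```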
@@ -76,8 +76,9 @@ _applications_nextcloud_oidc_flavor: >-
False,
'oidc_login'
if applications
| get_app_conf('web-app-nextcloud','features.ldap',False, True)
else 'sociallogin'
| get_app_conf('web-app-nextcloud','features.ldap',False, True, True)
else 'sociallogin',
True
)
}}
@@ -5,5 +5,6 @@ MODE_DUMMY: false # Executes dummy/test routines instead
MODE_UPDATE: true # Executes updates
MODE_DEBUG: false # This enables debugging in ansible and in the apps, You SHOULD NOT enable this on production servers
MODE_RESET: false # Cleans up all Infinito.Nexus files. It's necessary to run to whole playbook and not particial roles when using this function.
MODE_CLEANUP: "{{ MODE_DEBUG | bool }}" # Cleanup unused files and configurations
MODE_CLEANUP: true # Cleanup unused files and configurations
MODE_ASSERT: "{{ MODE_DEBUG | bool }}" # Executes validation tasks during the run.
MODE_BACKUP: true # Executes the Backup before the deployment
@@ -1,4 +1,3 @@

# Service Timers

## Meta

@@ -6,12 +5,12 @@ SYS_TIMER_ALL_ENABLED: "{{ MODE_DEBUG }}" # Runtime Var

## Server Tact Variables

HOURS_SERVER_AWAKE: "0..23" # Ours in which the server is "awake" (100% working). Rest of the time is reserved for maintanance
HOURS_SERVER_AWAKE: "6..23" # Ours in which the server is "awake" (100% working). Rest of the time is reserved for maintanance
RANDOMIZED_DELAY_SEC: "5min" # Random delay for systemd timers to avoid peak loads.

## Timeouts for all services
SYS_TIMEOUT_DOCKER_RPR_HARD: "10min"
SYS_TIMEOUT_DOCKER_RPR_SOFT: "{{ SYS_TIMEOUT_DOCKER_RPR_HARD }}"
SYS_TIMEOUT_DOCKER_RPR_SOFT: "{{ SYS_TIMEOUT_DOCKER_RPR_HARD }}"
SYS_TIMEOUT_CLEANUP_SERVICES: "15min"
SYS_TIMEOUT_DOCKER_UPDATE: "20min"
SYS_TIMEOUT_STORAGE_OPTIMIZER: "{{ SYS_TIMEOUT_DOCKER_UPDATE }}"

@@ -24,29 +23,29 @@ SYS_SCHEDULE_HEALTH_BTRFS: "*-*-* 00:00:00"
SYS_SCHEDULE_HEALTH_JOURNALCTL: "*-*-* 00:00:00" # Check once per day the journalctl for errors
SYS_SCHEDULE_HEALTH_DISC_SPACE: "*-*-* 06,12,18,00:00:00" # Check four times per day if there is sufficient disc space
SYS_SCHEDULE_HEALTH_DOCKER_CONTAINER: "*-*-* {{ HOURS_SERVER_AWAKE }}:00:00" # Check once per hour if the docker containers are healthy
SYS_SCHEDULE_HEALTH_DOCKER_VOLUMES: "*-*-* {{ HOURS_SERVER_AWAKE }}:15:00" # Check once per hour if the docker volumes are healthy
SYS_SCHEDULE_HEALTH_CSP_CRAWLER: "*-*-* {{ HOURS_SERVER_AWAKE }}:30:00" # Check once per hour if all CSP are fullfilled available
SYS_SCHEDULE_HEALTH_NGINX: "*-*-* {{ HOURS_SERVER_AWAKE }}:45:00" # Check once per hour if all webservices are available
SYS_SCHEDULE_HEALTH_DOCKER_VOLUMES: "*-*-* {{ HOURS_SERVER_AWAKE }}:00:00" # Check once per hour if the docker volumes are healthy
SYS_SCHEDULE_HEALTH_CSP_CRAWLER: "*-*-* {{ HOURS_SERVER_AWAKE }}:00:00" # Check once per hour if all CSP are fullfilled available
SYS_SCHEDULE_HEALTH_NGINX: "*-*-* {{ HOURS_SERVER_AWAKE }}:00:00" # Check once per hour if all webservices are available
SYS_SCHEDULE_HEALTH_MSMTP: "*-*-* 00:00:00" # Check once per day SMTP Server

### Schedule for cleanup tasks
SYS_SCHEDULE_CLEANUP_BACKUPS: "*-*-* 00,06,12,18:30:00" # Cleanup backups every 6 hours, MUST be called before disc space cleanup
SYS_SCHEDULE_CLEANUP_DISC_SPACE: "*-*-* 07,13,19,01:30:00" # Cleanup disc space every 6 hours
SYS_SCHEDULE_CLEANUP_CERTS: "*-*-* 12,00:45:00" # Deletes and revokes unused certs
SYS_SCHEDULE_CLEANUP_FAILED_BACKUPS: "*-*-* 12:00:00" # Clean up failed docker backups every noon
SYS_SCHEDULE_CLEANUP_CERTS: "*-*-* 20:00" # Deletes and revokes unused certs once per day
SYS_SCHEDULE_CLEANUP_FAILED_BACKUPS: "*-*-* 21:00" # Clean up failed docker backups once per day
SYS_SCHEDULE_CLEANUP_BACKUPS: "*-*-* 22:00" # Cleanup backups once per day, MUST be called before disc space cleanup
SYS_SCHEDULE_CLEANUP_DISC_SPACE: "*-*-* 23:00" # Cleanup disc space once per day

### Schedule for repair services
SYS_SCHEDULE_REPAIR_BTRFS_AUTO_BALANCER: "Sat *-*-01..07 00:00:00" # Execute btrfs auto balancer every first Saturday of a month
SYS_SCHEDULE_REPAIR_DOCKER_HARD: "Sun *-*-* 08:00:00" # Restart docker instances every Sunday at 8:00 AM
SYS_SCHEDULE_REPAIR_DOCKER_HARD: "Sun *-*-* 00:00:00" # Restart docker instances every Sunday

### Schedule for backup tasks
SYS_SCHEDULE_BACKUP_DOCKER_TO_LOCAL: "*-*-* 03:30:00"
SYS_SCHEDULE_BACKUP_REMOTE_TO_LOCAL: "*-*-* 21:30:00"
SYS_SCHEDULE_BACKUP_REMOTE_TO_LOCAL: "*-*-* 00:30:00" # Pull Backup of the previous day
SYS_SCHEDULE_BACKUP_DOCKER_TO_LOCAL: "*-*-* 01:00:00" # Backup the current day

### Schedule for Maintenance Tasks
SYS_SCHEDULE_MAINTANANCE_LETSENCRYPT_RENEW: "*-*-* 12,00:30:00" # Renew Mailu certificates twice per day
SYS_SCHEDULE_MAINTANANCE_LETSENCRYPT_DEPLOY: "*-*-* 13,01:30:00" # Deploy letsencrypt certificates twice per day to docker containers
SYS_SCHEDULE_MAINTANANCE_NEXTCLOUD: "22" # Do nextcloud maintanace between 22:00 and 02:00
SYS_SCHEDULE_MAINTANANCE_LETSENCRYPT_RENEW: "*-*-* 10,22:00:00" # Renew Mailu certificates twice per day
SYS_SCHEDULE_MAINTANANCE_LETSENCRYPT_DEPLOY: "*-*-* 11,23:00:00" # Deploy letsencrypt certificates twice per day to docker containers
SYS_SCHEDULE_MAINTANANCE_NEXTCLOUD: "21" # Do nextcloud maintanace between 21:00 and 01:00

### Animation
SYS_SCHEDULE_ANIMATION_KEYBOARD_COLOR: "*-*-* *:*:00" # Change the keyboard color every minute
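The schedule strings above are systemd `OnCalendar` expressions. A quick way to check how one of them is interpreted is `systemd-analyze calendar` (output abridged; the next-elapse line depends on the current time):

```sh
systemd-analyze calendar "*-*-* 06,12,18,00:00:00"
#   Original form: *-*-* 06,12,18,00:00:00
# Normalized form: *-*-* 00,06,12,18:00:00
#     Next elapse: (depends on the current time)
```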
@@ -112,6 +112,8 @@ defaults_networks:
subnet: 192.168.104.32/28
web-svc-coturn:
subnet: 192.168.104.48/28
web-app-mini-qr:
subnet: 192.168.104.64/28

# /24 Networks / 254 Usable Clients
web-app-bigbluebutton:
@@ -80,6 +80,7 @@ ports:
web-app-flowise: 8056
web-app-minio_api: 8057
web-app-minio_console: 8058
web-app-mini-qr: 8059
web-app-bigbluebutton: 48087 # This port is predefined by bbb. @todo Try to change this to a 8XXX port
public:
# The following ports should be changed to 22 on the subdomain via stream mapping
@@ -6,6 +6,7 @@ __metaclass__ = type
import os
import subprocess
import time
from datetime import datetime

class CertUtils:
_domain_cert_mapping = None

@@ -22,6 +23,30 @@ class CertUtils:
except subprocess.CalledProcessError:
return ""

@staticmethod
def run_openssl_dates(cert_path):
"""
Returns (not_before_ts, not_after_ts) as POSIX timestamps or (None, None) on failure.
"""
try:
output = subprocess.check_output(
['openssl', 'x509', '-in', cert_path, '-noout', '-startdate', '-enddate'],
universal_newlines=True
)
nb, na = None, None
for line in output.splitlines():
line = line.strip()
if line.startswith('notBefore='):
nb = line.split('=', 1)[1].strip()
elif line.startswith('notAfter='):
na = line.split('=', 1)[1].strip()
def _parse(openssl_dt):
# OpenSSL format example: "Oct 10 12:34:56 2025 GMT"
return int(datetime.strptime(openssl_dt, "%b %d %H:%M:%S %Y %Z").timestamp())
return (_parse(nb) if nb else None, _parse(na) if na else None)
except Exception:
return (None, None)

@staticmethod
def extract_sans(cert_text):
dns_entries = []

@@ -59,7 +84,6 @@ class CertUtils:
else:
return domain == san

@classmethod
def build_snapshot(cls, cert_base_path):
snapshot = []

@@ -82,6 +106,17 @@ class CertUtils:

@classmethod
def refresh_cert_mapping(cls, cert_base_path, debug=False):
"""
Build mapping: SAN -> list of entries
entry = {
'folder': str,
'cert_path': str,
'mtime': float,
'not_before': int|None,
'not_after': int|None,
'is_wildcard': bool
}
"""
cert_files = cls.list_cert_files(cert_base_path)
mapping = {}
for cert_path in cert_files:

@@ -90,46 +125,82 @@ class CertUtils:
continue
sans = cls.extract_sans(cert_text)
folder = os.path.basename(os.path.dirname(cert_path))
try:
mtime = os.stat(cert_path).st_mtime
except FileNotFoundError:
mtime = 0.0
nb, na = cls.run_openssl_dates(cert_path)

for san in sans:
if san not in mapping:
mapping[san] = folder
entry = {
'folder': folder,
'cert_path': cert_path,
'mtime': mtime,
'not_before': nb,
'not_after': na,
'is_wildcard': san.startswith('*.'),
}
mapping.setdefault(san, []).append(entry)

cls._domain_cert_mapping = mapping
if debug:
print(f"[DEBUG] Refreshed domain-to-cert mapping: {mapping}")
print(f"[DEBUG] Refreshed domain-to-cert mapping (counts): "
f"{ {k: len(v) for k, v in mapping.items()} }")

@classmethod
def ensure_cert_mapping(cls, cert_base_path, debug=False):
if cls._domain_cert_mapping is None or cls.snapshot_changed(cert_base_path):
cls.refresh_cert_mapping(cert_base_path, debug)

@staticmethod
def _score_entry(entry):
"""
Return tuple used for sorting newest-first:
(not_before or -inf, mtime)
"""
nb = entry.get('not_before')
mtime = entry.get('mtime', 0.0)
return (nb if nb is not None else -1, mtime)

@classmethod
def find_cert_for_domain(cls, domain, cert_base_path, debug=False):
cls.ensure_cert_mapping(cert_base_path, debug)

exact_match = None
wildcard_match = None
candidates_exact = []
candidates_wild = []

for san, folder in cls._domain_cert_mapping.items():
for san, entries in cls._domain_cert_mapping.items():
if san == domain:
exact_match = folder
break
if san.startswith('*.'):
candidates_exact.extend(entries)
elif san.startswith('*.'):
base = san[2:]
if domain.count('.') == base.count('.') + 1 and domain.endswith('.' + base):
wildcard_match = folder
candidates_wild.extend(entries)

if exact_match:
if debug:
print(f"[DEBUG] Exact match for {domain} found in {exact_match}")
return exact_match
def _pick_newest(entries):
if not entries:
return None
# newest by (not_before, mtime)
best = max(entries, key=cls._score_entry)
return best

if wildcard_match:
if debug:
print(f"[DEBUG] Wildcard match for {domain} found in {wildcard_match}")
return wildcard_match
best_exact = _pick_newest(candidates_exact)
best_wild = _pick_newest(candidates_wild)

if best_exact and debug:
print(f"[DEBUG] Best exact match for {domain}: {best_exact['folder']} "
f"(not_before={best_exact['not_before']}, mtime={best_exact['mtime']})")
if best_wild and debug:
print(f"[DEBUG] Best wildcard match for {domain}: {best_wild['folder']} "
f"(not_before={best_wild['not_before']}, mtime={best_wild['mtime']})")

# Prefer exact if it exists; otherwise wildcard
chosen = best_exact or best_wild

if chosen:
return chosen['folder']

if debug:
print(f"[DEBUG] No certificate folder found for {domain}")

return None
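`run_openssl_dates` above only wraps a plain `openssl x509` call; run by hand against a hypothetical certificate path it produces exactly the two lines the parser looks for:

```sh
openssl x509 -in /etc/letsencrypt/live/example.com/cert.pem -noout -startdate -enddate
# notBefore=Oct 10 12:34:56 2025 GMT
# notAfter=Jan  8 12:34:55 2026 GMT
```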
@@ -24,7 +24,7 @@ class ConfigEntryNotSetError(AppConfigKeyError):
pass

def get_app_conf(applications, application_id, config_path, strict=True, default=None):
def get_app_conf(applications, application_id, config_path, strict=True, default=None, skip_missing_app=False):
# Path to the schema file for this application
schema_path = os.path.join('roles', application_id, 'schema', 'main.yml')

@@ -133,6 +133,9 @@ def get_app_conf(applications, application_id, config_path, strict=True, default
try:
obj = applications[application_id]
except KeyError:
if skip_missing_app:
# Simply return default instead of failing
return default if default is not None else False
raise AppConfigKeyError(
f"Application ID '{application_id}' not found in applications dict.\n"
f"path_trace: {path_trace}\n"
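A hypothetical variable illustrating the new trailing `skip_missing_app` argument: with it set, the filter returns the default instead of raising `AppConfigKeyError` when the application id is not present in `applications` (this mirrors the Nextcloud flavor lookup shown earlier in this diff).

```yaml
# Illustrative only; arguments are config_path, strict, default, skip_missing_app.
nextcloud_ldap_enabled: >-
  {{ applications | get_app_conf('web-app-nextcloud', 'features.ldap', False, True, True) }}
```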
@@ -3,4 +3,7 @@ collections:
- name: community.general
- name: hetzner.hcloud
yay:
- python-simpleaudio
- python-simpleaudio
- python-numpy
pacman:
- ansible
@@ -153,6 +153,11 @@ roles:
description: "Core AI building blocks—model serving, OpenAI-compatible gateways, vector databases, orchestration, and chat UIs."
icon: "fas fa-brain"
invokable: true
bkp:
title: "Backup Services"
description: "Service-level backup and recovery components—handling automated data snapshots, remote backups, synchronization services, and backup orchestration across databases, files, and containers."
icon: "fas fa-database"
invokable: true
user:
title: "Users & Access"
description: "User accounts & access control"
@@ -127,7 +127,7 @@
#de_BE@euro ISO-8859-15
#de_CH.UTF-8 UTF-8
#de_CH ISO-8859-1
de_DE.UTF-8 UTF-8
#de_DE.UTF-8 UTF-8
#de_DE ISO-8859-1
#de_DE@euro ISO-8859-15
#de_IT.UTF-8 UTF-8
roles/dev-yay/defaults/main.yml (4, new file)

@@ -0,0 +1,4 @@
AUR_HELPER: yay
AUR_BUILDER_USER: aur_builder
AUR_BUILDER_GROUP: wheel
AUR_BUILDER_SUDOERS_PATH: /etc/sudoers.d/11-install-aur_builder
@@ -6,42 +6,53 @@
- dev-git
- dev-base-devel

- name: install yay
- name: Install yay build prerequisites
community.general.pacman:
name:
- base-devel
- patch
state: present

- name: Create the `aur_builder` user
- name: Create the AUR builder user
become: true
ansible.builtin.user:
name: aur_builder
name: "{{ AUR_BUILDER_USER }}"
create_home: yes
group: wheel
group: "{{ AUR_BUILDER_GROUP }}"

- name: Allow the `aur_builder` user to run `sudo pacman` without a password
- name: Allow AUR builder to run pacman without password
become: true
ansible.builtin.lineinfile:
path: /etc/sudoers.d/11-install-aur_builder
line: 'aur_builder ALL=(ALL) NOPASSWD: /usr/bin/pacman'
path: "{{ AUR_BUILDER_SUDOERS_PATH }}"
line: '{{ AUR_BUILDER_USER }} ALL=(ALL) NOPASSWD: /usr/bin/pacman'
create: yes
validate: 'visudo -cf %s'

- name: Clone yay from AUR
become: true
become_user: aur_builder
become_user: "{{ AUR_BUILDER_USER }}"
git:
repo: https://aur.archlinux.org/yay.git
dest: /home/aur_builder/yay
dest: "/home/{{ AUR_BUILDER_USER }}/yay"
clone: yes
update: yes

- name: Build and install yay
become: true
become_user: aur_builder
become_user: "{{ AUR_BUILDER_USER }}"
shell: |
cd /home/aur_builder/yay
cd /home/{{ AUR_BUILDER_USER }}/yay
makepkg -si --noconfirm
args:
creates: /usr/bin/yay

- name: upgrade the system using yay, only act on AUR packages.
become: true
become_user: "{{ AUR_BUILDER_USER }}"
kewlfft.aur.aur:
upgrade: yes
use: "{{ AUR_HELPER }}"
aur_only: yes
when: MODE_UPDATE | bool

- include_tasks: utils/run_once.yml
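For orientation, the task sequence above corresponds roughly to this manual procedure on an Arch host (a sketch; it assumes the default `aur_builder` user and sudoers rule from the role defaults are already in place):

```sh
sudo pacman -S --needed base-devel patch git
sudo -u aur_builder git clone https://aur.archlinux.org/yay.git /home/aur_builder/yay
# makepkg calls "sudo pacman -U" internally, which the NOPASSWD rule permits.
sudo -u aur_builder bash -c 'cd /home/aur_builder/yay && makepkg -si --noconfirm'
```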
@@ -1,5 +1,3 @@
- block:
- include_tasks: 01_core.yml
- set_fact:
run_once_dev_yay: true
when: run_once_dev_yay is not defined
roles/svc-bkp-rmt-2-loc/__init__.py (0, new file)
roles/svc-bkp-rmt-2-loc/files/__init__.py (0, new file)
roles/svc-bkp-rmt-2-loc/files/pull-specific-host.py (132, new file)
@@ -0,0 +1,132 @@
#!/usr/bin/env python3
import argparse
import os
import subprocess
import time
import sys

def run_command(command, capture_output=True, check=False, shell=True):
"""Run a shell command and return its output as string."""
try:
result = subprocess.run(
command,
capture_output=capture_output,
shell=shell,
text=True,
check=check
)
return result.stdout.strip()
except subprocess.CalledProcessError as e:
if capture_output:
print(e.stdout)
print(e.stderr)
raise

def pull_backups(hostname: str):
print(f"pulling backups from: {hostname}")
errors = 0

print("loading meta data...")
remote_host = f"backup@{hostname}"
print(f"host address: {remote_host}")

remote_machine_id = run_command(f'ssh "{remote_host}" sha256sum /etc/machine-id')[:64]
print(f"remote machine id: {remote_machine_id}")

general_backup_machine_dir = f"/Backups/{remote_machine_id}/"
print(f"backup dir: {general_backup_machine_dir}")

try:
remote_backup_types = run_command(
f'ssh "{remote_host}" "find {general_backup_machine_dir} -maxdepth 1 -type d -execdir basename {{}} ;"'
).splitlines()
print(f"backup types: {' '.join(remote_backup_types)}")
except subprocess.CalledProcessError:
sys.exit(1)

for backup_type in remote_backup_types:
if backup_type == remote_machine_id:
continue

print(f"backup type: {backup_type}")

general_backup_type_dir = f"{general_backup_machine_dir}{backup_type}/"
general_versions_dir = general_backup_type_dir

# local previous version
try:
local_previous_version_dir = run_command(f"ls -d {general_versions_dir}* | tail -1")
except subprocess.CalledProcessError:
local_previous_version_dir = ""
print(f"last local backup: {local_previous_version_dir}")

# remote versions
remote_backup_versions = run_command(
f'ssh "{remote_host}" "ls -d /Backups/{remote_machine_id}/backup-docker-to-local/*"'
).splitlines()
print(f"remote backup versions: {' '.join(remote_backup_versions)}")

remote_last_backup_dir = remote_backup_versions[-1] if remote_backup_versions else ""
print(f"last remote backup: {remote_last_backup_dir}")

remote_source_path = f"{remote_host}:{remote_last_backup_dir}/"
print(f"source path: {remote_source_path}")

local_backup_destination_path = remote_last_backup_dir
print(f"backup destination: {local_backup_destination_path}")

print("creating local backup destination folder...")
os.makedirs(local_backup_destination_path, exist_ok=True)

rsync_command = (
f'rsync -abP --delete --delete-excluded --rsync-path="sudo rsync" '
f'--link-dest="{local_previous_version_dir}" "{remote_source_path}" "{local_backup_destination_path}"'
)
print("starting backup...")
print(f"executing: {rsync_command}")

retry_count = 0
max_retries = 12
retry_delay = 300 # 5 minutes
last_retry_start = 0
max_retry_duration = 43200 # 12 hours

rsync_exit_code = 1
while retry_count < max_retries:
print(f"Retry attempt: {retry_count + 1}")
if retry_count > 0:
current_time = int(time.time())
last_retry_duration = current_time - last_retry_start
if last_retry_duration >= max_retry_duration:
print("Last retry took more than 12 hours, increasing max retries to 12.")
max_retries = 12
last_retry_start = int(time.time())
rsync_exit_code = os.system(rsync_command)
if rsync_exit_code == 0:
break
retry_count += 1
time.sleep(retry_delay)

if rsync_exit_code != 0:
print(f"Error: rsync failed after {max_retries} attempts")
errors += 1

sys.exit(errors)

def main():
parser = argparse.ArgumentParser(
description="Pull backups from a remote backup host via rsync."
)
parser.add_argument(
"hostname",
help="Hostname from which backup should be pulled"
)
args = parser.parse_args()
pull_backups(args.hostname)

if __name__ == "__main__":
main()
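A typical invocation of the new script (hostname is illustrative); it connects as the `backup` user on the remote side and exits with the number of failed backup types, exactly as the wrapper template further down calls it:

```sh
python3 pull-specific-host.py backup-source.example.com
```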
@@ -1,85 +0,0 @@
#!/bin/bash
# @param $1 hostname from which backup should be pulled

echo "pulling backups from: $1" &&

# error counter
errors=0 &&

echo "loading meta data..." &&

remote_host="backup@$1" &&
echo "host address: $remote_host" &&

remote_machine_id="$( (ssh "$remote_host" sha256sum /etc/machine-id) | head -c 64 )" &&
echo "remote machine id: $remote_machine_id" &&

general_backup_machine_dir="/Backups/$remote_machine_id/" &&
echo "backup dir: $general_backup_machine_dir" &&

remote_backup_types="$(ssh "$remote_host" "find $general_backup_machine_dir -maxdepth 1 -type d -execdir basename {} ;")" &&
echo "backup types: $remote_backup_types" || exit 1

for backup_type in $remote_backup_types; do
if [ "$backup_type" != "$remote_machine_id" ]; then
echo "backup type: $backup_type" &&

general_backup_type_dir="$general_backup_machine_dir""$backup_type/" &&
general_versions_dir="$general_backup_type_dir" &&
local_previous_version_dir="$(ls -d $general_versions_dir* | tail -1)" &&
echo "last local backup: $local_previous_version_dir" &&

remote_backup_versions="$(ssh "$remote_host" ls -d "$general_backup_type_dir"\*)" &&
echo "remote backup versions: $remote_backup_versions" &&

remote_last_backup_dir=$(echo "$remote_backup_versions" | tail -1) &&
echo "last remote backup: $remote_last_backup_dir" &&

remote_source_path="$remote_host:$remote_last_backup_dir/" &&
echo "source path: $remote_source_path" &&

local_backup_destination_path=$remote_last_backup_dir &&
echo "backup destination: $local_backup_destination_path" &&

echo "creating local backup destination folder..." &&
mkdir -vp "$local_backup_destination_path" &&

echo "starting backup..."
rsync_command='rsync -abP --delete --delete-excluded --rsync-path="sudo rsync" --link-dest="'$local_previous_version_dir'" "'$remote_source_path'" "'$local_backup_destination_path'"'

echo "executing: $rsync_command"

retry_count=0
max_retries=12
retry_delay=300 # Retry delay in seconds (5 minutes)
last_retry_start=0
max_retry_duration=43200 # Maximum duration for a single retry attempt (12 hours)

while [[ $retry_count -lt $max_retries ]]; do
echo "Retry attempt: $((retry_count + 1))"
if [[ $retry_count -gt 0 ]]; then
current_time=$(date +%s)
last_retry_duration=$((current_time - last_retry_start))
if [[ $last_retry_duration -ge $max_retry_duration ]]; then
echo "Last retry took more than 12 hours, increasing max retries to 12."
max_retries=12
fi
fi
last_retry_start=$(date +%s)
eval "$rsync_command"
rsync_exit_code=$?
if [[ $rsync_exit_code -eq 0 ]]; then
break
fi
retry_count=$((retry_count + 1))
sleep $retry_delay
done

if [[ $rsync_exit_code -ne 0 ]]; then
echo "Error: rsync failed after $max_retries attempts"
((errors += 1))
fi
fi
done
exit $errors;
@@ -10,15 +10,15 @@
- include_tasks: utils/run_once.yml
when: run_once_svc_bkp_rmt_2_loc is not defined

- name: "create {{ DOCKER_BACKUP_REMOTE_2_LOCAL_DIR }}"
- name: "Create Directory '{{ DOCKER_BACKUP_REMOTE_2_LOCAL_DIR }}'"
file:
path: "{{ DOCKER_BACKUP_REMOTE_2_LOCAL_DIR }}"
state: directory
mode: "0755"

- name: create svc-bkp-rmt-2-loc.sh
- name: "Deploy '{{ DOCKER_BACKUP_REMOTE_2_LOCAL_SCRIPT }}'"
copy:
src: svc-bkp-rmt-2-loc.sh
src: "{{ DOCKER_BACKUP_REMOTE_2_LOCAL_FILE }}"
dest: "{{ DOCKER_BACKUP_REMOTE_2_LOCAL_SCRIPT }}"
mode: "0755"
@@ -3,6 +3,6 @@
hosts="{{ DOCKER_BACKUP_REMOTE_2_LOCAL_BACKUP_PROVIDERS | join(' ') }}";
errors=0
for host in $hosts; do
bash {{ DOCKER_BACKUP_REMOTE_2_LOCAL_SCRIPT }} $host || ((errors+=1));
python {{ DOCKER_BACKUP_REMOTE_2_LOCAL_SCRIPT }} $host || ((errors+=1));
done;
exit $errors;
@@ -1,5 +1,9 @@
# General
application_id: svc-bkp-rmt-2-loc
system_service_id: "{{ application_id }}"
system_service_id: "{{ application_id }}"

# Role Specific
DOCKER_BACKUP_REMOTE_2_LOCAL_DIR: '{{ PATH_ADMINISTRATOR_SCRIPTS }}{{ application_id }}/'
DOCKER_BACKUP_REMOTE_2_LOCAL_SCRIPT: "{{ DOCKER_BACKUP_REMOTE_2_LOCAL_DIR }}svc-bkp-rmt-2-loc.sh"
DOCKER_BACKUP_REMOTE_2_LOCAL_FILE: 'pull-specific-host.py'
DOCKER_BACKUP_REMOTE_2_LOCAL_SCRIPT: "{{ [ DOCKER_BACKUP_REMOTE_2_LOCAL_DIR , DOCKER_BACKUP_REMOTE_2_LOCAL_FILE ] | path_join }}"
DOCKER_BACKUP_REMOTE_2_LOCAL_BACKUP_PROVIDERS: "{{ applications | get_app_conf(application_id, 'backup_providers') }}"
@@ -5,9 +5,14 @@ network:
docker:
services:
openldap:
image: "bitnami/openldap"
image: "bitnamilegacy/openldap"
name: "openldap"
version: "latest"
cpus: 1.25
# Optimized up to 5k user
mem_reservation: 1g
mem_limit: 1.5g
pids_limit: 1024
network: "openldap"
volumes:
data: "openldap_data"
@@ -6,7 +6,7 @@ docker:
name: postgres
# Please set an version in your inventory file!
# Rolling release isn't recommended
version: "latest"
version: "17-3.5"
backup:
database_routine: true
cpus: "2.0"

@@ -14,5 +14,5 @@ docker:
mem_limit: "6g"
pids_limit: 1024
volumes:
data: "postgres_data"
network: "postgres"
data: "postgres_data"
network: "postgres"
@@ -5,7 +5,7 @@ RUN apt-get update \
&& apt-get install -y --no-install-recommends \
build-essential \
git \
postgresql-server-dev-all \
postgresql-server-dev-{{ POSTGRES_VERSION_MAJOR | default('all', true) }} \
&& git clone https://github.com/pgvector/pgvector.git /tmp/pgvector \
&& cd /tmp/pgvector \
&& make \
@@ -1,5 +1,6 @@
# General
application_id: svc-db-postgres
entity_name: "{{ application_id | get_entity_name }}"

# Docker
docker_compose_flush_handlers: true

@@ -9,11 +10,12 @@ database_type: "{{ application_id | get_entity_name }

## Postgres
POSTGRES_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.data') }}"
POSTGRES_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.name') }}"
POSTGRES_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.image') }}"
POSTGRES_SUBNET: "{{ networks.local['svc-db-postgres'].subnet }}"
POSTGRES_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.' ~ entity_name ~ '.name') }}"
POSTGRES_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.' ~ entity_name ~ '.image') }}"
POSTGRES_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.' ~ entity_name ~ '.version') }}"
POSTGRES_VERSION_MAJOR: "{{ POSTGRES_VERSION | regex_replace('^([0-9]+).*', '\\1') }}"
POSTGRES_NETWORK_NAME: "{{ applications | get_app_conf(application_id, 'docker.network') }}"
POSTGRES_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.version') }}"
POSTGRES_SUBNET: "{{ networks.local['svc-db-postgres'].subnet }}"
POSTGRES_PASSWORD: "{{ applications | get_app_conf(application_id, 'credentials.POSTGRES_PASSWORD') }}"
POSTGRES_PORT: "{{ database_port | default(ports.localhost.database[ application_id ]) }}"
POSTGRES_INIT: "{{ database_username is defined and database_password is defined and database_name is defined }}"
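`POSTGRES_VERSION_MAJOR` keeps only the leading digits of the pinned version string, which the Dockerfile hunk above uses to select the matching `postgresql-server-dev-*` package. Illustration with assumed inputs:

```yaml
# "17-3.5" | regex_replace('^([0-9]+).*', '\\1')  ->  "17"
# "16"     | regex_replace('^([0-9]+).*', '\\1')  ->  "16"
```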
@@ -16,5 +16,12 @@
retries: 30
networks:
- default
{{ lookup('template', 'roles/docker-container/templates/resource.yml.j2',vars={'service_name':'redis'}) | indent(4) }}
{% macro include_resource_for(svc, indent=4) -%}
{% set service_name = svc -%}
{%- set _snippet -%}
{% include 'roles/docker-container/templates/resource.yml.j2' %}
{%- endset -%}
{{ _snippet | indent(indent, true) }}
{%- endmacro %}
{{ include_resource_for('redis') }}
{{ "\n" }}
@@ -13,7 +13,7 @@ get_backup_types="find /Backups/$hashed_machine_id/ -maxdepth 1 -type d -execdir

# @todo This configuration is not scalable yet. If other backup services then sys-ctl-bkp-docker-2-loc are integrated, this logic needs to be optimized
get_version_directories="ls -d /Backups/$hashed_machine_id/sys-ctl-bkp-docker-2-loc/*"
get_version_directories="ls -d /Backups/$hashed_machine_id/backup-docker-to-local/*"
last_version_directory="$($get_version_directories | tail -1)"
rsync_command="sudo rsync --server --sender -blogDtpre.iLsfxCIvu . $last_version_directory/"
@@ -3,30 +3,6 @@
name: backup
create_home: yes

- name: create .ssh directory
file:
path: /home/backup/.ssh
state: directory
owner: backup
group: backup
mode: '0700'

- name: create /home/backup/.ssh/authorized_keys
template:
src: "authorized_keys.j2"
dest: /home/backup/.ssh/authorized_keys
owner: backup
group: backup
mode: '0644'

- name: create /home/backup/ssh-wrapper.sh
copy:
src: "ssh-wrapper.sh"
dest: /home/backup/ssh-wrapper.sh
owner: backup
group: backup
mode: '0700'

- name: grant backup sudo rights
copy:
src: "backup"

@@ -35,3 +11,9 @@
owner: root
group: root
notify: sshd restart

- include_tasks: 02_permissions_ssh.yml

- include_tasks: 03_permissions_folders.yml

- include_tasks: utils/run_once.yml
roles/sys-bkp-provider-user/tasks/02_permissions_ssh.yml (23, new file)

@@ -0,0 +1,23 @@
- name: create .ssh directory
file:
path: /home/backup/.ssh
state: directory
owner: backup
group: backup
mode: '0700'

- name: create /home/backup/.ssh/authorized_keys
template:
src: "authorized_keys.j2"
dest: /home/backup/.ssh/authorized_keys
owner: backup
group: backup
mode: '0644'

- name: create /home/backup/ssh-wrapper.sh
copy:
src: "ssh-wrapper.sh"
dest: /home/backup/ssh-wrapper.sh
owner: backup
group: backup
mode: '0700'
roles/sys-bkp-provider-user/tasks/03_permissions_folders.yml (66, new file)

@@ -0,0 +1,66 @@
# Ensure the backups root exists and is owned by backup
- name: Ensure backups root exists and owned by backup
file:
path: "{{ BACKUPS_FOLDER_PATH }}"
state: directory
owner: backup
group: backup
mode: "0700"

# Explicit ACL so 'backup' has rwx, others none
- name: Grant ACL rwx on backups root to backup user
ansible.posix.acl:
path: "{{ BACKUPS_FOLDER_PATH }}"
entity: backup
etype: user
permissions: rwx
state: present

# Set default ACLs so new entries inherit rwx for backup and nothing for others
- name: Set default ACL (inherit) for backup user under backups root
ansible.posix.acl:
path: "{{ BACKUPS_FOLDER_PATH }}"
entity: backup
etype: user
permissions: rwx
default: true
state: present

# Remove default ACLs for group/others (defensive hardening)
# Default ACLs so new entries inherit only backup's rwx
- name: Default ACL for backup user (inherit)
ansible.posix.acl:
path: "{{ BACKUPS_FOLDER_PATH }}"
etype: user
entity: backup
permissions: rwx
default: true
state: present

# Explicitly set default group/other to no permissions (instead of absent)
- name: Default ACL for group -> none
ansible.posix.acl:
path: "{{ BACKUPS_FOLDER_PATH }}"
etype: group
permissions: '---'
default: true
state: present

- name: Default ACL for other -> none
ansible.posix.acl:
path: "{{ BACKUPS_FOLDER_PATH }}"
etype: other
permissions: '---'
default: true
state: present

- name: Fix ownership level 0..2 directories to backup:backup
ansible.builtin.shell: >
find "{{ BACKUPS_FOLDER_PATH }}" -mindepth 0 -maxdepth 2 -xdev -type d -exec chown backup:backup {} +
changed_when: false

- name: Fix perms level 0..2 directories to 0700
ansible.builtin.shell: >
find "{{ BACKUPS_FOLDER_PATH }}" -mindepth 0 -maxdepth 2 -xdev -type d -exec chmod 700 {} +
changed_when: false
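After these tasks run, the ACL on the backups root should look roughly like this (a sketch, abridged output; `BACKUPS_FOLDER_PATH` assumed to be `/Backups`):

```sh
getfacl /Backups
# owner: backup
# group: backup
user::rwx
user:backup:rwx
group::---
other::---
default:user:backup:rwx
default:group::---
default:other::---
```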
@@ -1,4 +1,2 @@
- block:
- include_tasks: 01_core.yml
- include_tasks: utils/run_once.yml
- include_tasks: 01_core.yml
when: run_once_sys_bkp_provider_user is not defined
@@ -5,21 +5,23 @@
- sys-ctl-alm-telegram
- sys-ctl-alm-email
vars:
flush_handlers: true
system_service_timer_enabled: false
system_service_copy_files: true
system_service_tpl_exec_start: "{{ system_service_script_exec }} %I"
system_service_tpl_on_failure: ""
flush_handlers: true
system_service_timer_enabled: false
system_service_copy_files: true
system_service_tpl_exec_start: "{{ system_service_script_exec }} %I"
system_service_tpl_on_failure: ""
system_service_force_linear_sync: false

- name: "Include core service for '{{ system_service_id }}'"
include_role:
name: sys-service
vars:
flush_handlers: true
system_service_timer_enabled: false
system_service_copy_files: true
system_service_tpl_exec_start: "{{ system_service_script_exec }} %I"
system_service_tpl_on_failure: "" # No on failure needed, because it's anyhow the default on failure procedure
flush_handlers: true
system_service_timer_enabled: false
system_service_copy_files: true
system_service_tpl_exec_start: "{{ system_service_script_exec }} %I"
system_service_tpl_on_failure: "" # No on failure needed, because it's anyhow the default on failure procedure
system_service_force_linear_sync: false

- name: Assert '{{ system_service_id }}'
block:
@@ -1,8 +1,7 @@
- name: Include dependencies
include_role:
name: '{{ item }}'
loop:
- sys-svc-msmtp
name: "sys-svc-msmtp"
when: run_once_sys_svc_msmtp is not defined or run_once_sys_svc_msmtp is false

- include_role:
name: sys-service
@@ -19,6 +19,8 @@
vars:
system_service_copy_files: false
system_service_timer_enabled: true
system_service_force_linear_sync: true
system_service_force_flush: "{{ MODE_BACKUP | bool }}"
system_service_on_calendar: "{{ SYS_SCHEDULE_BACKUP_DOCKER_TO_LOCAL }}"
system_service_tpl_exec_start_pre: '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(" ") }} --ignore {{ SYS_SERVICE_BACKUP_DOCKER_2_LOC }} --timeout "{{ SYS_TIMEOUT_BACKUP_SERVICES }}"'
system_service_tpl_exec_start: "/bin/sh -c '{{ BKP_DOCKER_2_LOC_EXEC }}'"
@@ -1,9 +1,6 @@
- block:
- include_tasks: 01_core.yml
when:
- run_once_sys_ctl_bkp_docker_2_loc is not defined
- include_tasks: 01_core.yml
when: run_once_sys_ctl_bkp_docker_2_loc is not defined

- name: "include 04_seed-database-to-backup.yml"
include_tasks: 04_seed-database-to-backup.yml
when:
- BKP_DOCKER_2_LOC_DB_ENABLED | bool
when: BKP_DOCKER_2_LOC_DB_ENABLED | bool
@@ -12,6 +12,7 @@
system_service_tpl_exec_start: dockreap --no-confirmation
system_service_tpl_exec_start_pre: "" # Anonymous volumes can allways be removed. It isn't necessary to wait for any service to stop.
system_service_copy_files: false
system_service_force_linear_sync: false

- include_tasks: utils/run_once.yml
when:
@@ -17,9 +17,10 @@
name: sys-service
vars:
system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
system_service_tpl_exec_start: "{{ system_service_script_exec }} --backups-folder-path {{ BACKUPS_FOLDER_PATH }} --maximum-backup-size-percent {{SIZE_PERCENT_MAXIMUM_BACKUP}}"
system_service_tpl_exec_start: "{{ system_service_script_exec }} --backups-folder-path {{ BACKUPS_FOLDER_PATH }} --maximum-backup-size-percent {{ SIZE_PERCENT_MAXIMUM_BACKUP }}"
system_service_tpl_exec_start_pre: '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(" ") }} --ignore {{ SYS_SERVICE_GROUP_CLEANUP | join(" ") }} --timeout "{{ SYS_TIMEOUT_BACKUP_SERVICES }}"'
system_service_copy_files: true
system_service_force_linear_sync: false

- include_tasks: utils/run_once.yml
vars:
@@ -14,6 +14,7 @@
- include_role:
name: sys-service
vars:
system_service_timer_enabled: true
system_service_on_calendar: "{{ SYS_SCHEDULE_CLEANUP_CERTS }}"
system_service_copy_files: false
system_service_timer_enabled: true
system_service_on_calendar: "{{ SYS_SCHEDULE_CLEANUP_CERTS }}"
system_service_copy_files: false
system_service_force_linear_sync: false
@@ -14,3 +14,4 @@
system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
system_service_tpl_exec_start: "{{ system_service_script_exec }} {{ SIZE_PERCENT_CLEANUP_DISC_SPACE }}"
system_service_tpl_exec_start_pre: '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(" ") }} --ignore {{ SYS_SERVICE_GROUP_CLEANUP | join(" ") }} --timeout "{{ SYS_TIMEOUT_BACKUP_SERVICES }}"'
system_service_force_linear_sync: false
@@ -39,6 +39,18 @@ if [ "$force_freeing" = true ]; then
docker exec -u www-data $nextcloud_application_container /var/www/html/occ versions:cleanup || exit 6
fi

# Mastodon cleanup (remote media cache)
mastodon_application_container="{{ applications | get_app_conf('web-app-mastodon', 'docker.services.mastodon.name') }}"
mastodon_cleanup_days="1"

if [ -n "$mastodon_application_container" ] && docker ps -a --format '{% raw %}{{.Names}}{% endraw %}' | grep -qw "$mastodon_application_container"; then
echo "Cleaning up Mastodon media cache (older than ${mastodon_cleanup_days} days)" &&
docker exec -u root "$mastodon_application_container" bash -lc "bin/tootctl media remove --days=${mastodon_cleanup_days}" || exit 8

# Optional: additionally remove local thumbnail/cache files older than X days
# Warning: these will be regenerated when accessed, which may cause extra CPU/I/O load
# docker exec -u root "$mastodon_application_container" bash -lc "find /mastodon/public/system/cache -type f -mtime +${mastodon_cleanup_days} -delete" || exit 9
fi
fi

if command -v pacman >/dev/null 2>&1 ; then
@@ -21,5 +21,5 @@
system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
system_service_tpl_exec_start_pre: '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(" ") }} --ignore {{ SYS_SERVICE_GROUP_CLEANUP| join(" ") }} --timeout "{{ SYS_TIMEOUT_CLEANUP_SERVICES }}"'
system_service_tpl_exec_start: '/bin/sh -c "{{ CLEANUP_FAILED_BACKUPS_PKG }} --all --workers {{ CLEANUP_FAILED_BACKUPS_WORKERS }} --yes"'

system_service_force_linear_sync: false
- include_tasks: utils/run_once.yml
@@ -14,6 +14,32 @@ Designed for Archlinux systems, this role periodically checks whether web resour
- **Domain Extraction:** Parses all `.conf` files in the NGINX config folder to determine the list of domains to check.
- **Automated Execution:** Registers a systemd service and timer for recurring health checks.
- **Error Notification:** Integrates with `sys-ctl-alm-compose` for alerting on failure.
- **Ignore List Support:** Optional variable to suppress network block reports from specific external domains.

## Configuration

### Variables

- **`HEALTH_CSP_IGNORE_NETWORK_BLOCKS_FROM`** (list, default: `[]`)
  Optional list of domains whose network block failures (e.g., ORB) should be ignored during CSP checks.

  Example:

  ```yaml
  HEALTH_CSP_IGNORE_NETWORK_BLOCKS_FROM:
    - pxscdn.com
    - cdn.example.org
  ```

  This will run the CSP checker with:

  ```bash
  checkcsp start --short --ignore-network-blocks-from pxscdn.com cdn.example.org -- <domains...>
  ```

### Systemd Integration

The role configures a systemd service and timer which execute the CSP crawler periodically against all NGINX domains.
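For reference, the same check can also be run by hand; a minimal sketch, assuming the role's Python script has already been deployed (script name and paths are illustrative):

```bash
python3 sys-ctl-hlth-csp.py \
  --nginx-config-dir /etc/nginx/conf.d/servers \
  --ignore-network-blocks-from pxscdn.com cdn.example.org
```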
## License

@@ -24,4 +50,4 @@ Infinito.Nexus NonCommercial License

Kevin Veen-Birkenbach
Consulting & Coaching Solutions
[https://www.veen.world](https://www.veen.world)
[https://www.veen.world](https://www.veen.world)
roles/sys-ctl-hlth-csp/defaults/main.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
# List of domains whose network block failures (e.g., ORB) should be ignored
# during CSP checks. This is useful for suppressing known external resources
# (e.g., third-party CDNs) that cannot be influenced but otherwise cause
# unnecessary alerts in the crawler reports.
HEALTH_CSP_IGNORE_NETWORK_BLOCKS_FROM: []
@@ -21,11 +21,20 @@ def extract_domains(config_path):
        print(f"Directory {config_path} not found.", file=sys.stderr)
        return None

def run_checkcsp(domains):
def run_checkcsp(domains, ignore_network_blocks_from):
    """
    Executes the 'checkcsp' command with the given domains.
    Executes the 'checkcsp' command with the given domains and optional ignores.
    """
    cmd = ["checkcsp", "start", "--short"] + domains
    cmd = ["checkcsp", "start", "--short"]

    # pass through ignore list only if not empty
    if ignore_network_blocks_from:
        cmd.append("--ignore-network-blocks-from")
        cmd.extend(ignore_network_blocks_from)
        cmd.append("--")

    cmd += domains

    try:
        result = subprocess.run(cmd, check=True)
        return result.returncode
@@ -45,6 +54,12 @@ def main():
        required=True,
        help="Directory containing NGINX .conf files"
    )
    parser.add_argument(
        "--ignore-network-blocks-from",
        nargs="*",
        default=[],
        help="Optional: one or more domains whose network block failures should be ignored"
    )
    args = parser.parse_args()

    domains = extract_domains(args.nginx_config_dir)
@@ -55,7 +70,7 @@ def main():
        print("No domains found to check.")
        sys.exit(0)

    rc = run_checkcsp(domains)
    rc = run_checkcsp(domains, args.ignore_network_blocks_from)
    sys.exit(rc)

if __name__ == "__main__":
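To make the flag handling above concrete, a small sketch of the argument list that `run_checkcsp()` assembles when an ignore list is supplied (domain names are illustrative):

```python
# Equivalent of run_checkcsp(["example.org", "cloud.example.org"], ["pxscdn.com"])
cmd = [
    "checkcsp", "start", "--short",
    "--ignore-network-blocks-from", "pxscdn.com",
    "--",  # terminates the variadic ignore list so the domains are not swallowed
    "example.org", "cloud.example.org",
]
```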
@@ -18,6 +18,9 @@
    system_service_timer_enabled: true
    system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
    system_service_tpl_timeout_start_sec: "{{ CURRENT_PLAY_DOMAINS_ALL | timeout_start_sec_for_domains }}"
    system_service_tpl_exec_start: "{{ system_service_script_exec }} --nginx-config-dir={{ NGINX.DIRECTORIES.HTTP.SERVERS }}"
    system_service_tpl_exec_start: >-
      {{ system_service_script_exec }}
      --nginx-config-dir={{ NGINX.DIRECTORIES.HTTP.SERVERS }}
      --ignore-network-blocks-from {{ HEALTH_CSP_IGNORE_NETWORK_BLOCKS_FROM | join(' ') }}

- include_tasks: utils/run_once.yml
@@ -8,8 +8,9 @@
|
||||
- include_role:
|
||||
name: sys-service
|
||||
vars:
|
||||
system_service_state: restarted
|
||||
system_service_on_calendar: "{{ SYS_SCHEDULE_MAINTANANCE_LETSENCRYPT_DEPLOY }}"
|
||||
persistent: "true"
|
||||
system_service_timer_enabled: true
|
||||
system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
|
||||
system_service_state: restarted
|
||||
system_service_on_calendar: "{{ SYS_SCHEDULE_MAINTANANCE_LETSENCRYPT_DEPLOY }}"
|
||||
persistent: "true"
|
||||
system_service_timer_enabled: true
|
||||
system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
|
||||
system_service_force_linear_sync: false
|
||||
@@ -15,8 +15,9 @@
|
||||
- include_role:
|
||||
name: sys-service
|
||||
vars:
|
||||
system_service_copy_files: false
|
||||
system_service_on_calendar: "{{ SYS_SCHEDULE_MAINTANANCE_LETSENCRYPT_RENEW }}"
|
||||
persistent: true
|
||||
system_service_timer_enabled: true
|
||||
system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
|
||||
system_service_copy_files: false
|
||||
system_service_on_calendar: "{{ SYS_SCHEDULE_MAINTANANCE_LETSENCRYPT_RENEW }}"
|
||||
persistent: true
|
||||
system_service_timer_enabled: true
|
||||
system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
|
||||
system_service_force_linear_sync: false
|
||||
|
||||
@@ -12,9 +12,10 @@
|
||||
- include_role:
|
||||
name: sys-service
|
||||
vars:
|
||||
system_service_suppress_flush: true # It takes a super long time - Better wait for failure of timed service instead of executing it on every play
|
||||
system_service_copy_files: false
|
||||
system_service_on_calendar: "{{ SYS_SCHEDULE_REPAIR_BTRFS_AUTO_BALANCER }}"
|
||||
system_service_timer_enabled: true
|
||||
system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
|
||||
system_service_tpl_exec_start: "/bin/sh -c 'btrfs-auto-balancer 90 10'"
|
||||
system_service_suppress_flush: true # It takes a super long time - Better wait for failure of timed service instead of executing it on every play
|
||||
system_service_copy_files: false
|
||||
system_service_on_calendar: "{{ SYS_SCHEDULE_REPAIR_BTRFS_AUTO_BALANCER }}"
|
||||
system_service_timer_enabled: true
|
||||
system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
|
||||
system_service_tpl_exec_start: "/bin/sh -c 'btrfs-auto-balancer 90 10'"
|
||||
system_service_force_linear_sync: true
|
||||
@@ -12,5 +12,6 @@
|
||||
system_service_tpl_exec_start: '{{ system_service_script_exec }} {{ PATH_DOCKER_COMPOSE_INSTANCES }}'
|
||||
system_service_tpl_exec_start_post: "/usr/bin/systemctl start {{ SYS_SERVICE_CLEANUP_ANONYMOUS_VOLUMES }}"
|
||||
system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
|
||||
system_service_force_linear_sync: true
|
||||
|
||||
- include_tasks: utils/run_once.yml
|
||||
|
||||
@@ -10,5 +10,6 @@
|
||||
system_service_tpl_exec_start_pre: "/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(' ') }} --ignore {{ SYS_SERVICE_GROUP_CLEANUP| join(' ') }} {{ SYS_SERVICE_REPAIR_DOCKER_SOFT }} --timeout '{{ SYS_TIMEOUT_DOCKER_RPR_SOFT }}'"
|
||||
system_service_tpl_exec_start: >
|
||||
/bin/sh -c '{{ system_service_script_exec }} --manipulation-string "{{ SYS_SERVICE_GROUP_MANIPULATION | join(" ") }}" {{ PATH_DOCKER_COMPOSE_INSTANCES }}'
|
||||
system_service_force_linear_sync: true
|
||||
|
||||
- include_tasks: utils/run_once.yml
|
||||
|
||||
roles/sys-front-inj-all/tasks/01_dependencies.yml (new file, 16 lines)
@@ -0,0 +1,16 @@
- name: "Load CDN for '{{ domain }}'"
  include_role:
    name: web-svc-cdn
    public: false
  when:
    - application_id != 'web-svc-cdn'
    - run_once_web_svc_cdn is not defined

- name: Load Logout for '{{ domain }}'
  include_role:
    name: web-svc-logout
    public: false
  when:
    - run_once_web_svc_logout is not defined
    - application_id != 'web-svc-logout'
    - inj_enabled.logout
@@ -1,22 +1,41 @@
|
||||
- block:
|
||||
- name: Include dependency 'sys-svc-webserver-core'
|
||||
include_role:
|
||||
name: sys-svc-webserver-core
|
||||
when: run_once_sys_svc_webserver_core is not defined
|
||||
- include_tasks: utils/run_once.yml
|
||||
when: run_once_sys_front_inj_all is not defined
|
||||
|
||||
- name: Build inj_enabled
|
||||
set_fact:
|
||||
inj_enabled: "{{ applications | inj_enabled(application_id, SRV_WEB_INJ_COMP_FEATURES_ALL) }}"
|
||||
|
||||
- name: "Load CDN Service for '{{ domain }}'"
|
||||
include_role:
|
||||
name: sys-svc-cdn
|
||||
public: true # Expose variables so that they can be used in all injection roles
|
||||
- name: "Included dependent services"
|
||||
include_tasks: 01_dependencies.yml
|
||||
vars:
|
||||
proxy_extra_configuration: ""
|
||||
|
||||
- name: Reinitialize 'inj_enabled' for '{{ domain }}', after modification by CDN
|
||||
- name: Reinitialize 'inj_enabled' for '{{ domain }}', after loading the required webservices
|
||||
set_fact:
|
||||
inj_enabled: "{{ applications | inj_enabled(application_id, SRV_WEB_INJ_COMP_FEATURES_ALL) }}"
|
||||
inj_head_features: "{{ SRV_WEB_INJ_COMP_FEATURES_ALL | inj_features('head') }}"
|
||||
inj_body_features: "{{ SRV_WEB_INJ_COMP_FEATURES_ALL | inj_features('body') }}"
|
||||
|
||||
- name: "Load CDN Service for '{{ domain }}'"
|
||||
include_role:
|
||||
name: sys-svc-cdn
|
||||
public: true
|
||||
|
||||
- name: "Activate logout proxy for '{{ domain }}'"
|
||||
include_role:
|
||||
name: sys-front-inj-logout
|
||||
public: true
|
||||
when: inj_enabled.logout
|
||||
|
||||
- name: "Activate Desktop iFrame notifier for '{{ domain }}'"
|
||||
include_role:
|
||||
name: sys-front-inj-desktop
|
||||
public: true # Vars used in templates
|
||||
public: true
|
||||
when: inj_enabled.desktop
|
||||
|
||||
- name: "Activate Corporate CSS for '{{ domain }}'"
|
||||
@@ -33,17 +52,3 @@
|
||||
include_role:
|
||||
name: sys-front-inj-javascript
|
||||
when: inj_enabled.javascript
|
||||
|
||||
- name: "Activate logout proxy for '{{ domain }}'"
|
||||
include_role:
|
||||
name: sys-front-inj-logout
|
||||
public: true # Vars used in templates
|
||||
when: inj_enabled.logout
|
||||
|
||||
- block:
|
||||
- name: Include dependency 'sys-svc-webserver-core'
|
||||
include_role:
|
||||
name: sys-svc-webserver-core
|
||||
when: run_once_sys_svc_webserver_core is not defined
|
||||
- include_tasks: utils/run_once.yml
|
||||
when: run_once_sys_front_inj_all is not defined
|
||||
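For orientation, the fact built by the `inj_enabled` filter is consumed below as a plain feature map; an illustrative shape (keys and values are assumptions derived from the features referenced in this file):

```yaml
inj_enabled:
  css: true
  desktop: true
  javascript: false
  logout: true
  matomo: false
```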
@@ -10,17 +10,6 @@
|
||||
|
||||
lua_need_request_body on;
|
||||
|
||||
header_filter_by_lua_block {
|
||||
local ct = ngx.header.content_type or ""
|
||||
if ct:lower():find("^text/html") then
|
||||
ngx.ctx.is_html = true
|
||||
-- IMPORTANT: body will be modified → drop Content-Length to avoid mismatches
|
||||
ngx.header.content_length = nil
|
||||
else
|
||||
ngx.ctx.is_html = false
|
||||
end
|
||||
}
|
||||
|
||||
body_filter_by_lua_block {
|
||||
-- Only process HTML responses
|
||||
if not ngx.ctx.is_html then
|
||||
|
||||
@@ -1,8 +1,3 @@
|
||||
- name: Include dependency 'sys-svc-webserver-core'
|
||||
include_role:
|
||||
name: sys-svc-webserver-core
|
||||
when: run_once_sys_svc_webserver_core is not defined
|
||||
|
||||
- name: Generate color palette with colorscheme-generator
|
||||
set_fact:
|
||||
color_palette: "{{ lookup('colorscheme', CSS_BASE_COLOR, count=CSS_COUNT, shades=CSS_SHADES) }}"
|
||||
@@ -19,3 +14,5 @@
|
||||
group: "{{ NGINX.USER }}"
|
||||
mode: '0644'
|
||||
loop: "{{ CSS_FILES }}"
|
||||
|
||||
- include_tasks: utils/run_once.yml
|
||||
@@ -1,6 +1,4 @@
|
||||
- block:
|
||||
- include_tasks: 01_core.yml
|
||||
- include_tasks: utils/run_once.yml
|
||||
- include_tasks: 01_core.yml
|
||||
when: run_once_sys_front_inj_css is not defined
|
||||
|
||||
- name: "Resolve optional app style.css source for '{{ application_id }}'"
|
||||
|
||||
@@ -3,6 +3,6 @@
|
||||
{% for css_file in ['default.css','bootstrap.css'] %}
|
||||
<link rel="stylesheet" href="{{ [ cdn_urls.shared.css, css_file, lookup('local_mtime_qs', [__css_tpl_dir, css_file ~ '.j2'] | path_join)] | url_join }}">
|
||||
{% endfor %}
|
||||
{% if app_style_present | bool %}
|
||||
{% if app_style_present | default(false) | bool %}
|
||||
<link rel="stylesheet" href="{{ [ cdn_urls.role.release.css, 'style.css', lookup('local_mtime_qs', app_style_src)] | url_join }}">
|
||||
{% endif %}
|
||||
@@ -1,8 +1,4 @@
|
||||
- block:
|
||||
- name: Include dependency 'sys-svc-webserver-core'
|
||||
include_role:
|
||||
name: sys-svc-webserver-core
|
||||
when: run_once_sys_svc_webserver_core is not defined
|
||||
- include_tasks: 01_deploy.yml
|
||||
- include_tasks: utils/run_once.yml
|
||||
when: run_once_sys_front_inj_desktop is not defined
|
||||
|
||||
@@ -1,11 +1,4 @@
|
||||
- block:
|
||||
|
||||
- name: Include dependency 'sys-svc-webserver-core'
|
||||
include_role:
|
||||
name: sys-svc-webserver-core
|
||||
when: run_once_sys_svc_webserver_core is not defined
|
||||
- include_tasks: utils/run_once.yml
|
||||
when: run_once_sys_front_inj_javascript is not defined
|
||||
# run_once_sys_front_inj_javascript: deactivated
|
||||
|
||||
- name: "Load JavaScript code for '{{ application_id }}'"
|
||||
set_fact:
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
- name: Include dependency 'sys-svc-webserver-core'
|
||||
include_role:
|
||||
name: sys-svc-webserver-core
|
||||
when:
|
||||
- run_once_sys_svc_webserver_core is not defined
|
||||
|
||||
- name: "deploy the logout.js"
|
||||
include_tasks: "02_deploy.yml"
|
||||
include_tasks: "02_deploy.yml"
|
||||
|
||||
- set_fact:
|
||||
run_once_sys_front_inj_logout: true
|
||||
changed_when: false
|
||||
@@ -1,10 +1,10 @@
|
||||
- name: Deploy logout.js
|
||||
template:
|
||||
src: logout.js.j2
|
||||
dest: "{{ INJ_LOGOUT_JS_DESTINATION }}"
|
||||
owner: "{{ NGINX.USER }}"
|
||||
group: "{{ NGINX.USER }}"
|
||||
mode: '0644'
|
||||
copy:
|
||||
src: logout.js
|
||||
dest: "{{ INJ_LOGOUT_JS_DESTINATION }}"
|
||||
owner: "{{ NGINX.USER }}"
|
||||
group: "{{ NGINX.USER }}"
|
||||
mode: '0644'
|
||||
|
||||
- name: Get stat for logout.js
|
||||
stat:
|
||||
|
||||
@@ -1,16 +1,16 @@
|
||||
- block:
|
||||
- include_tasks: 01_core.yml
|
||||
- set_fact:
|
||||
run_once_sys_front_inj_logout: true
|
||||
- name: "Load base for '{{ application_id }}'"
|
||||
include_tasks: 01_core.yml
|
||||
when: run_once_sys_front_inj_logout is not defined
|
||||
|
||||
- name: "Load logout code for '{{ application_id }}'"
|
||||
set_fact:
|
||||
logout_code: "{{ lookup('template', 'logout_one_liner.js.j2') }}"
|
||||
changed_when: false
|
||||
|
||||
- name: "Collapse logout code into one-liner for '{{ application_id }}'"
|
||||
set_fact:
|
||||
logout_code_one_liner: "{{ logout_code | to_one_liner }}"
|
||||
changed_when: false
|
||||
|
||||
- name: "Append logout CSP hash for '{{ application_id }}'"
|
||||
set_fact:
|
||||
|
||||
@@ -1 +1 @@
|
||||
<script src="{{ cdn_urls.shared.js }}/{{ INJ_LOGOUT_JS_FILE_NAME }}{{ lookup('local_mtime_qs', [playbook_dir, 'roles', 'sys-front-inj-logout', 'templates', INJ_LOGOUT_JS_FILE_NAME ~ '.j2'] | path_join) }}"></script>
|
||||
<script src="{{ cdn_urls.shared.js }}/{{ INJ_LOGOUT_JS_FILE_NAME }}{{ lookup('local_mtime_qs', [playbook_dir, 'roles', 'sys-front-inj-logout', 'files', INJ_LOGOUT_JS_FILE_NAME] | path_join) }}"></script>
|
||||
|
||||
@@ -1,10 +1,4 @@
|
||||
- block:
|
||||
- name: Include dependency 'sys-svc-webserver-core'
|
||||
include_role:
|
||||
name: sys-svc-webserver-core
|
||||
when: run_once_sys_svc_webserver_core is not defined
|
||||
- include_tasks: utils/run_once.yml
|
||||
when: run_once_sys_front_inj_matomo is not defined
|
||||
# run_once_sys_front_inj_matomo: deactivated
|
||||
|
||||
- name: "Relevant variables for role: {{ role_path | basename }}"
|
||||
debug:
|
||||
|
||||
@@ -1,19 +1,19 @@
|
||||
- name: "Enable systemctl service"
|
||||
systemd:
|
||||
name: "{{ system_service_id | get_service_name(SOFTWARE_NAME) }}"
|
||||
name: "{{ system_service_name }}"
|
||||
enabled: yes
|
||||
daemon_reload: true
|
||||
become: true
|
||||
async: "{{ ASYNC_TIME if ASYNC_ENABLED | bool else omit }}"
|
||||
poll: "{{ ASYNC_POLL if ASYNC_ENABLED | bool else omit }}"
|
||||
async: "{{ system_service_async }}"
|
||||
poll: "{{ system_service_poll }}"
|
||||
listen: refresh systemctl service
|
||||
|
||||
- name: "Set systemctl service state"
|
||||
systemd:
|
||||
name: "{{ system_service_id | get_service_name(SOFTWARE_NAME) }}"
|
||||
name: "{{ system_service_name }}"
|
||||
state: "{{ system_service_state }}"
|
||||
become: true
|
||||
async: "{{ ASYNC_TIME if ASYNC_ENABLED | bool else omit }}"
|
||||
poll: "{{ ASYNC_POLL if ASYNC_ENABLED | bool else omit }}"
|
||||
async: "{{ system_service_async }}"
|
||||
poll: "{{ system_service_poll }}"
|
||||
when: not (system_service_suppress_flush | bool)
|
||||
listen: refresh systemctl service
|
||||
@@ -31,7 +31,7 @@
|
||||
- name: "setup systemctl '{{ system_service_id }}'"
|
||||
template:
|
||||
src: "{{ system_service_template_src }}"
|
||||
dest: "{{ [ PATH_SYSTEM_SERVICE_DIR, system_service_id | get_service_name(SOFTWARE_NAME) ] | path_join }}"
|
||||
dest: "{{ [ PATH_SYSTEM_SERVICE_DIR, system_service_name ] | path_join }}"
|
||||
owner: root
|
||||
group: root
|
||||
mode: '0644'
|
||||
@@ -46,5 +46,5 @@
|
||||
command: /bin/true
|
||||
notify: refresh systemctl service
|
||||
when: not system_service_uses_at
|
||||
when: system_force_flush | bool
|
||||
when: system_service_force_flush | bool
|
||||
|
||||
|
||||
@@ -1,22 +1,28 @@
|
||||
UNIT_SUFFIX_REMOVER_PACKAGE: "unsure"
|
||||
UNIT_SUFFIX_REMOVER_PACKAGE: "unsure"
|
||||
system_service_name: "{{ system_service_id | get_service_name(SOFTWARE_NAME) }}"
|
||||
|
||||
## Paths
|
||||
system_service_role_name: "{{ system_service_id | regex_replace('@','') }}"
|
||||
system_service_role_dir: "{{ [ playbook_dir, 'roles', system_service_role_name ] | path_join }}"
|
||||
system_service_script_dir: "{{ [ PATH_SYSTEMCTL_SCRIPTS, system_service_id ] | path_join }}"
|
||||
system_service_role_name: "{{ system_service_id | regex_replace('@','') }}"
|
||||
system_service_role_dir: "{{ [ playbook_dir, 'roles', system_service_role_name ] | path_join }}"
|
||||
system_service_script_dir: "{{ [ PATH_SYSTEMCTL_SCRIPTS, system_service_id ] | path_join }}"
|
||||
|
||||
## Settings
|
||||
system_force_flush: "{{ SYS_SERVICE_ALL_ENABLED | bool }}" # When set to true it activates the flushing of services. defaults to SYS_SERVICE_ALL_ENABLED
|
||||
system_service_suppress_flush: "{{ (system_service_id in SYS_SERVICE_SUPPRESS_FLUSH) | bool }}" # When set to true it suppresses the flushing of services
|
||||
system_service_copy_files: true # When set to false file copying will be skipped
|
||||
system_service_timer_enabled: false # When set to true timer will be loaded
|
||||
system_service_state: "{{ SYS_SERVICE_DEFAULT_STATE }}"
|
||||
system_service_force_linear_sync: "{{ system_service_name in SYS_SERVICE_GROUP_MANIPULATION }}" # Disables automatic async
|
||||
system_service_force_flush: "{{ SYS_SERVICE_ALL_ENABLED | bool }}" # When set to true it activates the flushing of services. defaults to SYS_SERVICE_ALL_ENABLED
|
||||
system_service_suppress_flush: "{{ (system_service_id in SYS_SERVICE_SUPPRESS_FLUSH) | bool }}" # When set to true it suppresses the flushing of services
|
||||
system_service_copy_files: true # When set to false file copying will be skipped
|
||||
system_service_timer_enabled: false # When set to true timer will be loaded
|
||||
system_service_state: "{{ SYS_SERVICE_DEFAULT_STATE }}"
|
||||
|
||||
## ASYNC Settings
|
||||
system_service_async: "{{ omit if (system_service_force_linear_sync | bool or not ASYNC_ENABLED | bool) else ASYNC_TIME }}"
|
||||
system_service_poll: "{{ omit if (system_service_force_linear_sync | bool or not ASYNC_ENABLED | bool) else ASYNC_POLL }}"
|
||||
|
||||
# Dynamic Loaded ( Just available when dependencies are loaded )
|
||||
system_service_script_base: "{{ system_service_script_src | basename | regex_replace('\\.j2$', '') }}"
|
||||
system_service_script_type: "{{ system_service_script_base | filetype }}"
|
||||
system_service_script_inter: "/bin/{{ 'bash' if system_service_script_type == 'sh' else 'python3'}}"
|
||||
system_service_script_exec: "{{ system_service_script_inter }} {{ system_service_id | get_service_script_path( system_service_script_type ) }}"
|
||||
system_service_script_base: "{{ system_service_script_src | basename | regex_replace('\\.j2$', '') }}"
|
||||
system_service_script_type: "{{ system_service_script_base | filetype }}"
|
||||
system_service_script_inter: "/bin/{{ 'bash' if system_service_script_type == 'sh' else 'python3'}}"
|
||||
system_service_script_exec: "{{ system_service_script_inter }} {{ system_service_id | get_service_script_path( system_service_script_type ) }}"
|
||||
|
||||
# Service template
|
||||
system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
|
||||
|
||||
@@ -6,10 +6,10 @@
|
||||
include_role:
|
||||
name: sys-util-csp-cert
|
||||
|
||||
- name: "Copy nginx config to '{{ FRONT_PROXY_DOMAIN_CONF_DST }}'"
|
||||
- name: "Copy nginx config to '{{ front_proxy_domain_conf_dst }}'"
|
||||
template:
|
||||
src: "{{ FRONT_PROXY_DOMAIN_CONF_SRC }}"
|
||||
dest: "{{ FRONT_PROXY_DOMAIN_CONF_DST }}"
|
||||
src: "{{ front_proxy_domain_conf_src }}"
|
||||
dest: "{{ front_proxy_domain_conf_dst }}"
|
||||
register: nginx_conf
|
||||
notify: restart openresty
|
||||
|
||||
@@ -28,4 +28,7 @@
|
||||
when:
|
||||
- site_check.status is defined
|
||||
- not site_check.status in [200,301,302]
|
||||
when: not nginx_conf.changed
|
||||
when: not nginx_conf.changed
|
||||
|
||||
- name: "Restart Webserver for '{{ front_proxy_domain_conf_dst }}'"
|
||||
meta: flush_handlers
|
||||
@@ -1,2 +1,2 @@
|
||||
FRONT_PROXY_DOMAIN_CONF_DST: "{{ [ NGINX.DIRECTORIES.HTTP.SERVERS, domain ~ '.conf'] | path_join }}"
|
||||
FRONT_PROXY_DOMAIN_CONF_SRC: "roles/sys-svc-proxy/templates/vhost/{{ vhost_flavour }}.conf.j2"
|
||||
front_proxy_domain_conf_dst: "{{ [ NGINX.DIRECTORIES.HTTP.SERVERS, domain ~ '.conf'] | path_join }}"
|
||||
front_proxy_domain_conf_src: "roles/sys-svc-proxy/templates/vhost/{{ vhost_flavour }}.conf.j2"
|
||||
|
||||
@@ -1,21 +0,0 @@
|
||||
- name: "Load CDN for '{{ domain }}'"
|
||||
include_role:
|
||||
name: web-svc-cdn
|
||||
public: false
|
||||
when:
|
||||
- application_id != 'web-svc-cdn'
|
||||
- run_once_web_svc_cdn is not defined
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Only-once creations (shared root and vendor)
|
||||
# ------------------------------------------------------------------
|
||||
- name: Ensure shared root and vendor exist (run once)
|
||||
ansible.builtin.file:
|
||||
path: "{{ item }}"
|
||||
state: directory
|
||||
owner: "{{ NGINX.USER }}"
|
||||
group: "{{ NGINX.USER }}"
|
||||
mode: "0755"
|
||||
loop: "{{ CDN_DIRS_GLOBAL }}"
|
||||
|
||||
- include_tasks: utils/run_once.yml
|
||||
@@ -1,6 +1,14 @@
|
||||
---
|
||||
- block:
|
||||
- include_tasks: 01_core.yml
|
||||
- name: Ensure shared root and vendor exist (run once)
|
||||
ansible.builtin.file:
|
||||
path: "{{ item }}"
|
||||
state: directory
|
||||
owner: "{{ NGINX.USER }}"
|
||||
group: "{{ NGINX.USER }}"
|
||||
mode: "0755"
|
||||
loop: "{{ CDN_DIRS_GLOBAL }}"
|
||||
- include_tasks: utils/run_once.yml
|
||||
when:
|
||||
- run_once_sys_svc_cdn is not defined
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
include_role:
|
||||
name: sys-ctl-cln-anon-volumes
|
||||
vars:
|
||||
system_force_flush: true
|
||||
system_service_force_flush: true
|
||||
- include_tasks: utils/run_once.yml
|
||||
when: run_once_sys_ctl_cln_anon_volumes is not defined
|
||||
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
ssl_certificate {{ [ LETSENCRYPT_LIVE_PATH, ssl_cert_folder, 'fullchain.pem'] | path_join }};
|
||||
ssl_certificate_key {{ [ LETSENCRYPT_LIVE_PATH, ssl_cert_folder, 'privkey.pem' ] | path_join }};
|
||||
ssl_trusted_certificate {{ [ LETSENCRYPT_LIVE_PATH, ssl_cert_folder, 'chain.pem' ] | path_join }};
|
||||
ssl_certificate {{ [ LETSENCRYPT_LIVE_PATH | mandatory, ssl_cert_folder | mandatory, 'fullchain.pem'] | path_join }};
|
||||
ssl_certificate_key {{ [ LETSENCRYPT_LIVE_PATH | mandatory, ssl_cert_folder | mandatory, 'privkey.pem' ] | path_join }};
|
||||
ssl_trusted_certificate {{ [ LETSENCRYPT_LIVE_PATH | mandatory, ssl_cert_folder | mandatory, 'chain.pem' ] | path_join }};
|
||||
@@ -14,4 +14,7 @@
|
||||
|
||||
- include_role:
|
||||
name: sys-ctl-hlth-msmtp
|
||||
when: run_once_sys_ctl_hlth_msmtp is not defined
|
||||
when: run_once_sys_ctl_hlth_msmtp is not defined
|
||||
|
||||
- set_fact:
|
||||
run_once_sys_svc_msmtp: true
|
||||
@@ -1,5 +1,6 @@
|
||||
- block:
|
||||
- include_tasks: 01_core.yml
|
||||
- set_fact:
|
||||
run_once_sys_svc_msmtp: true
|
||||
when: run_once_sys_svc_msmtp is not defined
|
||||
- name: "Load MSMTP Core Once"
|
||||
include_tasks: 01_core.yml
|
||||
when:
|
||||
- run_once_sys_svc_msmtp is not defined or run_once_sys_svc_msmtp is false
|
||||
# Just execute when mailu_token is defined
|
||||
- users['no-reply'].mailu_token is defined
|
||||
@@ -0,0 +1,23 @@
{# Configure CORS headers dynamically based on role variables.
   If no variable is defined, defaults are applied (e.g. same-origin).
   Discussion: https://chat.openai.com/share/2671b961-c1b0-472d-bae2-2804d0455e8a #}

{# Access-Control-Allow-Origin #}
{% if aca_origin is defined %}
add_header 'Access-Control-Allow-Origin' {{ aca_origin }};
{% endif %}

{# Access-Control-Allow-Credentials #}
{% if aca_credentials is defined %}
add_header 'Access-Control-Allow-Credentials' {{ aca_credentials }};
{% endif %}

{# Access-Control-Allow-Methods #}
{% if aca_methods is defined %}
add_header 'Access-Control-Allow-Methods' {{ aca_methods }};
{% endif %}

{# Access-Control-Allow-Headers #}
{% if aca_headers is defined %}
add_header 'Access-Control-Allow-Headers' {{ aca_headers }};
{% endif %}
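A vhost or location template that includes this snippet would opt in by defining the corresponding variables; a minimal sketch (values are assumptions, not defaults shipped by the role — each value is emitted verbatim into the nginx config, so it must carry its own quoting):

```yaml
aca_origin: "'https://app.example.org'"
aca_credentials: "'true'"
aca_methods: "'GET, POST, OPTIONS'"
aca_headers: "'Authorization, Content-Type'"
```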
@@ -1,2 +1,33 @@
add_header Content-Security-Policy "{{ applications | build_csp_header(application_id, domains) }}" always;
proxy_hide_header Content-Security-Policy; # Todo: Make this optional
# ===== Content Security Policy: only for documents and workers (no locations needed) =====

# 1) Define your CSP once (Jinja: escape double quotes to be safe)
set $csp "{{ applications | build_csp_header(application_id, domains) | replace('\"','\\\"') }}";

# 2) Send CSP ONLY for document responses; also for workers via Sec-Fetch-Dest
header_filter_by_lua_block {
    local ct = ngx.header.content_type or ngx.header["Content-Type"] or ""
    local dest = ngx.var.http_sec_fetch_dest or ""

    local lct = ct:lower()
    local is_html = lct:find("^text/html") or lct:find("^application/xhtml%+xml")
    local is_worker = (dest == "worker") or (dest == "serviceworker")

    if is_html or is_worker then
        ngx.header["Content-Security-Policy"] = ngx.var.csp
    else
        ngx.header["Content-Security-Policy"] = nil
        ngx.header["Content-Security-Policy-Report-Only"] = nil
    end

    -- If you'll modify the body later, drop Content-Length on HTML
    if is_html then
        ngx.ctx.is_html = true
        ngx.header.content_length = nil
    else
        ngx.ctx.is_html = false
    end
}

# 3) Prevent upstream/app CSP (duplicates)
proxy_hide_header Content-Security-Policy;
proxy_hide_header Content-Security-Policy-Report-Only;
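Because the header is now attached only to documents and workers, a quick manual spot check could look like this (domain and asset path are illustrative):

```bash
curl -sI https://app.example.org/ | grep -i content-security-policy                  # expected: header present
curl -sI https://app.example.org/assets/style.css | grep -i content-security-policy  # expected: no output
```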
@@ -19,6 +19,8 @@ location {{location}}
|
||||
|
||||
{% include 'roles/sys-svc-proxy/templates/headers/content_security_policy.conf.j2' %}
|
||||
|
||||
{% include 'roles/sys-svc-proxy/templates/headers/access_control_allow.conf.j2' %}
|
||||
|
||||
# WebSocket specific header
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
location {{ location_ws }} {
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_pass http://127.0.0.1:{{ ws_port }};
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_pass http://127.0.0.1:{{ ws_port }};
|
||||
|
||||
# Proxy buffering needs to be disabled for websockets.
|
||||
proxy_buffering off;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection $connection_upgrade;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection $connection_upgrade;
|
||||
tcp_nodelay on;
|
||||
}
|
||||
@@ -58,5 +58,3 @@ server
|
||||
{% endif %}
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -1,8 +1,3 @@
|
||||
map $http_upgrade $connection_upgrade {
|
||||
default upgrade;
|
||||
'' close;
|
||||
}
|
||||
|
||||
server {
|
||||
server_name {{ domain }};
|
||||
|
||||
|
||||
@@ -68,7 +68,12 @@ ChallengeResponseAuthentication no
|
||||
#KerberosGetAFSToken no
|
||||
|
||||
# GSSAPI options
|
||||
#GSSAPIAuthentication no
|
||||
# Disable GSSAPI (Kerberos) authentication to avoid unnecessary negotiation delays.
|
||||
# This setting is useful for non-domain environments where GSSAPI is not used,
|
||||
# improving SSH connection startup time and reducing overhead.
|
||||
# See: https://chatgpt.com/share/68efc179-1a10-800f-9656-1e8731b40546
|
||||
GSSAPIAuthentication no
|
||||
|
||||
#GSSAPICleanupCredentials yes
|
||||
|
||||
# Set this to 'yes' to enable PAM authentication, account processing,
|
||||
@@ -97,7 +102,13 @@ PrintMotd no # pam does that
|
||||
#Compression delayed
|
||||
#ClientAliveInterval 0
|
||||
#ClientAliveCountMax 3
|
||||
#UseDNS no
|
||||
|
||||
# Disable reverse DNS lookups to speed up SSH logins.
|
||||
# When UseDNS is enabled, sshd performs a reverse DNS lookup for each connecting client,
|
||||
# which can significantly delay authentication if DNS resolution is slow or misconfigured.
|
||||
# See: https://chatgpt.com/share/68efc179-1a10-800f-9656-1e8731b40546
|
||||
UseDNS no
|
||||
|
||||
#PidFile /run/sshd.pid
|
||||
#MaxStartups 10:30:100
|
||||
#PermitTunnel no
|
||||
|
||||
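The effect of disabling GSSAPI negotiation and reverse DNS lookups is easiest to verify by timing a fresh, non-multiplexed connection before and after the change (host is illustrative):

```bash
time ssh -o ControlMaster=no -o ControlPath=none user@server.example.org true
```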
@@ -7,12 +7,26 @@ events
|
||||
|
||||
http
|
||||
{
|
||||
{#
|
||||
Map the client's Upgrade header to the proper Connection value for WebSocket proxying:
|
||||
use "upgrade" when an Upgrade is requested, otherwise "close". Define once in http{} and use $connection_upgrade.
|
||||
#}
|
||||
map $http_upgrade $connection_upgrade {
|
||||
default upgrade;
|
||||
'' close;
|
||||
}
|
||||
|
||||
include mime.types;
|
||||
|
||||
{# default_type application/octet-stream; If html filter does not work, this one needs to be used#}
|
||||
|
||||
default_type text/html;
|
||||
|
||||
{# Ensure caches (browsers, proxies, CDNs) treat responses as dependent on the Origin header
|
||||
to prevent cross-domain cache poisoning issues.
|
||||
Discussion: https://chat.openai.com/share/2671b961-c1b0-472d-bae2-2804d0455e8a #}
|
||||
add_header 'Vary' 'Origin' always;
|
||||
|
||||
{# caching #}
|
||||
proxy_cache_path {{ NGINX.DIRECTORIES.CACHE.GENERAL }} levels=1:2 keys_zone=cache:20m max_size=20g inactive=14d use_temp_path=off;
|
||||
proxy_cache_path {{ NGINX.DIRECTORIES.CACHE.IMAGE }} levels=1:2 keys_zone=imgcache:10m inactive=60m use_temp_path=off;
|
||||
|
||||
@@ -13,22 +13,3 @@
|
||||
include_role:
|
||||
name: update-apt
|
||||
when: ansible_distribution == "Debian"
|
||||
|
||||
- name: "Check if yay is installed"
|
||||
command: which yay
|
||||
register: yay_installed
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
|
||||
- name: "Update with yay"
|
||||
include_role:
|
||||
name: update-yay
|
||||
when:
|
||||
- yay_installed.rc == 0
|
||||
- run_once_update_yay is not defined
|
||||
|
||||
- name: "Check if pkgmgr command is available"
|
||||
command: "which pkgmgr"
|
||||
register: pkgmgr_available
|
||||
failed_when: false
|
||||
|
||||
|
||||
@@ -1,22 +0,0 @@
|
||||
# Update yay
|
||||
|
||||
## Description
|
||||
|
||||
This role updates AUR packages on Arch Linux systems using [yay](https://wiki.archlinux.org/title/Yay). It automates the process of upgrading AUR packages, ensuring that your system stays current with the latest software available in the Arch User Repository.
|
||||
|
||||
## Overview
|
||||
|
||||
The role performs the following:
|
||||
- Checks if the [yay](https://wiki.archlinux.org/title/Yay) AUR helper is installed.
|
||||
- Upgrades AUR packages using the `kewlfft.aur.aur` module with yay.
|
||||
- Works exclusively on Arch Linux systems.
|
||||
|
||||
## Purpose
|
||||
|
||||
The primary purpose of this role is to ensure that AUR packages on Arch Linux are updated automatically. This helps maintain system stability and ensures that the latest features and fixes from the AUR are applied.
|
||||
|
||||
## Features
|
||||
|
||||
- **AUR Package Upgrades:** Uses yay to upgrade AUR packages.
|
||||
- **Conditional Execution:** Only runs if yay is installed on the system.
|
||||
- **Arch Linux Focused:** Specifically designed for Arch Linux systems.
|
||||
@@ -1,24 +0,0 @@
|
||||
galaxy_info:
|
||||
author: "Kevin Veen-Birkenbach"
|
||||
description: "Updates AUR packages on Arch Linux systems using yay. This role automates the upgrade process for AUR packages, ensuring that the system remains up-to-date with the latest versions available in the Arch User Repository."
|
||||
license: "Infinito.Nexus NonCommercial License"
|
||||
license_url: "https://s.infinito.nexus/license"
|
||||
company: |
|
||||
Kevin Veen-Birkenbach
|
||||
Consulting & Coaching Solutions
|
||||
https://www.veen.world
|
||||
min_ansible_version: "2.9"
|
||||
platforms:
|
||||
- name: Archlinux
|
||||
versions:
|
||||
- rolling
|
||||
galaxy_tags:
|
||||
- aur
|
||||
- update
|
||||
- archlinux
|
||||
- yay
|
||||
- system
|
||||
- maintenance
|
||||
repository: "https://s.infinito.nexus/code"
|
||||
issue_tracker_url: "https://s.infinito.nexus/issues"
|
||||
documentation: "https://docs.infinito.nexus"
|
||||
@@ -1,14 +0,0 @@
|
||||
- block:
|
||||
- name: Include dependency 'dev-yay'
|
||||
include_role:
|
||||
name: dev-yay
|
||||
when: run_once_dev_yay is not defined
|
||||
|
||||
- name: upgrade the system using yay, only act on AUR packages.
|
||||
become: false
|
||||
kewlfft.aur.aur:
|
||||
upgrade: yes
|
||||
use: yay
|
||||
aur_only: yes
|
||||
- include_tasks: utils/run_once.yml
|
||||
when: run_once_update_yay is not defined
|
||||
@@ -1 +0,0 @@
|
||||
application_id: update-yay
|
||||
@@ -5,7 +5,7 @@ users:
    username: "{{ PRIMARY_DOMAIN.split('.')[0] }}"
  tld:
    description: "Auto Generated Account to reserve the TLD"
    username: "{{ PRIMARY_DOMAIN.split('.')[1] }}"
    username: "{{ PRIMARY_DOMAIN.split('.')[1] if (PRIMARY_DOMAIN is defined and (PRIMARY_DOMAIN.split('.') | length) > 1) else (PRIMARY_DOMAIN ~ '_tld ') }}"
  root:
    username: root
    uid: 0
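For clarity, what the new expression yields for a couple of illustrative values of `PRIMARY_DOMAIN`:

```yaml
# PRIMARY_DOMAIN: "example.org" -> tld username: "org"
# PRIMARY_DOMAIN: "localhost"   -> tld username: "localhost_tld " (the fallback literal carries a trailing space)
```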
@@ -18,10 +18,10 @@ server:
|
||||
flags:
|
||||
script-src-elem:
|
||||
unsafe-inline: true
|
||||
script-src:
|
||||
script-src-attr:
|
||||
unsafe-inline: true
|
||||
unsafe-eval: true
|
||||
style-src:
|
||||
style-src-attr:
|
||||
unsafe-inline: true
|
||||
whitelist:
|
||||
font-src:
|
||||
|
||||
@@ -17,6 +17,10 @@ docker:
|
||||
image: "baserow/baserow"
|
||||
version: "latest"
|
||||
name: "baserow"
|
||||
cpus: 1.0
|
||||
mem_reservation: 0.5g
|
||||
mem_limit: 2g
|
||||
pids_limit: 512
|
||||
volumes:
|
||||
data: "baserow_data"
|
||||
server:
|
||||
@@ -33,5 +37,5 @@ server:
|
||||
flags:
|
||||
script-src-elem:
|
||||
unsafe-inline: true
|
||||
style-src:
|
||||
style-src-attr:
|
||||
unsafe-inline: true
|
||||
@@ -13,7 +13,7 @@ server:
|
||||
flags:
|
||||
script-src-elem:
|
||||
unsafe-inline: true
|
||||
style-src:
|
||||
style-src-attr:
|
||||
unsafe-inline: true
|
||||
domains:
|
||||
canonical:
|
||||
@@ -24,8 +24,12 @@ credentials: {}
|
||||
docker:
|
||||
services:
|
||||
bigbluebutton:
|
||||
repository: "https://github.com/kevinveenbirkenbach/bigbluebutton-docker.git"
|
||||
version: "bbb3.0"
|
||||
repository: "https://github.com/kevinveenbirkenbach/bigbluebutton-docker.git"
|
||||
version: "bbb3.0"
|
||||
recording:
|
||||
enabled: false # Enable recordings of sessions (deactivated by default because it crashed; also check GDPR compliance)
|
||||
cleanup: true # Auto-Cleanup Recordings
|
||||
max_age_days: 30 # Cleanup recording after this amount of days
|
||||
database:
|
||||
# This is set to true to pass integration test, doesn't have any other function
|
||||
enabled: true
|
||||
|
||||
@@ -14,13 +14,20 @@
|
||||
name: sys-stk-full-stateless
|
||||
vars:
|
||||
docker_compose_flush_handlers: false
|
||||
- name: "include 04_seed-database-to-backup.yml"
|
||||
include_tasks: "{{ [ playbook_dir, 'roles/sys-ctl-bkp-docker-2-loc/tasks/04_seed-database-to-backup.yml' ] | path_join }}"
|
||||
|
||||
- name: "Unset 'proxy_extra_configuration'"
|
||||
set_fact:
|
||||
proxy_extra_configuration: null
|
||||
|
||||
- name: "Include Seed routines for '{{ application_id }}' database backup"
|
||||
include_tasks: "{{ [ playbook_dir, 'roles/sys-ctl-bkp-docker-2-loc/tasks/04_seed-database-to-backup.yml' ] | path_join }}"
|
||||
vars:
|
||||
database_type: "postgres"
|
||||
database_instance: "{{ entity_name }}"
|
||||
database_password: "{{ applications | get_app_conf(application_id, 'credentials.postgresql_secret') }}"
|
||||
database_username: "postgres"
|
||||
database_name: "" # Multiple databases
|
||||
|
||||
- name: configure websocket_upgrade.conf
|
||||
copy:
|
||||
src: "websocket_upgrade.conf"
|
||||
|
||||
@@ -21,7 +21,6 @@ services:
|
||||
--fingerprint
|
||||
--no-multicast-peers
|
||||
--no-cli
|
||||
--no-tcp-relay
|
||||
--min-port={{ BBB_RELAY_PORT_START }}
|
||||
--max-port={{ BBB_RELAY_PORT_END }}
|
||||
--external-ip=${EXTERNAL_IPv4}
|
||||
@@ -29,11 +28,6 @@ services:
|
||||
--cert=${COTURN_TLS_CERT_PATH}
|
||||
--pkey=${COTURN_TLS_KEY_PATH}
|
||||
{% endif %}
|
||||
{% if BBB_GREENLIGHT_ENABLED | bool %}
|
||||
greenlight:
|
||||
{% set container_port = 3000 %}
|
||||
{% include 'roles/docker-container/templates/healthcheck/nc.yml.j2' %}
|
||||
{% endif %}
|
||||
{% if BBB_COLLABORA_ENABLED | bool %}
|
||||
bbb-web:
|
||||
depends_on:
|
||||
|
||||
@@ -22,14 +22,9 @@ ENABLE_GREENLIGHT={{ BBB_GREENLIGHT_ENABLED | lower }}
#ENABLE_PROMETHEUS_EXPORTER_OPTIMIZATION=true

# Recording
# IMPORTANT: this is currently a big privacy issue, because it will
# record everything which happens in the conference, even when the button
# suggests that it does not.
# https://github.com/bigbluebutton/bigbluebutton/issues/9202
# make sure that you get people's consent before they join a room
ENABLE_RECORDING=false
REMOVE_OLD_RECORDING=true
RECORDING_MAX_AGE_DAYS=365
ENABLE_RECORDING={{ BBB_RECORDING_ENABLED | lower }}
REMOVE_OLD_RECORDING={{ BBB_RECORDING_CLEANUP_ENABLED | lower }}
RECORDING_MAX_AGE_DAYS={{ BBB_RECORDING_MAX_AGE_DAYS }}

# ====================================
# SECRETS
@@ -174,27 +169,17 @@ OAUTH2_REDIRECT=
|
||||
# For information about setting up LDAP, see:
|
||||
#
|
||||
# https://docs.bigbluebutton.org/greenlight/gl-config.html#ldap-auth
|
||||
#
|
||||
# LDAP_SERVER=ldap.example.com
|
||||
# LDAP_PORT=389
|
||||
# LDAP_METHOD=plain
|
||||
# LDAP_UID={{ LDAP.USER.ATTRIBUTES.ID }}
|
||||
# LDAP_BASE=dc=example,dc=com
|
||||
# LDAP_AUTH=simple
|
||||
# LDAP_BIND_DN=cn=admin,dc=example,dc=com
|
||||
# LDAP_PASSWORD=password
|
||||
# LDAP_ROLE_FIELD=ou
|
||||
# LDAP_FILTER=(&(attr1=value1)(attr2=value2))
|
||||
|
||||
LDAP_SERVER="{{ LDAP.SERVER.DOMAIN }}"
|
||||
LDAP_PORT="{{ LDAP.SERVER.PORT }}"
|
||||
LDAP_METHOD=
|
||||
# LDAP_METHOD=plain
|
||||
LDAP_UID={{ LDAP.USER.ATTRIBUTES.ID }}
|
||||
LDAP_BASE="{{ LDAP.DN.ROOT }}"
|
||||
LDAP_BIND_DN="{{ LDAP.DN.ADMINISTRATOR.DATA }}"
|
||||
LDAP_AUTH=password
|
||||
LDAP_PASSWORD="{{ LDAP.BIND_CREDENTIAL }}"
|
||||
LDAP_ROLE_FIELD=
|
||||
LDAP_FILTER=
|
||||
# LDAP_ROLE_FIELD=ou
|
||||
# LDAP_FILTER=(&(attr1=value1)(attr2=value2))
|
||||
{% endif %}
|
||||
|
||||
# ====================================
|
||||
|
||||
@@ -2,13 +2,6 @@
|
||||
application_id: "web-app-bigbluebutton"
|
||||
entity_name: "{{ application_id | get_entity_name }}"
|
||||
|
||||
# Database configuration
|
||||
database_type: "postgres"
|
||||
database_instance: "{{ application_id | get_entity_name }}"
|
||||
database_password: "{{ applications | get_app_conf(application_id, 'credentials.postgresql_secret') }}"
|
||||
database_username: "postgres"
|
||||
database_name: "" # Multiple databases
|
||||
|
||||
# Proxy
|
||||
domain: "{{ domains | get_domain(application_id) }}"
|
||||
http_port: "{{ ports.localhost.http[application_id] }}"
|
||||
@@ -43,11 +36,16 @@ BBB_RELAY_PORT_START: "{{ ports.public.relay_port_ranges[applica
|
||||
BBB_RELAY_PORT_END: "{{ ports.public.relay_port_ranges[application_id ~ '_end'] }}"
|
||||
BBB_RELAY_PORT_RANGE: "{{ BBB_RELAY_PORT_START }}-{{ BBB_RELAY_PORT_END }}"
|
||||
|
||||
# Collabora
|
||||
## Collabora
|
||||
BBB_COLLABORA_ENABLED: "{{ applications | get_app_conf(application_id, 'docker.services.collabora.internal') }}"
|
||||
BBB_COLLABORA_URL: "{{ 'https://collabora:9980/cool' if BBB_COLLABORA_ENABLED else (domains | get_url('web-svc-collabora', WEB_PROTOCOL)) }}"
|
||||
|
||||
## Switchs
|
||||
### Recording
|
||||
BBB_RECORDING_ENABLED: "{{ applications | get_app_conf(application_id, 'docker.services.'~ entity_name ~'.recording.enabled') }}"
|
||||
BBB_RECORDING_CLEANUP_ENABLED: "{{ applications | get_app_conf(application_id, 'docker.services.'~ entity_name ~'.recording.cleanup') }}"
|
||||
BBB_RECORDING_MAX_AGE_DAYS: "{{ applications | get_app_conf(application_id, 'docker.services.'~ entity_name ~'.recording.max_age_days') }}"
|
||||
|
||||
## Additional Switches
|
||||
|
||||
### Network
|
||||
BBB_IP6_ENABLED: "{{ applications | get_app_conf(application_id, 'server.ip6_enabled') }}"
|
||||
@@ -57,4 +55,4 @@ BBB_GREENLIGHT_ENABLED: "{{ applications | get_app_conf(applicatio
|
||||
|
||||
### SSO
|
||||
BBB_LDAP_ENABLED: "{{ applications | get_app_conf(application_id, 'features.ldap') }}"
|
||||
BBB_OIDC_ENABLED: "{{ applications | get_app_conf(application_id, 'features.oidc') }}"
|
||||
BBB_OIDC_ENABLED: "{{ applications | get_app_conf(application_id, 'features.oidc') }}"
|
||||
@@ -2,17 +2,17 @@
|
||||
# Exposes a same-origin /config to avoid CORS when the social-app fetches config.
|
||||
location = /config {
|
||||
proxy_pass {{ BLUESKY_CONFIG_UPSTREAM_URL }};
|
||||
# Extract the hostname only:
|
||||
|
||||
{# Just extract hostname #}
|
||||
set $up_host "{{ BLUESKY_CONFIG_UPSTREAM_URL | regex_replace('^https?://', '') | regex_replace('/.*$', '') }}";
|
||||
proxy_set_header Host $up_host;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Connection "";
|
||||
proxy_ssl_server_name on;
|
||||
|
||||
# Make response clearly same-origin for browsers
|
||||
{# Access Control Allow Configurations #}
|
||||
proxy_hide_header Access-Control-Allow-Origin;
|
||||
add_header Access-Control-Allow-Origin $scheme://$host always;
|
||||
add_header Vary Origin always;
|
||||
{% include 'roles/sys-svc-proxy/templates/headers/access_control_allow.conf.j2' %}
|
||||
}
|
||||
|
||||
location = /ipcc {
|
||||
@@ -23,7 +23,7 @@ location = /ipcc {
|
||||
proxy_set_header Connection "";
|
||||
proxy_ssl_server_name on;
|
||||
|
||||
{# Access Control Allow Configurations #}
|
||||
proxy_hide_header Access-Control-Allow-Origin;
|
||||
add_header Access-Control-Allow-Origin $scheme://$host always;
|
||||
add_header Vary Origin always;
|
||||
{% include 'roles/sys-svc-proxy/templates/headers/access_control_allow.conf.j2' %}
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.