Mirror of https://github.com/kevinveenbirkenbach/computer-playbook.git
Synced 2025-11-07 05:37:59 +00:00

Compare commits: 1 commit (aa1a901309...feature/ke)

| Author | SHA1 | Date |
|---|---|---|
| | 6fcf6a1ab6 | |

ansible.cfg (12 lines changed)
@@ -1,6 +1,5 @@
[defaults]
# --- Performance & Behavior ---
pipelining = True
forks = 25
strategy = linear
gathering = smart

@@ -15,14 +14,19 @@ stdout_callback = yaml
callbacks_enabled = profile_tasks,timer

# --- Plugin paths ---
filter_plugins = ./filter_plugins
filter_plugins = ./filter_plugins
lookup_plugins = ./lookup_plugins
module_utils = ./module_utils

[ssh_connection]
ssh_args = -o ControlMaster=auto -o ControlPersist=20s -o ControlPath=~/.ssh/ansible-%h-%p-%r -o ServerAliveInterval=15 -o ServerAliveCountMax=3 -o StrictHostKeyChecking=accept-new -o PreferredAuthentications=publickey,password,keyboard-interactive
# Multiplexing: safer socket path in HOME instead of /tmp
ssh_args = -o ControlMaster=auto -o ControlPersist=20s -o ControlPath=~/.ssh/ansible-%h-%p-%r \
    -o ServerAliveInterval=15 -o ServerAliveCountMax=3 -o StrictHostKeyChecking=accept-new \
    -o PreferredAuthentications=publickey,password,keyboard-interactive

# Pipelining boosts speed; works fine if sudoers does not enforce "requiretty"
pipelining = True
transfer_method = smart
scp_if_ssh = smart

[persistent_connection]
connect_timeout = 30
@@ -83,13 +83,6 @@ class DefaultsGenerator:
            print(f"Error during rendering: {e}", file=sys.stderr)
            sys.exit(1)

        # Sort applications by application key for stable output
        apps = result.get("defaults_applications", {})
        if isinstance(apps, dict) and apps:
            result["defaults_applications"] = {
                k: apps[k] for k in sorted(apps.keys())
            }

        # Write output
        self.output_file.parent.mkdir(parents=True, exist_ok=True)
        with self.output_file.open("w", encoding="utf-8") as f:

@@ -220,10 +220,6 @@ def main():
        print(f"Error building user entries: {e}", file=sys.stderr)
        sys.exit(1)

    # Sort users by key for deterministic output
    if isinstance(users, dict) and users:
        users = OrderedDict(sorted(users.items()))

    # Convert OrderedDict into plain dict for YAML
    default_users = {'default_users': users}
    plain_data = dictify(default_users)
@@ -10,23 +10,9 @@ from module_utils.config_utils import get_app_conf
from module_utils.get_url import get_url


def _dedup_preserve(seq):
    """Return a list with stable order and unique items."""
    seen = set()
    out = []
    for x in seq:
        if x not in seen:
            seen.add(x)
            out.append(x)
    return out

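For orientation, a minimal sketch (not part of the diff) of what `_dedup_preserve` is expected to return for a token list with repeats; the token values are made up:

# Illustrative only; mirrors the helper shown above.
def _dedup_preserve(seq):
    seen = set()
    out = []
    for x in seq:
        if x not in seen:
            seen.add(x)
            out.append(x)
    return out

tokens = ["'self'", "'unsafe-inline'", "'self'", "https://cdn.example.org"]
print(_dedup_preserve(tokens))
# expected: ["'self'", "'unsafe-inline'", "https://cdn.example.org"]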
class FilterModule(object):
    """
    Jinja filters for building a robust, CSP3-aware Content-Security-Policy header.
    Safari/CSP2 compatibility is ensured by merging the -elem/-attr variants into the base
    directives (style-src, script-src). We intentionally do NOT mirror back into -elem/-attr
    to allow true CSP3 granularity on modern browsers.
    Custom filters for Content Security Policy generation and CSP-related utilities.
    """

    def filters(self):

@@ -75,14 +61,11 @@ class FilterModule(object):
        """
        Returns CSP flag tokens (e.g., "'unsafe-eval'", "'unsafe-inline'") for a directive,
        merging sane defaults with app config.

        Defaults:
          - For styles we enable 'unsafe-inline' by default (style-src, style-src-elem, style-src-attr),
            because many apps rely on inline styles / style attributes.
          - For scripts we do NOT enable 'unsafe-inline' by default.
        Default: 'unsafe-inline' is enabled for style-src and style-src-elem.
        """
        # Defaults that apply to all apps
        default_flags = {}
        if directive in ('style-src', 'style-src-elem', 'style-src-attr'):
        if directive in ('style-src', 'style-src-elem'):
            default_flags = {'unsafe-inline': True}

        configured = get_app_conf(

@@ -93,6 +76,7 @@ class FilterModule(object):
            {}
        )

        # Merge defaults with configured flags (configured overrides defaults)
        merged = {**default_flags, **configured}

        tokens = []
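A minimal sketch (not part of the diff) of the merge precedence: app-configured flags override the defaults, so an explicit false suppresses the default token.

# Illustrative only: defaults vs. app config for a style directive.
default_flags = {'unsafe-inline': True}    # default for style-src / style-src-elem
configured = {'unsafe-inline': False}      # explicit app-level override
merged = {**default_flags, **configured}
tokens = [f"'{name}'" for name, enabled in merged.items() if enabled]
print(tokens)   # [] -> no 'unsafe-inline' token is emitted for this directive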
@@ -147,148 +131,82 @@ class FilterModule(object):
    ):
        """
        Builds the Content-Security-Policy header value dynamically based on application settings.

        Key points:
        - CSP3-aware: supports base/elem/attr for styles and scripts.
        - Safari/CSP2 fallback: base directives (style-src, script-src) always include
          the union of their -elem/-attr variants.
        - We do NOT mirror back into -elem/-attr; finer CSP3 rules remain effective
          on modern browsers if you choose to use them.
        - If the app explicitly disables a token on the *base* (e.g. style-src.unsafe-inline: false),
          that token is removed from the merged base even if present in elem/attr.
        - Inline hashes are added ONLY if that directive does NOT include 'unsafe-inline'.
        - Whitelists/flags/hashes read from:
            server.csp.whitelist.<directive>
            server.csp.flags.<directive>
            server.csp.hashes.<directive>
        - "Smart defaults":
            * internal CDN for style/script elem and connect
            * Matomo endpoints (if feature enabled) for script-elem/connect
            * Simpleicons (if feature enabled) for connect
            * reCAPTCHA (if feature enabled) for script-elem/frame-src
            * frame-ancestors extended for desktop/logout/keycloak if enabled
        - Flags (e.g., 'unsafe-eval', 'unsafe-inline') are read from server.csp.flags.<directive>,
          with sane defaults applied in get_csp_flags (always 'unsafe-inline' for style-src and style-src-elem).
        - Inline hashes are read from server.csp.hashes.<directive>.
        - Whitelists are read from server.csp.whitelist.<directive>.
        - Inline hashes are added only if the final tokens do NOT include 'unsafe-inline'.
        """
        try:
            directives = [
                'default-src',
                'connect-src',
                'frame-ancestors',
                'frame-src',
                'script-src',
                'script-src-elem',
                'script-src-attr',
                'style-src',
                'style-src-elem',
                'style-src-attr',
                'font-src',
                'worker-src',
                'manifest-src',
                'media-src',
                'default-src',       # Fallback source list for content types not explicitly listed
                'connect-src',       # Allowed URLs for XHR, WebSockets, EventSource, fetch()
                'frame-ancestors',   # Who may embed this page
                'frame-src',         # Sources for nested browsing contexts (e.g., <iframe>)
                'script-src',        # Sources for script execution
                'script-src-elem',   # Sources for <script> elements
                'style-src',         # Sources for inline styles and <style>/<link> elements
                'style-src-elem',    # Sources for <style> and <link rel="stylesheet">
                'font-src',          # Sources for fonts
                'worker-src',        # Sources for workers
                'manifest-src',      # Sources for web app manifests
                'media-src',         # Sources for audio and video
            ]

            tokens_by_dir = {}
            explicit_flags_by_dir = {}
            parts = []

            for directive in directives:
                # Collect explicit flags (to later respect explicit "False" on base during merge)
                explicit_flags = get_app_conf(
                    applications,
                    application_id,
                    'server.csp.flags.' + directive,
                    False,
                    {}
                )
                explicit_flags_by_dir[directive] = explicit_flags

                tokens = ["'self'"]

                # 1) Flags (with sane defaults)
                # Load flags (includes defaults from get_csp_flags)
                flags = self.get_csp_flags(applications, application_id, directive)
                tokens += flags

                # 2) Internal CDN defaults for selected directives
                if directive in ('script-src-elem', 'connect-src', 'style-src-elem', 'style-src'):
                # Allow fetching from internal CDN by default for selected directives
                if directive in ['script-src-elem', 'connect-src', 'style-src-elem']:
                    tokens.append(get_url(domains, 'web-svc-cdn', web_protocol))

                # 3) Matomo (if enabled)
                if directive in ('script-src-elem', 'connect-src'):
                # Matomo integration if feature is enabled
                if directive in ['script-src-elem', 'connect-src']:
                    if self.is_feature_enabled(applications, matomo_feature_name, application_id):
                        tokens.append(get_url(domains, 'web-app-matomo', web_protocol))

                # 4) Simpleicons (if enabled) – typically used via connect-src (fetch)
                if directive == 'connect-src':
                # Simpleicons integration if feature is enabled
                if directive in ['connect-src']:
                    if self.is_feature_enabled(applications, 'simpleicons', application_id):
                        tokens.append(get_url(domains, 'web-svc-simpleicons', web_protocol))

                # 5) reCAPTCHA (if enabled) – scripts + frames
                # ReCaptcha integration (scripts + frames) if feature is enabled
                if self.is_feature_enabled(applications, 'recaptcha', application_id):
                    if directive in ('script-src-elem', 'frame-src'):
                    if directive in ['script-src-elem', 'frame-src']:
                        tokens.append('https://www.gstatic.com')
                        tokens.append('https://www.google.com')

                # 6) Frame ancestors (desktop + logout)
                # Frame ancestors handling (desktop + logout support)
                if directive == 'frame-ancestors':
                    if self.is_feature_enabled(applications, 'desktop', application_id):
                        # Allow being embedded by the desktop app domain's site
                        # Allow being embedded by the desktop app domain (and potentially its parent)
                        domain = domains.get('web-app-desktop')[0]
                        sld_tld = ".".join(domain.split(".")[-2:])  # e.g., example.com
                        tokens.append(f"{sld_tld}")
                    if self.is_feature_enabled(applications, 'logout', application_id):
                        # Allow embedding via logout proxy and Keycloak app
                        tokens.append(get_url(domains, 'web-svc-logout', web_protocol))
                        tokens.append(get_url(domains, 'web-app-keycloak', web_protocol))

                # 7) Custom whitelist
                # Custom whitelist entries
                tokens += self.get_csp_whitelist(applications, application_id, directive)

                # 8) Inline hashes (only if this directive does NOT include 'unsafe-inline')
                # Add inline content hashes ONLY if final tokens do NOT include 'unsafe-inline'
                # (Check tokens, not flags, to include defaults and later modifications.)
                if "'unsafe-inline'" not in tokens:
                    for snippet in self.get_csp_inline_content(applications, application_id, directive):
                        tokens.append(self.get_csp_hash(snippet))

                tokens_by_dir[directive] = _dedup_preserve(tokens)
                # Append directive
                parts.append(f"{directive} {' '.join(tokens)};")

            # ----------------------------------------------------------
            # CSP3 families → ensure CSP2 fallback (Safari-safe)
            # Merge style/script families so base contains union of elem/attr.
            # Respect explicit disables on the base (e.g. unsafe-inline=False).
            # Do NOT mirror back into elem/attr (keep granularity).
            # ----------------------------------------------------------
            def _strip_if_disabled(unioned_tokens, explicit_flags, name):
                """
                Remove a token (e.g. 'unsafe-inline') from the unioned token list
                if it is explicitly disabled in the base directive flags.
                """
                if isinstance(explicit_flags, dict) and explicit_flags.get(name) is False:
                    tok = f"'{name}'"
                    return [t for t in unioned_tokens if t != tok]
                return unioned_tokens

            def merge_family(base_key, elem_key, attr_key):
                base = tokens_by_dir.get(base_key, [])
                elem = tokens_by_dir.get(elem_key, [])
                attr = tokens_by_dir.get(attr_key, [])
                union = _dedup_preserve(base + elem + attr)

                # Respect explicit disables on the base
                explicit_base = explicit_flags_by_dir.get(base_key, {})
                # The most relevant flags for script/style:
                for flag_name in ('unsafe-inline', 'unsafe-eval'):
                    union = _strip_if_disabled(union, explicit_base, flag_name)

                tokens_by_dir[base_key] = union  # write back only to base

            merge_family('style-src', 'style-src-elem', 'style-src-attr')
            merge_family('script-src', 'script-src-elem', 'script-src-attr')

            # ----------------------------------------------------------
            # Assemble header
            # ----------------------------------------------------------
            parts = []
            for directive in directives:
                if directive in tokens_by_dir:
                    parts.append(f"{directive} {' '.join(tokens_by_dir[directive])};")

            # Keep permissive img-src for data/blob + any host (as before)
            # Static img-src directive (kept permissive for data/blob and any host)
            parts.append("img-src * data: blob:;")

            return ' '.join(parts)
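A worked example (not part of the diff, sample values only) of the Safari/CSP2 fallback: the base style-src receives the union of its -elem/-attr variants, an explicit disable on the base strips the token again, and the -elem/-attr lists stay untouched.

# Illustrative only; reproduces the merge_family behaviour on sample data.
tokens_by_dir = {
    'style-src':      ["'self'"],
    'style-src-elem': ["'self'", "https://cdn.example.org", "'unsafe-inline'"],
    'style-src-attr': ["'self'", "'unsafe-inline'"],
}
explicit_base = {'unsafe-inline': False}   # explicit disable on the base directive

union = []
for key in ('style-src', 'style-src-elem', 'style-src-attr'):
    for tok in tokens_by_dir[key]:
        if tok not in union:
            union.append(tok)
union = [t for t in union if t != "'unsafe-inline'"]   # respect the explicit disable
tokens_by_dir['style-src'] = union                     # written back only to the base
print(tokens_by_dir['style-src'])   # ["'self'", "https://cdn.example.org"]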
@@ -76,9 +76,8 @@ _applications_nextcloud_oidc_flavor: >-
        False,
        'oidc_login'
        if applications
        | get_app_conf('web-app-nextcloud','features.ldap',False, True, True)
        else 'sociallogin',
        True
        | get_app_conf('web-app-nextcloud','features.ldap',False, True)
        else 'sociallogin'
      )
    }}

@@ -5,6 +5,6 @@ MODE_DUMMY: false # Executes dummy/test routines instead
MODE_UPDATE: true    # Executes updates
MODE_DEBUG: false    # Enables debugging in Ansible and in the apps. You SHOULD NOT enable this on production servers.
MODE_RESET: false    # Cleans up all Infinito.Nexus files. When using this function it is necessary to run the whole playbook, not partial roles.
MODE_CLEANUP: true   # Cleanup unused files and configurations
MODE_CLEANUP: "{{ MODE_DEBUG | bool }}"   # Cleanup unused files and configurations
MODE_ASSERT: "{{ MODE_DEBUG | bool }}"    # Executes validation tasks during the run.
MODE_BACKUP: true    # Executes the backup before the deployment
@@ -1,3 +1,4 @@

# Service Timers

## Meta

@@ -23,29 +24,29 @@ SYS_SCHEDULE_HEALTH_BTRFS: "*-*-* 00:00:00"
SYS_SCHEDULE_HEALTH_JOURNALCTL: "*-*-* 00:00:00"                               # Check once per day the journalctl for errors
SYS_SCHEDULE_HEALTH_DISC_SPACE: "*-*-* 06,12,18,00:00:00"                      # Check four times per day if there is sufficient disc space
SYS_SCHEDULE_HEALTH_DOCKER_CONTAINER: "*-*-* {{ HOURS_SERVER_AWAKE }}:00:00"   # Check once per hour if the docker containers are healthy
SYS_SCHEDULE_HEALTH_DOCKER_VOLUMES: "*-*-* {{ HOURS_SERVER_AWAKE }}:00:00"     # Check once per hour if the docker volumes are healthy
SYS_SCHEDULE_HEALTH_CSP_CRAWLER: "*-*-* {{ HOURS_SERVER_AWAKE }}:00:00"        # Check once per hour if all CSPs are fulfilled and available
SYS_SCHEDULE_HEALTH_NGINX: "*-*-* {{ HOURS_SERVER_AWAKE }}:00:00"              # Check once per hour if all webservices are available
SYS_SCHEDULE_HEALTH_DOCKER_VOLUMES: "*-*-* {{ HOURS_SERVER_AWAKE }}:15:00"     # Check once per hour if the docker volumes are healthy
SYS_SCHEDULE_HEALTH_CSP_CRAWLER: "*-*-* {{ HOURS_SERVER_AWAKE }}:30:00"        # Check once per hour if all CSPs are fulfilled and available
SYS_SCHEDULE_HEALTH_NGINX: "*-*-* {{ HOURS_SERVER_AWAKE }}:45:00"              # Check once per hour if all webservices are available
SYS_SCHEDULE_HEALTH_MSMTP: "*-*-* 00:00:00"                                    # Check the SMTP server once per day

### Schedule for cleanup tasks
SYS_SCHEDULE_CLEANUP_CERTS: "*-*-* 20:00"                                      # Deletes and revokes unused certs once per day
SYS_SCHEDULE_CLEANUP_FAILED_BACKUPS: "*-*-* 21:00"                             # Clean up failed docker backups once per day
SYS_SCHEDULE_CLEANUP_BACKUPS: "*-*-* 22:00"                                    # Cleanup backups once per day, MUST be called before disc space cleanup
SYS_SCHEDULE_CLEANUP_DISC_SPACE: "*-*-* 23:00"                                 # Cleanup disc space once per day
SYS_SCHEDULE_CLEANUP_BACKUPS: "*-*-* 00,06,12,18:30:00"                        # Cleanup backups every 6 hours, MUST be called before disc space cleanup
SYS_SCHEDULE_CLEANUP_DISC_SPACE: "*-*-* 07,13,19,01:30:00"                     # Cleanup disc space every 6 hours
SYS_SCHEDULE_CLEANUP_CERTS: "*-*-* 12,00:45:00"                                # Deletes and revokes unused certs
SYS_SCHEDULE_CLEANUP_FAILED_BACKUPS: "*-*-* 12:00:00"                          # Clean up failed docker backups every noon

### Schedule for repair services
SYS_SCHEDULE_REPAIR_BTRFS_AUTO_BALANCER: "Sat *-*-01..07 00:00:00"             # Execute btrfs auto balancer every first Saturday of a month
SYS_SCHEDULE_REPAIR_DOCKER_HARD: "Sun *-*-* 00:00:00"                          # Restart docker instances every Sunday
SYS_SCHEDULE_REPAIR_DOCKER_HARD: "Sun *-*-* 08:00:00"                          # Restart docker instances every Sunday at 8:00 AM

### Schedule for backup tasks
SYS_SCHEDULE_BACKUP_REMOTE_TO_LOCAL: "*-*-* 00:30:00"                          # Pull backup of the previous day
SYS_SCHEDULE_BACKUP_DOCKER_TO_LOCAL: "*-*-* 01:00:00"                          # Backup the current day
SYS_SCHEDULE_BACKUP_DOCKER_TO_LOCAL: "*-*-* 03:30:00"
SYS_SCHEDULE_BACKUP_REMOTE_TO_LOCAL: "*-*-* 21:30:00"

### Schedule for Maintenance Tasks
SYS_SCHEDULE_MAINTANANCE_LETSENCRYPT_RENEW: "*-*-* 10,22:00:00"                # Renew Mailu certificates twice per day
SYS_SCHEDULE_MAINTANANCE_LETSENCRYPT_DEPLOY: "*-*-* 11,23:00:00"               # Deploy Let's Encrypt certificates twice per day to docker containers
SYS_SCHEDULE_MAINTANANCE_NEXTCLOUD: "21"                                       # Do Nextcloud maintenance between 21:00 and 01:00
SYS_SCHEDULE_MAINTANANCE_LETSENCRYPT_RENEW: "*-*-* 12,00:30:00"                # Renew Mailu certificates twice per day
SYS_SCHEDULE_MAINTANANCE_LETSENCRYPT_DEPLOY: "*-*-* 13,01:30:00"               # Deploy Let's Encrypt certificates twice per day to docker containers
SYS_SCHEDULE_MAINTANANCE_NEXTCLOUD: "22"                                       # Do Nextcloud maintenance between 22:00 and 02:00

### Animation
SYS_SCHEDULE_ANIMATION_KEYBOARD_COLOR: "*-*-* *:*:00"                          # Change the keyboard color every minute
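A small sketch (not part of the diff, assuming a host with systemd) for sanity-checking the staggered OnCalendar expressions above; `systemd-analyze calendar` prints the next elapse time, which makes the :15/:30/:45 offsets easy to verify.

# Illustrative only; verifies calendar expressions like the ones defined above.
import subprocess

for expr in ("*-*-* 00,06,12,18:30:00", "*-*-* 07,13,19,01:30:00", "Sun *-*-* 08:00:00"):
    result = subprocess.run(
        ["systemd-analyze", "calendar", expr],
        capture_output=True, text=True, check=True,
    )
    print(result.stdout.strip(), "\n")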
@@ -112,10 +112,6 @@ defaults_networks:
      subnet: 192.168.104.32/28
    web-svc-coturn:
      subnet: 192.168.104.48/28
    web-app-mini-qr:
      subnet: 192.168.104.64/28
    web-app-drupal:
      subnet: 192.168.104.80/28

    # /24 Networks / 254 Usable Clients
    web-app-bigbluebutton:

@@ -80,8 +80,6 @@ ports:
    web-app-flowise: 8056
    web-app-minio_api: 8057
    web-app-minio_console: 8058
    web-app-mini-qr: 8059
    web-app-drupal: 8060
    web-app-bigbluebutton: 48087   # This port is predefined by bbb. @todo Try to change this to a 8XXX port
  public:
    # The following ports should be changed to 22 on the subdomain via stream mapping
@@ -6,7 +6,6 @@ __metaclass__ = type
import os
import subprocess
import time
from datetime import datetime

class CertUtils:
    _domain_cert_mapping = None

@@ -23,30 +22,6 @@ class CertUtils:
        except subprocess.CalledProcessError:
            return ""

    @staticmethod
    def run_openssl_dates(cert_path):
        """
        Returns (not_before_ts, not_after_ts) as POSIX timestamps or (None, None) on failure.
        """
        try:
            output = subprocess.check_output(
                ['openssl', 'x509', '-in', cert_path, '-noout', '-startdate', '-enddate'],
                universal_newlines=True
            )
            nb, na = None, None
            for line in output.splitlines():
                line = line.strip()
                if line.startswith('notBefore='):
                    nb = line.split('=', 1)[1].strip()
                elif line.startswith('notAfter='):
                    na = line.split('=', 1)[1].strip()
            def _parse(openssl_dt):
                # OpenSSL format example: "Oct 10 12:34:56 2025 GMT"
                return int(datetime.strptime(openssl_dt, "%b %d %H:%M:%S %Y %Z").timestamp())
            return (_parse(nb) if nb else None, _parse(na) if na else None)
        except Exception:
            return (None, None)

    @staticmethod
    def extract_sans(cert_text):
        dns_entries = []

@@ -84,6 +59,7 @@ class CertUtils:
        else:
            return domain == san


    @classmethod
    def build_snapshot(cls, cert_base_path):
        snapshot = []

@@ -106,17 +82,6 @@ class CertUtils:

    @classmethod
    def refresh_cert_mapping(cls, cert_base_path, debug=False):
        """
        Build mapping: SAN -> list of entries
        entry = {
            'folder': str,
            'cert_path': str,
            'mtime': float,
            'not_before': int|None,
            'not_after': int|None,
            'is_wildcard': bool
        }
        """
        cert_files = cls.list_cert_files(cert_base_path)
        mapping = {}
        for cert_path in cert_files:

@@ -125,82 +90,46 @@ class CertUtils:
                continue
            sans = cls.extract_sans(cert_text)
            folder = os.path.basename(os.path.dirname(cert_path))
            try:
                mtime = os.stat(cert_path).st_mtime
            except FileNotFoundError:
                mtime = 0.0
            nb, na = cls.run_openssl_dates(cert_path)

            for san in sans:
                entry = {
                    'folder': folder,
                    'cert_path': cert_path,
                    'mtime': mtime,
                    'not_before': nb,
                    'not_after': na,
                    'is_wildcard': san.startswith('*.'),
                }
                mapping.setdefault(san, []).append(entry)

                if san not in mapping:
                    mapping[san] = folder
        cls._domain_cert_mapping = mapping
        if debug:
            print(f"[DEBUG] Refreshed domain-to-cert mapping (counts): "
                  f"{ {k: len(v) for k, v in mapping.items()} }")
            print(f"[DEBUG] Refreshed domain-to-cert mapping: {mapping}")

    @classmethod
    def ensure_cert_mapping(cls, cert_base_path, debug=False):
        if cls._domain_cert_mapping is None or cls.snapshot_changed(cert_base_path):
            cls.refresh_cert_mapping(cert_base_path, debug)

    @staticmethod
    def _score_entry(entry):
        """
        Return tuple used for sorting newest-first:
        (not_before or -inf, mtime)
        """
        nb = entry.get('not_before')
        mtime = entry.get('mtime', 0.0)
        return (nb if nb is not None else -1, mtime)

    @classmethod
    def find_cert_for_domain(cls, domain, cert_base_path, debug=False):
        cls.ensure_cert_mapping(cert_base_path, debug)

        candidates_exact = []
        candidates_wild = []
        exact_match = None
        wildcard_match = None

        for san, entries in cls._domain_cert_mapping.items():
        for san, folder in cls._domain_cert_mapping.items():
            if san == domain:
                candidates_exact.extend(entries)
            elif san.startswith('*.'):
                exact_match = folder
                break
            if san.startswith('*.'):
                base = san[2:]
                if domain.count('.') == base.count('.') + 1 and domain.endswith('.' + base):
                    candidates_wild.extend(entries)
                    wildcard_match = folder

        def _pick_newest(entries):
            if not entries:
                return None
            # newest by (not_before, mtime)
            best = max(entries, key=cls._score_entry)
            return best
        if exact_match:
            if debug:
                print(f"[DEBUG] Exact match for {domain} found in {exact_match}")
            return exact_match

        best_exact = _pick_newest(candidates_exact)
        best_wild = _pick_newest(candidates_wild)

        if best_exact and debug:
            print(f"[DEBUG] Best exact match for {domain}: {best_exact['folder']} "
                  f"(not_before={best_exact['not_before']}, mtime={best_exact['mtime']})")
        if best_wild and debug:
            print(f"[DEBUG] Best wildcard match for {domain}: {best_wild['folder']} "
                  f"(not_before={best_wild['not_before']}, mtime={best_wild['mtime']})")

        # Prefer exact if it exists; otherwise wildcard
        chosen = best_exact or best_wild

        if chosen:
            return chosen['folder']
        if wildcard_match:
            if debug:
                print(f"[DEBUG] Wildcard match for {domain} found in {wildcard_match}")
            return wildcard_match

        if debug:
            print(f"[DEBUG] No certificate folder found for {domain}")

        return None
@@ -24,7 +24,7 @@ class ConfigEntryNotSetError(AppConfigKeyError):
    pass


def get_app_conf(applications, application_id, config_path, strict=True, default=None, skip_missing_app=False):
def get_app_conf(applications, application_id, config_path, strict=True, default=None):
    # Path to the schema file for this application
    schema_path = os.path.join('roles', application_id, 'schema', 'main.yml')

@@ -133,9 +133,6 @@ def get_app_conf(applications, application_id, config_path, strict=True, default
    try:
        obj = applications[application_id]
    except KeyError:
        if skip_missing_app:
            # Simply return default instead of failing
            return default if default is not None else False
        raise AppConfigKeyError(
            f"Application ID '{application_id}' not found in applications dict.\n"
            f"path_trace: {path_trace}\n"
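A minimal usage sketch (not part of the diff) against the new signature: with skip_missing_app removed, a lookup for an application id that is absent from `applications` is expected to raise AppConfigKeyError, while present ids still honour the strict/default arguments. The missing id below is hypothetical.

# Illustrative only; argument order follows the signature shown above.
ldap_enabled = get_app_conf(applications, 'web-app-nextcloud', 'features.ldap', False, True)

try:
    get_app_conf(applications, 'web-app-does-not-exist', 'features.ldap', False, True)
except AppConfigKeyError:
    pass   # callers can no longer opt out of this error via skip_missing_app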
@@ -3,7 +3,4 @@ collections:
  - name: community.general
  - name: hetzner.hcloud
yay:
  - python-simpleaudio
  - python-numpy
pacman:
  - ansible
  - python-simpleaudio

@@ -153,11 +153,6 @@ roles:
    description: "Core AI building blocks—model serving, OpenAI-compatible gateways, vector databases, orchestration, and chat UIs."
    icon: "fas fa-brain"
    invokable: true
  bkp:
    title: "Backup Services"
    description: "Service-level backup and recovery components—handling automated data snapshots, remote backups, synchronization services, and backup orchestration across databases, files, and containers."
    icon: "fas fa-database"
    invokable: true
  user:
    title: "Users & Access"
    description: "User accounts & access control"

@@ -127,7 +127,7 @@
#de_BE@euro ISO-8859-15
#de_CH.UTF-8 UTF-8
#de_CH ISO-8859-1
#de_DE.UTF-8 UTF-8
de_DE.UTF-8 UTF-8
#de_DE ISO-8859-1
#de_DE@euro ISO-8859-15
#de_IT.UTF-8 UTF-8
@@ -1,132 +0,0 @@
#!/usr/bin/env python3
import argparse
import os
import subprocess
import time
import sys


def run_command(command, capture_output=True, check=False, shell=True):
    """Run a shell command and return its output as string."""
    try:
        result = subprocess.run(
            command,
            capture_output=capture_output,
            shell=shell,
            text=True,
            check=check
        )
        return result.stdout.strip()
    except subprocess.CalledProcessError as e:
        if capture_output:
            print(e.stdout)
            print(e.stderr)
        raise


def pull_backups(hostname: str):
    print(f"pulling backups from: {hostname}")
    errors = 0

    print("loading meta data...")
    remote_host = f"backup@{hostname}"
    print(f"host address: {remote_host}")

    remote_machine_id = run_command(f'ssh "{remote_host}" sha256sum /etc/machine-id')[:64]
    print(f"remote machine id: {remote_machine_id}")

    general_backup_machine_dir = f"/Backups/{remote_machine_id}/"
    print(f"backup dir: {general_backup_machine_dir}")

    try:
        remote_backup_types = run_command(
            f'ssh "{remote_host}" "find {general_backup_machine_dir} -maxdepth 1 -type d -execdir basename {{}} ;"'
        ).splitlines()
        print(f"backup types: {' '.join(remote_backup_types)}")
    except subprocess.CalledProcessError:
        sys.exit(1)

    for backup_type in remote_backup_types:
        if backup_type == remote_machine_id:
            continue

        print(f"backup type: {backup_type}")

        general_backup_type_dir = f"{general_backup_machine_dir}{backup_type}/"
        general_versions_dir = general_backup_type_dir

        # local previous version
        try:
            local_previous_version_dir = run_command(f"ls -d {general_versions_dir}* | tail -1")
        except subprocess.CalledProcessError:
            local_previous_version_dir = ""
        print(f"last local backup: {local_previous_version_dir}")

        # remote versions
        remote_backup_versions = run_command(
            f'ssh "{remote_host}" "ls -d /Backups/{remote_machine_id}/backup-docker-to-local/*"'
        ).splitlines()
        print(f"remote backup versions: {' '.join(remote_backup_versions)}")

        remote_last_backup_dir = remote_backup_versions[-1] if remote_backup_versions else ""
        print(f"last remote backup: {remote_last_backup_dir}")

        remote_source_path = f"{remote_host}:{remote_last_backup_dir}/"
        print(f"source path: {remote_source_path}")

        local_backup_destination_path = remote_last_backup_dir
        print(f"backup destination: {local_backup_destination_path}")

        print("creating local backup destination folder...")
        os.makedirs(local_backup_destination_path, exist_ok=True)

        rsync_command = (
            f'rsync -abP --delete --delete-excluded --rsync-path="sudo rsync" '
            f'--link-dest="{local_previous_version_dir}" "{remote_source_path}" "{local_backup_destination_path}"'
        )
        print("starting backup...")
        print(f"executing: {rsync_command}")

        retry_count = 0
        max_retries = 12
        retry_delay = 300             # 5 minutes
        last_retry_start = 0
        max_retry_duration = 43200    # 12 hours

        rsync_exit_code = 1
        while retry_count < max_retries:
            print(f"Retry attempt: {retry_count + 1}")
            if retry_count > 0:
                current_time = int(time.time())
                last_retry_duration = current_time - last_retry_start
                if last_retry_duration >= max_retry_duration:
                    print("Last retry took more than 12 hours, increasing max retries to 12.")
                    max_retries = 12
            last_retry_start = int(time.time())
            rsync_exit_code = os.system(rsync_command)
            if rsync_exit_code == 0:
                break
            retry_count += 1
            time.sleep(retry_delay)

        if rsync_exit_code != 0:
            print(f"Error: rsync failed after {max_retries} attempts")
            errors += 1

    sys.exit(errors)


def main():
    parser = argparse.ArgumentParser(
        description="Pull backups from a remote backup host via rsync."
    )
    parser.add_argument(
        "hostname",
        help="Hostname from which backup should be pulled"
    )
    args = parser.parse_args()
    pull_backups(args.hostname)


if __name__ == "__main__":
    main()
roles/svc-bkp-rmt-2-loc/files/sys-bkp-rmt-2-loc.sh (new file, 85 lines)
@@ -0,0 +1,85 @@
#!/bin/bash
# @param $1 hostname from which backup should be pulled

echo "pulling backups from: $1" &&

# error counter
errors=0 &&

echo "loading meta data..." &&

remote_host="backup@$1" &&
echo "host address: $remote_host" &&

remote_machine_id="$( (ssh "$remote_host" sha256sum /etc/machine-id) | head -c 64 )" &&
echo "remote machine id: $remote_machine_id" &&

general_backup_machine_dir="/Backups/$remote_machine_id/" &&
echo "backup dir: $general_backup_machine_dir" &&

remote_backup_types="$(ssh "$remote_host" "find $general_backup_machine_dir -maxdepth 1 -type d -execdir basename {} ;")" &&
echo "backup types: $remote_backup_types" || exit 1

for backup_type in $remote_backup_types; do
  if [ "$backup_type" != "$remote_machine_id" ]; then
    echo "backup type: $backup_type" &&

    general_backup_type_dir="$general_backup_machine_dir""$backup_type/" &&
    general_versions_dir="$general_backup_type_dir" &&
    local_previous_version_dir="$(ls -d $general_versions_dir* | tail -1)" &&
    echo "last local backup: $local_previous_version_dir" &&

    remote_backup_versions="$(ssh "$remote_host" ls -d "$general_backup_type_dir"\*)" &&
    echo "remote backup versions: $remote_backup_versions" &&


    remote_last_backup_dir=$(echo "$remote_backup_versions" | tail -1) &&
    echo "last remote backup: $remote_last_backup_dir" &&

    remote_source_path="$remote_host:$remote_last_backup_dir/" &&
    echo "source path: $remote_source_path" &&

    local_backup_destination_path=$remote_last_backup_dir &&
    echo "backup destination: $local_backup_destination_path" &&

    echo "creating local backup destination folder..." &&
    mkdir -vp "$local_backup_destination_path" &&

    echo "starting backup..."
    rsync_command='rsync -abP --delete --delete-excluded --rsync-path="sudo rsync" --link-dest="'$local_previous_version_dir'" "'$remote_source_path'" "'$local_backup_destination_path'"'

    echo "executing: $rsync_command"

    retry_count=0
    max_retries=12
    retry_delay=300              # Retry delay in seconds (5 minutes)
    last_retry_start=0
    max_retry_duration=43200     # Maximum duration for a single retry attempt (12 hours)

    while [[ $retry_count -lt $max_retries ]]; do
      echo "Retry attempt: $((retry_count + 1))"
      if [[ $retry_count -gt 0 ]]; then
        current_time=$(date +%s)
        last_retry_duration=$((current_time - last_retry_start))
        if [[ $last_retry_duration -ge $max_retry_duration ]]; then
          echo "Last retry took more than 12 hours, increasing max retries to 12."
          max_retries=12
        fi
      fi
      last_retry_start=$(date +%s)
      eval "$rsync_command"
      rsync_exit_code=$?
      if [[ $rsync_exit_code -eq 0 ]]; then
        break
      fi
      retry_count=$((retry_count + 1))
      sleep $retry_delay
    done

    if [[ $rsync_exit_code -ne 0 ]]; then
      echo "Error: rsync failed after $max_retries attempts"
      ((errors += 1))
    fi
  fi
done
exit $errors;
@@ -10,15 +10,15 @@
- include_tasks: utils/run_once.yml
  when: run_once_svc_bkp_rmt_2_loc is not defined

- name: "Create Directory '{{ DOCKER_BACKUP_REMOTE_2_LOCAL_DIR }}'"
- name: "create {{ DOCKER_BACKUP_REMOTE_2_LOCAL_DIR }}"
  file:
    path: "{{ DOCKER_BACKUP_REMOTE_2_LOCAL_DIR }}"
    state: directory
    mode: "0755"

- name: "Deploy '{{ DOCKER_BACKUP_REMOTE_2_LOCAL_SCRIPT }}'"
- name: create svc-bkp-rmt-2-loc.sh
  copy:
    src: "{{ DOCKER_BACKUP_REMOTE_2_LOCAL_FILE }}"
    src: svc-bkp-rmt-2-loc.sh
    dest: "{{ DOCKER_BACKUP_REMOTE_2_LOCAL_SCRIPT }}"
    mode: "0755"

@@ -3,6 +3,6 @@
hosts="{{ DOCKER_BACKUP_REMOTE_2_LOCAL_BACKUP_PROVIDERS | join(' ') }}";
errors=0
for host in $hosts; do
  python {{ DOCKER_BACKUP_REMOTE_2_LOCAL_SCRIPT }} $host || ((errors+=1));
  bash {{ DOCKER_BACKUP_REMOTE_2_LOCAL_SCRIPT }} $host || ((errors+=1));
done;
exit $errors;

@@ -1,9 +1,5 @@
# General
application_id: svc-bkp-rmt-2-loc
system_service_id: "{{ application_id }}"

# Role Specific
system_service_id: "{{ application_id }}"
DOCKER_BACKUP_REMOTE_2_LOCAL_DIR: '{{ PATH_ADMINISTRATOR_SCRIPTS }}{{ application_id }}/'
DOCKER_BACKUP_REMOTE_2_LOCAL_FILE: 'pull-specific-host.py'
DOCKER_BACKUP_REMOTE_2_LOCAL_SCRIPT: "{{ [ DOCKER_BACKUP_REMOTE_2_LOCAL_DIR , DOCKER_BACKUP_REMOTE_2_LOCAL_FILE ] | path_join }}"
DOCKER_BACKUP_REMOTE_2_LOCAL_SCRIPT: "{{ DOCKER_BACKUP_REMOTE_2_LOCAL_DIR }}svc-bkp-rmt-2-loc.sh"
DOCKER_BACKUP_REMOTE_2_LOCAL_BACKUP_PROVIDERS: "{{ applications | get_app_conf(application_id, 'backup_providers') }}"
@@ -8,11 +8,6 @@ docker:
    image: "bitnamilegacy/openldap"
    name: "openldap"
    version: "latest"
    cpus: 1.25
    # Optimized for up to 5k users
    mem_reservation: 1g
    mem_limit: 1.5g
    pids_limit: 1024
  network: "openldap"
  volumes:
    data: "openldap_data"

@@ -16,12 +16,5 @@
      retries: 30
    networks:
      - default
{% macro include_resource_for(svc, indent=4) -%}
{% set service_name = svc -%}
{%- set _snippet -%}
{% include 'roles/docker-container/templates/resource.yml.j2' %}
{%- endset -%}
{{ _snippet | indent(indent, true) }}
{%- endmacro %}
{{ include_resource_for('redis') }}
{{ lookup('template', 'roles/docker-container/templates/resource.yml.j2',vars={'service_name':'redis'}) | indent(4) }}
{{ "\n" }}

@@ -13,7 +13,7 @@ get_backup_types="find /Backups/$hashed_machine_id/ -maxdepth 1 -type d -execdir


# @todo This configuration is not scalable yet. If other backup services than sys-ctl-bkp-docker-2-loc are integrated, this logic needs to be optimized.
get_version_directories="ls -d /Backups/$hashed_machine_id/backup-docker-to-local/*"
get_version_directories="ls -d /Backups/$hashed_machine_id/sys-ctl-bkp-docker-2-loc/*"
last_version_directory="$($get_version_directories | tail -1)"
rsync_command="sudo rsync --server --sender -blogDtpre.iLsfxCIvu . $last_version_directory/"
@@ -3,6 +3,30 @@
    name: backup
    create_home: yes

- name: create .ssh directory
  file:
    path: /home/backup/.ssh
    state: directory
    owner: backup
    group: backup
    mode: '0700'

- name: create /home/backup/.ssh/authorized_keys
  template:
    src: "authorized_keys.j2"
    dest: /home/backup/.ssh/authorized_keys
    owner: backup
    group: backup
    mode: '0644'

- name: create /home/backup/ssh-wrapper.sh
  copy:
    src: "ssh-wrapper.sh"
    dest: /home/backup/ssh-wrapper.sh
    owner: backup
    group: backup
    mode: '0700'

- name: grant backup sudo rights
  copy:
    src: "backup"

@@ -11,9 +35,3 @@
    owner: root
    group: root
  notify: sshd restart

- include_tasks: 02_permissions_ssh.yml

- include_tasks: 03_permissions_folders.yml

- include_tasks: utils/run_once.yml

@@ -1,23 +0,0 @@
- name: create .ssh directory
  file:
    path: /home/backup/.ssh
    state: directory
    owner: backup
    group: backup
    mode: '0700'

- name: create /home/backup/.ssh/authorized_keys
  template:
    src: "authorized_keys.j2"
    dest: /home/backup/.ssh/authorized_keys
    owner: backup
    group: backup
    mode: '0644'

- name: create /home/backup/ssh-wrapper.sh
  copy:
    src: "ssh-wrapper.sh"
    dest: /home/backup/ssh-wrapper.sh
    owner: backup
    group: backup
    mode: '0700'

@@ -1,66 +0,0 @@
# Ensure the backups root exists and is owned by backup
- name: Ensure backups root exists and owned by backup
  file:
    path: "{{ BACKUPS_FOLDER_PATH }}"
    state: directory
    owner: backup
    group: backup
    mode: "0700"

# Explicit ACL so 'backup' has rwx, others none
- name: Grant ACL rwx on backups root to backup user
  ansible.posix.acl:
    path: "{{ BACKUPS_FOLDER_PATH }}"
    entity: backup
    etype: user
    permissions: rwx
    state: present

# Set default ACLs so new entries inherit rwx for backup and nothing for others
- name: Set default ACL (inherit) for backup user under backups root
  ansible.posix.acl:
    path: "{{ BACKUPS_FOLDER_PATH }}"
    entity: backup
    etype: user
    permissions: rwx
    default: true
    state: present

# Remove default ACLs for group/others (defensive hardening)
# Default ACLs so new entries inherit only backup's rwx
- name: Default ACL for backup user (inherit)
  ansible.posix.acl:
    path: "{{ BACKUPS_FOLDER_PATH }}"
    etype: user
    entity: backup
    permissions: rwx
    default: true
    state: present

# Explicitly set default group/other to no permissions (instead of absent)
- name: Default ACL for group -> none
  ansible.posix.acl:
    path: "{{ BACKUPS_FOLDER_PATH }}"
    etype: group
    permissions: '---'
    default: true
    state: present

- name: Default ACL for other -> none
  ansible.posix.acl:
    path: "{{ BACKUPS_FOLDER_PATH }}"
    etype: other
    permissions: '---'
    default: true
    state: present

- name: Fix ownership level 0..2 directories to backup:backup
  ansible.builtin.shell: >
    find "{{ BACKUPS_FOLDER_PATH }}" -mindepth 0 -maxdepth 2 -xdev -type d -exec chown backup:backup {} +
  changed_when: false

- name: Fix perms level 0..2 directories to 0700
  ansible.builtin.shell: >
    find "{{ BACKUPS_FOLDER_PATH }}" -mindepth 0 -maxdepth 2 -xdev -type d -exec chmod 700 {} +
  changed_when: false
@@ -1,2 +1,4 @@
- include_tasks: 01_core.yml
- block:
    - include_tasks: 01_core.yml
    - include_tasks: utils/run_once.yml
  when: run_once_sys_bkp_provider_user is not defined

@@ -1,7 +1,8 @@
- name: Include dependencies
  include_role:
    name: "sys-svc-msmtp"
  when: run_once_sys_svc_msmtp is not defined or run_once_sys_svc_msmtp is false
    name: '{{ item }}'
  loop:
    - sys-svc-msmtp

- include_role:
    name: sys-service

@@ -1,6 +1,9 @@
- include_tasks: 01_core.yml
  when: run_once_sys_ctl_bkp_docker_2_loc is not defined
- block:
    - include_tasks: 01_core.yml
  when:
    - run_once_sys_ctl_bkp_docker_2_loc is not defined

- name: "include 04_seed-database-to-backup.yml"
  include_tasks: 04_seed-database-to-backup.yml
  when: BKP_DOCKER_2_LOC_DB_ENABLED | bool
  when:
    - BKP_DOCKER_2_LOC_DB_ENABLED | bool

@@ -17,7 +17,7 @@
    name: sys-service
  vars:
    system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
    system_service_tpl_exec_start: "{{ system_service_script_exec }} --backups-folder-path {{ BACKUPS_FOLDER_PATH }} --maximum-backup-size-percent {{ SIZE_PERCENT_MAXIMUM_BACKUP }}"
    system_service_tpl_exec_start: "{{ system_service_script_exec }} --backups-folder-path {{ BACKUPS_FOLDER_PATH }} --maximum-backup-size-percent {{SIZE_PERCENT_MAXIMUM_BACKUP}}"
    system_service_tpl_exec_start_pre: '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(" ") }} --ignore {{ SYS_SERVICE_GROUP_CLEANUP | join(" ") }} --timeout "{{ SYS_TIMEOUT_BACKUP_SERVICES }}"'
    system_service_copy_files: true
    system_service_force_linear_sync: false
@@ -39,18 +39,6 @@ if [ "$force_freeing" = true ]; then
|
||||
docker exec -u www-data $nextcloud_application_container /var/www/html/occ versions:cleanup || exit 6
|
||||
fi
|
||||
|
||||
# Mastodon cleanup (remote media cache)
|
||||
mastodon_application_container="{{ applications | get_app_conf('web-app-mastodon', 'docker.services.mastodon.name') }}"
|
||||
mastodon_cleanup_days="1"
|
||||
|
||||
if [ -n "$mastodon_application_container" ] && docker ps -a --format '{% raw %}{{.Names}}{% endraw %}' | grep -qw "$mastodon_application_container"; then
|
||||
echo "Cleaning up Mastodon media cache (older than ${mastodon_cleanup_days} days)" &&
|
||||
docker exec -u root "$mastodon_application_container" bash -lc "bin/tootctl media remove --days=${mastodon_cleanup_days}" || exit 8
|
||||
|
||||
# Optional: additionally remove local thumbnail/cache files older than X days
|
||||
# Warning: these will be regenerated when accessed, which may cause extra CPU/I/O load
|
||||
# docker exec -u root "$mastodon_application_container" bash -lc "find /mastodon/public/system/cache -type f -mtime +${mastodon_cleanup_days} -delete" || exit 9
|
||||
fi
|
||||
fi
|
||||
|
||||
if command -v pacman >/dev/null 2>&1 ; then
|
||||
|
||||
@@ -1,16 +0,0 @@
- name: "Load CDN for '{{ domain }}'"
  include_role:
    name: web-svc-cdn
    public: false
  when:
    - application_id != 'web-svc-cdn'
    - run_once_web_svc_cdn is not defined

- name: Load Logout for '{{ domain }}'
  include_role:
    name: web-svc-logout
    public: false
  when:
    - run_once_web_svc_logout is not defined
    - application_id != 'web-svc-logout'
    - inj_enabled.logout

@@ -1,41 +1,22 @@
- block:
    - name: Include dependency 'sys-svc-webserver-core'
      include_role:
        name: sys-svc-webserver-core
      when: run_once_sys_svc_webserver_core is not defined
    - include_tasks: utils/run_once.yml
  when: run_once_sys_front_inj_all is not defined

- name: Build inj_enabled
  set_fact:
    inj_enabled: "{{ applications | inj_enabled(application_id, SRV_WEB_INJ_COMP_FEATURES_ALL) }}"

- name: "Included dependent services"
  include_tasks: 01_dependencies.yml
  vars:
    proxy_extra_configuration: ""
- name: "Load CDN Service for '{{ domain }}'"
  include_role:
    name: sys-svc-cdn
    public: true  # Expose variables so that they can be used in all injection roles

- name: Reinitialize 'inj_enabled' for '{{ domain }}', after loading the required webservices
- name: Reinitialize 'inj_enabled' for '{{ domain }}', after modification by CDN
  set_fact:
    inj_enabled: "{{ applications | inj_enabled(application_id, SRV_WEB_INJ_COMP_FEATURES_ALL) }}"
    inj_head_features: "{{ SRV_WEB_INJ_COMP_FEATURES_ALL | inj_features('head') }}"
    inj_body_features: "{{ SRV_WEB_INJ_COMP_FEATURES_ALL | inj_features('body') }}"

- name: "Load CDN Service for '{{ domain }}'"
  include_role:
    name: sys-svc-cdn
    public: true

- name: "Activate logout proxy for '{{ domain }}'"
  include_role:
    name: sys-front-inj-logout
    public: true
  when: inj_enabled.logout

- name: "Activate Desktop iFrame notifier for '{{ domain }}'"
  include_role:
    name: sys-front-inj-desktop
    public: true
    public: true  # Vars used in templates
  when: inj_enabled.desktop

- name: "Activate Corporate CSS for '{{ domain }}'"

@@ -52,3 +33,17 @@
  include_role:
    name: sys-front-inj-javascript
  when: inj_enabled.javascript

- name: "Activate logout proxy for '{{ domain }}'"
  include_role:
    name: sys-front-inj-logout
    public: true  # Vars used in templates
  when: inj_enabled.logout

- block:
    - name: Include dependency 'sys-svc-webserver-core'
      include_role:
        name: sys-svc-webserver-core
      when: run_once_sys_svc_webserver_core is not defined
    - include_tasks: utils/run_once.yml
  when: run_once_sys_front_inj_all is not defined
@@ -10,6 +10,17 @@

lua_need_request_body on;

header_filter_by_lua_block {
    local ct = ngx.header.content_type or ""
    if ct:lower():find("^text/html") then
        ngx.ctx.is_html = true
        -- IMPORTANT: body will be modified → drop Content-Length to avoid mismatches
        ngx.header.content_length = nil
    else
        ngx.ctx.is_html = false
    end
}

body_filter_by_lua_block {
    -- Only process HTML responses
    if not ngx.ctx.is_html then
@@ -1,3 +1,8 @@
- name: Include dependency 'sys-svc-webserver-core'
  include_role:
    name: sys-svc-webserver-core
  when: run_once_sys_svc_webserver_core is not defined

- name: Generate color palette with colorscheme-generator
  set_fact:
    color_palette: "{{ lookup('colorscheme', CSS_BASE_COLOR, count=CSS_COUNT, shades=CSS_SHADES) }}"

@@ -14,5 +19,3 @@
    group: "{{ NGINX.USER }}"
    mode: '0644'
  loop: "{{ CSS_FILES }}"

- include_tasks: utils/run_once.yml

@@ -1,4 +1,6 @@
- include_tasks: 01_core.yml
- block:
    - include_tasks: 01_core.yml
    - include_tasks: utils/run_once.yml
  when: run_once_sys_front_inj_css is not defined

- name: "Resolve optional app style.css source for '{{ application_id }}'"

@@ -3,6 +3,6 @@
{% for css_file in ['default.css','bootstrap.css'] %}
<link rel="stylesheet" href="{{ [ cdn_urls.shared.css, css_file, lookup('local_mtime_qs', [__css_tpl_dir, css_file ~ '.j2'] | path_join)] | url_join }}">
{% endfor %}
{% if app_style_present | default(false) | bool %}
{% if app_style_present | bool %}
<link rel="stylesheet" href="{{ [ cdn_urls.role.release.css, 'style.css', lookup('local_mtime_qs', app_style_src)] | url_join }}">
{% endif %}
@@ -1,4 +1,8 @@
- block:
    - name: Include dependency 'sys-svc-webserver-core'
      include_role:
        name: sys-svc-webserver-core
      when: run_once_sys_svc_webserver_core is not defined
    - include_tasks: 01_deploy.yml
    - include_tasks: utils/run_once.yml
  when: run_once_sys_front_inj_desktop is not defined

@@ -1,4 +1,11 @@
# run_once_sys_front_inj_javascript: deactivated
- block:

    - name: Include dependency 'sys-svc-webserver-core'
      include_role:
        name: sys-svc-webserver-core
      when: run_once_sys_svc_webserver_core is not defined
    - include_tasks: utils/run_once.yml
  when: run_once_sys_front_inj_javascript is not defined

- name: "Load JavaScript code for '{{ application_id }}'"
  set_fact:
@@ -1,6 +1,8 @@
- name: Include dependency 'sys-svc-webserver-core'
  include_role:
    name: sys-svc-webserver-core
  when:
    - run_once_sys_svc_webserver_core is not defined

- name: "deploy the logout.js"
  include_tasks: "02_deploy.yml"

- set_fact:
    run_once_sys_front_inj_logout: true
  changed_when: false
  include_tasks: "02_deploy.yml"

@@ -1,10 +1,10 @@
- name: Deploy logout.js
  copy:
    src: logout.js
    dest: "{{ INJ_LOGOUT_JS_DESTINATION }}"
    owner: "{{ NGINX.USER }}"
    group: "{{ NGINX.USER }}"
    mode: '0644'
  template:
    src: logout.js.j2
    dest: "{{ INJ_LOGOUT_JS_DESTINATION }}"
    owner: "{{ NGINX.USER }}"
    group: "{{ NGINX.USER }}"
    mode: '0644'

- name: Get stat for logout.js
  stat:

@@ -1,16 +1,16 @@
- name: "Load base for '{{ application_id }}'"
  include_tasks: 01_core.yml
- block:
    - include_tasks: 01_core.yml
    - set_fact:
        run_once_sys_front_inj_logout: true
  when: run_once_sys_front_inj_logout is not defined

- name: "Load logout code for '{{ application_id }}'"
  set_fact:
    logout_code: "{{ lookup('template', 'logout_one_liner.js.j2') }}"
  changed_when: false

- name: "Collapse logout code into one-liner for '{{ application_id }}'"
  set_fact:
    logout_code_one_liner: "{{ logout_code | to_one_liner }}"
  changed_when: false

- name: "Append logout CSP hash for '{{ application_id }}'"
  set_fact:

@@ -1 +1 @@
<script src="{{ cdn_urls.shared.js }}/{{ INJ_LOGOUT_JS_FILE_NAME }}{{ lookup('local_mtime_qs', [playbook_dir, 'roles', 'sys-front-inj-logout', 'files', INJ_LOGOUT_JS_FILE_NAME] | path_join) }}"></script>
<script src="{{ cdn_urls.shared.js }}/{{ INJ_LOGOUT_JS_FILE_NAME }}{{ lookup('local_mtime_qs', [playbook_dir, 'roles', 'sys-front-inj-logout', 'templates', INJ_LOGOUT_JS_FILE_NAME ~ '.j2'] | path_join) }}"></script>
@@ -1,4 +1,10 @@
# run_once_sys_front_inj_matomo: deactivated
- block:
    - name: Include dependency 'sys-svc-webserver-core'
      include_role:
        name: sys-svc-webserver-core
      when: run_once_sys_svc_webserver_core is not defined
    - include_tasks: utils/run_once.yml
  when: run_once_sys_front_inj_matomo is not defined

- name: "Relevant variables for role: {{ role_path | basename }}"
  debug:
roles/sys-svc-cdn/tasks/01_core.yml (new file, 21 lines)
@@ -0,0 +1,21 @@
- name: "Load CDN for '{{ domain }}'"
  include_role:
    name: web-svc-cdn
    public: false
  when:
    - application_id != 'web-svc-cdn'
    - run_once_web_svc_cdn is not defined

# ------------------------------------------------------------------
# Only-once creations (shared root and vendor)
# ------------------------------------------------------------------
- name: Ensure shared root and vendor exist (run once)
  ansible.builtin.file:
    path: "{{ item }}"
    state: directory
    owner: "{{ NGINX.USER }}"
    group: "{{ NGINX.USER }}"
    mode: "0755"
  loop: "{{ CDN_DIRS_GLOBAL }}"

- include_tasks: utils/run_once.yml

@@ -1,14 +1,6 @@
---
- block:
    - name: Ensure shared root and vendor exist (run once)
      ansible.builtin.file:
        path: "{{ item }}"
        state: directory
        owner: "{{ NGINX.USER }}"
        group: "{{ NGINX.USER }}"
        mode: "0755"
      loop: "{{ CDN_DIRS_GLOBAL }}"
    - include_tasks: utils/run_once.yml
    - include_tasks: 01_core.yml
  when:
    - run_once_sys_svc_cdn is not defined
@@ -1,3 +1,3 @@
ssl_certificate         {{ [ LETSENCRYPT_LIVE_PATH | mandatory, ssl_cert_folder | mandatory, 'fullchain.pem'] | path_join }};
ssl_certificate_key     {{ [ LETSENCRYPT_LIVE_PATH | mandatory, ssl_cert_folder | mandatory, 'privkey.pem' ] | path_join }};
ssl_trusted_certificate {{ [ LETSENCRYPT_LIVE_PATH | mandatory, ssl_cert_folder | mandatory, 'chain.pem' ] | path_join }};
ssl_certificate         {{ [ LETSENCRYPT_LIVE_PATH, ssl_cert_folder, 'fullchain.pem'] | path_join }};
ssl_certificate_key     {{ [ LETSENCRYPT_LIVE_PATH, ssl_cert_folder, 'privkey.pem' ] | path_join }};
ssl_trusted_certificate {{ [ LETSENCRYPT_LIVE_PATH, ssl_cert_folder, 'chain.pem' ] | path_join }};
@@ -14,7 +14,4 @@

- include_role:
    name: sys-ctl-hlth-msmtp
  when: run_once_sys_ctl_hlth_msmtp is not defined

- set_fact:
    run_once_sys_svc_msmtp: true
  when: run_once_sys_ctl_hlth_msmtp is not defined

@@ -1,6 +1,5 @@
- name: "Load MSMTP Core Once"
  include_tasks: 01_core.yml
  when:
    - run_once_sys_svc_msmtp is not defined or run_once_sys_svc_msmtp is false
    # Just execute when mailu_token is defined
    - users['no-reply'].mailu_token is defined
- block:
    - include_tasks: 01_core.yml
    - set_fact:
        run_once_sys_svc_msmtp: true
  when: run_once_sys_svc_msmtp is not defined
@@ -1,33 +1,2 @@
# ===== Content Security Policy: only for documents and workers (no locations needed) =====

# 1) Define your CSP once (Jinja: escape double quotes to be safe)
set $csp "{{ applications | build_csp_header(application_id, domains) | replace('\"','\\\"') }}";

# 2) Send CSP ONLY for document responses; also for workers via Sec-Fetch-Dest
header_filter_by_lua_block {
local ct = ngx.header.content_type or ngx.header["Content-Type"] or ""
local dest = ngx.var.http_sec_fetch_dest or ""

local lct = ct:lower()
local is_html = lct:find("^text/html") or lct:find("^application/xhtml+xml")
local is_worker = (dest == "worker") or (dest == "serviceworker")

if is_html or is_worker then
ngx.header["Content-Security-Policy"] = ngx.var.csp
else
ngx.header["Content-Security-Policy"] = nil
ngx.header["Content-Security-Policy-Report-Only"] = nil
end

-- If you'll modify the body later, drop Content-Length on HTML
if is_html then
ngx.ctx.is_html = true
ngx.header.content_length = nil
else
ngx.ctx.is_html = false
end
}

# 3) Prevent upstream/app CSP (duplicates)
proxy_hide_header Content-Security-Policy;
proxy_hide_header Content-Security-Policy-Report-Only;
add_header Content-Security-Policy "{{ applications | build_csp_header(application_id, domains) }}" always;
proxy_hide_header Content-Security-Policy; # Todo: Make this optional
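With the Lua filter gone, the header is expected on every response from the proxy, not only on HTML documents and workers. A minimal check from a client (the domain is a placeholder for one of the configured vhosts):

```bash
# Placeholder domain; compare a document and a static asset.
curl -sI https://app.example.org/ | grep -i '^content-security-policy'
curl -sI https://app.example.org/favicon.ico | grep -i '^content-security-policy'
```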
@@ -68,12 +68,7 @@ ChallengeResponseAuthentication no
#KerberosGetAFSToken no

# GSSAPI options
# Disable GSSAPI (Kerberos) authentication to avoid unnecessary negotiation delays.
# This setting is useful for non-domain environments where GSSAPI is not used,
# improving SSH connection startup time and reducing overhead.
# See: https://chatgpt.com/share/68efc179-1a10-800f-9656-1e8731b40546
GSSAPIAuthentication no

#GSSAPIAuthentication no
#GSSAPICleanupCredentials yes

# Set this to 'yes' to enable PAM authentication, account processing,
@@ -102,13 +97,7 @@ PrintMotd no # pam does that
#Compression delayed
#ClientAliveInterval 0
#ClientAliveCountMax 3

# Disable reverse DNS lookups to speed up SSH logins.
# When UseDNS is enabled, sshd performs a reverse DNS lookup for each connecting client,
# which can significantly delay authentication if DNS resolution is slow or misconfigured.
# See: https://chatgpt.com/share/68efc179-1a10-800f-9656-1e8731b40546
UseDNS no

#UseDNS no
#PidFile /run/sshd.pid
#MaxStartups 10:30:100
#PermitTunnel no

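The latency effect of `GSSAPIAuthentication no` and `UseDNS no` is easy to approximate from a client by timing a no-op login before and after rolling out the new sshd_config; the host name is a placeholder:

```bash
# Disable connection multiplexing for the test run so an existing control socket does not skew the timing.
time ssh -o ControlMaster=no admin@server.example.org true
```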
@@ -5,7 +5,7 @@ users:
username: "{{ PRIMARY_DOMAIN.split('.')[0] }}"
tld:
description: "Auto Generated Account to reserve the TLD"
username: "{{ PRIMARY_DOMAIN.split('.')[1] if (PRIMARY_DOMAIN is defined and (PRIMARY_DOMAIN.split('.') | length) > 1) else (PRIMARY_DOMAIN ~ '_tld ') }}"
username: "{{ PRIMARY_DOMAIN.split('.')[1] }}"
root:
username: root
uid: 0

@@ -18,10 +18,10 @@ server:
flags:
script-src-elem:
unsafe-inline: true
script-src-attr:
script-src:
unsafe-inline: true
unsafe-eval: true
style-src-attr:
style-src:
unsafe-inline: true
whitelist:
font-src:
||||
|
||||
@@ -19,7 +19,7 @@ docker:
|
||||
name: "baserow"
|
||||
cpus: 1.0
|
||||
mem_reservation: 0.5g
|
||||
mem_limit: 2g
|
||||
mem_limit: 1g
|
||||
pids_limit: 512
|
||||
volumes:
|
||||
data: "baserow_data"
|
||||
@@ -37,5 +37,5 @@ server:
|
||||
flags:
|
||||
script-src-elem:
|
||||
unsafe-inline: true
|
||||
style-src-attr:
|
||||
style-src:
|
||||
unsafe-inline: true
|
||||
@@ -13,7 +13,7 @@ server:
|
||||
flags:
|
||||
script-src-elem:
|
||||
unsafe-inline: true
|
||||
style-src-attr:
|
||||
style-src:
|
||||
unsafe-inline: true
|
||||
domains:
|
||||
canonical:
|
||||
|
||||
@@ -14,20 +14,13 @@
|
||||
name: sys-stk-full-stateless
|
||||
vars:
|
||||
docker_compose_flush_handlers: false
|
||||
- name: "include 04_seed-database-to-backup.yml"
|
||||
include_tasks: "{{ [ playbook_dir, 'roles/sys-ctl-bkp-docker-2-loc/tasks/04_seed-database-to-backup.yml' ] | path_join }}"
|
||||
|
||||
- name: "Unset 'proxy_extra_configuration'"
|
||||
set_fact:
|
||||
proxy_extra_configuration: null
|
||||
|
||||
- name: "Include Seed routines for '{{ application_id }}' database backup"
|
||||
include_tasks: "{{ [ playbook_dir, 'roles/sys-ctl-bkp-docker-2-loc/tasks/04_seed-database-to-backup.yml' ] | path_join }}"
|
||||
vars:
|
||||
database_type: "postgres"
|
||||
database_instance: "{{ entity_name }}"
|
||||
database_password: "{{ applications | get_app_conf(application_id, 'credentials.postgresql_secret') }}"
|
||||
database_username: "postgres"
|
||||
database_name: "" # Multiple databases
|
||||
|
||||
- name: configure websocket_upgrade.conf
|
||||
copy:
|
||||
src: "websocket_upgrade.conf"
|
||||
|
||||
@@ -2,6 +2,13 @@
|
||||
application_id: "web-app-bigbluebutton"
|
||||
entity_name: "{{ application_id | get_entity_name }}"
|
||||
|
||||
# Database configuration
|
||||
database_type: "postgres"
|
||||
database_instance: "{{ application_id | get_entity_name }}"
|
||||
database_password: "{{ applications | get_app_conf(application_id, 'credentials.postgresql_secret') }}"
|
||||
database_username: "postgres"
|
||||
database_name: "" # Multiple databases
|
||||
|
||||
# Proxy
|
||||
domain: "{{ domains | get_domain(application_id) }}"
|
||||
http_port: "{{ ports.localhost.http[application_id] }}"
|
||||
|
||||
@@ -27,7 +27,7 @@ server:
|
||||
flags:
|
||||
script-src-elem:
|
||||
unsafe-inline: true
|
||||
script-src-attr:
|
||||
script-src:
|
||||
unsafe-inline: true
|
||||
domains:
|
||||
canonical:
|
||||
|
||||
@@ -29,7 +29,7 @@ server:
|
||||
flags:
|
||||
script-src-elem:
|
||||
unsafe-inline: true
|
||||
script-src-attr:
|
||||
script-src:
|
||||
unsafe-inline: true
|
||||
domains:
|
||||
canonical:
|
||||
|
||||
@@ -15,8 +15,6 @@ server:
|
||||
- https://code.jquery.com/
|
||||
style-src-elem:
|
||||
- https://cdn.jsdelivr.net
|
||||
- https://kit.fontawesome.com
|
||||
- https://code.jquery.com/
|
||||
font-src:
|
||||
- https://ka-f.fontawesome.com
|
||||
- https://cdn.jsdelivr.net
|
||||
@@ -27,7 +25,7 @@ server:
|
||||
frame-src:
|
||||
- "{{ WEB_PROTOCOL }}://*.{{ PRIMARY_DOMAIN }}"
|
||||
flags:
|
||||
script-src-attr:
|
||||
script-src:
|
||||
unsafe-inline: true
|
||||
domains:
|
||||
canonical:
|
||||
|
||||
@@ -17,8 +17,6 @@
|
||||
- name: "load docker, proxy for '{{ application_id }}'"
|
||||
include_role:
|
||||
name: sys-stk-full-stateless
|
||||
vars:
|
||||
docker_compose_flush_handlers: false
|
||||
|
||||
- name: "Check if host-specific config.yaml exists in {{ DESKTOP_CONFIG_INV_PATH }}"
|
||||
stat:
|
||||
@@ -59,16 +57,8 @@
|
||||
notify: docker compose up
|
||||
when: not config_file.stat.exists
|
||||
|
||||
- name: "Flush docker compose handlers"
|
||||
meta: flush_handlers
|
||||
|
||||
- name: Wait for Desktop HTTP endpoint (required so all logos can be downloaded during initialization)
|
||||
uri:
|
||||
url: "http://127.0.0.1:{{ http_port }}/"
|
||||
status_code: 200
|
||||
register: desktop_http
|
||||
retries: 60
|
||||
delay: 5
|
||||
until: desktop_http.status == 200
|
||||
|
||||
- include_tasks: utils/run_once.yml
|
||||
- name: add docker-compose.yml
|
||||
template:
|
||||
src: docker-compose.yml.j2
|
||||
dest: "{{ docker_compose.directories.instance }}docker-compose.yml"
|
||||
notify: docker compose up
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
---
|
||||
- include_tasks: 01_core.yml
|
||||
- block:
|
||||
- include_tasks: 01_core.yml
|
||||
- include_tasks: utils/run_once.yml
|
||||
when: run_once_web_app_desktop is not defined
|
||||
@@ -1,6 +1,5 @@
|
||||
# General
|
||||
application_id: "web-app-desktop"
|
||||
http_port: "{{ ports.localhost.http[application_id] }}"
|
||||
|
||||
## Webserver
|
||||
proxy_extra_configuration: "{{ lookup('template', 'nginx/sso.html.conf.j2') }}"
|
||||
|
||||
@@ -10,7 +10,7 @@ features:
|
||||
server:
|
||||
csp:
|
||||
flags:
|
||||
style-src-attr:
|
||||
style-src:
|
||||
unsafe-inline: true
|
||||
script-src-elem:
|
||||
unsafe-inline: true
|
||||
@@ -43,10 +43,9 @@ plugins:
|
||||
enabled: true
|
||||
discourse-akismet:
|
||||
enabled: true
|
||||
# The following plugins moved to the default setup
|
||||
# discourse-cakeday:
|
||||
# enabled: true
|
||||
# discourse-solved:
|
||||
discourse-cakeday:
|
||||
enabled: true
|
||||
# discourse-solved: Seems like this plugin is now also part of the default setup
|
||||
# enabled: true
|
||||
# discourse-voting:
|
||||
# enabled: true
|
||||
|
||||
@@ -6,6 +6,4 @@
|
||||
include_tasks: 03_docker.yml
|
||||
|
||||
- name: "Setup '{{ application_id }}' network"
|
||||
include_tasks: 04_network.yml
|
||||
|
||||
- include_tasks: utils/run_once.yml
|
||||
include_tasks: 04_network.yml
|
||||
@@ -1,4 +1,6 @@
|
||||
---
|
||||
- name: "Setup {{ application_id }}"
|
||||
include_tasks: 01_core.yml
|
||||
when: run_once_web_app_discourse is not defined
|
||||
block:
|
||||
- include_tasks: 01_core.yml
|
||||
- include_tasks: utils/run_once.yml
|
||||
@@ -1,29 +0,0 @@
|
||||
# Administration
|
||||
|
||||
## Shell access
|
||||
|
||||
```bash
|
||||
docker-compose exec -it application /bin/bash
|
||||
```
|
||||
|
||||
## Drush (inside the container)
|
||||
|
||||
```bash
|
||||
drush --version
|
||||
drush cr # Cache rebuild
|
||||
drush status # Site status
|
||||
drush cim -y # Config import (if using config sync)
|
||||
drush updb -y # Run DB updates
|
||||
```
|
||||
|
||||
## Database access (local DB service)
|
||||
|
||||
```bash
|
||||
docker-compose exec -it database /bin/mysql -u drupal -p
|
||||
```
|
||||
|
||||
## Test Email
|
||||
|
||||
```bash
|
||||
docker-compose exec -it application /bin/bash -lc 'echo "Test Email" | sendmail -v your-email@example.com'
|
||||
```
|
||||
@@ -1,32 +0,0 @@
|
||||
# Drupal
|
||||
|
||||
## Description
|
||||
|
||||
[Drupal](https://www.drupal.org/) is a powerful open-source CMS for building secure, extensible, and content-rich digital experiences.
|
||||
This role deploys a containerized **Drupal 10/11** instance optimized for production, including **msmtp** for outbound email, **Drush** for CLI administration, and **OpenID Connect (OIDC)** for SSO (e.g., Keycloak, Auth0, Azure AD).
|
||||
|
||||
## Overview
|
||||
|
||||
* **Flexible Content Model:** Entities, fields, and views for complex data needs.
|
||||
* **Security & Roles:** Fine-grained access control and active security team.
|
||||
* **Robust Ecosystem:** Thousands of modules and themes.
|
||||
* **CLI Automation:** Drush for installs, updates, and configuration import.
|
||||
* **OIDC SSO:** First-class login via external Identity Providers.
|
||||
|
||||
This automated Docker Compose deployment builds a custom Drupal image with Drush and msmtp, wires database credentials and config overrides via environment, and applies OIDC configuration via Ansible/Drush.
|
||||
|
||||
## OIDC
|
||||
|
||||
This role enables **OpenID Connect** via the `openid_connect` module and configures a **client entity** (e.g., `keycloak`) including endpoints and scopes. Global OIDC behavior (auto-create, link existing users, privacy) is set via `openid_connect.settings`.
|
||||
|
||||
## Further Resources
|
||||
|
||||
* [Drupal.org](https://www.drupal.org/)
|
||||
* [OpenID Connect module](https://www.drupal.org/project/openid_connect)
|
||||
|
||||
## Credits
|
||||
|
||||
Developed and maintained by **Kevin Veen-Birkenbach**
|
||||
Learn more at [veen.world](https://veen.world)
|
||||
Part of the [Infinito.Nexus Project](https://s.infinito.nexus/code)
|
||||
License: [Infinito.Nexus NonCommercial License](https://s.infinito.nexus/license)
|
||||
@@ -1,37 +0,0 @@
|
||||
title: "Site"
|
||||
max_upload_size: "256M"
|
||||
features:
|
||||
matomo: true
|
||||
css: false
|
||||
desktop: true
|
||||
oidc: true
|
||||
central_database: true
|
||||
logout: true
|
||||
server:
|
||||
csp:
|
||||
flags: {}
|
||||
whitelist: {}
|
||||
domains:
|
||||
canonical:
|
||||
- "drupal.{{ PRIMARY_DOMAIN }}"
|
||||
aliases: []
|
||||
docker:
|
||||
services:
|
||||
database:
|
||||
enabled: true
|
||||
drupal:
|
||||
version: latest
|
||||
image: drupal
|
||||
name: drupal
|
||||
backup:
|
||||
no_stop_required: true
|
||||
volumes:
|
||||
data: drupal_data
|
||||
rbac:
|
||||
roles:
|
||||
authenticated:
|
||||
description: "Logged-in user"
|
||||
content_editor:
|
||||
description: "Can create and edit content"
|
||||
site_admin:
|
||||
description: "Full site administration"
|
||||
@@ -1,23 +0,0 @@
|
||||
galaxy_info:
|
||||
author: "Kevin Veen-Birkenbach"
|
||||
description: >
|
||||
Drupal CMS in Docker with Drush, msmtp, and OpenID Connect (OIDC) SSO.
|
||||
license: "Infinito.Nexus NonCommercial License"
|
||||
license_url: "https://s.infinito.nexus/license"
|
||||
company: |
|
||||
Kevin Veen-Birkenbach
|
||||
Consulting & Coaching Solutions
|
||||
https://www.veen.world
|
||||
galaxy_tags:
|
||||
- drupal
|
||||
- docker
|
||||
- cms
|
||||
- oidc
|
||||
- sso
|
||||
repository: "https://s.infinito.nexus/code"
|
||||
issue_tracker_url: "https://s.infinito.nexus/issues"
|
||||
documentation: "https://docs.infinito.nexus"
|
||||
logo:
|
||||
class: "fa-solid fa-droplet"
|
||||
run_after:
|
||||
- web-app-keycloak
|
||||
@@ -1,9 +0,0 @@
|
||||
credentials:
|
||||
administrator_password:
|
||||
description: "Initial password for the Drupal admin account"
|
||||
algorithm: "sha256"
|
||||
validation: "^[a-f0-9]{64}$"
|
||||
hash_salt:
|
||||
description: "Drupal hash_salt value used for one-time logins, CSRF tokens, etc."
|
||||
algorithm: "sha256"
|
||||
validation: "^[a-f0-9]{64}$"
|
||||
@@ -1,25 +0,0 @@
|
||||
- name: "Ensure settings.php exists and includes settings.local.php"
|
||||
command: >
|
||||
docker exec -u root {{ DRUPAL_CONTAINER }} bash -lc
|
||||
"set -e;
|
||||
f='{{ DRUPAL_DOCKER_CONF_PATH }}/settings.php';
|
||||
df='{{ DRUPAL_DOCKER_CONF_PATH }}/default.settings.php';
|
||||
if [ ! -f \"$f\" ] && [ -f \"$df\" ]; then
|
||||
cp \"$df\" \"$f\";
|
||||
chown www-data:www-data \"$f\";
|
||||
chmod 644 \"$f\";
|
||||
fi;
|
||||
php -r '
|
||||
$f=\"{{ DRUPAL_DOCKER_CONF_PATH }}/settings.php\";
|
||||
if (!file_exists($f)) { exit(0); }
|
||||
$c=file_get_contents($f);
|
||||
$inc=\"\\nif (file_exists(\\\"\$app_root/\$site_path/settings.local.php\\\")) { include \$app_root/\$site_path/settings.local.php; }\\n\";
|
||||
if (strpos($c, \"settings.local.php\") === false) {
|
||||
file_put_contents($f, $c.$inc);
|
||||
echo \"patched\";
|
||||
} else {
|
||||
echo \"exists\";
|
||||
}
|
||||
'"
|
||||
register: settings_local_include
|
||||
changed_when: "'patched' in settings_local_include.stdout"
|
||||
@@ -1,15 +0,0 @@
|
||||
- name: "Run Drupal site:install via Drush"
|
||||
no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"
|
||||
command: >
|
||||
docker exec {{ DRUPAL_CONTAINER }} bash -lc
|
||||
"/var/www/html/vendor/bin/drush -r {{ DRUPAL_DOCKER_HTML_PATH }} si standard -y
|
||||
--site-name='{{ applications | get_app_conf(application_id, 'title', True) }}'
|
||||
--account-name='{{ applications | get_app_conf(application_id, 'users.administrator.username') }}'
|
||||
--account-mail='{{ applications | get_app_conf(application_id, 'users.administrator.email', True) }}'
|
||||
--account-pass='{{ applications | get_app_conf(application_id, 'credentials.administrator_password', True) }}'
|
||||
--uri='{{ DRUPAL_URL }}'"
|
||||
args:
|
||||
chdir: "{{ docker_compose.directories.instance }}"
|
||||
register: drupal_install
|
||||
changed_when: "'Installation complete' in drupal_install.stdout"
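If the installer step needs to be verified by hand, a status query inside the container is usually enough; this is a minimal sketch, with `drupal` standing in for the role's `DRUPAL_CONTAINER` value:

```bash
# Placeholder container name; bootstrap should report "Successful" and db-status "Connected" after site:install.
docker exec drupal bash -lc '/var/www/html/vendor/bin/drush status --fields=bootstrap,db-status'
```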
|
||||
failed_when: false
|
||||
@@ -1,12 +0,0 @@
|
||||
- name: "Enable OpenID Connect core module"
|
||||
command: >
|
||||
docker exec {{ DRUPAL_CONTAINER }} bash -lc
|
||||
"/var/www/html/vendor/bin/drush -r {{ DRUPAL_DOCKER_HTML_PATH }} en openid_connect -y"
|
||||
changed_when: true
|
||||
|
||||
- name: "Enable OpenID Connect Keycloak preset (submodule of openid_connect)"
|
||||
command: >
|
||||
docker exec {{ DRUPAL_CONTAINER }} bash -lc
|
||||
"/var/www/html/vendor/bin/drush -r {{ DRUPAL_DOCKER_HTML_PATH }} en openid_connect_client_keycloak -y"
|
||||
changed_when: true
|
||||
failed_when: false
|
||||
@@ -1,59 +0,0 @@
|
||||
- name: "Load OIDC vars"
|
||||
include_vars:
|
||||
file: "{{ role_path }}/vars/oidc.yml"
|
||||
name: oidc_vars
|
||||
|
||||
- name: "Apply openid_connect.settings (global)"
|
||||
loop: "{{ oidc_vars.oidc_settings | dict2items }}"
|
||||
loop_control:
|
||||
label: "{{ item.key }}"
|
||||
command: >
|
||||
docker exec {{ DRUPAL_CONTAINER }} bash -lc
|
||||
"/var/www/html/vendor/bin/drush -r {{ DRUPAL_DOCKER_HTML_PATH }} cset -y
|
||||
openid_connect.settings {{ item.key }}
|
||||
{{ (item.value | to_json) if item.value is mapping or item.value is sequence else item.value }}"
|
||||
|
||||
- name: "Ensure OIDC client entity exists"
|
||||
vars:
|
||||
client_id: "{{ oidc_vars.oidc_client.id }}"
|
||||
client_label: "{{ oidc_vars.oidc_client.label }}"
|
||||
command: >
|
||||
docker exec {{ DRUPAL_CONTAINER }} bash -lc
|
||||
"/var/www/html/vendor/bin/drush -r {{ DRUPAL_DOCKER_HTML_PATH }} eval '
|
||||
$id=\"{{ client_id }}\"; $label=\"{{ client_label }}\";
|
||||
$storage=\Drupal::entityTypeManager()->getStorage(\"openid_connect_client\");
|
||||
if (!$storage->load($id)) {
|
||||
$client=$storage->create([\"id\"=>$id,\"label\"=>$label]);
|
||||
$client->save();
|
||||
print \"created\";
|
||||
} else { print \"exists\"; }'"
|
||||
register: client_exists
|
||||
changed_when: "'created' in client_exists.stdout"
|
||||
|
||||
- name: "Apply OIDC client settings"
|
||||
vars:
|
||||
client_id: "{{ oidc_vars.oidc_client.id }}"
|
||||
settings_map: "{{ oidc_vars.oidc_client.settings }}"
|
||||
kv: "{{ settings_map | dict2items }}"
|
||||
loop: "{{ kv }}"
|
||||
loop_control:
|
||||
label: "{{ item.key }}"
|
||||
command: >
|
||||
docker exec {{ DRUPAL_CONTAINER }} bash -lc
|
||||
"/var/www/html/vendor/bin/drush -r {{ DRUPAL_DOCKER_HTML_PATH }} eval '
|
||||
$id=\"{{ client_id }}\";
|
||||
$key=\"{{ item.key }}\";
|
||||
$val=json_decode(base64_decode(\"{{ (item.value | to_json | b64encode) }}\"), true);
|
||||
$storage=\Drupal::entityTypeManager()->getStorage(\"openid_connect_client\");
|
||||
$c=$storage->load($id);
|
||||
$s=$c->get(\"settings\");
|
||||
$s[$key]=$val;
|
||||
$c->set(\"settings\", $s);
|
||||
$c->save();'"
|
||||
changed_when: true
|
||||
|
||||
- name: "Clear caches after OIDC config"
|
||||
command: >
|
||||
docker exec {{ DRUPAL_CONTAINER }} bash -lc
|
||||
"/var/www/html/vendor/bin/drush -r {{ DRUPAL_DOCKER_HTML_PATH }} cr"
|
||||
changed_when: false
|
||||
@@ -1,19 +0,0 @@
|
||||
- name: "Set trusted_host_patterns for canonical domains"
|
||||
vars:
|
||||
patterns: "{{ DRUPAL_DOMAINS
|
||||
| map('regex_replace','\\\\.','\\\\\\\\.')
|
||||
| map('regex_replace','^','^')
|
||||
| map('regex_replace','$','$')
|
||||
| list }}"
|
||||
php_array: "{{ patterns | to_json }}"
|
||||
command: >
|
||||
docker exec -u root {{ DRUPAL_CONTAINER }} bash -lc
|
||||
"php -r '
|
||||
$f="{{ DRUPAL_DOCKER_CONF_PATH }}/settings.local.php";
|
||||
$c=file_exists($f)?file_get_contents($f):"<?php\n";
|
||||
// Remove existing assignment of $settings[\"trusted_host_patterns\"] (if any)
|
||||
$c=preg_replace(\"/(\\\\$settings\\['trusted_host_patterns'\\]\\s*=).*?;/s\", \"\", $c);
|
||||
$c.="\n\$settings[\'trusted_host_patterns\'] = ".var_export(json_decode("{{ php_array|e }}", true), true).";\n";
|
||||
file_put_contents($f,$c);
|
||||
'"
|
||||
changed_when: true
|
||||
@@ -1,55 +0,0 @@
|
||||
- name: "Include role sys-stk-front-proxy for {{ application_id }}"
|
||||
include_role:
|
||||
name: sys-stk-front-proxy
|
||||
loop: "{{ DRUPAL_DOMAINS }}"
|
||||
loop_control:
|
||||
loop_var: domain
|
||||
vars:
|
||||
proxy_extra_configuration: "client_max_body_size {{ DRUPAL_MAX_UPLOAD_SIZE }};"
|
||||
http_port: "{{ ports.localhost.http[application_id] }}"
|
||||
|
||||
- name: "Load docker and DB for {{ application_id }}"
|
||||
include_role:
|
||||
name: sys-stk-back-stateful
|
||||
vars:
|
||||
docker_compose_flush_handlers: false
|
||||
|
||||
- name: "Transfer upload.ini to {{ DRUPAL_CONFIG_UPLOAD_ABS }}"
|
||||
template:
|
||||
src: upload.ini.j2
|
||||
dest: "{{ DRUPAL_CONFIG_UPLOAD_ABS }}"
|
||||
notify:
|
||||
- docker compose up
|
||||
- docker compose build
|
||||
|
||||
- name: "Transfer msmtprc to {{ DRUPAL_MSMTP_ABS }}"
|
||||
template:
|
||||
src: "{{ DRUPAL_MSMTP_SRC }}"
|
||||
dest: "{{ DRUPAL_MSMTP_ABS }}"
|
||||
notify: docker compose up
|
||||
|
||||
- name: "Transfer settings.local.php overrides"
|
||||
template:
|
||||
src: settings.local.php.j2
|
||||
dest: "{{ DRUPAL_SETTINGS_LOCAL_ABS }}"
|
||||
notify: docker compose up
|
||||
|
||||
- name: Flush handlers to make container ready
|
||||
meta: flush_handlers
|
||||
|
||||
- name: "Ensure settings.php includes settings.local.php"
|
||||
include_tasks: 01_settings_local_include.yml
|
||||
|
||||
- name: "Install Drupal (site:install)"
|
||||
include_tasks: 02_install.yml
|
||||
|
||||
- name: "Enable OIDC modules"
|
||||
include_tasks: 03_enable_modules.yml
|
||||
when: applications | get_app_conf(application_id, 'features.oidc')
|
||||
|
||||
- name: "Configure OIDC (global + client)"
|
||||
include_tasks: 04_configure_oidc.yml
|
||||
when: applications | get_app_conf(application_id, 'features.oidc')
|
||||
|
||||
- name: "Harden trusted host patterns"
|
||||
include_tasks: 05_trusted_hosts.yml
|
||||
@@ -1,75 +0,0 @@
|
||||
FROM {{ DRUPAL_IMAGE }}:{{ DRUPAL_VERSION }}
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# System dependencies (mail support + basic tools)
|
||||
# -------------------------------------------------------------------
|
||||
RUN apt-get update && \
|
||||
apt-get install -y msmtp msmtp-mta git unzip zip less nano curl vim && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# Install Composer
|
||||
# -------------------------------------------------------------------
|
||||
RUN php -r "copy('https://getcomposer.org/installer', 'composer-setup.php');" \
|
||||
&& php composer-setup.php --install-dir=/usr/local/bin --filename=composer \
|
||||
&& rm composer-setup.php
|
||||
|
||||
ENV COMPOSER_ALLOW_SUPERUSER=1
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# Build Drupal project with Drush + OpenID Connect
|
||||
# IMPORTANT:
|
||||
# - The Drupal base image uses /var/www/html as a symlink to /opt/drupal/web
|
||||
# - Therefore, the actual project root must be placed in /opt/drupal
|
||||
# -------------------------------------------------------------------
|
||||
RUN set -eux; \
|
||||
builddir="$(mktemp -d)"; \
|
||||
composer create-project --no-interaction --no-ansi --no-progress drupal/recommended-project:^10 "$builddir"; \
|
||||
composer --working-dir="$builddir" require -n drush/drush:^13 drupal/openid_connect:^1; \
|
||||
rm -rf /opt/drupal/* /opt/drupal/.[!.]* /opt/drupal/..?* 2>/dev/null || true; \
|
||||
mkdir -p /opt/drupal; \
|
||||
cp -a "$builddir"/. /opt/drupal/; \
|
||||
rm -rf "$builddir"
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# Make vendor binaries available in PATH
|
||||
# -------------------------------------------------------------------
|
||||
ENV PATH="/opt/drupal/vendor/bin:${PATH}"
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# PHP upload configuration
|
||||
# -------------------------------------------------------------------
|
||||
COPY {{ DRUPAL_CONFIG_UPLOAD_REL }} $PHP_INI_DIR/conf.d/
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# Permissions and ownership fixes
|
||||
# -------------------------------------------------------------------
|
||||
RUN set -eux; \
|
||||
# Ensure all directories are traversable
|
||||
chmod 755 /var /var/www /opt /opt/drupal; \
|
||||
# Ensure correct ownership for Drupal files
|
||||
chown -R www-data:www-data /opt/drupal; \
|
||||
# Apply default permissions
|
||||
find /opt/drupal -type d -exec chmod 755 {} +; \
|
||||
find /opt/drupal -type f -exec chmod 644 {} +; \
|
||||
# Ensure vendor binaries are executable
|
||||
if [ -d /opt/drupal/vendor/bin ]; then chmod a+rx /opt/drupal/vendor/bin/*; fi; \
|
||||
if [ -f /opt/drupal/vendor/drush/drush/drush ]; then chmod a+rx /opt/drupal/vendor/drush/drush/drush; fi; \
|
||||
# Ensure the docroot (/opt/drupal/web) is accessible
|
||||
if [ -d /opt/drupal/web ]; then \
|
||||
chmod 755 /opt/drupal/web; \
|
||||
find /opt/drupal/web -type d -exec chmod 755 {} +; \
|
||||
fi; \
|
||||
# Ensure settings.local.php exists and is owned by www-data
|
||||
install -o www-data -g www-data -m 640 /dev/null /opt/drupal/web/sites/default/settings.local.php
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# Runtime defaults
|
||||
# -------------------------------------------------------------------
|
||||
USER www-data
|
||||
WORKDIR /var/www/html # symlink pointing to /opt/drupal/web
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# Build-time check (optional)
|
||||
# -------------------------------------------------------------------
|
||||
RUN drush --version
|
||||
@@ -1,22 +0,0 @@
|
||||
{% include 'roles/docker-compose/templates/base.yml.j2' %}
|
||||
application:
|
||||
{% include 'roles/docker-container/templates/base.yml.j2' %}
|
||||
image: {{ DRUPAL_CUSTOM_IMAGE }}
|
||||
container_name: {{ DRUPAL_CONTAINER }}
|
||||
{{ lookup('template', 'roles/docker-container/templates/build.yml.j2') | indent(4) }}
|
||||
ports:
|
||||
- "127.0.0.1:{{ ports.localhost.http[application_id] }}:80"
|
||||
volumes:
|
||||
- data:/var/www/html/web/sites/default/files
|
||||
- {{ DRUPAL_MSMTP_ABS }}:/etc/msmtprc
|
||||
- {{ DRUPAL_SETTINGS_LOCAL_ABS }}:{{ DRUPAL_DOCKER_CONF_PATH }}/settings.local.php
|
||||
|
||||
{% include 'roles/docker-container/templates/healthcheck/msmtp_curl.yml.j2' %}
|
||||
{% include 'roles/docker-container/templates/depends_on/dmbs_excl.yml.j2' %}
|
||||
{% include 'roles/docker-container/templates/networks.yml.j2' %}
|
||||
|
||||
{% include 'roles/docker-compose/templates/networks.yml.j2' %}
|
||||
|
||||
{% include 'roles/docker-compose/templates/volumes.yml.j2' %}
|
||||
data:
|
||||
name: "{{ DRUPAL_VOLUME }}"
|
||||
@@ -1,7 +0,0 @@
|
||||
DRUPAL_DB_HOST= "{{ database_host }}:{{ database_port }}"
|
||||
DRUPAL_DB_USER= "{{ database_username }}"
|
||||
DRUPAL_DB_PASSWORD= "{{ database_password }}"
|
||||
DRUPAL_DB_NAME= "{{ database_name }}"
|
||||
|
||||
# Debug flags (optional)
|
||||
DRUPAL_DEBUG={{ MODE_DEBUG | lower }}
|
||||
@@ -1,49 +0,0 @@
|
||||
<?php
|
||||
/**
|
||||
* Local settings overrides generated by Ansible.
|
||||
* - Reads DB + OIDC endpoints from environment variables.
|
||||
* - Sets $databases and selected $config overrides.
|
||||
*/
|
||||
|
||||
$env = getenv();
|
||||
|
||||
/** Database **/
|
||||
$host = getenv('DRUPAL_DB_HOST') ?: '{{ database_host }}:{{ database_port }}';
|
||||
$db = getenv('DRUPAL_DB_NAME') ?: '{{ database_name }}';
|
||||
$user = getenv('DRUPAL_DB_USER') ?: '{{ database_username }}';
|
||||
$pass = getenv('DRUPAL_DB_PASSWORD') ?: '{{ database_password }}';
|
||||
|
||||
$parts = explode(':', $host, 2);
|
||||
$hostname = $parts[0];
|
||||
$port = isset($parts[1]) ? (int)$parts[1] : 3306;
|
||||
|
||||
$databases['default']['default'] = [
|
||||
'database' => $db,
|
||||
'username' => $user,
|
||||
'password' => $pass,
|
||||
'prefix' => '',
|
||||
'host' => $hostname,
|
||||
'port' => $port,
|
||||
'namespace'=> 'Drupal\\Core\\Database\\Driver\\mysql',
|
||||
'driver' => 'mysql',
|
||||
];
|
||||
|
||||
/** OIDC endpoint hints (optional) — the real config is applied via Drush. */
|
||||
$config['openid_connect.settings']['automatic_account_creation'] = true;
|
||||
$config['openid_connect.settings']['always_save_userinfo'] = true;
|
||||
$config['openid_connect.settings']['link_existing_users'] = true;
|
||||
|
||||
/** Trusted host patterns can be extended by Ansible task 04_trusted_hosts.yml */
|
||||
|
||||
/** Enable local services YML if present */
|
||||
$settings['container_yamls'][] = $app_root . '/' . $site_path . '/services.local.yml';
|
||||
|
||||
// Reverse proxy optional über ENV setzen (z.B. "10.0.0.0/8, 172.16.0.0/12")
|
||||
$proxy = getenv('REVERSE_PROXY_ADDRESSES');
|
||||
if ($proxy) {
|
||||
$settings['reverse_proxy'] = TRUE;
|
||||
$settings['reverse_proxy_addresses'] = array_map('trim', explode(',', $proxy));
|
||||
}
|
||||
|
||||
/** Hash salt (from schema/credentials, hashed with SHA-256) */
|
||||
$settings['hash_salt'] = '{{ applications | get_app_conf(application_id, "credentials.hash_salt", True) }}';
|
||||
@@ -1,8 +0,0 @@
|
||||
file_uploads = On
|
||||
memory_limit = {{ DRUPAL_MAX_UPLOAD_SIZE }}
|
||||
upload_max_filesize = {{ DRUPAL_MAX_UPLOAD_SIZE }}
|
||||
post_max_size = {{ DRUPAL_MAX_UPLOAD_SIZE }}
|
||||
max_execution_time = 300
|
||||
|
||||
; Use msmtp as the Mail Transfer Agent
|
||||
sendmail_path = "/usr/bin/msmtp -t"
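Whether these limits are actually picked up can be checked inside the running container; `drupal` below is a placeholder for the role's `DRUPAL_CONTAINER`:

```bash
# Reads the effective PHP settings after upload.ini has been copied into conf.d.
docker exec drupal php -r 'foreach (["upload_max_filesize","post_max_size","memory_limit"] as $k) echo $k, "=", ini_get($k), PHP_EOL;'
```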
|
||||
@@ -1,4 +0,0 @@
|
||||
users:
|
||||
administrator:
|
||||
username: "administrator"
|
||||
email: "administrator@{{ PRIMARY_DOMAIN }}"
|
||||
@@ -1,28 +0,0 @@
|
||||
# General
|
||||
|
||||
application_id: "web-app-drupal"
|
||||
database_type: "mariadb"
|
||||
|
||||
# Drupal
|
||||
|
||||
DRUPAL_URL: "{{ domains | get_url(application_id, WEB_PROTOCOL) }}"
|
||||
DRUPAL_CUSTOM_IMAGE: "drupal_custom"
|
||||
DRUPAL_DOCKER_HTML_PATH: "/var/www/html"
|
||||
DRUPAL_DOCKER_CONF_PATH: "/var/www/html/sites/default"
|
||||
DRUPAL_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.drupal.version') }}"
|
||||
DRUPAL_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.drupal.image') }}"
|
||||
DRUPAL_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.drupal.name') }}"
|
||||
DRUPAL_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.data') }}"
|
||||
DRUPAL_DOMAINS: "{{ applications | get_app_conf(application_id, 'server.domains.canonical') }}"
|
||||
DRUPAL_USER: "www-data"
|
||||
|
||||
DRUPAL_CONFIG_UPLOAD_REL: "config/upload.ini"
|
||||
DRUPAL_CONFIG_UPLOAD_ABS: "{{ [docker_compose.directories.instance, DRUPAL_CONFIG_UPLOAD_REL] | path_join }}"
|
||||
|
||||
DRUPAL_SETTINGS_LOCAL_REL: "config/settings.local.php"
|
||||
DRUPAL_SETTINGS_LOCAL_ABS: "{{ [docker_compose.directories.instance, DRUPAL_SETTINGS_LOCAL_REL] | path_join }}"
|
||||
|
||||
DRUPAL_MSMTP_SRC: "{{ [ playbook_dir, 'roles/sys-svc-msmtp/templates/msmtprc.conf.j2' ] | path_join }}"
|
||||
DRUPAL_MSMTP_ABS: "{{ [ docker_compose.directories.config, 'msmtprc.conf'] | path_join }}"
|
||||
|
||||
DRUPAL_MAX_UPLOAD_SIZE: "{{ applications | get_app_conf(application_id, 'max_upload_size') }}"
|
||||
@@ -1,33 +0,0 @@
|
||||
# OIDC configuration for Drupal's OpenID Connect module.
|
||||
|
||||
# Global settings for openid_connect.settings
|
||||
|
||||
oidc_settings:
|
||||
automatic_account_creation: true # Auto-create users on first login
|
||||
always_save_userinfo: true # Store latest userinfo on each login
|
||||
link_existing_users: true # Match existing users by email
|
||||
login_display: "button" # 'button' or 'form'
|
||||
enforced: false # If true, require login for the whole site
|
||||
|
||||
# OIDC client entity (e.g., 'keycloak')
|
||||
|
||||
oidc_client:
|
||||
id: "keycloak"
|
||||
label: "Keycloak"
|
||||
settings:
|
||||
client_id: "{{ OIDC.CLIENT.ID }}"
|
||||
client_secret: "{{ OIDC.CLIENT.SECRET }}"
|
||||
authorization_endpoint: "{{ OIDC.CLIENT.AUTHORIZE_URL }}"
|
||||
token_endpoint: "{{ OIDC.CLIENT.TOKEN_URL }}"
|
||||
userinfo_endpoint: "{{ OIDC.CLIENT.USER_INFO_URL }}"
|
||||
end_session_endpoint: "{{ OIDC.CLIENT.LOGOUT_URL }}"
|
||||
scopes:
|
||||
- "openid"
|
||||
- "email"
|
||||
- "profile"
|
||||
use_standard_claims: true
|
||||
# Optional claim mapping examples:
|
||||
# username_claim: "{{ OIDC.ATTRIBUTES.USERNAME }}"
|
||||
# email_claim: "{{ OIDC.ATTRIBUTES.EMAIL }}"
|
||||
# given_name_claim: "{{ OIDC.ATTRIBUTES.GIVEN_NAME }}"
|
||||
# family_name_claim: "{{ OIDC.ATTRIBUTES.FAMILY_NAME }}"
|
||||
@@ -12,7 +12,9 @@ server:
|
||||
script-src-elem:
|
||||
unsafe-inline: true
|
||||
unsafe-eval: true
|
||||
script-src-attr:
|
||||
style-src:
|
||||
unsafe-inline: true
|
||||
script-src:
|
||||
unsafe-eval: true
|
||||
whitelist:
|
||||
connect-src:
|
||||
|
||||
@@ -1,13 +1,11 @@
|
||||
#!/bin/sh
|
||||
# POSIX-safe entrypoint for EspoCRM container
|
||||
# Compatible with /bin/sh (dash/busybox). Avoids 'pipefail' and non-portable features.
|
||||
set -eu
|
||||
set -euo pipefail
|
||||
|
||||
log() { printf '%s %s\n' "[entrypoint]" "$*" >&2; }
|
||||
|
||||
# --- Simple boolean normalization --------------------------------------------
|
||||
bool_norm () {
|
||||
v="$(printf '%s' "${1:-}" | tr '[:upper:]' '[:lower:]' 2>/dev/null || true)"
|
||||
v="$(printf '%s' "${1:-}" | tr '[:upper:]' '[:lower:]')"
|
||||
case "$v" in
|
||||
1|true|yes|on) echo "true" ;;
|
||||
0|false|no|off|"") echo "false" ;;
|
||||
@@ -15,45 +13,30 @@ bool_norm () {
|
||||
esac
|
||||
}
|
||||
|
||||
# --- Environment initialization ----------------------------------------------
|
||||
# Expected ENV (from env.j2)
|
||||
MAINTENANCE="$(bool_norm "${ESPO_INIT_MAINTENANCE_MODE:-false}")"
|
||||
CRON_DISABLED="$(bool_norm "${ESPO_INIT_CRON_DISABLED:-false}")"
|
||||
USE_CACHE="$(bool_norm "${ESPO_INIT_USE_CACHE:-true}")"
|
||||
|
||||
APP_DIR="/var/www/html"
|
||||
|
||||
# Provided by env.j2 (fallback ensures robustness)
|
||||
SET_FLAGS_SCRIPT="${ESPOCRM_SET_FLAGS_SCRIPT:-/usr/local/bin/set_flags.php}"
|
||||
if [ ! -f "$SET_FLAGS_SCRIPT" ]; then
|
||||
log "WARN: SET_FLAGS_SCRIPT '$SET_FLAGS_SCRIPT' not found; falling back to /usr/local/bin/set_flags.php"
|
||||
SET_FLAGS_SCRIPT="/usr/local/bin/set_flags.php"
|
||||
fi
|
||||
SET_FLAGS_SCRIPT="${ESPOCRM_SET_FLAGS_SCRIPT}"
|
||||
|
||||
# --- Wait for bootstrap.php (max 60s, e.g. fresh volume) ----------------------
|
||||
log "Waiting for ${APP_DIR}/bootstrap.php..."
|
||||
count=0
|
||||
while [ $count -lt 60 ] && [ ! -f "${APP_DIR}/bootstrap.php" ]; do
|
||||
for i in $(seq 1 60); do
|
||||
[ -f "${APP_DIR}/bootstrap.php" ] && break
|
||||
sleep 1
|
||||
count=$((count + 1))
|
||||
done
|
||||
if [ ! -f "${APP_DIR}/bootstrap.php" ]; then
|
||||
log "ERROR: bootstrap.php missing after 60s"
|
||||
exit 1
|
||||
log "ERROR: bootstrap.php missing after 60s"; exit 1
|
||||
fi
|
||||
|
||||
# --- Apply config flags via set_flags.php ------------------------------------
|
||||
log "Applying runtime flags via set_flags.php..."
|
||||
if ! php "${SET_FLAGS_SCRIPT}"; then
|
||||
log "ERROR: set_flags.php execution failed"
|
||||
exit 1
|
||||
fi
|
||||
php "${SET_FLAGS_SCRIPT}"
|
||||
|
||||
# --- Clear cache (safe) -------------------------------------------------------
|
||||
if php "${APP_DIR}/clear_cache.php" 2>/dev/null; then
|
||||
log "Cache cleared successfully."
|
||||
else
|
||||
log "WARN: Cache clearing skipped or failed (non-critical)."
|
||||
fi
|
||||
php "${APP_DIR}/clear_cache.php" || true
|
||||
|
||||
# --- Hand off to CMD ----------------------------------------------------------
|
||||
if [ "$#" -gt 0 ]; then
|
||||
@@ -73,6 +56,5 @@ for cmd in apache2-foreground httpd-foreground php-fpm php-fpm8.3 php-fpm8.2 sup
|
||||
fi
|
||||
done
|
||||
|
||||
# --- Fallback ---------------------------------------------------------------
|
||||
log "No known server command found; tailing to keep container alive."
|
||||
exec tail -f /dev/null
|
||||
|
||||
@@ -18,10 +18,10 @@ server:
|
||||
flags:
|
||||
script-src-elem:
|
||||
unsafe-inline: true
|
||||
script-src-attr:
|
||||
script-src:
|
||||
unsafe-inline: true
|
||||
unsafe-eval: true
|
||||
style-src-attr:
|
||||
style-src:
|
||||
unsafe-inline: true
|
||||
oauth2_proxy:
|
||||
application: "application"
|
||||
|
||||
@@ -7,10 +7,10 @@ docker_compose_flush_handlers: false
|
||||
|
||||
# Friendica
|
||||
friendica_container: "friendica"
|
||||
friendica_no_validation: "{{ applications | get_app_conf(application_id, 'features.oidc') }}" # Email validation is not necessary if OIDC is active
|
||||
friendica_no_validation: "{{ applications | get_app_conf(application_id, 'features.oidc', True) }}" # Email validation is not necessary if OIDC is active
|
||||
friendica_application_base: "/var/www/html"
|
||||
friendica_docker_ldap_config: "{{ [ friendica_application_base, 'config/ldapauth.config.php' ] | path_join }}"
|
||||
friendica_host_ldap_config: "{{ [ docker_compose.directories.volumes, 'ldapauth.config.php' ] | path_join }}"
|
||||
friendica_config_dir: "{{ [ friendica_application_base, 'config' ] | path_join }}"
|
||||
friendica_config_file: "{{ [ friendica_config_dir, 'local.config.php' ] | path_join }}"
|
||||
friendica_docker_ldap_config: "{{ friendica_application_base }}/config/ldapauth.config.php"
|
||||
friendica_host_ldap_config: "{{ docker_compose.directories.volumes }}ldapauth.config.php"
|
||||
friendica_config_dir: "{{ friendica_application_base }}/config"
|
||||
friendica_config_file: "{{ friendica_config_dir }}/local.config.php"
|
||||
friendica_user: "www-data"
|
||||
|
||||
@@ -27,7 +27,7 @@ server:
|
||||
aliases: []
|
||||
csp:
|
||||
flags:
|
||||
style-src-attr:
|
||||
style-src:
|
||||
unsafe-inline: true
|
||||
whitelist:
|
||||
font-src:
|
||||
|
||||
@@ -24,7 +24,7 @@ server:
|
||||
flags:
|
||||
script-src-elem:
|
||||
unsafe-inline: true
|
||||
style-src-attr:
|
||||
style-src:
|
||||
unsafe-inline: true
|
||||
whitelist:
|
||||
font-src:
|
||||
@@ -47,17 +47,7 @@ docker:
|
||||
version: "latest"
|
||||
backup:
|
||||
no_stop_required: true
|
||||
port: 3000
|
||||
name: "gitea"
|
||||
cpus: 1.0
|
||||
mem_reservation: 1g
|
||||
mem_limit: 2g
|
||||
pids_limit: 1024
|
||||
redis:
|
||||
enabled: false
|
||||
cpus: 0.25
|
||||
mem_reservation: 0.2g
|
||||
mem_limit: 0.3g
|
||||
pids_limit: 512
|
||||
port: 3000
|
||||
name: "gitea"
|
||||
volumes:
|
||||
data: "gitea_data"
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
shell: |
|
||||
docker exec -i --user {{ GITEA_USER }} {{ GITEA_CONTAINER }} \
|
||||
gitea admin auth list \
|
||||
| awk -v name="LDAP ({{ SOFTWARE_NAME }})" '$0 ~ name {print $1; exit}'
|
||||
| awk -v name="LDAP ({{ PRIMARY_DOMAIN }})" '$0 ~ name {print $1; exit}'
|
||||
args:
|
||||
chdir: "{{ docker_compose.directories.instance }}"
|
||||
register: ldap_source_id_raw
|
||||
|
||||
@@ -11,7 +11,7 @@ USER_GID=1000
|
||||
|
||||
# Logging configuration
|
||||
GITEA__log__MODE=console
|
||||
GITEA__log__LEVEL={% if MODE_DEBUG | bool %}Debug{% else %}Info{% endif %}
|
||||
GITEA__log__LEVEL={% if MODE_DEBUG | bool %}Debug{% else %}Info{% endif %}
|
||||
|
||||
# Database
|
||||
DB_TYPE=mysql
|
||||
@@ -20,28 +20,6 @@ DB_NAME={{ database_name }}
|
||||
DB_USER={{ database_username }}
|
||||
DB_PASSWD={{ database_password }}
|
||||
|
||||
|
||||
{% if GITEA_REDIS_ENABLED | bool %}
|
||||
# ------------------------------------------------
|
||||
# Redis Configuration for Gitea
|
||||
# ------------------------------------------------
|
||||
# @see https://docs.gitea.com/administration/config-cheat-sheet#cache-cache
|
||||
|
||||
GITEA__cache__ENABLED=true
|
||||
GITEA__cache__ADAPTER=redis
|
||||
# use a different Redis DB index than oauth2-proxy
|
||||
GITEA__cache__HOST=redis://{{ GITEA_REDIS_ADDRESS }}/1
|
||||
|
||||
# Store sessions in Redis (instead of the internal DB)
|
||||
GITEA__session__PROVIDER=redis
|
||||
GITEA__session__PROVIDER_CONFIG=network=tcp,addr={{ GITEA_REDIS_ADDRESS }},db=2,pool_size=100,idle_timeout=180
|
||||
|
||||
# Use Redis for background task queues
|
||||
GITEA__queue__TYPE=redis
|
||||
GITEA__queue__CONN_STR=redis://{{ GITEA_REDIS_ADDRESS }}/3
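Whether Gitea really uses the configured Redis databases can be spot-checked from the Redis container once the stack is up; the service name `redis` is an assumption taken from this compose setup:

```bash
# DB 2 holds sessions per PROVIDER_CONFIG above; DB 1 and 3 are used for cache and queues.
docker exec redis redis-cli -n 2 --scan --count 100 | head
docker exec redis redis-cli info keyspace
```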
|
||||
{% endif %}
|
||||
|
||||
|
||||
# SSH
|
||||
SSH_PORT={{ports.public.ssh[application_id]}}
|
||||
SSH_LISTEN_PORT=22
|
||||
@@ -70,7 +48,7 @@ GITEA__security__INSTALL_LOCK=true # Locks the installation page
|
||||
GITEA__openid__ENABLE_OPENID_SIGNUP={{ applications | get_app_conf(application_id, 'features.oidc', False) | lower }}
|
||||
GITEA__openid__ENABLE_OPENID_SIGNIN={{ applications | get_app_conf(application_id, 'features.oidc', False) | lower }}
|
||||
|
||||
{% if GITEA_IAM_ENABLED | bool %}
|
||||
{% if applications | get_app_conf(application_id, 'features.oidc', False) or applications | get_app_conf(application_id, 'features.ldap', False) %}
|
||||
|
||||
EXTERNAL_USER_DISABLE_FEATURES=deletion,manage_credentials,change_username,change_full_name
|
||||
|
||||
@@ -80,5 +58,9 @@ GITEA__ldap__SYNC_USER_ON_LOGIN=true
|
||||
|
||||
{% endif %}
|
||||
|
||||
GITEA__service__DISABLE_REGISTRATION={{ GITEA_IAM_ENABLED | lower }}
|
||||
# ------------------------------------------------
|
||||
# Disable user self-registration
|
||||
# ------------------------------------------------
|
||||
# After this only admins can create accounts
|
||||
GITEA__service__DISABLE_REGISTRATION=false
|
||||
|
||||
|
||||
@@ -22,15 +22,9 @@ GITEA_LDAP_AUTH_ARGS:
|
||||
- '--email-attribute "{{ LDAP.USER.ATTRIBUTES.MAIL }}"'
|
||||
- '--public-ssh-key-attribute "{{ LDAP.USER.ATTRIBUTES.SSH_PUBLIC_KEY }}"'
|
||||
- '--synchronize-users'
|
||||
GITEA_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.gitea.version') }}"
|
||||
GITEA_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.gitea.image') }}"
|
||||
GITEA_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.gitea.name') }}"
|
||||
GITEA_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.data') }}"
|
||||
GITEA_USER: "git"
|
||||
GITEA_CONFIG: "/data/gitea/conf/app.ini"
|
||||
|
||||
## Redis
|
||||
GITEA_REDIS_ENABLED: "{{ applications | get_app_conf(application_id, 'docker.services.redis.enabled') }}"
|
||||
GITEA_REDIS_ADDRESS: "redis:6379"
|
||||
|
||||
GITEA_IAM_ENABLED: "{{ applications | get_app_conf(application_id, 'features.oidc', False) or applications | get_app_conf(application_id, 'features.ldap', False) }}"
|
||||
GITEA_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.gitea.version') }}"
|
||||
GITEA_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.gitea.image') }}"
|
||||
GITEA_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.gitea.name') }}"
|
||||
GITEA_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.data') }}"
|
||||
GITEA_USER: "git"
|
||||
GITEA_CONFIG: "/data/gitea/conf/app.ini"
|
||||
|
||||
@@ -27,7 +27,3 @@ server:
|
||||
domains:
|
||||
canonical:
|
||||
- lab.git.{{ PRIMARY_DOMAIN }}
|
||||
csp:
|
||||
flags:
|
||||
script-src-elem:
|
||||
unsafe-inline: true
|
||||
|
||||
@@ -29,7 +29,7 @@ server:
|
||||
script-src-elem:
|
||||
unsafe-inline: true
|
||||
unsafe-eval: true
|
||||
script-src-attr:
|
||||
script-src:
|
||||
unsafe-inline: true
|
||||
unsafe-eval: true
|
||||
domains:
|
||||
|
||||
@@ -14,7 +14,7 @@ server:
|
||||
aliases: []
|
||||
csp:
|
||||
flags:
|
||||
style-src-attr:
|
||||
style-src:
|
||||
unsafe-inline: true
|
||||
script-src-elem:
|
||||
unsafe-inline: true
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
# (Optional) specifically wait for the CLI installer script
|
||||
- name: "Check for CLI installer"
|
||||
command:
|
||||
argv: [ docker, exec, "{{ JOOMLA_CONTAINER }}", test, -f, "{{ JOOMLA_INSTALLER_CLI_FILE }}" ]
|
||||
argv: [ docker, exec, "{{ JOOMLA_CONTAINER }}", test, -f, /var/www/html/installation/joomla.php ]
|
||||
register: has_installer
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
@@ -32,7 +32,7 @@
|
||||
- exec
|
||||
- "{{ JOOMLA_CONTAINER }}"
|
||||
- php
|
||||
- "{{ JOOMLA_INSTALLER_CLI_FILE }}"
|
||||
- /var/www/html/installation/joomla.php
|
||||
- install
|
||||
- "--db-type={{ JOOMLA_DB_CONNECTOR }}"
|
||||
- "--db-host={{ database_host }}"
|
||||
|
||||
Some files were not shown because too many files have changed in this diff