Mirror of https://github.com/kevinveenbirkenbach/computer-playbook.git (synced 2025-11-18 11:06:29 +00:00)

Compare commits: feature/ke ... 93c2fbedd7 (33 commits)
Commits in this comparison:

93c2fbedd7
d006f0ba5e
dd43722e02
05d7ddc491
e54436821c
ed73a37795
adff9271fd
2f0fb2cb69
6abf2629e0
6a8e0f38d8
ae618cbf19
c835ca8f2c
087175a3c7
3da645f3b8
a996e2190f
7dccffd52d
853f2c3e2d
b2978a3141
0e0b703ccd
0b86b2f057
80e048a274
2610aec293
07db162368
a526d1adc4
ca95079111
e410d66cb4
ab48cf522f
41c12bdc12
aae463b602
bb50551533
098099b41e
0a7d767252
d88599f76c
@@ -76,8 +76,9 @@ _applications_nextcloud_oidc_flavor: >-
         False,
         'oidc_login'
         if applications
-        | get_app_conf('web-app-nextcloud','features.ldap',False, True)
-        else 'sociallogin'
+        | get_app_conf('web-app-nextcloud','features.ldap',False, True, True)
+        else 'sociallogin',
+        True
         )
     }}
@@ -5,6 +5,6 @@ MODE_DUMMY: false # Executes dummy/test routines instead
 MODE_UPDATE:  true                       # Executes updates
 MODE_DEBUG:   false                      # Enables debugging in Ansible and in the apps. You SHOULD NOT enable this on production servers
 MODE_RESET:   false                      # Cleans up all Infinito.Nexus files. When using this mode you must run the whole playbook, not partial roles.
-MODE_CLEANUP: "{{ MODE_DEBUG | bool }}"  # Cleanup unused files and configurations
+MODE_CLEANUP: true                       # Cleanup unused files and configurations
 MODE_ASSERT:  "{{ MODE_DEBUG | bool }}"  # Executes validation tasks during the run.
 MODE_BACKUP:  true                       # Executes the backup before the deployment
@@ -24,29 +24,29 @@ SYS_SCHEDULE_HEALTH_BTRFS: "*-*-* 00:00:00"
 SYS_SCHEDULE_HEALTH_JOURNALCTL:       "*-*-* 00:00:00"                        # Check the journalctl once per day for errors
 SYS_SCHEDULE_HEALTH_DISC_SPACE:       "*-*-* 06,12,18,00:00:00"               # Check four times per day if there is sufficient disc space
 SYS_SCHEDULE_HEALTH_DOCKER_CONTAINER: "*-*-* {{ HOURS_SERVER_AWAKE }}:00:00"  # Check once per hour if the docker containers are healthy
-SYS_SCHEDULE_HEALTH_DOCKER_VOLUMES:   "*-*-* {{ HOURS_SERVER_AWAKE }}:15:00"  # Check once per hour if the docker volumes are healthy
-SYS_SCHEDULE_HEALTH_CSP_CRAWLER:      "*-*-* {{ HOURS_SERVER_AWAKE }}:30:00"  # Check once per hour if all CSP directives are fulfilled
-SYS_SCHEDULE_HEALTH_NGINX:            "*-*-* {{ HOURS_SERVER_AWAKE }}:45:00"  # Check once per hour if all webservices are available
+SYS_SCHEDULE_HEALTH_DOCKER_VOLUMES:   "*-*-* {{ HOURS_SERVER_AWAKE }}:00:00"  # Check once per hour if the docker volumes are healthy
+SYS_SCHEDULE_HEALTH_CSP_CRAWLER:      "*-*-* {{ HOURS_SERVER_AWAKE }}:00:00"  # Check once per hour if all CSP directives are fulfilled
+SYS_SCHEDULE_HEALTH_NGINX:            "*-*-* {{ HOURS_SERVER_AWAKE }}:00:00"  # Check once per hour if all webservices are available
 SYS_SCHEDULE_HEALTH_MSMTP:            "*-*-* 00:00:00"                        # Check the SMTP server once per day

 ### Schedule for cleanup tasks
-SYS_SCHEDULE_CLEANUP_BACKUPS:         "*-*-* 00,06,12,18:30:00"               # Cleanup backups every 6 hours, MUST be called before disc space cleanup
-SYS_SCHEDULE_CLEANUP_DISC_SPACE:      "*-*-* 07,13,19,01:30:00"               # Cleanup disc space every 6 hours
-SYS_SCHEDULE_CLEANUP_CERTS:           "*-*-* 12,00:45:00"                     # Deletes and revokes unused certs
+SYS_SCHEDULE_CLEANUP_CERTS:           "*-*-* 11,23:00:00"                     # Deletes and revokes unused certs
 SYS_SCHEDULE_CLEANUP_FAILED_BACKUPS:  "*-*-* 12:00:00"                        # Clean up failed docker backups every noon
+SYS_SCHEDULE_CLEANUP_BACKUPS:         "*-*-* 00,06,12,18:15:00"               # Cleanup backups every 6 hours, MUST be called before disc space cleanup
+SYS_SCHEDULE_CLEANUP_DISC_SPACE:      "*-*-* 00,06,12,18:30:00"               # Cleanup disc space every 6 hours

 ### Schedule for repair services
 SYS_SCHEDULE_REPAIR_BTRFS_AUTO_BALANCER: "Sat *-*-01..07 00:00:00"            # Execute btrfs auto balancer every first Saturday of a month
-SYS_SCHEDULE_REPAIR_DOCKER_HARD:         "Sun *-*-* 08:00:00"                 # Restart docker instances every Sunday at 8:00 AM
+SYS_SCHEDULE_REPAIR_DOCKER_HARD:         "Sun *-*-* 05:30:00"                 # Restart docker instances every Sunday at 5:30 AM

 ### Schedule for backup tasks
-SYS_SCHEDULE_BACKUP_DOCKER_TO_LOCAL:  "*-*-* 03:30:00"
-SYS_SCHEDULE_BACKUP_REMOTE_TO_LOCAL:  "*-*-* 21:30:00"
+SYS_SCHEDULE_BACKUP_REMOTE_TO_LOCAL:  "*-*-* 00:15:00"                        # Pull backup of the previous day
+SYS_SCHEDULE_BACKUP_DOCKER_TO_LOCAL:  "*-*-* 00:30:00"                        # Backup the current day

 ### Schedule for Maintenance Tasks
-SYS_SCHEDULE_MAINTANANCE_LETSENCRYPT_RENEW:  "*-*-* 12,00:30:00"              # Renew Mailu certificates twice per day
-SYS_SCHEDULE_MAINTANANCE_LETSENCRYPT_DEPLOY: "*-*-* 13,01:30:00"              # Deploy letsencrypt certificates twice per day to docker containers
-SYS_SCHEDULE_MAINTANANCE_NEXTCLOUD:          "22"                             # Do Nextcloud maintenance between 22:00 and 02:00
+SYS_SCHEDULE_MAINTANANCE_LETSENCRYPT_RENEW:  "*-*-* 12,00:15:00"              # Renew Mailu certificates twice per day
+SYS_SCHEDULE_MAINTANANCE_LETSENCRYPT_DEPLOY: "*-*-* 12,00:30:00"              # Deploy letsencrypt certificates twice per day to docker containers
+SYS_SCHEDULE_MAINTANANCE_NEXTCLOUD:          "21"                             # Do Nextcloud maintenance between 21:00 and 01:00

 ### Animation
 SYS_SCHEDULE_ANIMATION_KEYBOARD_COLOR: "*-*-* *:*:00"                         # Change the keyboard color every minute
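All of these schedule values use systemd calendar-event syntax (see systemd.time(7)): an expression such as *-*-* 00,06,12,18:15:00 fires at minute 15 of hours 00, 06, 12 and 18 every day, which is how the backup cleanup above is deliberately staggered ahead of the disc-space cleanup at minute 30.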
@@ -24,7 +24,7 @@ class ConfigEntryNotSetError(AppConfigKeyError):
     pass


-def get_app_conf(applications, application_id, config_path, strict=True, default=None):
+def get_app_conf(applications, application_id, config_path, strict=True, default=None, skip_missing_app=False):
     # Path to the schema file for this application
     schema_path = os.path.join('roles', application_id, 'schema', 'main.yml')

@@ -133,6 +133,9 @@ def get_app_conf(applications, application_id, config_path, strict=True, default
     try:
         obj = applications[application_id]
     except KeyError:
+        if skip_missing_app:
+            # Simply return default instead of failing
+            return default if default is not None else False
         raise AppConfigKeyError(
             f"Application ID '{application_id}' not found in applications dict.\n"
             f"path_trace: {path_trace}\n"
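For reference, a minimal sketch of what the new skip_missing_app flag changes (the applications dict and the looked-up application id below are invented for illustration, and get_app_conf is assumed to be importable from the module patched above):

    # Hypothetical example; values are illustrative only.
    applications = {'web-app-nextcloud': {'features': {'ldap': True}}}

    # Previously, a lookup against an application id that is absent from the
    # dict raised AppConfigKeyError. With skip_missing_app=True the call now
    # returns the given default instead (or False when no default is set).
    value = get_app_conf(
        applications, 'web-app-matomo', 'features.ldap',
        strict=False, default=False, skip_missing_app=True,
    )
    print(value)  # -> False, instead of raising AppConfigKeyError

This is exactly what the extra positional True in the _applications_nextcloud_oidc_flavor expression above enables: the flavor can be resolved even on hosts where the application is not configured.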
@@ -153,6 +153,11 @@ roles:
     description: "Core AI building blocks—model serving, OpenAI-compatible gateways, vector databases, orchestration, and chat UIs."
     icon: "fas fa-brain"
     invokable: true
+  bkp:
+    title: "Backup Services"
+    description: "Service-level backup and recovery components—handling automated data snapshots, remote backups, synchronization services, and backup orchestration across databases, files, and containers."
+    icon: "fas fa-database"
+    invokable: true
   user:
     title: "Users & Access"
     description: "User accounts & access control"
@@ -127,7 +127,7 @@
 #de_BE@euro ISO-8859-15
 #de_CH.UTF-8 UTF-8
 #de_CH ISO-8859-1
-de_DE.UTF-8 UTF-8
+#de_DE.UTF-8 UTF-8
 #de_DE ISO-8859-1
 #de_DE@euro ISO-8859-15
 #de_IT.UTF-8 UTF-8
roles/svc-bkp-rmt-2-loc/__init__.py (new file, 0 lines)
roles/svc-bkp-rmt-2-loc/files/__init__.py (new file, 0 lines)
roles/svc-bkp-rmt-2-loc/files/pull-specific-host.py (new file, 132 lines)
@@ -0,0 +1,132 @@
#!/usr/bin/env python3
import argparse
import os
import subprocess
import time
import sys


def run_command(command, capture_output=True, check=False, shell=True):
    """Run a shell command and return its output as string."""
    try:
        result = subprocess.run(
            command,
            capture_output=capture_output,
            shell=shell,
            text=True,
            check=check
        )
        return result.stdout.strip()
    except subprocess.CalledProcessError as e:
        if capture_output:
            print(e.stdout)
            print(e.stderr)
        raise


def pull_backups(hostname: str):
    print(f"pulling backups from: {hostname}")
    errors = 0

    print("loading meta data...")
    remote_host = f"backup@{hostname}"
    print(f"host address: {remote_host}")

    remote_machine_id = run_command(f'ssh "{remote_host}" sha256sum /etc/machine-id')[:64]
    print(f"remote machine id: {remote_machine_id}")

    general_backup_machine_dir = f"/Backups/{remote_machine_id}/"
    print(f"backup dir: {general_backup_machine_dir}")

    try:
        remote_backup_types = run_command(
            f'ssh "{remote_host}" "find {general_backup_machine_dir} -maxdepth 1 -type d -execdir basename {{}} \\;"'
        ).splitlines()
        print(f"backup types: {' '.join(remote_backup_types)}")
    except subprocess.CalledProcessError:
        sys.exit(1)

    for backup_type in remote_backup_types:
        if backup_type == remote_machine_id:
            continue

        print(f"backup type: {backup_type}")

        general_backup_type_dir = f"{general_backup_machine_dir}{backup_type}/"
        general_versions_dir = general_backup_type_dir

        # local previous version
        try:
            local_previous_version_dir = run_command(f"ls -d {general_versions_dir}* | tail -1")
        except subprocess.CalledProcessError:
            local_previous_version_dir = ""
        print(f"last local backup: {local_previous_version_dir}")

        # remote versions
        remote_backup_versions = run_command(
            f'ssh "{remote_host}" "ls -d /Backups/{remote_machine_id}/backup-docker-to-local/*"'
        ).splitlines()
        print(f"remote backup versions: {' '.join(remote_backup_versions)}")

        remote_last_backup_dir = remote_backup_versions[-1] if remote_backup_versions else ""
        print(f"last remote backup: {remote_last_backup_dir}")

        remote_source_path = f"{remote_host}:{remote_last_backup_dir}/"
        print(f"source path: {remote_source_path}")

        local_backup_destination_path = remote_last_backup_dir
        print(f"backup destination: {local_backup_destination_path}")

        print("creating local backup destination folder...")
        os.makedirs(local_backup_destination_path, exist_ok=True)

        rsync_command = (
            f'rsync -abP --delete --delete-excluded --rsync-path="sudo rsync" '
            f'--link-dest="{local_previous_version_dir}" "{remote_source_path}" "{local_backup_destination_path}"'
        )
        print("starting backup...")
        print(f"executing: {rsync_command}")

        retry_count = 0
        max_retries = 12
        retry_delay = 300  # 5 minutes
        last_retry_start = 0
        max_retry_duration = 43200  # 12 hours

        rsync_exit_code = 1
        while retry_count < max_retries:
            print(f"Retry attempt: {retry_count + 1}")
            if retry_count > 0:
                current_time = int(time.time())
                last_retry_duration = current_time - last_retry_start
                if last_retry_duration >= max_retry_duration:
                    print("Last retry took more than 12 hours, increasing max retries to 12.")
                    max_retries = 12
            last_retry_start = int(time.time())
            rsync_exit_code = os.system(rsync_command)
            if rsync_exit_code == 0:
                break
            retry_count += 1
            time.sleep(retry_delay)

        if rsync_exit_code != 0:
            print(f"Error: rsync failed after {max_retries} attempts")
            errors += 1

    sys.exit(errors)


def main():
    parser = argparse.ArgumentParser(
        description="Pull backups from a remote backup host via rsync."
    )
    parser.add_argument(
        "hostname",
        help="Hostname from which backup should be pulled"
    )
    args = parser.parse_args()
    pull_backups(args.hostname)


if __name__ == "__main__":
    main()
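A usage note: the deployment template further below invokes this script once per configured provider host, so a manual run is simply python pull-specific-host.py <hostname>, executed as the user holding the backup SSH key (the hostname argument names the provider to pull from).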
@@ -1,85 +0,0 @@
#!/bin/bash
# @param $1 hostname from which backup should be pulled

echo "pulling backups from: $1" &&

# error counter
errors=0 &&

echo "loading meta data..." &&

remote_host="backup@$1" &&
echo "host address: $remote_host" &&

remote_machine_id="$( (ssh "$remote_host" sha256sum /etc/machine-id) | head -c 64 )" &&
echo "remote machine id: $remote_machine_id" &&

general_backup_machine_dir="/Backups/$remote_machine_id/" &&
echo "backup dir: $general_backup_machine_dir" &&

remote_backup_types="$(ssh "$remote_host" "find $general_backup_machine_dir -maxdepth 1 -type d -execdir basename {} \;")" &&
echo "backup types: $remote_backup_types" || exit 1

for backup_type in $remote_backup_types; do
  if [ "$backup_type" != "$remote_machine_id" ]; then
    echo "backup type: $backup_type" &&

    general_backup_type_dir="$general_backup_machine_dir""$backup_type/" &&
    general_versions_dir="$general_backup_type_dir" &&
    local_previous_version_dir="$(ls -d $general_versions_dir* | tail -1)" &&
    echo "last local backup: $local_previous_version_dir" &&

    remote_backup_versions="$(ssh "$remote_host" ls -d "$general_backup_type_dir"\*)" &&
    echo "remote backup versions: $remote_backup_versions" &&


    remote_last_backup_dir=$(echo "$remote_backup_versions" | tail -1) &&
    echo "last remote backup: $remote_last_backup_dir" &&

    remote_source_path="$remote_host:$remote_last_backup_dir/" &&
    echo "source path: $remote_source_path" &&

    local_backup_destination_path=$remote_last_backup_dir &&
    echo "backup destination: $local_backup_destination_path" &&

    echo "creating local backup destination folder..." &&
    mkdir -vp "$local_backup_destination_path" &&

    echo "starting backup..."
    rsync_command='rsync -abP --delete --delete-excluded --rsync-path="sudo rsync" --link-dest="'$local_previous_version_dir'" "'$remote_source_path'" "'$local_backup_destination_path'"'

    echo "executing: $rsync_command"

    retry_count=0
    max_retries=12
    retry_delay=300 # Retry delay in seconds (5 minutes)
    last_retry_start=0
    max_retry_duration=43200 # Maximum duration for a single retry attempt (12 hours)

    while [[ $retry_count -lt $max_retries ]]; do
      echo "Retry attempt: $((retry_count + 1))"
      if [[ $retry_count -gt 0 ]]; then
        current_time=$(date +%s)
        last_retry_duration=$((current_time - last_retry_start))
        if [[ $last_retry_duration -ge $max_retry_duration ]]; then
          echo "Last retry took more than 12 hours, increasing max retries to 12."
          max_retries=12
        fi
      fi
      last_retry_start=$(date +%s)
      eval "$rsync_command"
      rsync_exit_code=$?
      if [[ $rsync_exit_code -eq 0 ]]; then
        break
      fi
      retry_count=$((retry_count + 1))
      sleep $retry_delay
    done

    if [[ $rsync_exit_code -ne 0 ]]; then
      echo "Error: rsync failed after $max_retries attempts"
      ((errors += 1))
    fi
  fi
done
exit $errors;
@@ -10,15 +10,15 @@
     - include_tasks: utils/run_once.yml
   when: run_once_svc_bkp_rmt_2_loc is not defined

-- name: "create {{ DOCKER_BACKUP_REMOTE_2_LOCAL_DIR }}"
+- name: "Create Directory '{{ DOCKER_BACKUP_REMOTE_2_LOCAL_DIR }}'"
   file:
     path: "{{ DOCKER_BACKUP_REMOTE_2_LOCAL_DIR }}"
     state: directory
     mode: "0755"

-- name: create svc-bkp-rmt-2-loc.sh
+- name: "Deploy '{{ DOCKER_BACKUP_REMOTE_2_LOCAL_SCRIPT }}'"
   copy:
-    src: svc-bkp-rmt-2-loc.sh
+    src: "{{ DOCKER_BACKUP_REMOTE_2_LOCAL_FILE }}"
     dest: "{{ DOCKER_BACKUP_REMOTE_2_LOCAL_SCRIPT }}"
     mode: "0755"
@@ -3,6 +3,6 @@
 hosts="{{ DOCKER_BACKUP_REMOTE_2_LOCAL_BACKUP_PROVIDERS | join(' ') }}";
 errors=0
 for host in $hosts; do
-    bash {{ DOCKER_BACKUP_REMOTE_2_LOCAL_SCRIPT }} $host || ((errors+=1));
+    python {{ DOCKER_BACKUP_REMOTE_2_LOCAL_SCRIPT }} $host || ((errors+=1));
 done;
 exit $errors;
@@ -1,5 +1,9 @@
 # General
 application_id: svc-bkp-rmt-2-loc
-system_service_id: "{{ application_id }}"
+system_service_id: "{{ application_id }}"

 # Role Specific
 DOCKER_BACKUP_REMOTE_2_LOCAL_DIR:    '{{ PATH_ADMINISTRATOR_SCRIPTS }}{{ application_id }}/'
-DOCKER_BACKUP_REMOTE_2_LOCAL_SCRIPT: "{{ DOCKER_BACKUP_REMOTE_2_LOCAL_DIR }}svc-bkp-rmt-2-loc.sh"
+DOCKER_BACKUP_REMOTE_2_LOCAL_FILE:   'pull-specific-host.py'
+DOCKER_BACKUP_REMOTE_2_LOCAL_SCRIPT: "{{ [ DOCKER_BACKUP_REMOTE_2_LOCAL_DIR , DOCKER_BACKUP_REMOTE_2_LOCAL_FILE ] | path_join }}"
 DOCKER_BACKUP_REMOTE_2_LOCAL_BACKUP_PROVIDERS: "{{ applications | get_app_conf(application_id, 'backup_providers') }}"
@@ -13,7 +13,7 @@ get_backup_types="find /Backups/$hashed_machine_id/ -maxdepth 1 -type d -execdir


# @todo This configuration is not scalable yet. If other backup services than sys-ctl-bkp-docker-2-loc are integrated, this logic needs to be optimized.
-get_version_directories="ls -d /Backups/$hashed_machine_id/sys-ctl-bkp-docker-2-loc/*"
+get_version_directories="ls -d /Backups/$hashed_machine_id/backup-docker-to-local/*"
last_version_directory="$($get_version_directories | tail -1)"
rsync_command="sudo rsync --server --sender -blogDtpre.iLsfxCIvu . $last_version_directory/"
@@ -3,30 +3,6 @@
     name: backup
     create_home: yes

-- name: create .ssh directory
-  file:
-    path: /home/backup/.ssh
-    state: directory
-    owner: backup
-    group: backup
-    mode: '0700'
-
-- name: create /home/backup/.ssh/authorized_keys
-  template:
-    src: "authorized_keys.j2"
-    dest: /home/backup/.ssh/authorized_keys
-    owner: backup
-    group: backup
-    mode: '0644'
-
-- name: create /home/backup/ssh-wrapper.sh
-  copy:
-    src: "ssh-wrapper.sh"
-    dest: /home/backup/ssh-wrapper.sh
-    owner: backup
-    group: backup
-    mode: '0700'
-
 - name: grant backup sudo rights
   copy:
     src: "backup"
@@ -35,3 +11,9 @@
     owner: root
     group: root
   notify: sshd restart
+
+- include_tasks: 02_permissions_ssh.yml
+
+- include_tasks: 03_permissions_folders.yml
+
+- include_tasks: utils/run_once.yml

roles/sys-bkp-provider-user/tasks/02_permissions_ssh.yml (new file, 23 lines)
@@ -0,0 +1,23 @@
- name: create .ssh directory
  file:
    path: /home/backup/.ssh
    state: directory
    owner: backup
    group: backup
    mode: '0700'

- name: create /home/backup/.ssh/authorized_keys
  template:
    src: "authorized_keys.j2"
    dest: /home/backup/.ssh/authorized_keys
    owner: backup
    group: backup
    mode: '0644'

- name: create /home/backup/ssh-wrapper.sh
  copy:
    src: "ssh-wrapper.sh"
    dest: /home/backup/ssh-wrapper.sh
    owner: backup
    group: backup
    mode: '0700'
roles/sys-bkp-provider-user/tasks/03_permissions_folders.yml (new file, 64 lines)
@@ -0,0 +1,64 @@
# Ensure the backups root exists and is owned by backup
- name: Ensure backups root exists and owned by backup
  file:
    path: "{{ BACKUPS_FOLDER_PATH }}"
    state: directory
    owner: backup
    group: backup
    mode: "0700"

# Explicit ACL so 'backup' has rwx, others none
- name: Grant ACL rwx on backups root to backup user
  ansible.posix.acl:
    path: "{{ BACKUPS_FOLDER_PATH }}"
    entity: backup
    etype: user
    permissions: rwx
    state: present

# Set default ACLs so new entries inherit rwx for backup and nothing for others
- name: Set default ACL (inherit) for backup user under backups root
  ansible.posix.acl:
    path: "{{ BACKUPS_FOLDER_PATH }}"
    entity: backup
    etype: user
    permissions: rwx
    default: true
    state: present

# Remove default ACLs for group/others (defensive hardening)
# Default ACLs so new entries inherit only backup's rwx
- name: Default ACL for backup user (inherit)
  ansible.posix.acl:
    path: "{{ BACKUPS_FOLDER_PATH }}"
    etype: user
    entity: backup
    permissions: rwx
    default: true
    state: present

# Explicitly set default group/other to no permissions (instead of absent)
- name: Default ACL for group -> none
  ansible.posix.acl:
    path: "{{ BACKUPS_FOLDER_PATH }}"
    etype: group
    permissions: '---'
    default: true
    state: present

- name: Default ACL for other -> none
  ansible.posix.acl:
    path: "{{ BACKUPS_FOLDER_PATH }}"
    etype: other
    permissions: '---'
    default: true
    state: present

- name: Fix ownership level 0..2 directories to backup:backup
  ansible.builtin.shell: >
    find "{{ BACKUPS_FOLDER_PATH }}" -mindepth 0 -maxdepth 2 -xdev -type d -exec chown backup:backup {} +

- name: Fix perms level 0..2 directories to 0700
  ansible.builtin.shell: >
    find "{{ BACKUPS_FOLDER_PATH }}" -mindepth 0 -maxdepth 2 -xdev -type d -exec chmod 700 {} +
@@ -1,4 +1,2 @@
-- block:
-    - include_tasks: 01_core.yml
-    - include_tasks: utils/run_once.yml
+- include_tasks: 01_core.yml
   when: run_once_sys_bkp_provider_user is not defined
@@ -1,8 +1,7 @@
 - name: Include dependencies
   include_role:
-    name: '{{ item }}'
-  loop:
-    - sys-svc-msmtp
+    name: "sys-svc-msmtp"
+  when: run_once_sys_svc_msmtp is not defined or run_once_sys_svc_msmtp is false

 - include_role:
     name: sys-service
@@ -17,7 +17,7 @@
     name: sys-service
   vars:
     system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
-    system_service_tpl_exec_start: "{{ system_service_script_exec }} --backups-folder-path {{ BACKUPS_FOLDER_PATH }} --maximum-backup-size-percent {{SIZE_PERCENT_MAXIMUM_BACKUP}}"
+    system_service_tpl_exec_start: "{{ system_service_script_exec }} --backups-folder-path {{ BACKUPS_FOLDER_PATH }} --maximum-backup-size-percent {{ SIZE_PERCENT_MAXIMUM_BACKUP }}"
     system_service_tpl_exec_start_pre: '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(" ") }} --ignore {{ SYS_SERVICE_GROUP_CLEANUP | join(" ") }} --timeout "{{ SYS_TIMEOUT_BACKUP_SERVICES }}"'
     system_service_copy_files: true
     system_service_force_linear_sync: false
@@ -39,6 +39,18 @@ if [ "$force_freeing" = true ]; then
     docker exec -u www-data $nextcloud_application_container /var/www/html/occ versions:cleanup || exit 6
   fi

+  # Mastodon cleanup (remote media cache)
+  mastodon_application_container="{{ applications | get_app_conf('web-app-mastodon', 'docker.services.mastodon.name') }}"
+  mastodon_cleanup_days="1"
+
+  if [ -n "$mastodon_application_container" ] && docker ps -a --format '{% raw %}{{.Names}}{% endraw %}' | grep -qw "$mastodon_application_container"; then
+    echo "Cleaning up Mastodon media cache (older than ${mastodon_cleanup_days} days)" &&
+    docker exec -u root "$mastodon_application_container" bash -lc "bin/tootctl media remove --days=${mastodon_cleanup_days}" || exit 8
+
+    # Optional: additionally remove local thumbnail/cache files older than X days
+    # Warning: these will be regenerated when accessed, which may cause extra CPU/I/O load
+    # docker exec -u root "$mastodon_application_container" bash -lc "find /mastodon/public/system/cache -type f -mtime +${mastodon_cleanup_days} -delete" || exit 9
+  fi
 fi

 if command -v pacman >/dev/null 2>&1 ; then
roles/sys-front-inj-all/tasks/01_dependencies.yml (new file, 16 lines)
@@ -0,0 +1,16 @@
- name: "Load CDN for '{{ domain }}'"
  include_role:
    name: web-svc-cdn
    public: false
  when:
    - application_id != 'web-svc-cdn'
    - run_once_web_svc_cdn is not defined

- name: Load Logout for '{{ domain }}'
  include_role:
    name: web-svc-logout
    public: false
  when:
    - run_once_web_svc_logout is not defined
    - application_id != 'web-svc-logout'
    - inj_enabled.logout
@@ -1,22 +1,41 @@
+- block:
+    - name: Include dependency 'sys-svc-webserver-core'
+      include_role:
+        name: sys-svc-webserver-core
+      when: run_once_sys_svc_webserver_core is not defined
+    - include_tasks: utils/run_once.yml
+  when: run_once_sys_front_inj_all is not defined
+
 - name: Build inj_enabled
   set_fact:
     inj_enabled: "{{ applications | inj_enabled(application_id, SRV_WEB_INJ_COMP_FEATURES_ALL) }}"

-- name: "Load CDN Service for '{{ domain }}'"
-  include_role:
-    name: sys-svc-cdn
-    public: true # Expose variables so that they can be used in all injection roles
+- name: "Included dependent services"
+  include_tasks: 01_dependencies.yml
   vars:
     proxy_extra_configuration: ""

-- name: Reinitialize 'inj_enabled' for '{{ domain }}', after modification by CDN
+- name: Reinitialize 'inj_enabled' for '{{ domain }}', after loading the required webservices
   set_fact:
     inj_enabled: "{{ applications | inj_enabled(application_id, SRV_WEB_INJ_COMP_FEATURES_ALL) }}"
+    inj_head_features: "{{ SRV_WEB_INJ_COMP_FEATURES_ALL | inj_features('head') }}"
+    inj_body_features: "{{ SRV_WEB_INJ_COMP_FEATURES_ALL | inj_features('body') }}"
+
+- name: "Load CDN Service for '{{ domain }}'"
+  include_role:
+    name: sys-svc-cdn
+    public: true
+
+- name: "Activate logout proxy for '{{ domain }}'"
+  include_role:
+    name: sys-front-inj-logout
+    public: true
+  when: inj_enabled.logout

 - name: "Activate Desktop iFrame notifier for '{{ domain }}'"
   include_role:
     name: sys-front-inj-desktop
-    public: true # Vars used in templates
+    public: true
   when: inj_enabled.desktop

 - name: "Activate Corporate CSS for '{{ domain }}'"
@@ -33,17 +52,3 @@
   include_role:
     name: sys-front-inj-javascript
   when: inj_enabled.javascript
-
-- name: "Activate logout proxy for '{{ domain }}'"
-  include_role:
-    name: sys-front-inj-logout
-    public: true # Vars used in templates
-  when: inj_enabled.logout
-
-- block:
-    - name: Include dependency 'sys-svc-webserver-core'
-      include_role:
-        name: sys-svc-webserver-core
-      when: run_once_sys_svc_webserver_core is not defined
-    - include_tasks: utils/run_once.yml
-  when: run_once_sys_front_inj_all is not defined
@@ -1,8 +1,3 @@
-- name: Include dependency 'sys-svc-webserver-core'
-  include_role:
-    name: sys-svc-webserver-core
-  when: run_once_sys_svc_webserver_core is not defined
-
 - name: Generate color palette with colorscheme-generator
   set_fact:
     color_palette: "{{ lookup('colorscheme', CSS_BASE_COLOR, count=CSS_COUNT, shades=CSS_SHADES) }}"
@@ -19,3 +14,5 @@
     group: "{{ NGINX.USER }}"
     mode: '0644'
   loop: "{{ CSS_FILES }}"
+
+- include_tasks: utils/run_once.yml
@@ -1,6 +1,4 @@
-- block:
-    - include_tasks: 01_core.yml
-    - include_tasks: utils/run_once.yml
+- include_tasks: 01_core.yml
   when: run_once_sys_front_inj_css is not defined

 - name: "Resolve optional app style.css source for '{{ application_id }}'"
@@ -3,6 +3,6 @@
 {% for css_file in ['default.css','bootstrap.css'] %}
 <link rel="stylesheet" href="{{ [ cdn_urls.shared.css, css_file, lookup('local_mtime_qs', [__css_tpl_dir, css_file ~ '.j2'] | path_join)] | url_join }}">
 {% endfor %}
-{% if app_style_present | bool %}
+{% if app_style_present | default(false) | bool %}
 <link rel="stylesheet" href="{{ [ cdn_urls.role.release.css, 'style.css', lookup('local_mtime_qs', app_style_src)] | url_join }}">
 {% endif %}
@@ -1,8 +1,4 @@
 - block:
-    - name: Include dependency 'sys-svc-webserver-core'
-      include_role:
-        name: sys-svc-webserver-core
-      when: run_once_sys_svc_webserver_core is not defined
     - include_tasks: 01_deploy.yml
     - include_tasks: utils/run_once.yml
   when: run_once_sys_front_inj_desktop is not defined
@@ -1,11 +1,4 @@
-- block:
-
-    - name: Include dependency 'sys-svc-webserver-core'
-      include_role:
-        name: sys-svc-webserver-core
-      when: run_once_sys_svc_webserver_core is not defined
-    - include_tasks: utils/run_once.yml
-  when: run_once_sys_front_inj_javascript is not defined
+# run_once_sys_front_inj_javascript: deactivated

 - name: "Load JavaScript code for '{{ application_id }}'"
   set_fact:
@@ -1,8 +1,6 @@
 - name: Include dependency 'sys-svc-webserver-core'
   include_role:
     name: sys-svc-webserver-core
   when:
     - run_once_sys_svc_webserver_core is not defined

-- name: "deploy the logout.js"
-  include_tasks: "02_deploy.yml"
+- include_tasks: "02_deploy.yml"

 - set_fact:
     run_once_sys_front_inj_logout: true
   changed_when: false
@@ -1,10 +1,10 @@
 - name: Deploy logout.js
-  template:
-    src: logout.js.j2
-    dest: "{{ INJ_LOGOUT_JS_DESTINATION }}"
-    owner: "{{ NGINX.USER }}"
-    group: "{{ NGINX.USER }}"
-    mode: '0644'
+  copy:
+    src: logout.js
+    dest: "{{ INJ_LOGOUT_JS_DESTINATION }}"
+    owner: "{{ NGINX.USER }}"
+    group: "{{ NGINX.USER }}"
+    mode: '0644'

 - name: Get stat for logout.js
   stat:
@@ -1,16 +1,16 @@
-- block:
-    - include_tasks: 01_core.yml
-    - set_fact:
-        run_once_sys_front_inj_logout: true
+- name: "Load base for '{{ application_id }}'"
+  include_tasks: 01_core.yml
   when: run_once_sys_front_inj_logout is not defined

 - name: "Load logout code for '{{ application_id }}'"
   set_fact:
     logout_code: "{{ lookup('template', 'logout_one_liner.js.j2') }}"
   changed_when: false

 - name: "Collapse logout code into one-liner for '{{ application_id }}'"
   set_fact:
     logout_code_one_liner: "{{ logout_code | to_one_liner }}"
   changed_when: false

 - name: "Append logout CSP hash for '{{ application_id }}'"
   set_fact:
@@ -1 +1 @@
-<script src="{{ cdn_urls.shared.js }}/{{ INJ_LOGOUT_JS_FILE_NAME }}{{ lookup('local_mtime_qs', [playbook_dir, 'roles', 'sys-front-inj-logout', 'templates', INJ_LOGOUT_JS_FILE_NAME ~ '.j2'] | path_join) }}"></script>
+<script src="{{ cdn_urls.shared.js }}/{{ INJ_LOGOUT_JS_FILE_NAME }}{{ lookup('local_mtime_qs', [playbook_dir, 'roles', 'sys-front-inj-logout', 'files', INJ_LOGOUT_JS_FILE_NAME] | path_join) }}"></script>
@@ -1,10 +1,4 @@
-- block:
-    - name: Include dependency 'sys-svc-webserver-core'
-      include_role:
-        name: sys-svc-webserver-core
-      when: run_once_sys_svc_webserver_core is not defined
-    - include_tasks: utils/run_once.yml
-  when: run_once_sys_front_inj_matomo is not defined
+# run_once_sys_front_inj_matomo: deactivated

 - name: "Relevant variables for role: {{ role_path | basename }}"
   debug:
@@ -1,21 +0,0 @@
- name: "Load CDN for '{{ domain }}'"
  include_role:
    name: web-svc-cdn
    public: false
  when:
    - application_id != 'web-svc-cdn'
    - run_once_web_svc_cdn is not defined

# ------------------------------------------------------------------
# Only-once creations (shared root and vendor)
# ------------------------------------------------------------------
- name: Ensure shared root and vendor exist (run once)
  ansible.builtin.file:
    path: "{{ item }}"
    state: directory
    owner: "{{ NGINX.USER }}"
    group: "{{ NGINX.USER }}"
    mode: "0755"
  loop: "{{ CDN_DIRS_GLOBAL }}"

- include_tasks: utils/run_once.yml
@@ -1,6 +1,14 @@
 ---
 - block:
-    - include_tasks: 01_core.yml
+    - name: Ensure shared root and vendor exist (run once)
+      ansible.builtin.file:
+        path: "{{ item }}"
+        state: directory
+        owner: "{{ NGINX.USER }}"
+        group: "{{ NGINX.USER }}"
+        mode: "0755"
+      loop: "{{ CDN_DIRS_GLOBAL }}"
     - include_tasks: utils/run_once.yml
   when:
     - run_once_sys_svc_cdn is not defined
@@ -14,4 +14,7 @@

 - include_role:
     name: sys-ctl-hlth-msmtp
-  when: run_once_sys_ctl_hlth_msmtp is not defined
+  when: run_once_sys_ctl_hlth_msmtp is not defined
+
+- set_fact:
+    run_once_sys_svc_msmtp: true
@@ -1,5 +1,6 @@
 - block:
-    - include_tasks: 01_core.yml
-    - set_fact:
-        run_once_sys_svc_msmtp: true
-  when: run_once_sys_svc_msmtp is not defined
+    - name: "Load MSMTP Core Once"
+      include_tasks: 01_core.yml
+  when:
+    - run_once_sys_svc_msmtp is not defined or run_once_sys_svc_msmtp is false
+    # Just execute when mailu_token is defined
+    - users['no-reply'].mailu_token is defined
@@ -5,7 +5,7 @@ users:
     username: "{{ PRIMARY_DOMAIN.split('.')[0] }}"
   tld:
     description: "Auto Generated Account to reserve the TLD"
-    username: "{{ PRIMARY_DOMAIN.split('.')[1] }}"
+    username: "{{ PRIMARY_DOMAIN.split('.')[1] if (PRIMARY_DOMAIN is defined and (PRIMARY_DOMAIN.split('.') | length) > 1) else (PRIMARY_DOMAIN ~ '_tld ') }}"
   root:
     username: root
     uid: 0
@@ -19,7 +19,7 @@ docker:
     name: "baserow"
     cpus: 1.0
     mem_reservation: 0.5g
-    mem_limit: 1g
+    mem_limit: 2g
     pids_limit: 512
   volumes:
     data: "baserow_data"
@@ -17,6 +17,8 @@
 - name: "load docker, proxy for '{{ application_id }}'"
   include_role:
     name: sys-stk-full-stateless
+  vars:
+    docker_compose_flush_handlers: false

 - name: "Check if host-specific config.yaml exists in {{ DESKTOP_CONFIG_INV_PATH }}"
   stat:
@@ -57,8 +59,16 @@
   notify: docker compose up
   when: not config_file.stat.exists

-- name: add docker-compose.yml
-  template:
-    src: docker-compose.yml.j2
-    dest: "{{ docker_compose.directories.instance }}docker-compose.yml"
-  notify: docker compose up
+- name: "Flush docker compose handlers"
+  meta: flush_handlers
+
+- name: Wait for Desktop HTTP endpoint (required so all logos can be downloaded during initialization)
+  uri:
+    url: "http://127.0.0.1:{{ http_port }}/"
+    status_code: 200
+  register: desktop_http
+  retries: 60
+  delay: 5
+  until: desktop_http.status == 200

 - include_tasks: utils/run_once.yml
@@ -1,5 +1,3 @@
 ---
-- block:
-    - include_tasks: 01_core.yml
-    - include_tasks: utils/run_once.yml
+- include_tasks: 01_core.yml
   when: run_once_web_app_desktop is not defined
@@ -1,5 +1,6 @@
 # General
 application_id: "web-app-desktop"
+http_port: "{{ ports.localhost.http[application_id] }}"

 ## Webserver
 proxy_extra_configuration: "{{ lookup('template', 'nginx/sso.html.conf.j2') }}"
@@ -43,9 +43,10 @@ plugins:
     enabled: true
   discourse-akismet:
     enabled: true
-  discourse-cakeday:
-    enabled: true
-  # discourse-solved: Seems like this plugin is now also part of the default setup
+  # The following plugins moved to the default setup
+  # discourse-cakeday:
+  #   enabled: true
+  # discourse-solved:
+  #   enabled: true
   # discourse-voting:
   #   enabled: true
@@ -6,4 +6,6 @@
   include_tasks: 03_docker.yml

 - name: "Setup '{{ application_id }}' network"
-  include_tasks: 04_network.yml
+  include_tasks: 04_network.yml
+
+- include_tasks: utils/run_once.yml
@@ -1,6 +1,4 @@
 ---
 - name: "Setup {{ application_id }}"
-  include_tasks: 01_core.yml
   when: run_once_web_app_discourse is not defined
+  block:
+    - include_tasks: 01_core.yml
+    - include_tasks: utils/run_once.yml
@@ -1,11 +1,13 @@
 #!/bin/sh
-set -euo pipefail
+# POSIX-safe entrypoint for EspoCRM container
+# Compatible with /bin/sh (dash/busybox). Avoids 'pipefail' and non-portable features.
+set -eu

 log() { printf '%s %s\n' "[entrypoint]" "$*" >&2; }

 # --- Simple boolean normalization --------------------------------------------
 bool_norm () {
-  v="$(printf '%s' "${1:-}" | tr '[:upper:]' '[:lower:]')"
+  v="$(printf '%s' "${1:-}" | tr '[:upper:]' '[:lower:]' 2>/dev/null || true)"
   case "$v" in
     1|true|yes|on) echo "true" ;;
     0|false|no|off|"") echo "false" ;;
@@ -13,30 +15,45 @@ bool_norm () {
   esac
 }

-# Expected ENV (from env.j2)
+# --- Environment initialization ----------------------------------------------
 MAINTENANCE="$(bool_norm "${ESPO_INIT_MAINTENANCE_MODE:-false}")"
 CRON_DISABLED="$(bool_norm "${ESPO_INIT_CRON_DISABLED:-false}")"
 USE_CACHE="$(bool_norm "${ESPO_INIT_USE_CACHE:-true}")"

 APP_DIR="/var/www/html"
-SET_FLAGS_SCRIPT="${ESPOCRM_SET_FLAGS_SCRIPT}"
+
+# Provided by env.j2 (fallback ensures robustness)
+SET_FLAGS_SCRIPT="${ESPOCRM_SET_FLAGS_SCRIPT:-/usr/local/bin/set_flags.php}"
+if [ ! -f "$SET_FLAGS_SCRIPT" ]; then
+  log "WARN: SET_FLAGS_SCRIPT '$SET_FLAGS_SCRIPT' not found; falling back to /usr/local/bin/set_flags.php"
+  SET_FLAGS_SCRIPT="/usr/local/bin/set_flags.php"
+fi

 # --- Wait for bootstrap.php (max 60s, e.g. fresh volume) ----------------------
 log "Waiting for ${APP_DIR}/bootstrap.php..."
-for i in $(seq 1 60); do
-  [ -f "${APP_DIR}/bootstrap.php" ] && break
+count=0
+while [ $count -lt 60 ] && [ ! -f "${APP_DIR}/bootstrap.php" ]; do
   sleep 1
+  count=$((count + 1))
 done
 if [ ! -f "${APP_DIR}/bootstrap.php" ]; then
-  log "ERROR: bootstrap.php missing after 60s"; exit 1
+  log "ERROR: bootstrap.php missing after 60s"
+  exit 1
 fi

 # --- Apply config flags via set_flags.php ------------------------------------
 log "Applying runtime flags via set_flags.php..."
-php "${SET_FLAGS_SCRIPT}"
+if ! php "${SET_FLAGS_SCRIPT}"; then
+  log "ERROR: set_flags.php execution failed"
+  exit 1
+fi

 # --- Clear cache (safe) -------------------------------------------------------
-php "${APP_DIR}/clear_cache.php" || true
+if php "${APP_DIR}/clear_cache.php" 2>/dev/null; then
+  log "Cache cleared successfully."
+else
+  log "WARN: Cache clearing skipped or failed (non-critical)."
+fi

 # --- Hand off to CMD ----------------------------------------------------------
 if [ "$#" -gt 0 ]; then
@@ -56,5 +73,6 @@ for cmd in apache2-foreground httpd-foreground php-fpm php-fpm8.3 php-fpm8.2 sup
   fi
 done

+# --- Fallback ---------------------------------------------------------------
 log "No known server command found; tailing to keep container alive."
 exec tail -f /dev/null
@@ -1,7 +1,6 @@
-load_dependencies: True       # When set to false the dependencies aren't loaded. Helpful for developing
+load_dependencies: True  # When set to false the dependencies aren't loaded. Helpful for developing
 actions:
-  import_realm: True          # Import REALM
-  create_automation_client: True
+  import_realm: True  # Import REALM
 features:
   matomo: true
   css: true
@@ -50,4 +49,10 @@ docker:
 credentials:
   recaptcha:
     website_key: ""  # Required if you enabled recaptcha
-    secret_key: ""   # Required if you enabled recaptcha
+    secret_key: ""   # Required if you enabled recaptcha
+
+accounts:
+  bootstrap:
+    username: "administrator"
+  system:
+    username: "{{ SOFTWARE_NAME | replace('.', '_') | lower }}"
roles/web-app-keycloak/tasks/05_login.yml (new file, 89 lines)
@@ -0,0 +1,89 @@
- name: "Wait until '{{ KEYCLOAK_CONTAINER }}' container is healthy"
  community.docker.docker_container_info:
    name: "{{ KEYCLOAK_CONTAINER }}"
  register: kc_info
  retries: 60
  delay: 5
  until: >
    kc_info is succeeded and
    (kc_info.container | default({})) != {} and
    (kc_info.container.State | default({})) != {} and
    (kc_info.container.State.Health | default({})) != {} and
    (kc_info.container.State.Health.Status | default('')) == 'healthy'

- name: Ensure permanent Keycloak admin exists and can log in (container env only)
  block:

    - name: Try login with permanent admin (uses container ENV)
      shell: |
        {{ KEYCLOAK_EXEC_CONTAINER }} sh -lc '
          {{ KEYCLOAK_KCADM }} config credentials \
            --server {{ KEYCLOAK_SERVER_INTERNAL_URL }} \
            --realm master \
            --user "$KEYCLOAK_PERMANENT_ADMIN_USERNAME" \
            --password "$KEYCLOAK_PERMANENT_ADMIN_PASSWORD"
        '
      register: kc_login_perm
      changed_when: false

  rescue:

    - name: Login with bootstrap admin (uses container ENV)
      shell: |
        {{ KEYCLOAK_EXEC_CONTAINER }} sh -lc '
          {{ KEYCLOAK_KCADM }} config credentials \
            --server {{ KEYCLOAK_SERVER_INTERNAL_URL }} \
            --realm master \
            --user "$KC_BOOTSTRAP_ADMIN_USERNAME" \
            --password "$KC_BOOTSTRAP_ADMIN_PASSWORD"
        '
      register: kc_login_bootstrap
      changed_when: false

    - name: Ensure permanent admin user exists (create if missing)
      shell: |
        {{ KEYCLOAK_EXEC_CONTAINER }} sh -lc '
          {{ KEYCLOAK_KCADM }} create users -r master \
            -s "username=$KEYCLOAK_PERMANENT_ADMIN_USERNAME" \
            -s "enabled=true"
        '
      register: kc_create_perm_admin
      failed_when: >
        not (
          kc_create_perm_admin.rc == 0 or
          (kc_create_perm_admin.stderr is defined and
           ('User exists with same username' in kc_create_perm_admin.stderr))
        )
      changed_when: kc_create_perm_admin.rc == 0

    - name: Set permanent admin password (by username, no ID needed)
      shell: |
        {{ KEYCLOAK_EXEC_CONTAINER }} sh -lc '
          {{ KEYCLOAK_KCADM }} set-password -r master \
            --username "$KEYCLOAK_PERMANENT_ADMIN_USERNAME" \
            --new-password "$KEYCLOAK_PERMANENT_ADMIN_PASSWORD"
        '
      changed_when: true

    - name: Grant global admin via master realm role 'admin'
      shell: |
        {{ KEYCLOAK_EXEC_CONTAINER }} sh -lc '
          {{ KEYCLOAK_KCADM }} add-roles -r master \
            --uusername "$KEYCLOAK_PERMANENT_ADMIN_USERNAME" \
            --rolename admin
        '
      register: kc_grant_master_admin
      changed_when: (kc_grant_master_admin.stderr is defined and kc_grant_master_admin.stderr | length > 0) or
                    (kc_grant_master_admin.stdout is defined and kc_grant_master_admin.stdout | length > 0)
      failed_when: false

    - name: Verify login with permanent admin (after creation)
      shell: |
        {{ KEYCLOAK_EXEC_CONTAINER }} sh -lc '
          {{ KEYCLOAK_KCADM }} config credentials \
            --server {{ KEYCLOAK_SERVER_INTERNAL_URL }} \
            --realm master \
            --user "$KEYCLOAK_PERMANENT_ADMIN_USERNAME" \
            --password "$KEYCLOAK_PERMANENT_ADMIN_PASSWORD"
        '
      changed_when: false
@@ -1,63 +0,0 @@
# Creates a confidential client with service account, fetches the secret,
# and grants realm-management/realm-admin to its service-account user.

- name: "Ensure automation client exists (confidential + service accounts)"
  shell: |
    {{ KEYCLOAK_EXEC_KCADM }} create clients -r {{ KEYCLOAK_REALM }} \
      -s clientId={{ KEYCLOAK_AUTOMATION_CLIENT_ID }} \
      -s protocol=openid-connect \
      -s publicClient=false \
      -s serviceAccountsEnabled=true \
      -s directAccessGrantsEnabled=false
  register: create_client
  changed_when: create_client.rc == 0
  failed_when: create_client.rc != 0 and ('already exists' not in (create_client.stderr | lower))

- name: "Resolve automation client id"
  shell: >
    {{ KEYCLOAK_EXEC_KCADM }} get clients -r {{ KEYCLOAK_REALM }}
    --query 'clientId={{ KEYCLOAK_AUTOMATION_CLIENT_ID }}' --fields id --format json | jq -r '.[0].id'
  register: auto_client_id_cmd
  changed_when: false

- name: "Fail if client id could not be resolved"
  assert:
    that:
      - "(auto_client_id_cmd.stdout | trim) is match('^[0-9a-f-]+$')"
    fail_msg: "Automation client id could not be resolved."

- name: "Read client secret"
  no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"
  shell: >
    {{ KEYCLOAK_EXEC_KCADM }} get clients/{{ auto_client_id_cmd.stdout | trim }}/client-secret
    -r {{ KEYCLOAK_REALM }} --format json | jq -r .value
  register: auto_client_secret_cmd
  changed_when: false

- name: "Expose client secret as a fact"
  set_fact:
    KEYCLOAK_AUTOMATION_CLIENT_SECRET: "{{ auto_client_secret_cmd.stdout | trim }}"
  no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"

- name: "Grant {{ KEYCLOAK_AUTOMATION_GRANT_ROLE }} to service account"
  shell: >
    {{ KEYCLOAK_EXEC_KCADM }} add-roles -r {{ KEYCLOAK_REALM }}
    --uusername service-account-{{ KEYCLOAK_AUTOMATION_CLIENT_ID }}
    --cclientid realm-management
    --rolename {{ KEYCLOAK_AUTOMATION_GRANT_ROLE }}
  register: grant_role
  changed_when: grant_role.rc == 0
  failed_when: grant_role.rc != 0 and ('already exists' not in (grant_role.stderr | lower))

- name: "Verify client-credentials login works"
  no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"
  shell: >
    {{ KEYCLOAK_EXEC_KCADM }} config credentials
    --server {{ KEYCLOAK_SERVER_INTERNAL_URL }}
    --realm {{ KEYCLOAK_REALM }}
    --client {{ KEYCLOAK_AUTOMATION_CLIENT_ID }}
    --client-secret {{ KEYCLOAK_AUTOMATION_CLIENT_SECRET }} &&
    {{ KEYCLOAK_EXEC_KCADM }} get realms/{{ KEYCLOAK_REALM }} --format json | jq -r '.realm'
  register: verify_cc
  changed_when: false
  failed_when: (verify_cc.rc != 0) or ((verify_cc.stdout | trim) != (KEYCLOAK_REALM | trim))
@@ -13,118 +13,21 @@
   include_tasks: 04_dependencies.yml
   when: KEYCLOAK_LOAD_DEPENDENCIES | bool

-- name: "Wait until '{{ KEYCLOAK_CONTAINER }}' container is healthy"
-  community.docker.docker_container_info:
-    name: "{{ KEYCLOAK_CONTAINER }}"
-  register: kc_info
-  retries: 60
-  delay: 5
-  until: >
-    kc_info is succeeded and
-    (kc_info.container | default({})) != {} and
-    (kc_info.container.State | default({})) != {} and
-    (kc_info.container.State.Health | default({})) != {} and
-    (kc_info.container.State.Health.Status | default('')) == 'healthy'
+- name: "Load Login routines for '{{ application_id }}'"
+  include_tasks: 05_login.yml

-- name: kcadm login (master)
-  no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"
-  shell: >
-    {{ KEYCLOAK_EXEC_KCADM }} config credentials
-    --server {{ KEYCLOAK_SERVER_INTERNAL_URL }}
-    --realm master
-    --user {{ KEYCLOAK_MASTER_API_USER_NAME }}
-    --password {{ KEYCLOAK_MASTER_API_USER_PASSWORD }}
-  changed_when: false
+- name: "Load Client Update routines for '{{ application_id }}'"
+  include_tasks: update/01_client.yml

-- name: Verify kcadm session works (quick read)
-  shell: >
-    {{ KEYCLOAK_EXEC_KCADM }} get realms --format json | jq -r '.[0].realm' | head -n1
-  register: kcadm_verify
-  changed_when: false
-  failed_when: >
-    (kcadm_verify.rc != 0)
-    or ('HTTP 401' in (kcadm_verify.stderr | default('')))
-    or ((kcadm_verify.stdout | trim) == '')
-
-# --- Create & grant automation service account (Option A) ---
-- name: "Ensure automation service account client (Option A)"
-  include_tasks: 05a_service_account.yml
-  when: applications | get_app_conf(application_id, 'actions.create_automation_client', True)
+- name: "Load Mail Update routines for '{{ application_id }} - {{ KEYCLOAK_REALM }}'"
+  include_tasks: update/02_mail_realm.yml

-# --- Switch session to the service account for all subsequent API work ---
-- name: kcadm login (realm) using service account
-  no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"
-  shell: >
-    {{ KEYCLOAK_EXEC_KCADM }} config credentials
-    --server {{ KEYCLOAK_SERVER_INTERNAL_URL }}
-    --realm {{ KEYCLOAK_REALM }}
-    --client {{ KEYCLOAK_AUTOMATION_CLIENT_ID }}
-    --client-secret {{ KEYCLOAK_AUTOMATION_CLIENT_SECRET }}
-  changed_when: false
+- name: "Load Mail Update routines for '{{ application_id }} - master'"
+  include_tasks: update/03_mail_master.yml

-- name: Verify kcadm session works (exact realm via service account)
-  shell: >
-    {{ KEYCLOAK_EXEC_KCADM }} get realms/{{ KEYCLOAK_REALM }} --format json | jq -r '.realm'
-  register: kcadm_verify_sa
-  changed_when: false
-  failed_when: >
-    (kcadm_verify_sa.rc != 0)
-    or ('HTTP 401' in (kcadm_verify_sa.stderr | default('')))
-    or ((kcadm_verify_sa.stdout | trim) != (KEYCLOAK_REALM | trim))
+- name: "Load RBAC Update routines for '{{ application_id }}'"
+  include_tasks: update/04_rbac_client_scope.yml

-- name: "Update Client settings"
-  vars:
-    kc_object_kind: "client"
-    kc_lookup_value: "{{ KEYCLOAK_CLIENT_ID }}"
-    kc_desired: >-
-      {{
-        KEYCLOAK_DICTIONARY_REALM.clients
-        | selectattr('clientId','equalto', KEYCLOAK_CLIENT_ID)
-        | list | first
-      }}
-    kc_force_attrs:
-      publicClient: >-
-        {{
-          (KEYCLOAK_DICTIONARY_REALM.clients
-            | selectattr('clientId','equalto', KEYCLOAK_CLIENT_ID)
-            | map(attribute='publicClient')
-            | first)
-        }}
-      serviceAccountsEnabled: >-
-        {{
-          (KEYCLOAK_DICTIONARY_REALM.clients
-            | selectattr('clientId','equalto', KEYCLOAK_CLIENT_ID)
-            | map(attribute='serviceAccountsEnabled')
-            | first )
-        }}
-      frontchannelLogout: >-
-        {{
-          (KEYCLOAK_DICTIONARY_REALM.clients
-            | selectattr('clientId','equalto', KEYCLOAK_CLIENT_ID)
-            | map(attribute='frontchannelLogout')
-            | first)
-        }}
-      attributes: >-
-        {{
-          ( (KEYCLOAK_DICTIONARY_REALM.clients
-             | selectattr('clientId','equalto', KEYCLOAK_CLIENT_ID)
-             | list | first | default({}) ).attributes | default({}) )
-          | combine({'frontchannel.logout.url': KEYCLOAK_FRONTCHANNEL_LOGOUT_URL}, recursive=True)
-        }}
-  include_tasks: _update.yml
-
-- name: "Update REALM mail settings from realm dictionary (SPOT)"
-  include_tasks: _update.yml
-  vars:
-    kc_object_kind: "realm"
-    kc_lookup_field: "id"
-    kc_lookup_value: "{{ KEYCLOAK_REALM }}"
-    kc_desired:
-      smtpServer: "{{ KEYCLOAK_DICTIONARY_REALM.smtpServer | default({}, true) }}"
-    kc_merge_path: "smtpServer"
-  no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"
-
-- include_tasks: 05_rbac_client_scope.yml
-
-- include_tasks: 06_ldap.yml
+- name: "Load LDAP Update routines for '{{ application_id }}'"
+  include_tasks: update/05_ldap.yml
   when: KEYCLOAK_LDAP_ENABLED | bool
roles/web-app-keycloak/tasks/update/01_client.yml (new file, 40 lines)
@@ -0,0 +1,40 @@
- name: "Update Client settings"
  vars:
    kc_object_kind: "client"
    kc_lookup_value: "{{ KEYCLOAK_CLIENT_ID }}"
    kc_desired: >-
      {{
        KEYCLOAK_DICTIONARY_REALM.clients
        | selectattr('clientId','equalto', KEYCLOAK_CLIENT_ID)
        | list | first
      }}
    kc_force_attrs:
      publicClient: >-
        {{
          (KEYCLOAK_DICTIONARY_REALM.clients
            | selectattr('clientId','equalto', KEYCLOAK_CLIENT_ID)
            | map(attribute='publicClient')
            | first)
        }}
      serviceAccountsEnabled: >-
        {{
          (KEYCLOAK_DICTIONARY_REALM.clients
            | selectattr('clientId','equalto', KEYCLOAK_CLIENT_ID)
            | map(attribute='serviceAccountsEnabled')
            | first )
        }}
      frontchannelLogout: >-
        {{
          (KEYCLOAK_DICTIONARY_REALM.clients
            | selectattr('clientId','equalto', KEYCLOAK_CLIENT_ID)
            | map(attribute='frontchannelLogout')
            | first)
        }}
      attributes: >-
        {{
          ( (KEYCLOAK_DICTIONARY_REALM.clients
             | selectattr('clientId','equalto', KEYCLOAK_CLIENT_ID)
             | list | first | default({}) ).attributes | default({}) )
          | combine({'frontchannel.logout.url': KEYCLOAK_FRONTCHANNEL_LOGOUT_URL}, recursive=True)
        }}
  include_tasks: _update.yml
roles/web-app-keycloak/tasks/update/02_mail_realm.yml (new file, 10 lines)
@@ -0,0 +1,10 @@
- name: "Update {{ KEYCLOAK_REALM }} REALM mail settings from realm dictionary"
  include_tasks: _update.yml
  vars:
    kc_object_kind: "realm"
    kc_lookup_field: "id"
    kc_lookup_value: "{{ KEYCLOAK_REALM }}"
    kc_desired:
      smtpServer: "{{ KEYCLOAK_DICTIONARY_REALM.smtpServer | default({}, true) }}"
    kc_merge_path: "smtpServer"
  no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"
roles/web-app-keycloak/tasks/update/03_mail_master.yml (new file, 10 lines)
@@ -0,0 +1,10 @@
- name: "Update Master REALM mail settings from realm dictionary"
  include_tasks: _update.yml
  vars:
    kc_object_kind: "realm"
    kc_lookup_field: "id"
    kc_lookup_value: "master"
    kc_desired:
      smtpServer: "{{ KEYCLOAK_DICTIONARY_REALM.smtpServer | default({}, true) }}"
    kc_merge_path: "smtpServer"
  no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"
@@ -1,4 +1,3 @@
# --- Ensure RBAC client scope exists (idempotent) ---
- name: Ensure RBAC client scope exists
  shell: |
    cat <<'JSON' | {{ KEYCLOAK_EXEC_KCADM }} create client-scopes -r {{ KEYCLOAK_REALM }} -f -
@@ -16,12 +15,10 @@
    ('already exists' not in (create_rbac_scope.stderr | lower))
  no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"

# --- Get the scope id we will attach to the client ---
- name: Get all client scopes
  shell: "{{ KEYCLOAK_EXEC_KCADM }} get client-scopes -r {{ KEYCLOAK_REALM }} --format json"
  register: all_scopes
  changed_when: false
  failed_when: "'HTTP 401' in (all_scopes.stderr | default(''))"

- name: Extract RBAC scope id
  set_fact:
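The hunk cuts off right at the set_fact:. Judging from the all_scopes JSON registered above, the extraction presumably boils down to something like the following (the scope name 'rbac' is a placeholder, not taken from this diff):

- name: Extract RBAC scope id (sketch of the truncated step)
  set_fact:
    rbac_scope_id: >-
      {{ (all_scopes.stdout | from_json)
         | selectattr('name', 'equalto', 'rbac')
         | map(attribute='id')
         | first | default('') }}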
@@ -10,19 +10,21 @@ KC_HTTP_ENABLED= true
KC_HEALTH_ENABLED= {{ KEYCLOAK_HEALTH_ENABLED | lower }}
KC_METRICS_ENABLED= true

# Administrator
KEYCLOAK_ADMIN= "{{ KEYCLOAK_ADMIN }}"
KEYCLOAK_ADMIN_PASSWORD= "{{ KEYCLOAK_ADMIN_PASSWORD }}"

# Database
KC_DB= {{ database_type }}
KC_DB_URL= {{ database_url_jdbc }}
KC_DB_USERNAME= {{ database_username }}
KC_DB_PASSWORD= {{ database_password }}

# If the initial administrator already exists and the environment variables are still present at startup, an error message stating the failed creation of the initial administrator is shown in the logs. Keycloak ignores the values and starts up correctly.
KC_BOOTSTRAP_ADMIN_USERNAME= "{{ KEYCLOAK_ADMIN }}"
KC_BOOTSTRAP_ADMIN_PASSWORD= "{{ KEYCLOAK_ADMIN_PASSWORD }}"
# Credentials

## Bootstrap
KC_BOOTSTRAP_ADMIN_USERNAME="{{ KEYCLOAK_BOOTSTRAP_ADMIN_USERNAME }}"
KC_BOOTSTRAP_ADMIN_PASSWORD="{{ KEYCLOAK_BOOTSTRAP_ADMIN_PASSWORD }}"

## Permanent
KEYCLOAK_PERMANENT_ADMIN_USERNAME="{{ KEYCLOAK_PERMANENT_ADMIN_USERNAME }}"
KEYCLOAK_PERMANENT_ADMIN_PASSWORD="{{ KEYCLOAK_PERMANENT_ADMIN_PASSWORD }}"

# Enable detailed logs
{% if MODE_DEBUG | bool %}
@@ -1,3 +0,0 @@
users:
  administrator:
    username: "administrator"
@@ -1,6 +1,6 @@
# General
application_id: "web-app-keycloak" # Internal Infinito.Nexus application id
database_type: "postgres" # Database which will be used
application_id: "web-app-keycloak" # Internal Infinito.Nexus application id
database_type: "postgres" # Database which will be used

# Keycloak
@@ -29,21 +29,22 @@ KEYCLOAK_REALM_IMPORT_FILE_SRC: "import/realm.json.j2"
KEYCLOAK_REALM_IMPORT_FILE_DST: "{{ [KEYCLOAK_REALM_IMPORT_DIR_HOST,'realm.json'] | path_join }}"

## Credentials
KEYCLOAK_ADMIN: "{{ applications | get_app_conf(application_id, 'users.administrator.username') }}"
KEYCLOAK_ADMIN_PASSWORD: "{{ applications | get_app_conf(application_id, 'credentials.administrator_password') }}"

### Bootstrap
KEYCLOAK_BOOTSTRAP_ADMIN_USERNAME: "{{ applications | get_app_conf(application_id, 'accounts.bootstrap.username') }}"
KEYCLOAK_BOOTSTRAP_ADMIN_PASSWORD: "{{ applications | get_app_conf(application_id, 'credentials.administrator_password') }}"

### Permanent
KEYCLOAK_PERMANENT_ADMIN_USERNAME: "{{ applications | get_app_conf(application_id, 'accounts.system.username') }}"
KEYCLOAK_PERMANENT_ADMIN_PASSWORD: "{{ applications | get_app_conf(application_id, 'credentials.administrator_password') }}"

## Docker
KEYCLOAK_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.keycloak.name') }}" # Name of the keycloak docker container
KEYCLOAK_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.keycloak.image') }}" # Keycloak docker image
KEYCLOAK_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.keycloak.version') }}" # Keycloak docker version
KEYCLOAK_KCADM_CONFIG: "/opt/keycloak/data/kcadm.config"
KEYCLOAK_EXEC_KCADM: "docker exec -i {{ KEYCLOAK_CONTAINER }} /opt/keycloak/bin/kcadm.sh --config {{ KEYCLOAK_KCADM_CONFIG }}"

## Automation Service Account (Option A)
KEYCLOAK_AUTOMATION_CLIENT_ID: "infinito-automation"
KEYCLOAK_AUTOMATION_GRANT_ROLE: "realm-admin" # or granular roles if you prefer
# Will be discovered dynamically and set as a fact during the run:
# KEYCLOAK_AUTOMATION_CLIENT_SECRET
KEYCLOAK_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.keycloak.name') }}"
KEYCLOAK_EXEC_CONTAINER: "docker exec -i {{ KEYCLOAK_CONTAINER }}"
KEYCLOAK_KCADM: "/opt/keycloak/bin/kcadm.sh"
KEYCLOAK_EXEC_KCADM: "{{ KEYCLOAK_EXEC_CONTAINER }} {{ KEYCLOAK_KCADM }}"
KEYCLOAK_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.keycloak.image') }}"
KEYCLOAK_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.keycloak.version') }}"

## Server
KEYCLOAK_SERVER_HOST: "127.0.0.1:{{ ports.localhost.http[application_id] }}"
@@ -76,11 +77,6 @@ KEYCLOAK_LDAP_USER_OBJECT_CLASSES: >
  ) | join(', ')
}}

## API
KEYCLOAK_MASTER_API_USER: "{{ applications | get_app_conf(application_id, 'users.administrator') }}" # Master Administrator
KEYCLOAK_MASTER_API_USER_NAME: "{{ KEYCLOAK_MASTER_API_USER.username }}" # Master Administrator Username
KEYCLOAK_MASTER_API_USER_PASSWORD: "{{ KEYCLOAK_MASTER_API_USER.password }}" # Master Administrator Password

# Dictionaries
KEYCLOAK_DICTIONARY_REALM_RAW: "{{ lookup('template', 'import/realm.json.j2') }}"
KEYCLOAK_DICTIONARY_REALM: >-
@@ -17,12 +17,12 @@ server:
  csp:
    flags:
      style-src:
        unsafe-inline: true
        unsafe-inline: true
      script-src-elem:
        unsafe-inline: true
        unsafe-inline: true
      script-src:
        unsafe-inline: true
        unsafe-eval: true
        unsafe-inline: true
        unsafe-eval: true
  rbac:
    roles:
      mail-bot:
@@ -41,7 +41,7 @@
  meta: flush_handlers

- name: "Create Mailu accounts"
  include_tasks: 02_create-user.yml
  include_tasks: 02_manage_user.yml
  vars:
    MAILU_DOCKER_DIR: "{{ docker_compose.directories.instance }}"
    mailu_api_base_url: "http://127.0.0.1:8080/api/v1"
@@ -55,7 +55,8 @@
    mailu_user_key: "{{ item.key }}"
    mailu_user_name: "{{ item.value.username }}"
    mailu_password: "{{ item.value.password }}"
    mailu_token_ip: "{{ item.value.ip | default('') }}"
    mailu_token_ip: "{{ item.value.ip | default(networks.internet.ip4) }}"
    mailu_token_name: "{{ SOFTWARE_NAME ~ ' Token for ' ~ item.value.username }}"
  loop: "{{ users | dict2items }}"
  loop_control:
    loop_var: item
@@ -66,3 +67,5 @@

- name: Set Mailu DNS records
  include_tasks: 05_dns-records.yml

- include_tasks: utils/run_once.yml
@@ -25,5 +25,5 @@
  no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"

- name: "Create Mailu API Token for {{ mailu_user_name }}"
  include_tasks: 03_create-token.yml
  when: "{{ 'mail-bot' in item.value.roles }}"
  include_tasks: 03a_manage_user_token.yml
  when: "'mail-bot' in item.value.roles"
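Beyond the file rename, this hunk fixes a classic Ansible pitfall: when: is already evaluated as a Jinja expression, so wrapping it in {{ ... }} (the removed form) triggers the "conditional statements should not include jinja2 templating delimiters" warning and can misbehave. Side by side:

# Correct: a bare Jinja expression.
- include_tasks: 03a_manage_user_token.yml
  when: "'mail-bot' in item.value.roles"

# Problematic (removed): templating delimiters inside a conditional.
# when: "{{ 'mail-bot' in item.value.roles }}"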
26  roles/web-app-mailu/tasks/03a_manage_user_token.yml  Normal file
@@ -0,0 +1,26 @@

- name: "Fetch existing API tokens via curl inside admin container"
  command: >-
    {{ docker_compose_command_exec }} -T admin \
    curl -s -X GET {{ mailu_api_base_url }}/token \
    -H "Authorization: Bearer {{ MAILU_API_TOKEN }}"
  args:
    chdir: "{{ MAILU_DOCKER_DIR }}"
  register: mailu_tokens_cli
  changed_when: false
  no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"

- name: "Extract existing token info for '{{ mailu_user_key }};{{ mailu_user_name }}'"
  set_fact:
    mailu_user_existing_token: >-
      {{ (
        mailu_tokens_cli.stdout
        | default('[]')
        | from_json
        | selectattr('comment','equalto', mailu_token_name)
        | list
      ).0 | default(None) }}

- name: "Start Mailu token procedures for undefined tokens"
  when: users[mailu_user_key].mailu_token is not defined
  include_tasks: 03b_create_user_token.yml
@@ -1,26 +1,3 @@

- name: "Fetch existing API tokens via curl inside admin container"
  command: >-
    {{ docker_compose_command_exec }} -T admin \
    curl -s -X GET {{ mailu_api_base_url }}/token \
    -H "Authorization: Bearer {{ MAILU_API_TOKEN }}"
  args:
    chdir: "{{ MAILU_DOCKER_DIR }}"
  register: mailu_tokens_cli
  changed_when: false
  no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"

- name: "Extract existing token info for '{{ mailu_user_key }};{{ mailu_user_name }}'"
  set_fact:
    mailu_user_existing_token: >-
      {{ (
        mailu_tokens_cli.stdout
        | default('[]')
        | from_json
        | selectattr('comment','equalto', mailu_user_key ~ " - ansible.infinito")
        | list
      ).0 | default(None) }}

- name: "Delete existing API token for '{{ mailu_user_key }};{{ mailu_user_name }}' if local token missing but remote exists"
  command: >-
    {{ docker_compose_command_exec }} -T admin \
@@ -29,7 +6,6 @@
  args:
    chdir: "{{ MAILU_DOCKER_DIR }}"
  when:
    - users[mailu_user_key].mailu_token is not defined
    - mailu_user_existing_token is not none
    - mailu_user_existing_token.id is defined
  register: mailu_token_delete
@@ -43,13 +19,12 @@
    -H "Authorization: Bearer {{ MAILU_API_TOKEN }}"
    -H "Content-Type: application/json"
    -d '{{ {
      "comment": mailu_user_key ~ " - ansible.infinito",
      "comment": mailu_token_name,
      "email": users[mailu_user_key].email,
      "ip": mailu_token_ip
    } | to_json }}'
  args:
    chdir: "{{ MAILU_DOCKER_DIR }}"
  when: users[mailu_user_key].mailu_token is not defined
  register: mailu_token_creation
  # If curl sees 4xx/5xx it returns non-zero due to -f → fail the task.
  failed_when:
@@ -57,7 +32,7 @@
    # Fallback: if some gateway returns 200 but embeds an error JSON.
    - mailu_token_creation.rc == 0 and
      (mailu_token_creation.stdout is search('"code"\\s*:\\s*4\\d\\d') or
      mailu_token_creation.stdout is search('cannot be found'))
      mailu_token_creation.stdout is search('cannot be found'))
  # Only mark changed when a token is actually present in the JSON.
  changed_when: mailu_token_creation.stdout is search('"token"\\s*:')
  no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"
@@ -66,14 +41,25 @@
  set_fact:
    users: >-
      {{ users
        | combine({
            mailu_user_key: (
              users[mailu_user_key]
              | combine({
                  'mailu_token': (mailu_token_creation.stdout | from_json).token
                })
            )
          }, recursive=True)
        | combine({
            mailu_user_key: (
              users[mailu_user_key]
              | combine({
                  'mailu_token': (mailu_token_creation.stdout | from_json).token
                })
            )
          }, recursive=True)
      }}
  when: users[mailu_user_key].mailu_token is not defined
  no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"

- name: "Reset MSMTP Configuration if No-Reply User Token changed"
  when: users['no-reply'].username == mailu_user_name
  block:
    - name: "Set MSMTP run-once fact false"
      set_fact:
        run_once_sys_svc_msmtp: false
      changed_when: false

    - name: Reload MSMTP role
      include_role:
        name: "sys-svc-msmtp"
@@ -1,5 +1,3 @@
---
- block:
    - include_tasks: 01_core.yml
    - include_tasks: utils/run_once.yml
- include_tasks: 01_core.yml
  when: run_once_web_app_mailu is not defined
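With utils/run_once.yml now included at the end of 01_core.yml (see the 01_core.yml hunk above), main.yml no longer needs the block wrapper. The resulting guard pattern, assuming utils/run_once.yml simply sets the role fact:

# main.yml: only run the core tasks once per play.
- include_tasks: 01_core.yml
  when: run_once_web_app_mailu is not defined

# utils/run_once.yml presumably reduces to (assumption):
# - set_fact:
#     run_once_web_app_mailu: true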
19  roles/web-app-mastodon/tasks/01_wait.yml  Normal file
@@ -0,0 +1,19 @@
- name: Check health status of '{{ item }}' container
  shell: |
    cid=$(docker compose ps -q {{ item }})
    docker inspect \
      --format '{{ "{{.State.Health.Status}}" }}' \
      $cid
  args:
    chdir: "{{ docker_compose.directories.instance }}"
  register: healthcheck
  retries: 60
  delay: 5
  until: healthcheck.stdout == "healthy"
  loop:
    - mastodon
    - streaming
    - sidekiq
  loop_control:
    label: "{{ item }}"
  changed_when: false
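The --format argument is worth a second look: docker inspect expects Go-template braces, which collide with Jinja's own delimiters, so the literal {{.State.Health.Status}} has to be emitted as a Jinja string expression. Reduced to a one-task sketch (container name hypothetical):

- name: Print one container's health state (brace-escaping demo)
  shell: docker inspect --format '{{ "{{.State.Health.Status}}" }}' my-container
  register: health
  changed_when: false
# After Jinja rendering, the command reads:
#   docker inspect --format '{{.State.Health.Status}}' my-container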
9  roles/web-app-mastodon/tasks/02_cleanup.yml  Normal file
@@ -0,0 +1,9 @@
---
# Cleanup routine for Mastodon
# Removes cached remote media older than 14 days when MODE_CLEANUP is enabled.
- name: "Cleanup Mastodon media cache older than 14 days"
  command:
    cmd: "docker exec -u root {{ MASTODON_CONTAINER }} bin/tootctl media remove --days=14"
  register: mastodon_cleanup
  changed_when: mastodon_cleanup.rc == 0
  failed_when: mastodon_cleanup.rc != 0
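tootctl ships sibling cleanup commands beyond media remove; if this hook should also prune stale link previews, the same task shape extends naturally (command per the upstream tootctl CLI, not part of this commit):

- name: "Cleanup Mastodon preview cards older than 14 days (optional extension)"
  command:
    cmd: "docker exec -u root {{ MASTODON_CONTAINER }} bin/tootctl preview_cards remove --days=14"
  register: mastodon_cards_cleanup
  changed_when: mastodon_cards_cleanup.rc == 0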
@@ -1,6 +1,3 @@
- name: "Execute migration for '{{ application_id }}'"
  command:
    cmd: "docker exec {{ MASTODON_CONTAINER }} bundle exec rails db:migrate"

- name: "Include administrator routines for '{{ application_id }}'"
  include_tasks: 02_administrator.yml
@@ -1,26 +1,5 @@
# Routines to create the administrator account
# @see https://chatgpt.com/share/67b9b12c-064c-800f-9354-8e42e6459764

- name: Check health status of '{{ item }}' container
  shell: |
    cid=$(docker compose ps -q {{ item }})
    docker inspect \
      --format '{{ "{{.State.Health.Status}}" }}' \
      $cid
  args:
    chdir: "{{ docker_compose.directories.instance }}"
  register: healthcheck
  retries: 60
  delay: 5
  until: healthcheck.stdout == "healthy"
  loop:
    - mastodon
    - streaming
    - sidekiq
  loop_control:
    label: "{{ item }}"
  changed_when: false

- name: Remove line containing "- administrator" from config/settings.yml to allow creating administrator account
  command:
    cmd: "docker exec -u root {{ MASTODON_CONTAINER }} sed -i '/- administrator/d' config/settings.yml"
@@ -18,5 +18,15 @@
  vars:
    docker_compose_flush_handlers: true

- name: "Wait for Mastodon"
  include_tasks: 01_wait.yml

- name: "Cleanup Mastodon caches when MODE_CLEANUP is true"
  include_tasks: 02_cleanup.yml
  when: MODE_CLEANUP | bool

- name: "start setup procedures for mastodon"
  include_tasks: 01_setup.yml
  include_tasks: 03_setup.yml

- name: "Include administrator routines for '{{ application_id }}'"
  include_tasks: 04_administrator.yml
@@ -1,8 +1,8 @@
features:
  matomo: true
  css: true
  matomo: true
  css: true
  desktop: true
  logout: false
  logout: false
server:
  csp:
    whitelist:
@@ -16,14 +16,15 @@ server:
      font-src:
        - https://cdnjs.cloudflare.com
      frame-src:
        - "{{ WEB_PROTOCOL }}://*.{{ PRIMARY_DOMAIN }}" # Makes sense that all of the website content is available in the navigator
        # Makes sense that all of the website content is available in the navigator
        - "{{ WEB_PROTOCOL }}://*.{{ PRIMARY_DOMAIN }}"
    flags:
      style-src:
        unsafe-inline: true
        unsafe-inline: true
      script-src:
        unsafe-eval: true
        unsafe-eval: true
      script-src-elem:
        unsafe-inline: true
        unsafe-inline: true
  domains:
    canonical:
      - "slides.{{ PRIMARY_DOMAIN }}"
@@ -1,8 +1,8 @@
galaxy_info:
  author: "Kevin Veen-Birkenbach"
  description: "An interactive presentation platform focused on guiding end-users through the practical use of the Infinito.Nexus software. Designed to demonstrate features, workflows, and real-world applications for Administrators, Developers, End-Users, Businesses, and Investors."
  license: "Infinito.Nexus NonCommercial License"
  license_url: "https://s.infinito.nexus/license"
  author: "Kevin Veen-Birkenbach"
  description: "An interactive presentation platform focused on guiding end-users through the practical use of the Infinito.Nexus software. Designed to demonstrate features, workflows, and real-world applications for Administrators, Developers, End-Users, Businesses, and Investors."
  license: "Infinito.Nexus NonCommercial License"
  license_url: "https://s.infinito.nexus/license"
  company: |
    Kevin Veen-Birkenbach
    Consulting & Coaching Solutions
@@ -13,4 +13,3 @@
{% include 'roles/docker-container/templates/networks.yml.j2' %}

{% include 'roles/docker-compose/templates/networks.yml.j2' %}

@@ -91,7 +91,7 @@ docker:
      mem_reservation: "128m"
      mem_limit: "512m"
      pids_limit: 256
    enabled: "{{ applications | get_app_conf('web-app-nextcloud', 'features.oidc', False) }}" # Activate OIDC for Nextcloud
    enabled: "{{ applications | get_app_conf('web-app-nextcloud', 'features.oidc', False, True, True) }}" # Activate OIDC for Nextcloud
    # flavor decides which OIDC plugin should be used.
    # Available options: oidc_login, sociallogin
    # @see https://apps.nextcloud.com/apps/oidc_login
@@ -194,7 +194,7 @@ plugins:
    enabled: false
  fileslibreofficeedit:
    # Nextcloud LibreOffice integration: allows online editing of documents with LibreOffice (https://apps.nextcloud.com/apps/fileslibreofficeedit)
    enabled: "{{ not (applications | get_app_conf('web-app-nextcloud', 'plugins.richdocuments.enabled', False, True)) }}"
    enabled: "{{ not (applications | get_app_conf('web-app-nextcloud', 'plugins.richdocuments.enabled', False, True, True)) }}"
  forms:
    # Nextcloud forms: facilitates creation of forms and surveys (https://apps.nextcloud.com/apps/forms)
    enabled: true
@@ -292,13 +292,13 @@ plugins:
  #   enabled: false
  twofactor_nextcloud_notification:
    # Nextcloud two-factor notification: sends notifications for two-factor authentication events (https://apps.nextcloud.com/apps/twofactor_nextcloud_notification)
    enabled: "{{ not applications | get_app_conf('web-app-nextcloud', 'features.oidc', False, True) }}" # Deactivate 2FA if oidc is active
    enabled: "{{ not applications | get_app_conf('web-app-nextcloud', 'features.oidc', False, True, True) }}" # Deactivate 2FA if oidc is active
  twofactor_totp:
    # Nextcloud two-factor TOTP: provides time-based one-time password authentication (https://apps.nextcloud.com/apps/twofactor_totp)
    enabled: "{{ not applications | get_app_conf('web-app-nextcloud', 'features.oidc', False, True) }}" # Deactivate 2FA if oidc is active
    enabled: "{{ not applications | get_app_conf('web-app-nextcloud', 'features.oidc', False, True, True) }}" # Deactivate 2FA if oidc is active
  user_ldap:
    # Nextcloud user LDAP: integrates LDAP for user management and authentication (https://apps.nextcloud.com/apps/user_ldap)
    enabled: "{{ applications | get_app_conf('web-app-nextcloud', 'features.ldap', False, True) }}"
    enabled: "{{ applications | get_app_conf('web-app-nextcloud', 'features.ldap', False, True, True) }}"
  user_directory:
    enabled: true # Enables the LDAP User Directory Search
  user_oidc:
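Read together with the new unit tests further down, the extra positional argument appears to map get_app_conf's tail to strict, default, skip_missing_app, so `False, True, True` reads as: do not raise on unset keys, fall back to True, and tolerate a missing app id altogether. An annotated example with a hypothetical variable name (the mapping is inferred from the tests, not from a documented signature):

# Assumed positional mapping (from the new tests):
#   strict=False          -> return the default instead of raising
#   default=True          -> value returned when the key is unset
#   skip_missing_app=True -> default also applies when the app id itself is absent
NEXTCLOUD_OIDC_ENABLED: >-
  {{ applications | get_app_conf('web-app-nextcloud', 'features.oidc', False, True, True) }}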
@@ -17,9 +17,13 @@ docker:
    database:
      enabled: false
    collabora:
      image: collabora/code
      version: latest
      name: collabora
      image: collabora/code
      version: latest
      name: collabora
      cpus: 2
      mem_reservation: 1g
      mem_limit: 2g
      pids_limit: 2048
features:
  logout: false
  desktop: true # Just set to allow the iframe to load it
@@ -4,6 +4,15 @@
{% include 'roles/docker-container/templates/base.yml.j2' %}
    image: "{{ COLLABORA_IMAGE }}:{{ COLLABORA_VERSION }}"
    container_name: {{ COLLABORA_CONTAINER }}
    security_opt:
      - seccomp=unconfined
      - apparmor=unconfined
    cap_add:
      - MKNOD
      - SYS_CHROOT
      - SETUID
      - SETGID
      - FOWNER
    ports:
      - "127.0.0.1:{{ ports.localhost.http[application_id] }}:{{ container_port }}"
{% include 'roles/docker-container/templates/healthcheck/curl.yml.j2' %}
@@ -1,9 +1,9 @@
features:
  matomo: true
  css: true
  desktop: true
  javascript: false
  logout: false
  matomo: true
  css: true
  desktop: true
  javascript: false
  logout: false
server:
  domains:
    canonical:
@@ -19,10 +19,11 @@ server:
      connect-src:
        - "{{ WEB_PROTOCOL }}://*.{{ PRIMARY_DOMAIN }}"
        - "{{ WEB_PROTOCOL }}://{{ PRIMARY_DOMAIN }}"
        - "https://cdn.jsdelivr.net"
      script-src-elem:
        - https://cdn.jsdelivr.net
        - "https://cdn.jsdelivr.net"
      style-src-elem:
        - https://cdn.jsdelivr.net
        - "https://cdn.jsdelivr.net"
      frame-ancestors:
        - "{{ WEB_PROTOCOL }}://<< defaults_applications[web-app-keycloak].server.domains.canonical[0] >>"
@@ -21,11 +21,6 @@
- name: "load docker, proxy for '{{ application_id }}'"
  include_role:
    name: sys-stk-full-stateless
  vars:
    aca_origin: "'{{ domains | get_url('web-svc-logout', WEB_PROTOCOL) }}' always"
    aca_credentials: "'true' always"
    aca_methods: "'GET, OPTIONS' always"
    aca_headers: "'Accept, Authorization' always"

- name: Create symbolic link from .env file to repository
  file:
@@ -8,7 +8,11 @@ location = /logout {
    proxy_http_version 1.1;

    {# CORS headers – allow your central page to call this #}
    {% include 'roles/sys-svc-proxy/templates/headers/access_control_allow.conf.j2' %}
    {%- set aca_origin = "'{{ domains | get_url('web-svc-logout', WEB_PROTOCOL) }}' always" -%}
    {%- set aca_credentials = "'true' always" -%}
    {%- set aca_methods = "'GET, OPTIONS' always" -%}
    {%- set aca_headers = "'Accept, Authorization' always" -%}
    {%- include 'roles/sys-svc-proxy/templates/headers/access_control_allow.conf.j2' -%}

    {# Disable caching absolutely #}
    add_header Cache-Control "no-store, no-cache, must-revalidate, proxy-revalidate, max-age=0" always;
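Moving the aca_* values out of the play vars (see the removed role vars above) and into {%- set -%} statements directly before the include is the template-side way of parameterizing a shared partial. The generic shape of the pattern, with the partial's body as an assumption:

{# Parameters are plain Jinja variables set just before the include. #}
{%- set aca_origin = "'https://example.org' always" -%}
{%- set aca_methods = "'GET, OPTIONS' always" -%}
{%- include 'headers/access_control_allow.conf.j2' -%}

{# headers/access_control_allow.conf.j2 might then contain (assumption): #}
add_header Access-Control-Allow-Origin  {{ aca_origin }};
add_header Access-Control-Allow-Methods {{ aca_methods }};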
@@ -16,6 +16,10 @@
    users: "{{ default_users | combine(users | default({}), recursive=True) }}"
  no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"

- name: Merge networks definitions
  set_fact:
    networks: "{{ defaults_networks | combine(networks | default({}, true), recursive=True) }}"

- name: Merge application definitions
  set_fact:
    applications: "{{ defaults_applications | merge_with_defaults(applications | default({}, true)) }}"
@@ -92,10 +96,6 @@
      )) |
      generate_all_domains(WWW_REDIRECT_ENABLED | bool)
    }}

- name: Merge networks definitions
  set_fact:
    networks: "{{ defaults_networks | combine(networks | default({}, true), recursive=True) }}"

- name: Merge OIDC configuration
  set_fact:
@@ -120,6 +120,10 @@
    name: update-compose
  when: MODE_UPDATE | bool

- name: "Ensure correct timezone is '{{ HOST_TIMEZONE }}'"
  ansible.builtin.timezone:
    name: "{{ HOST_TIMEZONE }}"

- name: "Load base roles"
  include_tasks: "./tasks/groups/{{ item }}-roles.yml"
  loop:
@@ -128,6 +132,7 @@
    - svc-net # 3. Load network roles
    - svc-db # 4. Load database roles
    - svc-prx # 5. Load proxy roles
    - svc-ai # 6. Load ai roles
    - svc-ai # 6. Load AI roles
    - svc-bkp # 7. Load Backup Roles
  loop_control:
    label: "{{ item }}-roles.yml"
@@ -28,14 +28,17 @@ BUILTIN_FILTERS: Set[str] = {
    "int", "join", "last", "length", "list", "lower", "map", "min", "max", "random",
    "reject", "rejectattr", "replace", "reverse", "round", "safe", "select",
    "selectattr", "slice", "sort", "string", "striptags", "sum", "title", "trim",
    "truncate", "unique", "upper", "urlencode", "urlize", "wordcount", "xmlattr",
    "truncate", "unique", "upper", "urlencode", "urlize", "wordcount", "xmlattr", "contains",

    # Common Ansible filters (subset, extend as needed)
    "b64decode", "b64encode", "basename", "dirname", "from_json", "to_json",
    "from_yaml", "to_yaml", "combine", "difference", "intersect",
    "flatten", "zip", "regex_search", "regex_replace", "bool",
    "type_debug", "json_query", "mandatory", "hash", "checksum",
    "lower", "upper", "capitalize", "unique", "dict2items", "items2dict", "password_hash", "path_join", "product", "quote", "split", "ternary", "to_nice_yaml", "tojson",
    "lower", "upper", "capitalize", "unique", "dict2items", "items2dict",
    "password_hash", "path_join", "product", "quote", "split", "ternary", "to_nice_yaml",
    "tojson", "to_nice_json",

    # Date/time-ish
    "strftime",
125  tests/unit/module_utils/test_config_utils.py  Normal file
@@ -0,0 +1,125 @@
import os
import shutil
import tempfile
import unittest

from module_utils.config_utils import (
    get_app_conf,
    AppConfigKeyError,
    ConfigEntryNotSetError,
)


class TestGetAppConf(unittest.TestCase):
    def setUp(self):
        # Isolate working directory so that schema files can be discovered
        self._cwd = os.getcwd()
        self.tmpdir = tempfile.mkdtemp(prefix="cfgutilstest_")
        os.chdir(self.tmpdir)

        # Minimal schema structure:
        # roles/web-app-demo/schema/main.yml
        os.makedirs(os.path.join("roles", "web-app-demo", "schema"), exist_ok=True)
        with open(os.path.join("roles", "web-app-demo", "schema", "main.yml"), "w") as f:
            f.write(
                # Defines 'features.defined_but_unset' in schema (without a value in applications),
                # plus 'features.oidc' and 'features.nested.list'
                "features:\n"
                "  oidc: {}\n"
                "  defined_but_unset: {}\n"
                "  nested:\n"
                "    list:\n"
                "      - {}\n"
            )

        # Example configuration with actual values
        self.applications = {
            "web-app-demo": {
                "features": {
                    "oidc": True,
                    "nested": {
                        "list": ["first", "second"]
                    }
                }
            }
        }

    def tearDown(self):
        os.chdir(self._cwd)
        shutil.rmtree(self.tmpdir, ignore_errors=True)

    # --- Tests ---

    def test_missing_app_with_skip_missing_app_returns_default_true(self):
        """If app ID is missing and skip_missing_app=True, it should return the default (True)."""
        apps = {"some-other-app": {}}
        val = get_app_conf(apps, "web-app-nextcloud", "features.oidc",
                           strict=True, default=True, skip_missing_app=True)
        self.assertTrue(val)

    def test_missing_app_with_skip_missing_app_returns_default_false(self):
        """If app ID is missing and skip_missing_app=True, it should return the default (False)."""
        apps = {"svc-bkp-rmt-2-loc": {}}
        val = get_app_conf(apps, "web-app-nextcloud", "features.oidc",
                           strict=True, default=False, skip_missing_app=True)
        self.assertFalse(val)

    def test_missing_app_without_skip_missing_app_and_strict_true_raises(self):
        """Missing app ID without skip_missing_app and strict=True should raise."""
        apps = {}
        with self.assertRaises(AppConfigKeyError):
            get_app_conf(apps, "web-app-nextcloud", "features.oidc",
                         strict=True, default=True, skip_missing_app=False)

    def test_missing_app_without_skip_missing_app_and_strict_false_raises(self):
        apps = {}
        with self.assertRaises(AppConfigKeyError):
            get_app_conf(apps, "web-app-nextcloud", "features.oidc",
                         strict=False, default=True, skip_missing_app=False)

    def test_existing_app_returns_expected_value(self):
        """Existing app and key should return the configured value."""
        val = get_app_conf(self.applications, "web-app-demo", "features.oidc",
                           strict=True, default=False, skip_missing_app=False)
        self.assertTrue(val)

    def test_nested_list_index_access(self):
        """Accessing list indices should work correctly."""
        val0 = get_app_conf(self.applications, "web-app-demo", "features.nested.list[0]",
                            strict=True, default=None, skip_missing_app=False)
        val1 = get_app_conf(self.applications, "web-app-demo", "features.nested.list[1]",
                            strict=True, default=None, skip_missing_app=False)
        self.assertEqual(val0, "first")
        self.assertEqual(val1, "second")

    def test_schema_defined_but_unset_raises_in_strict_mode(self):
        """Schema-defined but unset value should raise in strict mode."""
        with self.assertRaises(ConfigEntryNotSetError):
            get_app_conf(self.applications, "web-app-demo", "features.defined_but_unset",
                         strict=True, default=False, skip_missing_app=False)

    def test_schema_defined_but_unset_strict_false_returns_default(self):
        """Schema-defined but unset value should return default when strict=False."""
        val = get_app_conf(self.applications, "web-app-demo", "features.defined_but_unset",
                           strict=False, default=True, skip_missing_app=False)
        self.assertTrue(val)

    def test_invalid_key_format_raises(self):
        """Invalid key format in path should raise AppConfigKeyError."""
        with self.assertRaises(AppConfigKeyError):
            get_app_conf(self.applications, "web-app-demo", "features.nested.list[not-an-int]",
                         strict=True, default=None, skip_missing_app=False)

    def test_index_out_of_range_respects_strict(self):
        """Out-of-range index should respect strict parameter."""
        # strict=False returns default
        val = get_app_conf(self.applications, "web-app-demo", "features.nested.list[99]",
                           strict=False, default="fallback", skip_missing_app=False)
        self.assertEqual(val, "fallback")
        # strict=True raises
        with self.assertRaises(AppConfigKeyError):
            get_app_conf(self.applications, "web-app-demo", "features.nested.list[99]",
                         strict=True, default=None, skip_missing_app=False)


if __name__ == "__main__":
    unittest.main()
0  tests/unit/roles/svc-bkp-rmt-2-loc/__init__.py  Normal file
@@ -0,0 +1,122 @@
import unittest
import sys
import types
from pathlib import Path
from unittest.mock import patch, MagicMock
import subprocess
import time
import os


def load_module():
    """
    Dynamically load the target script:
      roles/svc-bkp-rmt-2-loc/files/pull-specific-host.py
    relative to this test file.
    """
    here = Path(__file__).resolve()
    # tests/unit/roles/svc-bkp-rmt-2-loc/files -> up 5 levels to repo root
    repo_root = here.parents[5]
    target_path = repo_root / "roles" / "svc-bkp-rmt-2-loc" / "files" / "pull-specific-host.py"
    if not target_path.exists():
        raise FileNotFoundError(f"Cannot find script at {target_path}")
    spec = types.ModuleType("pull_specific_host_module")
    code = target_path.read_text(encoding="utf-8")
    exec(compile(code, str(target_path), "exec"), spec.__dict__)
    return spec


class PullSpecificHostTests(unittest.TestCase):
    def setUp(self):
        self.mod = load_module()
        self.hash64 = "a" * 64
        self.host = "1.2.3.4"
        self.remote = f"backup@{self.host}"
        self.base = f"/Backups/{self.hash64}/"
        self.backup_type = "backup-docker-to-local"
        self.type_dir = f"{self.base}{self.backup_type}/"
        self.last_local = f"{self.type_dir}20250101000000"
        self.last_remote = f"{self.type_dir}20250202000000"

    def _completed(self, stdout="", returncode=0):
        return subprocess.CompletedProcess(args="mock", returncode=returncode, stdout=stdout, stderr="")

    def _run_side_effect_success(self, command, capture_output=True, shell=True, text=True, check=False):
        cmd = command if isinstance(command, str) else " ".join(command)
        if cmd.startswith(f'ssh "{self.remote}" sha256sum /etc/machine-id'):
            return self._completed(stdout=f"{self.hash64} /etc/machine-id\n")
        if cmd.startswith(f'ssh "{self.remote}" "find {self.base} -maxdepth 1 -type d -execdir basename {{}} ;"'):
            return self._completed(stdout=f"{self.hash64}\n{self.backup_type}\n")
        if cmd.startswith(f"ls -d {self.type_dir}* | tail -1"):
            return self._completed(stdout=self.last_local)
        if cmd.startswith(f'ssh "{self.remote}" "ls -d {self.type_dir}*'):
            return self._completed(stdout=f"{self.last_remote}\n")
        return self._completed(stdout="")

    def _run_side_effect_find_fail(self, command, capture_output=True, shell=True, text=True, check=False):
        cmd = command if isinstance(command, str) else " ".join(command)
        if cmd.startswith(f'ssh "backup@{self.host}" "find {self.base} -maxdepth 1 -type d -execdir basename {{}} ;"'):
            raise subprocess.CalledProcessError(returncode=1, cmd=cmd, output="", stderr="find: error")
        if cmd.startswith(f'ssh "backup@{self.host}" sha256sum /etc/machine-id'):
            return self._completed(stdout=f"{self.hash64} /etc/machine-id\n")
        return self._completed(stdout="")

    def _run_side_effect_no_types(self, command, capture_output=True, shell=True, text=True, check=False):
        cmd = command if isinstance(command, str) else " ".join(command)
        if cmd.startswith(f'ssh "{self.remote}" sha256sum /etc/machine-id'):
            return self._completed(stdout=f"{self.hash64} /etc/machine-id\n")
        if cmd.startswith(f'ssh "{self.remote}" "find {self.base} -maxdepth 1 -type d -execdir basename {{}} ;"'):
            return self._completed(stdout="")
        return self._completed(stdout="")

    @patch("time.sleep", new=lambda *a, **k: None)
    @patch.object(os, "makedirs")
    @patch.object(os, "system")
    @patch.object(subprocess, "run")
    def test_success_rsync_zero_exit(self, mock_run, mock_system, _mkd):
        mock_run.side_effect = self._run_side_effect_success
        mock_system.return_value = 0
        with self.assertRaises(SystemExit) as cm:
            self.mod.pull_backups(self.host)
        self.assertEqual(cm.exception.code, 0)
        self.assertTrue(mock_system.called, "rsync (os.system) should be called")

    @patch("time.sleep", new=lambda *a, **k: None)
    @patch.object(os, "makedirs")
    @patch.object(os, "system")
    @patch.object(subprocess, "run")
    def test_no_backup_types_exit_zero(self, mock_run, mock_system, _mkd):
        mock_run.side_effect = self._run_side_effect_no_types
        mock_system.return_value = 0
        with self.assertRaises(SystemExit) as cm:
            self.mod.pull_backups(self.host)
        self.assertEqual(cm.exception.code, 0)
        self.assertFalse(mock_system.called, "rsync should not be called when no types found")

    @patch("time.sleep", new=lambda *a, **k: None)
    @patch.object(os, "makedirs")
    @patch.object(os, "system")
    @patch.object(subprocess, "run")
    def test_find_failure_exits_one(self, mock_run, mock_system, _mkd):
        mock_run.side_effect = self._run_side_effect_find_fail
        mock_system.return_value = 0
        with self.assertRaises(SystemExit) as cm:
            self.mod.pull_backups(self.host)
        self.assertEqual(cm.exception.code, 1)
        self.assertFalse(mock_system.called, "rsync should not be called when find fails")

    @patch("time.sleep", new=lambda *a, **k: None)
    @patch.object(os, "makedirs")
    @patch.object(os, "system")
    @patch.object(subprocess, "run")
    def test_rsync_fails_after_retries_exit_nonzero(self, mock_run, mock_system, _mkd):
        mock_run.side_effect = self._run_side_effect_success
        mock_system.side_effect = [1] * 12  # 12 retries in the script
        with self.assertRaises(SystemExit) as cm:
            self.mod.pull_backups(self.host)
        self.assertEqual(cm.exception.code, 1)
        self.assertEqual(mock_system.call_count, 12, "rsync should have retried 12 times")


if __name__ == "__main__":
    unittest.main()