mirror of
https://github.com/kevinveenbirkenbach/computer-playbook.git
synced 2025-09-08 11:17:17 +02:00
Compare commits: 9f734dff17 ... master (62 commits)

Commits (SHA1):
445c94788e
aac9704e8b
a57a5f8828
90843726de
d25da76117
d48a1b3c0a
2839d2e1a4
00c99e58e9
904040589e
9f3d300bca
9e253a2d09
49120b0dcf
b6f91ab9d3
77e8e7ed7e
32bc17e0c3
e294637cb6
577767bed6
e77f8da510
4738b263ec
0a588023a7
d2fa90774b
0e72dcbe36
4f8ce598a9
3769e66d8d
33a5fadf67
699a6b6f1e
61c29eee60
d5204fb5c2
751615b1a4
e2993d2912
24b6647bfb
d2dc2eab5f
a1130e33d7
df122905eb
d093a22d61
5e550ce3a3
0ada12e3ca
1a5ce4a7fa
a9abb3ce5d
71ceb339fc
61bba3d2ef
0bde4295c7
8059f272d5
7c814e6e83
d760c042c2
6cac8085a8
3a83f3d14e
61d852c508
188b098503
bc56940e55
5dfc2efb5a
7f9dc65b37
163a925096
a8c88634b5
ce3fe1cd51
7ca8b7c71d
110381e80c
b02d88adc0
b7065837df
c98a2378c4
4ae3cee36c
b834f0c95c
@@ -11,7 +11,7 @@ sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')
 from module_utils.entity_name_utils import get_entity_name

 # Paths to the group-vars files
-PORTS_FILE = './group_vars/all/09_ports.yml'
+PORTS_FILE = './group_vars/all/10_ports.yml'
 NETWORKS_FILE = './group_vars/all/09_networks.yml'
 ROLE_TEMPLATE_DIR = './templates/roles/web-app'
 ROLES_DIR = './roles'
@@ -15,7 +15,7 @@ Follow these guides to install and configure Infinito.Nexus:
 - **Networking & VPN** - Configure `WireGuard`, `OpenVPN`, and `Nginx Reverse Proxy`.

 ## Managing & Updating Infinito.Nexus 🔄
-- Regularly update services using `update-docker`, `update-pacman`, or `update-apt`.
+- Regularly update services using `update-pacman`, or `update-apt`.
 - Monitor system health with `sys-ctl-hlth-btrfs`, `sys-ctl-hlth-webserver`, and `sys-ctl-hlth-docker-container`.
 - Automate system maintenance with `sys-lock`, `sys-ctl-cln-bkps`, and `sys-ctl-rpr-docker-hard`.
filter_plugins/volume_path.py (new file, 21 lines)
@@ -0,0 +1,21 @@
+from ansible.errors import AnsibleFilterError
+
+def docker_volume_path(volume_name: str) -> str:
+    """
+    Returns the absolute filesystem path of a Docker volume.
+
+    Example:
+        "akaunting_data" -> "/var/lib/docker/volumes/akaunting_data/_data/"
+    """
+    if not volume_name or not isinstance(volume_name, str):
+        raise AnsibleFilterError(f"Invalid volume name: {volume_name}")
+
+    return f"/var/lib/docker/volumes/{volume_name}/_data/"
+
+class FilterModule(object):
+    """Docker volume path filters."""
+
+    def filters(self):
+        return {
+            "docker_volume_path": docker_volume_path,
+        }
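A playbook can then consume the new filter like this (a minimal sketch; the task context and the volume name `akaunting_data` are illustrative):

- name: Resolve the host path of the Akaunting data volume
  ansible.builtin.set_fact:
    akaunting_data_path: "{{ 'akaunting_data' | docker_volume_path }}"
  # akaunting_data_path -> /var/lib/docker/volumes/akaunting_data/_data/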
@@ -12,7 +12,6 @@ SYS_SERVICE_BACKUP_RMT_2_LOC: "{{ 'svc-bkp-rmt-2-loc' | get_se
 SYS_SERVICE_BACKUP_DOCKER_2_LOC: "{{ 'sys-ctl-bkp-docker-2-loc' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_REPAIR_DOCKER_SOFT: "{{ 'sys-ctl-rpr-docker-soft' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_REPAIR_DOCKER_HARD: "{{ 'sys-ctl-rpr-docker-hard' | get_service_name(SOFTWARE_NAME) }}"
-SYS_SERVICE_UPDATE_DOCKER: "{{ 'update-docker' | get_service_name(SOFTWARE_NAME) }}"

 ## On Failure
 SYS_SERVICE_ON_FAILURE_COMPOSE: "{{ ('sys-ctl-alm-compose@') | get_service_name(SOFTWARE_NAME, False) }}%n.service"
@@ -46,8 +45,7 @@ SYS_SERVICE_GROUP_MANIPULATION: >
     SYS_SERVICE_GROUP_CLEANUP +
     SYS_SERVICE_GROUP_REPAIR +
     SYS_SERVICE_GROUP_OPTIMIZATION +
-    SYS_SERVICE_GROUP_MAINTANANCE +
-    [ SYS_SERVICE_UPDATE_DOCKER ]
+    SYS_SERVICE_GROUP_MAINTANANCE
   ) | sort
 }}
@@ -37,7 +37,6 @@ SYS_SCHEDULE_CLEANUP_FAILED_BACKUPS: "*-*-* 12:00:00"

 ### Schedule for repair services
 SYS_SCHEDULE_REPAIR_BTRFS_AUTO_BALANCER: "Sat *-*-01..07 00:00:00"  # Execute btrfs auto balancer every first Saturday of a month
-SYS_SCHEDULE_REPAIR_DOCKER_SOFT: "*-*-* {{ HOURS_SERVER_AWAKE }}:30:00"  # Heal unhealthy docker instances once per hour
 SYS_SCHEDULE_REPAIR_DOCKER_HARD: "Sun *-*-* 08:00:00"  # Restart docker instances every Sunday at 8:00 AM

 ### Schedule for backup tasks
@@ -10,8 +10,8 @@ defaults_networks:
     # /28 Networks, 14 Usable Ip Addresses
     web-app-akaunting:
       subnet: 192.168.101.0/28
-    # Free:
-    #   subnet: 192.168.101.16/28
+    web-app-confluence:
+      subnet: 192.168.101.16/28
     web-app-baserow:
       subnet: 192.168.101.32/28
     web-app-mobilizon:
@@ -34,8 +34,8 @@ defaults_networks:
       subnet: 192.168.101.176/28
     web-app-listmonk:
       subnet: 192.168.101.192/28
-    # Free:
-    #   subnet: 192.168.101.208/28
+    web-app-jira:
+      subnet: 192.168.101.208/28
     web-app-matomo:
       subnet: 192.168.101.224/28
     web-app-mastodon:
@@ -48,8 +48,8 @@ defaults_networks:
       subnet: 192.168.102.16/28
     web-app-moodle:
       subnet: 192.168.102.32/28
-    # Free:
-    #   subnet: 192.168.102.48/28
+    web-app-bookwyrm:
+      subnet: 192.168.102.48/28
     web-app-nextcloud:
       subnet: 192.168.102.64/28
     web-app-openproject:
@@ -96,6 +96,12 @@ defaults_networks:
       subnet: 192.168.103.160/28
     web-svc-logout:
       subnet: 192.168.103.176/28
+    web-app-chess:
+      subnet: 192.168.103.192/28
+    web-app-magento:
+      subnet: 192.168.103.208/28
+    web-app-bridgy-fed:
+      subnet: 192.168.103.224/28

     # /24 Networks / 254 Usable Clients
     web-app-bigbluebutton:
@@ -2,12 +2,12 @@ ports:
   # Ports which are exposed to localhost
   localhost:
     database:
-      svc-db-postgres: 5432
-      svc-db-mariadb: 3306
+      svc-db-postgres: 5432
+      svc-db-mariadb: 3306
     # https://developer.mozilla.org/de/docs/Web/API/WebSockets_API
     websocket:
-      web-app-mastodon: 4001
-      web-app-espocrm: 4002
+      web-app-mastodon: 4001
+      web-app-espocrm: 4002
     oauth2_proxy:
       web-app-phpmyadmin: 4181
       web-app-lam: 4182
@@ -26,7 +26,7 @@ ports:
       web-app-gitea: 8002
      web-app-wordpress: 8003
       web-app-mediawiki: 8004
-      # Free: 8005
+      web-app-confluence: 8005
       web-app-yourls: 8006
       web-app-mailu: 8007
       web-app-elk: 8008
@@ -36,7 +36,7 @@ ports:
       web-app-funkwhale: 8012
       web-app-roulette-wheel: 8013
       web-app-joomla: 8014
-      # Free: 8015
+      web-app-jira: 8015
       web-app-pgadmin: 8016
       web-app-baserow: 8017
       web-app-matomo: 8018
@@ -70,6 +70,11 @@ ports:
       web-app-pretix: 8046
       web-app-mig: 8047
       web-svc-logout: 8048
+      web-app-bookwyrm: 8049
+      web-app-chess: 8050
+      web-app-bluesky_view: 8051
+      web-app-magento: 8052
+      web-app-bridgy-fed: 8053
       web-app-bigbluebutton: 48087  # This port is predefined by bbb. @todo Try to change this to a 8XXX port
   public:
     # The following ports should be changed to 22 on the subdomain via stream mapping
@@ -80,9 +85,10 @@ ports:
       svc-db-openldap: 636
     stun:
       web-app-bigbluebutton: 3478  # Not sure if it's right placed here or if it should be moved to localhost section
-      web-app-nextcloud: 3479
+      # Occupied by BBB: 3479
+      web-app-nextcloud: 3480
     turn:
       web-app-bigbluebutton: 5349  # Not sure if it's right placed here or if it should be moved to localhost section
-      web-app-nextcloud: 5350  # Not used yet
+      web-app-nextcloud: 5350  # Not used yet
     federation:
       web-app-matrix_synapse: 8448
@@ -111,16 +111,6 @@ roles:
     description: "Developer-centric server utilities and admin toolkits."
     icon: "fas fa-code"
     invokable: false
-  srv:
-    title: "Server"
-    description: "General server roles for provisioning and managing server infrastructure—covering web servers, proxy servers, network services, and other backend components."
-    icon: "fas fa-server"
-    invokable: false
-  proxy:
-    title: "Proxy Server"
-    description: "Proxy-server roles for virtual-host orchestration and reverse-proxy setups."
-    icon: "fas fa-project-diagram"
-    invokable: false
   web:
     title: "Web Infrastructure"
     description: "Roles for managing web infrastructure—covering static content services and deployable web applications."
@@ -1,4 +0,0 @@
----
-- name: reload virtualbox kernel modules
-  become: true
-  command: vboxreload
@@ -20,7 +20,7 @@ To offer a centralized, extensible system for managing containerized applications
 - **Reset Logic:** Cleans previous Compose project files and data when `MODE_RESET` is enabled.
 - **Handlers for Runtime Control:** Automatically builds, sets up, or restarts containers based on handlers.
 - **Template-ready Service Files:** Predefined service base and health check templates.
-- **Integration Support:** Compatible with `srv-proxy-core` and other Infinito.Nexus service roles.
+- **Integration Support:** Compatible with `sys-svc-proxy` and other Infinito.Nexus service roles.

 ## Administration Tips
@@ -15,10 +15,17 @@
 - name: docker compose pull
   shell: |
     set -euo pipefail
-    lock="{{ [ PATH_DOCKER_COMPOSE_PULL_LOCK_DIR, docker_compose.directories.instance ] | path_join | hash('sha1') }}"
+    lock="{{ [ PATH_DOCKER_COMPOSE_PULL_LOCK_DIR, (docker_compose.directories.instance | hash('sha1')) ~ '.lock' ] | path_join }}"
     if [ ! -e "$lock" ]; then
       mkdir -p "$(dirname "$lock")"
-      docker compose pull
-      if docker compose config | grep -qE '^[[:space:]]+build:'; then
-        docker compose build --pull
-      fi
+      if docker compose pull --help 2>/dev/null | grep -q -- '--ignore-buildable'; then
+        docker compose pull --ignore-buildable
+      else
+        docker compose pull || true
+      fi
       : > "$lock"
       echo "pulled"
     fi
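The lock-path fix is worth noting: the old expression joined the lock directory and the instance directory first and then hashed the whole string, so the rendered value was a bare SHA-1 digest with no directory component at all. The new expression hashes only the instance path and joins the digest, with a `.lock` suffix, onto the lock directory. An illustrative rendering (hypothetical directory and digest values):

# old: lock = "3f2c9a41..."                                  (bare 40-char digest, outside any directory)
# new: lock = "/run/infinito/compose-pull/3f2c9a41....lock"  (one flat lock file per instance, inside the lock dir)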
@@ -5,7 +5,9 @@
   loop:
     - "{{ application_id | abs_role_path_by_application_id }}/templates/Dockerfile.j2"
     - "{{ application_id | abs_role_path_by_application_id }}/files/Dockerfile"
-  notify: docker compose up
+  notify:
+    - docker compose up
+    - docker compose build
   register: create_dockerfile_result
   failed_when:
     - create_dockerfile_result is failed
@@ -3,7 +3,7 @@
     - "CMD"
     - "curl"
     - "-f"
-{% if container_hostname %}
+{% if container_hostname is defined %}
     - "-H"
     - "Host: {{ container_hostname }}"
{% endif %}
roles/docker-container/templates/healthcheck/nc.yml.j2 (new file, 7 lines)
@@ -0,0 +1,7 @@
+healthcheck:
+  test: ["CMD-SHELL", "nc -z localhost {{ container_port }} || exit 1"]
+  interval: 30s
+  timeout: 3s
+  retries: 3
+  start_period: 10s
+{{ "\n" }}
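Rendered for a service whose `container_port` is 5432 (an illustrative value), the template produces a standard Compose health check:

healthcheck:
  test: ["CMD-SHELL", "nc -z localhost 5432 || exit 1"]
  interval: 30s
  timeout: 3s
  retries: 3
  start_period: 10s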
@@ -43,3 +43,7 @@
     chdir: "{{ PKGMGR_INSTALL_PATH }}"
     executable: /bin/bash
   become: true
+
+- name: "Update all repositories with pkgmgr"
+  command: "pkgmgr pull --all"
+  when: MODE_UPDATE | bool
@@ -1,4 +0,0 @@
-- block:
-    - include_tasks: 01_core.yml
-    - include_tasks: utils/run_once.yml
-  when: run_once_srv_letsencrypt is not defined
@@ -1,6 +0,0 @@
-
-- name: "reload svc-bkp-loc-2-usb service"
-  systemd:
-    name: "{{ 'svc-bkp-loc-2-usb' | get_service_name(SOFTWARE_NAME) }}"
-    state: reloaded
-    daemon_reload: yes
@@ -1,55 +0,0 @@
-- name: Load memberof module from file in OpenLDAP container
-  shell: >
-    docker exec -i {{ openldap_name }} ldapmodify -Y EXTERNAL -H ldapi:/// -f {{ openldap_ldif_docker_path }}configuration/01_member_of_configuration.ldif
-  listen:
-    - "Import configuration LDIF files"
-    - "Import all LDIF files"
-  # @todo Remove the following ignore errors when setting up a new server
-  # Just here because debugging would take to much time
-  ignore_errors: true
-
-- name: Refint Module Activation for OpenLDAP
-  shell: >
-    docker exec -i {{ openldap_name }} ldapadd -Y EXTERNAL -H ldapi:/// -f {{ openldap_ldif_docker_path }}configuration/02_member_of_configuration.ldif
-  listen:
-    - "Import configuration LDIF files"
-    - "Import all LDIF files"
-  register: ldapadd_result
-  failed_when: ldapadd_result.rc not in [0, 68]
-  # @todo Remove the following ignore errors when setting up a new server
-  # Just here because debugging would take to much time
-  ignore_errors: true
-
-- name: "Import schemas"
-  shell: >
-    docker exec -i {{ openldap_name }} ldapadd -Y EXTERNAL -H ldapi:/// -f "{{ openldap_ldif_docker_path }}schema/{{ item | basename | regex_replace('\.j2$', '') }}"
-  register: ldapadd_result
-  changed_when: "'adding new entry' in ldapadd_result.stdout"
-  failed_when: ldapadd_result.rc not in [0, 80]
-  listen:
-    - "Import schema LDIF files"
-    - "Import all LDIF files"
-  loop: "{{ lookup('fileglob', role_path ~ '/templates/ldif/schema/*.j2', wantlist=True) }}"
-
-- name: Refint Overlay Configuration for OpenLDAP
-  shell: >
-    docker exec -i {{ openldap_name }} ldapmodify -Y EXTERNAL -H ldapi:/// -f {{ openldap_ldif_docker_path }}configuration/03_member_of_configuration.ldif
-  listen:
-    - "Import configuration LDIF files"
-    - "Import all LDIF files"
-  register: ldapadd_result
-  failed_when: ldapadd_result.rc not in [0, 68]
-  # @todo Remove the following ignore errors when setting up a new server
-  # Just here because debugging would take to much time
-  ignore_errors: true
-
-- name: "Import users, groups, etc. to LDAP"
-  shell: >
-    docker exec -i {{ openldap_name }} ldapadd -x -D "{{LDAP.DN.ADMINISTRATOR.DATA}}" -w "{{ LDAP.BIND_CREDENTIAL }}" -c -f "{{ openldap_ldif_docker_path }}groups/{{ item | basename | regex_replace('\.j2$', '') }}"
-  register: ldapadd_result
-  changed_when: "'adding new entry' in ldapadd_result.stdout"
-  failed_when: ldapadd_result.rc not in [0, 20, 68, 65]
-  listen:
-    - "Import groups LDIF files"
-    - "Import all LDIF files"
-  loop: "{{ query('fileglob', role_path ~ '/templates/ldif/groups/*.j2') | sort }}"
@@ -37,7 +37,7 @@
 - name: "Reset LDAP Credentials"
   include_tasks: 01_credentials.yml
   when:
-    - applications | get_app_conf(application_id, 'network.local', True)
+    - applications | get_app_conf(application_id, 'network.local')
     - applications | get_app_conf(application_id, 'provisioning.credentials', True)

 - name: "create directory {{openldap_ldif_host_path}}{{item}}"
@@ -2,5 +2,5 @@ server {
     listen {{ ports.public.ldaps['svc-db-openldap'] }}ssl;
     proxy_pass 127.0.0.1:{{ ports.localhost.ldap['svc-db-openldap'] }};

-    {% include 'roles/srv-letsencrypt/templates/ssl_credentials.j2' %}
+    {% include 'roles/sys-svc-letsencrypt/templates/ssl_credentials.j2' %}
}
@@ -21,4 +21,4 @@ openldap_version: "{{ applications | get_app_conf(application_id,
 openldap_volume: "{{ applications | get_app_conf(application_id, 'docker.volumes.data', True) }}"
 openldap_network: "{{ applications | get_app_conf(application_id, 'docker.network', True) }}"

-openldap_network_expose_local: "{{ applications | get_app_conf(application_id, 'network.public', True) | bool or applications | get_app_conf(application_id, 'network.local', True) | bool }}"
+openldap_network_expose_local: "{{ applications | get_app_conf(application_id, 'network.public', True) | bool or applications | get_app_conf(application_id, 'network.local') | bool }}"
@@ -8,4 +8,3 @@ database_type: ""
 OPENRESTY_IMAGE: "openresty/openresty"
 OPENRESTY_VERSION: "alpine"
 OPENRESTY_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.openresty.name', True) }}"
-
@@ -3,9 +3,14 @@
     name: sys-ctl-alm-compose
   when: run_once_sys_ctl_alm_compose is not defined

+- name: Include dependency 'sys-ctl-rpr-docker-soft'
+  include_role:
+    name: sys-ctl-rpr-docker-soft
+  when: run_once_sys_ctl_rpr_docker_soft is not defined
+
 - include_role:
     name: sys-service
   vars:
-    system_service_timer_enabled:   true
-    system_service_on_calendar:     "{{ SYS_SCHEDULE_HEALTH_DOCKER_CONTAINER }}"
-    system_service_tpl_on_failure:  "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
+    system_service_timer_enabled:  true
+    system_service_on_calendar:    "{{ SYS_SCHEDULE_HEALTH_DOCKER_CONTAINER }}"
+    system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }} {{ SYS_SERVICE_REPAIR_DOCKER_SOFT }}"
@@ -2,7 +2,7 @@
   include_role:
     name: sys-ctl-alm-compose
   when: run_once_sys_ctl_alm_compose is not defined

 - include_role:
     name: sys-service
   vars:
@@ -43,7 +43,7 @@ for filename in os.listdir(config_path):
     url = f"{{ WEB_PROTOCOL }}://{domain}"

     redirected_domains = [domain['source'] for domain in {{ redirect_domain_mappings }}]
-    redirected_domains.append("{{domains | get_domain('web-app-mailu')}}")
+    redirected_domains.append("{{domains | get_domain('web-app-mailu') }}")

     expected_statuses = get_expected_statuses(domain, parts, redirected_domains)
@@ -3,7 +3,7 @@
     name: '{{ item }}'
   loop:
     - sys-svc-certbot
-    - srv-core
+    - sys-svc-webserver
     - sys-ctl-alm-compose

 - name: install certbot
@@ -1,15 +1,26 @@
 #!/usr/bin/env python3
 """
 Restart Docker-Compose configurations with exited or unhealthy containers.
 This version receives the *manipulation services* via argparse (no Jinja).

+STRICT mode: Resolve the Compose project exclusively via Docker labels
+(com.docker.compose.project and com.docker.compose.project.working_dir).
+No container-name fallback. If labels are missing or Docker is unavailable,
+the script records an error for that container.
+
 All shell interactions that matter for tests go through print_bash()
 so they can be monkeypatched in unit tests.
 """
 import subprocess
 import time
 import os
 import argparse
-from typing import List
+from typing import List, Optional, Tuple


+# ---------------------------
+# Shell helpers
+# ---------------------------
+
 def bash(command: str) -> List[str]:
     print(command)
     process = subprocess.Popen(
@@ -30,31 +41,45 @@ def list_to_string(lst: List[str]) -> str:


 def print_bash(command: str) -> List[str]:
+    """
+    Wrapper around bash() that echoes combined output for easier debugging
+    and can be monkeypatched in tests.
+    """
     output = bash(command)
     if output:
         print(list_to_string(output))
     return output


-def find_docker_compose_file(directory: str) -> str | None:
+# ---------------------------
+# Filesystem / compose helpers
+# ---------------------------
+
+def find_docker_compose_file(directory: str) -> Optional[str]:
+    """
+    Search for docker-compose.yml beneath a directory.
+    """
     for root, _, files in os.walk(directory):
         if "docker-compose.yml" in files:
             return os.path.join(root, "docker-compose.yml")
     return None


-def detect_env_file(project_path: str) -> str | None:
+def detect_env_file(project_path: str) -> Optional[str]:
     """
-    Return the path to a Compose env file if present (.env preferred, fallback to env).
+    Return the path to a Compose env file if present (.env preferred, fallback to .env/env).
     """
-    candidates = [os.path.join(project_path, ".env"), os.path.join(project_path, ".env", "env")]
+    candidates = [
+        os.path.join(project_path, ".env"),
+        os.path.join(project_path, ".env", "env"),
+    ]
     for candidate in candidates:
         if os.path.isfile(candidate):
             return candidate
     return None


-def compose_cmd(subcmd: str, project_path: str, project_name: str | None = None) -> str:
+def compose_cmd(subcmd: str, project_path: str, project_name: Optional[str] = None) -> str:
     """
     Build a docker-compose command string with optional -p and --env-file if present.
     Example: compose_cmd("restart", "/opt/docker/foo", "foo")
@@ -69,6 +94,10 @@ def compose_cmd(subcmd: str, project_path: str, project_name: Optional[str] = None)
     return " ".join(parts)


+# ---------------------------
+# Business logic
+# ---------------------------
+
 def normalize_services_arg(raw: List[str] | None, raw_str: str | None) -> List[str]:
     """
     Accept either:
@@ -78,7 +107,6 @@ def normalize_services_arg(raw: List[str] | None, raw_str: str | None) -> List[s
     if raw:
         return [s for s in raw if s.strip()]
     if raw_str:
-        # split on comma or whitespace
         parts = [p.strip() for chunk in raw_str.split(",") for p in chunk.split()]
         return [p for p in parts if p]
     return []
@@ -87,7 +115,7 @@ def normalize_services_arg(raw: List[str] | None, raw_str: str | None) -> List[s
 def wait_while_manipulation_running(
     services: List[str],
     waiting_time: int = 600,
-    timeout: int | None = None,
+    timeout: Optional[int] = None,
 ) -> None:
     """
     Wait until none of the given services are active anymore.
@@ -107,7 +135,6 @@ def wait_while_manipulation_running(
                 break

         if any_active:
-            # Check timeout
             elapsed = time.time() - start
             if timeout and elapsed >= timeout:
                 print(f"Timeout ({timeout}s) reached while waiting for services. Continuing anyway.")
@@ -119,7 +146,30 @@ def wait_while_manipulation_running(
             break


-def main(base_directory: str, manipulation_services: List[str], timeout: int | None) -> int:
+def get_compose_project_info(container: str) -> Tuple[str, str]:
+    """
+    Resolve project name and working dir from Docker labels.
+    STRICT: Raises RuntimeError if labels are missing/unreadable.
+    """
+    out_project = print_bash(
+        f"docker inspect -f '{{{{ index .Config.Labels \"com.docker.compose.project\" }}}}' {container}"
+    )
+    out_workdir = print_bash(
+        f"docker inspect -f '{{{{ index .Config.Labels \"com.docker.compose.project.working_dir\" }}}}' {container}"
+    )

+    project = out_project[0].strip() if out_project else ""
+    workdir = out_workdir[0].strip() if out_workdir else ""
+
+    if not project:
+        raise RuntimeError(f"No compose project label found for container {container}")
+    if not workdir:
+        raise RuntimeError(f"No compose working_dir label found for container {container}")
+
+    return project, workdir
+
+
+def main(base_directory: str, manipulation_services: List[str], timeout: Optional[int]) -> int:
     errors = 0
     wait_while_manipulation_running(manipulation_services, waiting_time=600, timeout=timeout)

@@ -131,43 +181,50 @@ def main(base_directory: str, manipulation_services: List[str], timeout: int | N
     )
     failed_containers = unhealthy_container_names + exited_container_names

-    unfiltered_failed_docker_compose_repositories = [
-        container.split("-")[0] for container in failed_containers
-    ]
-    filtered_failed_docker_compose_repositories = list(
-        dict.fromkeys(unfiltered_failed_docker_compose_repositories)
-    )
+    for container in failed_containers:
+        try:
+            project, workdir = get_compose_project_info(container)
+        except Exception as e:
+            print(f"Error reading compose labels for {container}: {e}")
+            errors += 1
+            continue

-    for repo in filtered_failed_docker_compose_repositories:
-        compose_file_path = find_docker_compose_file(os.path.join(base_directory, repo))
+        compose_file_path = os.path.join(workdir, "docker-compose.yml")
+        if not os.path.isfile(compose_file_path):
+            # As STRICT: we only trust labels; if file not there, error out.
+            print(f"Error: docker-compose.yml not found at {compose_file_path} for container {container}")
+            errors += 1
+            continue

-        if compose_file_path:
-            print("Restarting unhealthy container in:", compose_file_path)
-            project_path = os.path.dirname(compose_file_path)
-            try:
-                # restart with optional --env-file and -p
-                print_bash(compose_cmd("restart", project_path, repo))
-            except Exception as e:
-                if "port is already allocated" in str(e):
-                    print("Detected port allocation problem. Executing recovery steps...")
-                    # down (no -p needed), then engine restart, then up -d with -p
-                    print_bash(compose_cmd("down", project_path))
-                    print_bash("systemctl restart docker")
-                    print_bash(compose_cmd("up -d", project_path, repo))
-                else:
-                    print("Unhandled exception during restart:", e)
-                    errors += 1
-        else:
-            print("Error: Docker Compose file not found for:", repo)
-            errors += 1
+        print("Restarting unhealthy container in:", compose_file_path)
+        project_path = os.path.dirname(compose_file_path)
+        try:
+            # restart with optional --env-file and -p
+            print_bash(compose_cmd("restart", project_path, project))
+        except Exception as e:
+            if "port is already allocated" in str(e):
+                print("Detected port allocation problem. Executing recovery steps...")
+                # down (no -p needed), then engine restart, then up -d with -p
+                try:
+                    print_bash(compose_cmd("down", project_path))
+                    print_bash("systemctl restart docker")
+                    print_bash(compose_cmd("up -d", project_path, project))
+                except Exception as e2:
+                    print("Unhandled exception during recovery:", e2)
+                    errors += 1
+            else:
+                print("Unhandled exception during restart:", e)
+                errors += 1

     print("Finished restart procedure.")
     return errors


+# ---------------------------
+# CLI
+# ---------------------------
+
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(
-        description="Restart Docker-Compose configurations with exited or unhealthy containers."
+        description="Restart Docker-Compose configurations with exited or unhealthy containers (STRICT label mode)."
     )
     parser.add_argument(
         "--manipulation",
@@ -184,12 +241,12 @@ if __name__ == "__main__":
         "--timeout",
         type=int,
         default=60,
-        help="Maximum time in seconds to wait for manipulation services before continuing.(Default 1min)",
+        help="Maximum time in seconds to wait for manipulation services before continuing. (Default 1min)",
     )
     parser.add_argument(
         "base_directory",
         type=str,
-        help="Base directory where Docker Compose configurations are located.",
+        help="(Unused in STRICT mode) Base directory where Docker Compose configurations are located.",
     )
     args = parser.parse_args()
     services = normalize_services_arg(args.manipulation, args.manipulation_string)
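The STRICT resolution leans on two labels that Docker Compose sets on every container it starts. For a container brought up from /opt/docker/akaunting (path and project name are illustrative), the labels read by get_compose_project_info() would look like:

com.docker.compose.project: "akaunting"
com.docker.compose.project.working_dir: "/opt/docker/akaunting"

If either label is missing, for example on a container started with plain `docker run`, the function raises and the script counts an error instead of guessing a project from the container name, which is exactly the fallback the old `container.split("-")[0]` heuristic relied on.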
@@ -6,8 +6,6 @@
 - include_role:
     name: sys-service
   vars:
-    system_service_on_calendar: "{{ SYS_SCHEDULE_REPAIR_DOCKER_SOFT }}"
-    system_service_timer_enabled: true
     system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
     system_service_tpl_exec_start_pre: "/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(' ') }} --ignore {{ SYS_SERVICE_GROUP_CLEANUP| join(' ') }} {{ SYS_SERVICE_REPAIR_DOCKER_SOFT }} --timeout '{{ SYS_TIMEOUT_DOCKER_RPR_SOFT }}'"
     system_service_tpl_exec_start: >
@@ -41,9 +41,9 @@
   when: inj_enabled.logout

 - block:
-    - name: Include dependency 'srv-core'
+    - name: Include dependency 'sys-svc-webserver'
       include_role:
-        name: srv-core
-      when: run_once_srv_core is not defined
+        name: sys-svc-webserver
+      when: run_once_sys_svc_webserver is not defined
     - include_tasks: utils/run_once.yml
   when: run_once_sys_front_inj_all is not defined
@@ -1,7 +1,7 @@
-- name: Include dependency 'srv-core'
+- name: Include dependency 'sys-svc-webserver'
   include_role:
-    name: srv-core
-  when: run_once_srv_core is not defined
+    name: sys-svc-webserver
+  when: run_once_sys_svc_webserver is not defined

 - name: Generate color palette with colorscheme-generator
   set_fact:
@@ -1,8 +1,8 @@
 - block:
-    - name: Include dependency 'srv-core'
+    - name: Include dependency 'sys-svc-webserver'
       include_role:
-        name: srv-core
-      when: run_once_srv_core is not defined
+        name: sys-svc-webserver
+      when: run_once_sys_svc_webserver is not defined
     - include_tasks: 01_deploy.yml
     - include_tasks: utils/run_once.yml
   when: run_once_sys_front_inj_desktop is not defined
@@ -1,9 +1,9 @@
 - block:

-    - name: Include dependency 'srv-core'
+    - name: Include dependency 'sys-svc-webserver'
       include_role:
-        name: srv-core
-      when: run_once_srv_core is not defined
+        name: sys-svc-webserver
+      when: run_once_sys_svc_webserver is not defined
     - include_tasks: utils/run_once.yml
   when: run_once_sys_front_inj_javascript is not defined
@@ -1,8 +1,8 @@
-- name: Include dependency 'srv-core'
+- name: Include dependency 'sys-svc-webserver'
   include_role:
-    name: srv-core
+    name: sys-svc-webserver
   when:
-    - run_once_srv_core is not defined
+    - run_once_sys_svc_webserver is not defined

 - name: "deploy the logout.js"
   include_tasks: "02_deploy.yml"
@@ -1,8 +1,8 @@
 - block:
-    - name: Include dependency 'srv-core'
+    - name: Include dependency 'sys-svc-webserver'
       include_role:
-        name: srv-core
-      when: run_once_srv_core is not defined
+        name: sys-svc-webserver
+      when: run_once_sys_svc_webserver is not defined
     - include_tasks: utils/run_once.yml
   when: run_once_sys_front_inj_matomo is not defined
@@ -10,7 +10,7 @@ A higher-level orchestration wrapper, *sys-stk-front-proxy* ties together several

 1. **`sys-front-inj-all`** – applies global tweaks and includes.
 2. **`sys-svc-certs`** – obtains Let’s Encrypt certificates.
-3. **Domain template deployment** – copies a Jinja2 vHost from *srv-proxy-core*.
+3. **Domain template deployment** – copies a Jinja2 vHost from *sys-svc-proxy*.
 4. **`web-app-oauth2-proxy`** *(optional)* – protects the site with OAuth2.

 The result is a complete, reproducible domain rollout in a single playbook task.
@@ -2,4 +2,4 @@
 vhost_flavour: "basic"  # valid: basic, ws_generic

 # build the full template path from the flavour
-vhost_template_src: "roles/srv-proxy-core/templates/vhost/{{ vhost_flavour }}.conf.j2"
+vhost_template_src: "roles/sys-svc-proxy/templates/vhost/{{ vhost_flavour }}.conf.j2"
@@ -1,8 +1,8 @@
 - block:
-    - name: Include dependency 'srv-proxy-core'
+    - name: Include dependency 'sys-svc-proxy'
       include_role:
-        name: srv-proxy-core
-      when: run_once_srv_proxy_core is not defined
+        name: sys-svc-proxy
+      when: run_once_sys_svc_proxy is not defined
     - include_tasks: utils/run_once.yml
   when: run_once_sys_stk_front_proxy is not defined
@@ -15,7 +15,7 @@

 - name: "include role for {{ domain }} to receive certificates and do the modification routines"
   include_role:
-    name: srv-composer
+    name: sys-util-csp-cert

 - name: "Copy nginx config to {{ configuration_destination }}"
   template:
@@ -1 +1 @@
-configuration_destination: "{{ NGINX.DIRECTORIES.HTTP.SERVERS }}{{ domain }}.conf"
+configuration_destination: "{{ [ NGINX.DIRECTORIES.HTTP.SERVERS, domain ~ '.conf'] | path_join }}"
@@ -7,7 +7,7 @@ The **sys-stk-front-pure** role extends a basic Nginx installation by wiring in
 2. Pulls in Let’s Encrypt ACME challenge handling.
 3. Applies global cleanup of unused domain configs.

-This role is built on top of your existing `srv-core` role, and it automates the end-to-end process of turning HTTP sites into secure HTTPS sites.
+This role is built on top of your existing `sys-svc-webserver` role, and it automates the end-to-end process of turning HTTP sites into secure HTTPS sites.

 ---

@@ -15,9 +15,9 @@ This role is built on top of your existing `srv-core` role, and it automates the

 When you apply **sys-stk-front-pure**, it will:

-1. **Include** the `srv-core` role to install and configure Nginx.
+1. **Include** the `sys-svc-webserver` role to install and configure Nginx.
 2. **Clean up** any stale vHost files under `sys-svc-cln-domains`.
-3. **Deploy** the Let’s Encrypt challenge-and-redirect snippet from `srv-letsencrypt`.
+3. **Deploy** the Let’s Encrypt challenge-and-redirect snippet from `sys-svc-letsencrypt`.
 4. **Reload** Nginx automatically when any template changes.

 All tasks are idempotent—once your certificates are in place and your configuration is set, Ansible will skip unchanged steps on subsequent runs.
@@ -42,7 +42,7 @@ All tasks are idempotent—once your certificates are in place and your configur

 ## Requirements

-- A working `srv-core` setup.
+- A working `sys-svc-webserver` setup.
 - DNS managed via Cloudflare (for CAA record tasks) or equivalent ACME DNS flow.
 - Variables:
   - `LETSENCRYPT_WEBROOT_PATH`
@@ -3,8 +3,8 @@
   include_role:
     name: '{{ item }}'
   loop:
-    - srv-core
+    - sys-svc-webserver
     - sys-svc-cln-domains
-    - srv-letsencrypt
+    - sys-svc-letsencrypt
 - include_tasks: utils/run_once.yml
   when: run_once_sys_stk_front_pure is not defined
@@ -3,7 +3,7 @@
   include_role:
     name: '{{ item }}'
   loop:
-    - srv-core
+    - sys-svc-webserver

 - name: Include task to remove deprecated nginx configs
   include_tasks: remove_deprecated_nginx_configs.yml
@@ -15,6 +15,6 @@

 - name: Remove exact nginx config for {{ domain }}
   ansible.builtin.file:
-    path: "{{ NGINX.DIRECTORIES.HTTP.SERVERS }}{{ domain }}.conf"
+    path: "{{ [ NGINX.DIRECTORIES.HTTP.SERVERS, domain ~ '.conf'] | path_join }}"
     state: absent
   notify: restart openresty
@@ -17,14 +17,8 @@ When enabled via `MODE_CLEANUP` or `MODE_RESET`, it will automatically prune unused
   Installs Docker and Docker Compose via the system package manager.

 - **Integrated Dependencies**
-  Includes backup, repair, and health check sub-roles:
-  - `sys-ctl-bkp-docker-2-loc`
-  - `user-administrator`
-  - `sys-ctl-hlth-docker-container`
-  - `sys-ctl-hlth-docker-volumes`
-  - `sys-ctl-rpr-docker-soft`
-  - `sys-ctl-rpr-docker-hard`
+  Includes backup, repair, and health check sub-roles

 - **Cleanup & Reset Modes**
   - `MODE_CLEANUP`: Removes unused Docker containers, networks, images, and volumes.
   - `MODE_RESET`: Performs cleanup and restarts the Docker service.
@@ -21,6 +21,5 @@
     - sys-ctl-bkp-docker-2-loc
     - sys-ctl-hlth-docker-container
     - sys-ctl-hlth-docker-volumes
-    - sys-ctl-rpr-docker-soft
     - sys-ctl-rpr-docker-hard
   when: SYS_SVC_DOCKER_LOAD_SERVICES | bool
@@ -1,5 +1,4 @@
----
 - block:
     - include_tasks: 01_core.yml
     - include_tasks: utils/run_once.yml
-  when: run_once_srv_core is not defined
+  when: run_once_sys_svc_letsencrypt is not defined
@@ -12,4 +12,4 @@ ssl_session_tickets on;
 add_header Strict-Transport-Security max-age=15768000;
 ssl_stapling on;
 ssl_stapling_verify on;
-{% include 'roles/srv-letsencrypt/templates/ssl_credentials.j2' %}
+{% include 'roles/sys-svc-letsencrypt/templates/ssl_credentials.j2' %}
@@ -4,6 +4,6 @@
     name: '{{ item }}'
   loop:
     - sys-stk-front-pure
-    - srv-core
+    - sys-svc-webserver
 - include_tasks: utils/run_once.yml
-  when: run_once_srv_proxy_core is not defined
+  when: run_once_sys_svc_proxy is not defined
@@ -1,6 +1,6 @@
 # Nginx Location Templates

-This directory contains Jinja2 templates for different Nginx `location` blocks, each designed to proxy and optimize different types of web traffic. These templates are used by the `srv-proxy-core` role to modularize and standardize reverse proxy configuration across a wide variety of applications.
+This directory contains Jinja2 templates for different Nginx `location` blocks, each designed to proxy and optimize different types of web traffic. These templates are used by the `sys-svc-proxy` role to modularize and standardize reverse proxy configuration across a wide variety of applications.

 ---
@@ -15,7 +15,7 @@ location {{location}}
     proxy_set_header X-Forwarded-Proto $scheme;
     proxy_set_header X-Forwarded-Port {{ WEB_PORT }};

-    {% include 'roles/srv-proxy-core/templates/headers/content_security_policy.conf.j2' %}
+    {% include 'roles/sys-svc-proxy/templates/headers/content_security_policy.conf.j2' %}

     # WebSocket specific header
     proxy_http_version 1.1;
@@ -1,7 +1,7 @@
 server
 {
     server_name {{ domain }};
-    {% include 'roles/srv-proxy-core/templates/headers/buffers.conf.j2' %}
+    {% include 'roles/sys-svc-proxy/templates/headers/buffers.conf.j2' %}

{% if applications | get_app_conf(application_id, 'features.oauth2', False) %}
    {% include 'roles/web-app-oauth2-proxy/templates/endpoint.conf.j2'%}
@@ -14,7 +14,7 @@ server
     {{ proxy_extra_configuration }}
{% endif %}

-{% include 'roles/srv-letsencrypt/templates/ssl_header.j2' %}
+{% include 'roles/sys-svc-letsencrypt/templates/ssl_header.j2' %}

{% if applications | get_app_conf(application_id, 'features.oauth2', False) %}
    {% set acl = applications | get_app_conf(application_id, 'oauth2_proxy.acl', False, {}) %}
@@ -23,38 +23,38 @@ server
        {# 1. Expose everything by default, then protect blacklisted paths #}
        {% set oauth2_proxy_enabled = false %}
        {% set location = "/" %}
-        {% include 'roles/srv-proxy-core/templates/location/html.conf.j2' %}
+        {% include 'roles/sys-svc-proxy/templates/location/html.conf.j2' %}

        {% for loc in acl.blacklist %}
            {% set oauth2_proxy_enabled = true %}
            {% set location = loc %}
-            {% include 'roles/srv-proxy-core/templates/location/html.conf.j2' %}
+            {% include 'roles/sys-svc-proxy/templates/location/html.conf.j2' %}
        {% endfor %}

    {% elif acl.whitelist is defined %}
        {# 2. Protect everything by default, then expose whitelisted paths #}
        {% set oauth2_proxy_enabled = true %}
        {% set location = "/" %}
-        {% include 'roles/srv-proxy-core/templates/location/html.conf.j2' %}
+        {% include 'roles/sys-svc-proxy/templates/location/html.conf.j2' %}

        {% for loc in acl.whitelist %}
            {% set oauth2_proxy_enabled = false %}
            {% set location = loc %}
-            {% include 'roles/srv-proxy-core/templates/location/html.conf.j2' %}
+            {% include 'roles/sys-svc-proxy/templates/location/html.conf.j2' %}
        {% endfor %}

    {% else %}
        {# 3. OAuth2 enabled but no (or empty) ACL — protect all #}
        {% set oauth2_proxy_enabled = true %}
        {% set location = "/" %}
-        {% include 'roles/srv-proxy-core/templates/location/html.conf.j2' %}
+        {% include 'roles/sys-svc-proxy/templates/location/html.conf.j2' %}
    {% endif %}

{% else %}
    {# 4. OAuth2 completely disabled — expose all #}
    {% set oauth2_proxy_enabled = false %}
    {% set location = "/" %}
-    {% include 'roles/srv-proxy-core/templates/location/html.conf.j2' %}
+    {% include 'roles/sys-svc-proxy/templates/location/html.conf.j2' %}
{% endif %}

 }
@@ -6,7 +6,7 @@ map $http_upgrade $connection_upgrade {
 server {
     server_name {{ domain }};

-    {% include 'roles/srv-letsencrypt/templates/ssl_header.j2' %}
+    {% include 'roles/sys-svc-letsencrypt/templates/ssl_header.j2' %}

     {% include 'roles/sys-front-inj-all/templates/server.conf.j2' %}

@@ -25,10 +25,10 @@ server {

     add_header Strict-Transport-Security "max-age=31536000";

-    {% include 'roles/srv-proxy-core/templates/location/html.conf.j2' %}
+    {% include 'roles/sys-svc-proxy/templates/location/html.conf.j2' %}

{% if location_ws is defined %}
-    {% include 'roles/srv-proxy-core/templates/location/ws.conf.j2' %}
+    {% include 'roles/sys-svc-proxy/templates/location/ws.conf.j2' %}
{% endif %}

     error_page 500 501 502 503 504 /500.html;
@@ -8,10 +8,10 @@
     path: "{{ docker_compose.directories.env }}"
     state: directory
     mode: "0755"

-- name: "For '{{ application_id }}': Create {{database_env}}"
+- name: "For '{{ application_id }}': Create {{ database_env }}"
   template:
-    src: "env/{{database_type}}.env.j2"
-    dest: "{{database_env}}"
+    src: "env/{{ database_type }}.env.j2"
+    dest: "{{ database_env }}"
   notify: docker compose up
   when: not applications | get_app_conf(application_id, 'features.central_database', False)

@@ -19,7 +19,7 @@
   # I don't know why this includes leads to that the application_id in vars/main.yml of the database role isn't used
   # This is the behaviour which I want, but I'm still wondering why ;)
   include_role:
-    name: "svc-db-{{database_type}}"
+    name: "svc-db-{{ database_type }}"
   when: applications | get_app_conf(application_id, 'features.central_database', False)

 - name: "For '{{ application_id }}': Add Entry for Backup Procedure"
@@ -5,10 +5,10 @@
     container_name: {{ application_id | get_entity_name }}-database
     logging:
       driver: journald
-    image: mariadb
+    image: {{ database_image }}:{{ database_version }}
     restart: {{ DOCKER_RESTART_POLICY }}
     env_file:
-      - {{database_env}}
+      - {{ database_env }}
     command: "--transaction-isolation=READ-COMMITTED --binlog-format=ROW"
     volumes:
       - database:/var/lib/mysql
@@ -2,13 +2,13 @@

{% if not applications | get_app_conf(application_id, 'features.central_database', False) %}
   {{ database_host }}:
-    image: postgres:{{applications['svc-db-postgres'].version}}-alpine
+    image: {{ database_image }}:{{ database_version }}
     container_name: {{ application_id | get_entity_name }}-database
     env_file:
-      - {{database_env}}
+      - {{ database_env }}
     restart: {{ DOCKER_RESTART_POLICY }}
     healthcheck:
-      test: ["CMD-SHELL", "pg_isready -U {{ database_name }}"]
+      test: ["CMD-SHELL", "pg_isready -U {{ database_username }}"]
       interval: 10s
       timeout: 5s
       retries: 6
@@ -1,20 +1,23 @@
 # Helper variables
-_dbtype: "{{ (database_type | d('') | trim) }}"
-_database_id: "{{ ('svc-db-' ~ _dbtype) if _dbtype else '' }}"
-_database_central_name: "{{ (applications | get_app_conf(_database_id, 'docker.services.' ~ _dbtype ~ '.name', False, '')) if _dbtype else '' }}"
-_database_consumer_id: "{{ database_application_id | d(application_id) }}"
-_database_consumer_entity_name: "{{ _database_consumer_id | get_entity_name }}"
-_database_central_enabled: "{{ (applications | get_app_conf(_database_consumer_id, 'features.central_database', False)) if _dbtype else False }}"
+_dbtype:                        "{{ (database_type | d('') | trim) }}"
+_database_id:                   "{{ ('svc-db-' ~ _dbtype) if _dbtype else '' }}"
+_database_central_name:         "{{ (applications | get_app_conf(_database_id, 'docker.services.' ~ _dbtype ~ '.name', False, '')) if _dbtype else '' }}"
+_database_consumer_id:          "{{ database_application_id | d(application_id) }}"
+_database_consumer_entity_name: "{{ _database_consumer_id | get_entity_name }}"
+_database_central_enabled:      "{{ (applications | get_app_conf(_database_consumer_id, 'features.central_database', False)) if _dbtype else False }}"
+_database_default_version:      "{{ applications | get_app_conf(_database_id, 'docker.services.' ~ _dbtype ~ '.version') }}"

 # Definition

-database_name: "{{ _database_consumer_entity_name }}"
-database_instance: "{{ _database_central_name if _database_central_enabled else database_name }}" # This could lead to bugs at dedicated database @todo cleanup
-database_host: "{{ _database_central_name if _database_central_enabled else 'database' }}" # This could lead to bugs at dedicated database @todo cleanup
-database_username: "{{ _database_consumer_entity_name }}"
-database_password: "{{ applications | get_app_conf(_database_consumer_id, 'credentials.database_password', true) }}"
-database_port: "{{ (ports.localhost.database[_database_id] | d('')) if _dbtype else '' }}"
-database_env: "{{ docker_compose.directories.env }}{{ database_type }}.env"
-database_url_jdbc: "jdbc:{{ database_type if database_type == 'mariadb' else 'postgresql' }}://{{ database_host }}:{{ database_port }}/{{ database_name }}"
-database_url_full: "{{ database_type }}://{{ database_username }}:{{ database_password }}@{{ database_host }}:{{ database_port }}/{{ database_name }}"
-database_volume: "{{ _database_consumer_entity_name ~ '_' if not _database_central_enabled }}{{ database_host }}"
+database_name:     "{{ _database_consumer_entity_name }}"
+database_instance: "{{ _database_central_name if _database_central_enabled else database_name }}"  # This could lead to bugs at dedicated database @todo cleanup
+database_host:     "{{ _database_central_name if _database_central_enabled else 'database' }}"     # This could lead to bugs at dedicated database @todo cleanup
+database_username: "{{ _database_consumer_entity_name }}"
+database_password: "{{ applications | get_app_conf(_database_consumer_id, 'credentials.database_password', true) }}"
+database_port:     "{{ (ports.localhost.database[_database_id] | d('')) if _dbtype else '' }}"
+database_env:      "{{ docker_compose.directories.env }}{{ database_type }}.env"
+database_url_jdbc: "jdbc:{{ database_type if database_type == 'mariadb' else 'postgresql' }}://{{ database_host }}:{{ database_port }}/{{ database_name }}"
+database_url_full: "{{ database_type }}://{{ database_username }}:{{ database_password }}@{{ database_host }}:{{ database_port }}/{{ database_name }}"
+database_volume:   "{{ _database_consumer_entity_name ~ '_' if not _database_central_enabled }}{{ database_host }}"
+database_image:    "{{ _dbtype }}"
+database_version:  "{{ applications | get_app_conf( _database_consumer_id, 'docker.services.database.version', False, _database_default_version) }}"
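Combined with the compose-template changes above (`image: {{ database_image }}:{{ database_version }}`), a consuming application can now pin its own database version while everything else inherits the default from the central `svc-db-*` role. A sketch of the consumer-side override this reads (hypothetical application config; the key path mirrors the `get_app_conf` call above):

docker:
  services:
    database:
      version: "16.4"   # illustrative; overrides _database_default_version from the svc-db role

Without the override, `database_version` falls back to the version configured on the `svc-db-*` role, so a postgres consumer renders `image: postgres:<central default>`.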
@@ -18,4 +18,4 @@ galaxy_info:
     - performance
   repository: "https://s.infinito.nexus/code"
   issue_tracker_url: "https://s.infinito.nexus/issues"
-  documentation: "https://s.infinito.nexus/code/roles/srv-core"
+  documentation: "https://s.infinito.nexus/code/roles/sys-svc-webserver"
@@ -49,3 +49,5 @@
     - sys-ctl-hlth-csp
   vars:
     flush_handlers: false
+
+- include_tasks: utils/run_once.yml

roles/sys-svc-webserver/tasks/main.yml (new file, 4 lines)
@@ -0,0 +1,4 @@
+---
+- block:
+    - include_tasks: 01_core.yml
+  when: run_once_sys_svc_webserver is not defined
@@ -1,4 +1,4 @@
-# Role: srv-composer
+# Role: sys-util-csp-cert

 This Ansible role composes and orchestrates all necessary HTTPS-layer tasks and HTML-content injections for your webserver domains. It integrates two key sub-roles into a unified workflow:
@@ -27,4 +27,4 @@ galaxy_info:
     - orchestration
   repository: "https://s.infinito.nexus/code"
   issue_tracker_url: "https://s.infinito.nexus/issues"
-  documentation: "https://s.infinito.nexus/code/roles/srv-composer"
+  documentation: "https://s.infinito.nexus/code/roles/sys-util-csp-cert"
@@ -1,4 +1,4 @@
-# run_once_srv_composer: deactivated
+# run_once_sys_util_csp_cert: deactivated

 - name: "include role sys-front-inj-all for '{{ domain }}'"
   include_role:
@@ -14,13 +14,6 @@
     name: update-apt
   when: ansible_distribution == "Debian"

-- name: "Update Docker Images"
-  include_role:
-    name: update-docker
-  when:
-    - docker_compose_directory_stat.stat.exists
-    - run_once_update_docker is not defined
-
 - name: "Check if yay is installed"
   command: which yay
   register: yay_installed
@@ -51,7 +44,3 @@
   register: pkgmgr_available
   failed_when: false

-- name: "Update all repositories using pkgmgr"
-  include_role:
-    name: update-pkgmgr
-  when: pkgmgr_available.rc == 0
@@ -1,27 +0,0 @@
-# Update Docker
-
-## Description
-
-This role updates Docker Compose instances by checking for changes in Docker image digests and applying updates if necessary. It utilizes a Python script to handle git pulls and Docker image pulls, and rebuilds containers when changes are detected.
-
-## Overview
-
-The role performs the following:
-- Deploys a Python script to check for Docker image updates.
-- Configures a systemd service to run the update script.
-- Restarts the Docker update service upon configuration changes.
-- Supports additional procedures for specific Docker applications (e.g., Discourse, Mastodon, Nextcloud).
-
-## Purpose
-
-The role is designed to ensure that Docker images remain current by automatically detecting changes and rebuilding containers as needed. This helps maintain a secure and efficient container environment.
-
-## Features
-
-- **Docker Image Monitoring:** Checks for changes in image digests.
-- **Automated Updates:** Pulls new images and rebuilds containers when necessary.
-- **Service Management:** Configures and restarts a systemd service to handle updates.
-- **Application-Specific Procedures:** Includes hooks for updating specific Docker applications.
-
-## Credits 📝
-It was created with the help of ChatGPT. The conversation is available [here](https://chat.openai.com/share/165418b8-25fa-433b-baca-caded941e22a)
@@ -1,27 +0,0 @@
-galaxy_info:
-  author: "Kevin Veen-Birkenbach"
-  description: "Updates Docker Compose instances by detecting changes in Docker image digests and rebuilding containers when necessary. This role automates Docker image pulls and container rebuilds."
-  license: "Infinito.Nexus NonCommercial License"
-  license_url: "https://s.infinito.nexus/license"
-  company: |
-    Kevin Veen-Birkenbach
-    Consulting & Coaching Solutions
-    https://www.veen.world
-  min_ansible_version: "2.9"
-  platforms:
-    - name: Archlinux
-      versions:
-        - rolling
-    - name: Ubuntu
-      versions:
-        - all
-  galaxy_tags:
-    - docker
-    - update
-    - compose
-    - images
-    - systemd
-    - maintenance
-  repository: "https://s.infinito.nexus/code"
-  issue_tracker_url: "https://s.infinito.nexus/issues"
-  documentation: "https://docs.infinito.nexus"
@@ -1,20 +0,0 @@
-- name: Include dependency 'sys-lock'
-  include_role:
-    name: sys-lock
-  when: run_once_sys_lock is not defined
-
-- name: "start {{ 'sys-ctl-bkp-docker-2-loc-everything' | get_service_name(SOFTWARE_NAME) }}"
-  systemd:
-    name: "{{ 'sys-ctl-bkp-docker-2-loc-everything' | get_service_name(SOFTWARE_NAME) }}"
-    state: started
-  when:
-    - MODE_BACKUP | bool
-
-- include_role:
-    name: sys-service
-  vars:
-    system_service_restarted: true
-    system_service_timer_enabled: false
-    system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
-    system_service_tpl_exec_start: "{{ system_service_script_exec }} {{ PATH_DOCKER_COMPOSE_INSTANCES }}"
-    system_service_tpl_exec_start_pre: "/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(' ') }} --ignore {{ SYS_SERVICE_GROUP_CLEANUP | join(' ') }} {{ 'update-docker' | get_service_name(SOFTWARE_NAME) }} --timeout '{{ SYS_TIMEOUT_DOCKER_UPDATE }}'"
@@ -1,4 +0,0 @@
-- block:
-    - include_tasks: 01_core.yml
-    - include_tasks: utils/run_once.yml
-  when: run_once_update_docker is not defined
@@ -1,217 +0,0 @@
import os
import subprocess
import sys
import time

def run_command(command):
    """
    Executes the specified shell command, streaming and collecting its output in real time.
    If the command exits with a non-zero status, a subprocess.CalledProcessError is raised,
    including the exit code, the executed command, and the full output (as bytes) for debugging purposes.
    """
    process = None
    try:
        process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        output = []

        for line in iter(process.stdout.readline, b''):
            decoded_line = line.decode()
            output.append(decoded_line)
            sys.stdout.write(decoded_line)

        return_code = process.wait()
        if return_code:
            full_output = ''.join(output)
            raise subprocess.CalledProcessError(return_code, command, output=full_output.encode())
    finally:
        if process and process.stdout:
            process.stdout.close()

def git_pull():
    """
    Checks whether the Git repository in the current working directory is up to date and performs a git pull if necessary.

    Raises:
        Exception: If retrieving the local or remote git revision fails because the command returns a non-zero exit code.
    """
    print("Checking if the git repository is up to date.")

    # Run 'git rev-parse @' and check its exit code explicitly.
    local_proc = subprocess.run("git rev-parse @", shell=True, capture_output=True)
    if local_proc.returncode != 0:
        error_msg = local_proc.stderr.decode().strip() or "Unknown error while retrieving local revision."
        raise Exception(f"Failed to retrieve local git revision: {error_msg}")
    local = local_proc.stdout.decode().strip()

    # Run 'git rev-parse @{u}' and check its exit code explicitly.
    remote_proc = subprocess.run("git rev-parse @{u}", shell=True, capture_output=True)
    if remote_proc.returncode != 0:
        error_msg = remote_proc.stderr.decode().strip() or "Unknown error while retrieving remote revision."
        raise Exception(f"Failed to retrieve remote git revision: {error_msg}")
    remote = remote_proc.stdout.decode().strip()

    if local != remote:
        print("Repository is not up to date. Performing git pull.")
        run_command("git pull")
        return True

    print("Repository is already up to date.")
    return False

{% raw %}
def get_image_digests(directory):
    """
    Retrieves the image digests for all images in the specified Docker Compose project.
    """
    compose_project = os.path.basename(directory)
    try:
        images_output = subprocess.check_output(
            f'docker images --format "{{{{.Repository}}}}:{{{{.Tag}}}}@{{{{.Digest}}}}" | grep {compose_project}',
            shell=True
        ).decode().strip()
        return dict(line.split('@') for line in images_output.splitlines() if line)
    except subprocess.CalledProcessError as e:
        if e.returncode == 1:  # grep found no match
            return {}
        else:
            raise  # Other errors are still raised
{% endraw %}

def is_any_service_up():
    """
    Checks if any Docker services are currently running.
    """
    process = subprocess.Popen("docker-compose ps -q", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output, _ = process.communicate()
    service_ids = output.decode().strip().splitlines()
    return bool(service_ids)

def pull_docker_images():
    """
    Pulls the latest Docker images for the project.
    """
    print("Pulling docker images.")
    try:
        run_command("docker-compose pull")
    except subprocess.CalledProcessError as e:
        if "pull access denied" in e.output.decode() or "must be built from source" in e.output.decode():
            print("Need to build the image from source.")
            return True
        else:
            print("Failed to pull images with unexpected error.")
            raise
    return False

def update_docker(directory):
    """
    Checks for updates to Docker images and rebuilds containers if necessary.
    """
    print(f"Checking for updates to Docker images in {directory}.")
    before_digests = get_image_digests(directory)
    need_to_build = pull_docker_images()
    after_digests = get_image_digests(directory)
    if before_digests != after_digests:
        print("Changes detected in image digests. Rebuilding containers.")
        need_to_build = True

    if need_to_build:
        # This probably just rebuilds the Dockerfile image if there is a change in the other docker compose containers
        run_command("docker-compose build --pull")
        start_docker(directory)
    else:
        print("Docker images are up to date. No rebuild necessary.")

def update_discourse(directory):
    """
    Updates Discourse by running the rebuild command on the launcher script.
    """
    docker_repository_directory = os.path.join(directory, "services", "{{ applications | get_app_conf('web-app-discourse','repository') }}")
    print(f"Using path {docker_repository_directory} to pull discourse repository.")
    os.chdir(docker_repository_directory)
    if git_pull():
        print("Start Discourse update procedure.")
        update_procedure("docker stop {{ applications | get_app_conf('web-app-discourse','docker.services.discourse.name') }}")
        update_procedure("docker rm {{ applications | get_app_conf('web-app-discourse','docker.services.discourse.name') }}")
        try:
            update_procedure("docker network connect {{ applications | get_app_conf('web-app-discourse','docker.network') }} {{ applications | get_app_conf('svc-db-postgres', 'docker.network') }}")
        except subprocess.CalledProcessError as e:
            error_message = e.output.decode()
            if "already exists" in error_message or "is already connected" in error_message:
                print("Network connection already exists. Skipping...")
            else:
                raise
        update_procedure("./launcher rebuild {{ applications | get_app_conf('web-app-discourse','docker.services.discourse.name') }}")
    else:
        print("Discourse update skipped. No changes in git repository.")

def upgrade_listmonk():
    """
    Runs the upgrade for Listmonk.
    """
    print("Starting Listmonk upgrade.")
    run_command('echo "y" | docker compose run -T application ./listmonk --upgrade')
    print("Upgrade complete.")

def update_procedure(command):
    """
    Attempts to execute a command up to a maximum number of retries.
    """
    max_attempts = 3
    for attempt in range(max_attempts):
        try:
            run_command(command)
            break  # If the command succeeds, exit the loop
        except subprocess.CalledProcessError:
            if attempt < max_attempts - 1:  # Check if it's not the last attempt
                print(f"Attempt {attempt + 1} failed, retrying in 60 seconds...")
                time.sleep(60)  # Wait for 60 seconds before retrying
            else:
                print("All attempts to update have failed.")
                raise  # Re-raise the last exception after all attempts fail

def start_docker(directory):
    """
    Starts or restarts Docker services in the specified directory.
    """
    if is_any_service_up():
        print(f"Restarting containers in {directory}.")
        run_command("docker-compose up -d --force-recreate")
    else:
        print(f"Skipped starting. No service is up in {directory}.")

if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("Please provide the path to the parent directory as a parameter.")
        sys.exit(1)

    parent_directory = sys.argv[1]
    for dir_entry in os.scandir(parent_directory):
        if dir_entry.is_dir():
            dir_path = dir_entry.path
            print(f"Checking for updates in: {dir_path}")
            os.chdir(dir_path)

            # Pull the git repository if it exists
            # @deprecated: This function should be removed in the future, as soon as all docker applications use the correct folder path
            if os.path.isdir(os.path.join(dir_path, ".git")):
                print("DEPRECATED: Docker .git repositories should be saved under /opt/docker/{instance}/services/{repository_name}")
                git_pull()

            if os.path.basename(dir_path) == "matrix":
                # No autoupdate for matrix is possible at the moment,
                # because the role has to be executed every time.
                # The update has to be executed in the role.
                # @todo implement in future
                pass
            else:
                # Pull and update docker images
                update_docker(dir_path)

            # The following instances need additional update and upgrade procedures
            if os.path.basename(dir_path) == "discourse":
                update_discourse(dir_path)
            elif os.path.basename(dir_path) == "listmonk":
                upgrade_listmonk()

            # @todo implement dedicated procedure for bluesky
            # @todo implement dedicated procedure for taiga
@@ -1,2 +0,0 @@
application_id: update-docker
system_service_id: "{{ application_id }}"
@@ -1,23 +0,0 @@
# Update Pip Packages

## Description

This Ansible role automatically updates all installed Python Pip packages to their latest versions.

## Overview

The role performs the following:
- Executes a command to retrieve all installed Python Pip packages.
- Updates each package individually to its latest available version (see the sketch below).
- Ensures a smooth and automated Python environment maintenance process.

## Purpose

Ensures Python packages remain up to date, improving security and functionality.

## Features

- **Automatic Updates:** Automates the process of upgrading Python packages.
- **Platform Independent:** Works on Linux, macOS, and Windows environments.
- **Ansible Integration:** Easy to include in larger playbooks or maintenance routines.
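
For illustration, the behavior the role describes — list every outdated pip package, then upgrade each one on its own — can be sketched in plain Python. This is a minimal sketch, not the role's actual task code; it assumes a `pip` recent enough to support `pip list --outdated --format=json`:

```python
import json
import subprocess
import sys

def upgrade_all_pip_packages():
    """Upgrade every outdated pip package, one package at a time."""
    # 'pip list --outdated --format=json' yields entries with
    # "name", "version" and "latest_version" keys.
    raw = subprocess.check_output(
        [sys.executable, "-m", "pip", "list", "--outdated", "--format=json"]
    )
    for package in json.loads(raw):
        name = package["name"]
        print(f"Upgrading {name}: {package['version']} -> {package['latest_version']}")
        # Upgrading individually keeps one failing package from
        # aborting the whole maintenance run.
        subprocess.call([sys.executable, "-m", "pip", "install", "--upgrade", name])

if __name__ == "__main__":
    upgrade_all_pip_packages()
```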
@@ -1,25 +0,0 @@
galaxy_info:
  author: "Kevin Veen-Birkenbach"
  license: "Infinito.Nexus NonCommercial License"
  license_url: "https://s.infinito.nexus/license"
  company: |
    Kevin Veen-Birkenbach
    Consulting & Coaching Solutions
    https://www.veen.world
  description: "Automatically updates all Python Pip packages to their latest available versions."
  min_ansible_version: "2.9"
  platforms:
    - name: Ubuntu
      versions:
        - all
    - name: Archlinux
      versions:
        - rolling
    - name: Debian
      versions:
        - all
  galaxy_tags:
    - python
    - pip
    - update
    - maintenance
@@ -1,9 +0,0 @@
- block:
    - name: Include dependency 'dev-python-pip'
      include_role:
        name: dev-python-pip
      when: run_once_dev_python_pip is not defined
    - include_tasks: utils/run_once.yml
      vars:
        flush_handlers: false
  when: run_once_update_pip is not defined
@@ -1 +0,0 @@
application_id: update-pip
@@ -1,27 +0,0 @@
# Update pkgmgr

## Description

This role checks if the [package manager](https://github.com/kevinveenbirkenbach/package-manager) is available on the system. If so, it runs `pkgmgr update --all` to update all repositories managed by `pkgmgr`.

## Overview

This role performs the following tasks:
- Checks if the `pkgmgr` command is available.
- If available, runs `pkgmgr update --all` to update all repositories (see the sketch after this list).

## Purpose

The purpose of this role is to simplify system updates by using the `pkgmgr` package manager to handle all repository updates with a single command.

## Features

- **Conditional Execution**: Runs only if the `pkgmgr` command is found on the system.
- **Automated Updates**: Automatically runs `pkgmgr update --all` to update all repositories.
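
The conditional logic the role describes amounts to roughly the following — a minimal Python sketch, not the role's actual tasks; only the `pkgmgr update --all` invocation is taken from the role itself:

```python
import shutil
import subprocess

def update_all_repositories():
    """Run 'pkgmgr update --all', but only when the pkgmgr CLI is installed."""
    # Conditional execution: skip silently if pkgmgr is not on PATH.
    if shutil.which("pkgmgr") is None:
        print("pkgmgr not found on PATH; skipping repository updates.")
        return
    # Automated updates: one command updates every managed repository.
    subprocess.check_call(["pkgmgr", "update", "--all"])

if __name__ == "__main__":
    update_all_repositories()
```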

## License

Infinito.Nexus NonCommercial License
[Learn More](https://s.infinito.nexus/license)
@@ -1,2 +0,0 @@
# Todos
- Activate the update again. At the moment this is not possible, because it pulls all repos.
@@ -1,3 +0,0 @@
# run_once_update_pkgmgr: deactivated
#- name: "Update all repositories with pkgmgr"
#  command: "pkgmgr update --all"
@@ -1 +0,0 @@
application_id: update-pkgmgr
Some files were not shown because too many files have changed in this diff.