THE HUGE REFACTORING, CALENDAR WEEK 33: Optimized Matrix and, along the way, updated variables, implemented better reset and cleanup mode handling, and fixed some initial setup bugs

This commit is contained in:
Kevin Veen-Birkenbach 2025-08-15 15:15:48 +02:00
parent 0228014d34
commit 022800425d
No known key found for this signature in database
GPG Key ID: 44D8F11FD62F878E
271 changed files with 1098 additions and 916 deletions

View File

@@ -29,7 +29,7 @@ WEB_PORT: "{{ 443 if WEB_PROTOCOL == 'https' else 80 }}" # Defaul
 # Domain
 PRIMARY_DOMAIN: "localhost"  # Primary Domain of the server
-PRIMARY_DOMAIN_tld: "{{ (PRIMARY_DOMAIN == 'localhost') | ternary('localhost', PRIMARY_DOMAIN.split('.')[-1]) }}"  # Top Level Domain of the server
+PRIMARY_DOMAIN_TLD: "{{ (PRIMARY_DOMAIN == 'localhost') | ternary('localhost', PRIMARY_DOMAIN.split('.')[-1]) }}"  # Top Level Domain of the server
 PRIMARY_DOMAIN_SLD: "{{ (PRIMARY_DOMAIN == 'localhost') | ternary('localhost', PRIMARY_DOMAIN.split('.')[-2]) }}"  # Second Level Domain of the server
 # Server Tact Variables
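
For orientation, a short worked example of how the renamed helpers resolve (the domain value is illustrative, not part of this commit): with PRIMARY_DOMAIN set to "example.com", the split filters yield the top and second level domain labels; with the default "localhost", both fall back to "localhost" via the ternary filter.

# Illustrative resolution (assumed inventory value, not from this commit)
PRIMARY_DOMAIN: "example.com"
# PRIMARY_DOMAIN_TLD -> "com"      (PRIMARY_DOMAIN.split('.')[-1])
# PRIMARY_DOMAIN_SLD -> "example"  (PRIMARY_DOMAIN.split('.')[-2])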

View File

@@ -1,9 +1,9 @@
 # Mode
 # The following modes can be combined with each other
-MODE_RESET: false    # Cleans up all Infinito.Nexus files. It's necessary to run to whole playbook and not particial roles when using this function.
 MODE_TEST: false     # Executes test routines instead of productive routines
 MODE_UPDATE: true    # Executes updates
 MODE_BACKUP: true    # Activates the backup before the update procedure
 MODE_CLEANUP: true   # Cleanup unused files and configurations
 MODE_DEBUG: false    # This enables debugging in ansible and in the apps, You SHOULD NOT enable this on production servers
+MODE_RESET: false    # Cleans up all Infinito.Nexus files. It's necessary to run to whole playbook and not particial roles when using this function.
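
A minimal sketch of how these combinable flags might be overridden for a one-off run, e.g. in an inventory group_vars file (the override location is an assumption, not part of this commit):

MODE_RESET: true     # triggers the new reset task includes introduced further down in this commit
MODE_CLEANUP: true   # cleanup also runs whenever MODE_RESET is true (see the srv-web-7-4-core tasks below)
MODE_DEBUG: false    # keep disabled on production servers, as the comment above warns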

View File

@@ -1,27 +1,32 @@
 # Webserver Configuration
 # Helper
-_nginx_www_dir: /var/www/
-## Nginx-Specific Path Configurations
-nginx:
-  files:
-    configuration: "/etc/nginx/nginx.conf"
-  directories:
-    configuration: "/etc/nginx/conf.d/"           # Configuration directory
-    http:
-      global: "/etc/nginx/conf.d/http/global/"    # Contains global configurations which will be loaded into the http block
-      servers: "/etc/nginx/conf.d/http/servers/"  # Contains one configuration per domain
-      maps: "/etc/nginx/conf.d/http/maps/"        # Contains mappings
-    streams: "/etc/nginx/conf.d/streams/"         # Contains streams configuration e.g. for ldaps
-    data:
-      www: "{{ _nginx_www_dir }}"
-      well_known: "/usr/share/nginx/well-known/"  # Path where well-known files are stored
-      html: "{{ _nginx_www_dir }}public_html/"    # Path where the static homepage files are stored
-      files: "{{ _nginx_www_dir }}public_files/"  # Path where the web accessable files are stored
-      cdn: "{{ _nginx_www_dir }}public_cdn/"      # Contains files which will be accessable via the content delivery network
-      global: "{{ _nginx_www_dir }}global/"       # Directory containing files which will be globaly accessable
-    cache:
-      general: "/tmp/cache_nginx_general/"        # Directory which nginx uses to cache general data
-      image: "/tmp/cache_nginx_image/"            # Directory which nginx uses to cache images
-  user: "http"                                    # Default nginx user in ArchLinux
+_nginx_www_dir: "{{ applications | get_app_conf('svc-prx-openresty','docker.volumes.www') }}"
+_nginx_dir: "{{ applications | get_app_conf('svc-prx-openresty','docker.volumes.nginx') }}"
+_nginx_conf_dir: "{{ _nginx_dir }}conf.d/"
+_nginx_http_dir: "{{ _nginx_conf_dir }}http/"
+## Nginx-Specific Path Configurations
+NGINX:
+  FILES:
+    CONFIGURATION: "{{ _nginx_dir }}nginx.conf"
+  DIRECTORIES:
+    CONFIGURATION: "{{ _nginx_conf_dir }}"          # Configuration directory
+    HTTP:
+      GLOBAL: "{{ _nginx_http_dir }}global/"        # Contains global configurations which will be loaded into the http block
+      SERVERS: "{{ _nginx_http_dir }}servers/"      # Contains one configuration per domain
+      MAPS: "{{ _nginx_http_dir }}maps/"            # Contains mappings
+    STREAMS: "{{ _nginx_conf_dir }}streams/"        # Contains streams configuration e.g. for ldaps
+    DATA:
+      WWW: "{{ _nginx_www_dir }}"
+      WELL_KNOWN: "/usr/share/nginx/well-known/"    # Path where well-known files are stored
+      HTML: "{{ _nginx_www_dir }}public_html/"      # Path where the static homepage files are stored
+      FILES: "{{ _nginx_www_dir }}public_files/"    # Path where the web accessable files are stored
+      CDN: "{{ _nginx_www_dir }}public_cdn/"        # Contains files which will be accessable via the content delivery network
+      GLOBAL: "{{ _nginx_www_dir }}global/"         # Directory containing files which will be globaly accessable, @Todo remove this when css migrated to CDN
+    CACHE:
+      GENERAL: "/tmp/cache_nginx_general/"          # Directory which nginx uses to cache general data
+      IMAGE: "/tmp/cache_nginx_image/"              # Directory which nginx uses to cache images
+  USER: "http"                                      # Default nginx user in ArchLinux
+# @todo It propably makes sense to distinguish between target and source mount path, so that the config files can be stored in the openresty volumes folder
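
As a sketch of how the restructured dictionary is consumed, assuming docker.volumes.nginx resolves to something like /opt/docker/openresty/volumes/nginx/ (an illustrative value, not taken from this commit):

# _nginx_conf_dir                 -> .../nginx/conf.d/
# NGINX.FILES.CONFIGURATION       -> .../nginx/nginx.conf
# NGINX.DIRECTORIES.HTTP.SERVERS  -> .../nginx/conf.d/http/servers/
- name: render a per-domain server config   # mirrors configuration_destination further down in this commit
  template:
    src: server.conf.j2                     # placeholder template name, for illustration only
    dest: "{{ NGINX.DIRECTORIES.HTTP.SERVERS }}{{ domain }}.conf"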

View File

@@ -1,6 +1,6 @@
 # Path Variables for Key Directories and Scripts
-path_administrator_home: "/home/administrator/"
-path_administrator_scripts: "/opt/scripts/"
-path_docker_compose_instances: "/opt/docker/"
-path_system_lock_script: "/opt/scripts/sys-lock.py"
+PATH_ADMINISTRATOR_HOME: "/home/administrator/"
+PATH_ADMINISTRATOR_SCRIPTS: "/opt/scripts/"
+PATH_DOCKER_COMPOSE_INSTANCES: "/opt/docker/"
+PATH_SYSTEM_LOCK_SCRIPT: "/opt/scripts/sys-lock.py"

View File

@@ -5,7 +5,7 @@
 # Helper Variables:
 # Keep in mind to mapp this variables if there is ever the possibility for the user to define them in the inventory
-_ldap_dn_base: "dc={{PRIMARY_DOMAIN_SLD}},dc={{PRIMARY_DOMAIN_tld}}"
+LDAP_DN_BASE: "dc={{ PRIMARY_DOMAIN_SLD }},dc={{ PRIMARY_DOMAIN_TLD }}"
 _ldap_docker_network_enabled: "{{ applications | get_app_conf('svc-db-openldap', 'network.docker') }}"
 _ldap_protocol: "{{ 'ldap' if _ldap_docker_network_enabled else 'ldaps' }}"
 _ldap_server_port: "{{ ports.localhost[_ldap_protocol]['svc-db-openldap'] }}"
@@ -22,14 +22,14 @@ ldap:
   # This is the top-level naming context for your directory, used as the
   # default search base for most operations (e.g. adding users, groups).
   # Example: “dc=example,dc=com”
-  root: "{{_ldap_dn_base}}"
+  root: "{{ LDAP_DN_BASE }}"
   administrator:
     # -------------------------------------------------------------------------
     # Data-Tree Administrator Bind DN
     # The DN used to authenticate for regular directory operations under
     # the data tree (adding users, modifying attributes, creating OUs, etc.).
     # Typically: “cn=admin,dc=example,dc=com”
-    data: "cn={{ applications['svc-db-openldap'].users.administrator.username }},{{ _ldap_dn_base }}"
+    data: "cn={{ applications['svc-db-openldap'].users.administrator.username }},{{ LDAP_DN_BASE }}"
     # -------------------------------------------------------------------------
     # Config-Tree Administrator Bind DN
@@ -47,9 +47,9 @@ ldap:
   # groups: Contains organizational or business groups (e.g., departments, teams).
   # roles: Contains application-specific RBAC roles
   #        (e.g., "cn=app1-user", "cn=yourls-admin").
-  users: "ou=users,{{ _ldap_dn_base }}"
-  groups: "ou=groups,{{ _ldap_dn_base }}"
-  roles: "ou=roles,{{ _ldap_dn_base }}"
+  users: "ou=users,{{ LDAP_DN_BASE }}"
+  groups: "ou=groups,{{ LDAP_DN_BASE }}"
+  roles: "ou=roles,{{ LDAP_DN_BASE }}"
   # -------------------------------------------------------------------------
   # Additional Notes
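
For reference, a small worked example of the renamed LDAP_DN_BASE, reusing the "example.com" assumption from above (PRIMARY_DOMAIN_SLD = "example", PRIMARY_DOMAIN_TLD = "com"):

# LDAP_DN_BASE -> "dc=example,dc=com"
# users        -> "ou=users,dc=example,dc=com"
# groups       -> "ou=groups,dc=example,dc=com"
# roles        -> "ou=roles,dc=example,dc=com"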

View File

@@ -10,7 +10,7 @@
 - name: "set oauth2_proxy_application_id (Needed due to lazzy loading issue)"
   set_fact:
     oauth2_proxy_application_id: "{{ application_id }}"
-- name: "include the web-app-oauth2-proxy role {{domain}}"
+- name: "include the web-app-oauth2-proxy role {{ domain }}"
   include_tasks: "{{ playbook_dir }}/roles/web-app-oauth2-proxy/tasks/main.yml"
   when: applications | get_app_conf(application_id, 'features.oauth2', False)

View File

@@ -6,7 +6,7 @@
     logging:
       driver: journald
     image: mariadb
-    restart: {{DOCKER_RESTART_POLICY}}
+    restart: {{ DOCKER_RESTART_POLICY }}
     env_file:
       - {{database_env}}
     command: "--transaction-isolation=READ-COMMITTED --binlog-format=ROW"

View File

@@ -6,7 +6,7 @@
     container_name: {{ application_id | get_entity_name }}-database
     env_file:
       - {{database_env}}
-    restart: {{DOCKER_RESTART_POLICY}}
+    restart: {{ DOCKER_RESTART_POLICY }}
     healthcheck:
       test: ["CMD-SHELL", "pg_isready -U {{database_name}}"]
       interval: 10s

View File

@ -0,0 +1,16 @@
- name: "Load docker container role"
include_role:
name: docker-container
when: run_once_docker_container is not defined
- name: "reset (if enabled)"
include_tasks: 02_reset.yml
when: MODE_RESET | bool
- name: "create {{ PATH_DOCKER_COMPOSE_INSTANCES }}"
file:
path: "{{ PATH_DOCKER_COMPOSE_INSTANCES }}"
state: directory
mode: 0700
owner: root
group: root

View File

@ -1,11 +0,0 @@
# It is necessary to shut the projects down, when reset is activated.
# Otherwise it can lead to this bug:
# https://github.com/ansible/ansible/issues/10244
- name: shut down docker compose project
command:
cmd: "docker-compose -p {{ application_id }} down"
- name: "Remove {{ docker_compose.directories.instance }} and all its contents"
file:
path: "{{ docker_compose.directories.instance }}"
state: absent

View File

@ -0,0 +1,16 @@
# It is necessary to shut the projects down, when reset is activated.
# Otherwise it can lead to this bug:
# https://github.com/ansible/ansible/issues/10244
- name: "pkgmgr install '{{ DOCKER_COMPOSE_DOWN_ALL_PACKAGE }}'"
include_role:
name: pkgmgr-install
vars:
package_name: "{{ DOCKER_COMPOSE_DOWN_ALL_PACKAGE }}"
- name: Shutdown all docker compose instances in '{{ PATH_DOCKER_COMPOSE_INSTANCES }}' with '{{ DOCKER_COMPOSE_DOWN_ALL_PACKAGE }}'
command: "{{ DOCKER_COMPOSE_DOWN_ALL_PACKAGE }} {{ PATH_DOCKER_COMPOSE_INSTANCES }}"
- name: "Remove directory '{{ PATH_DOCKER_COMPOSE_INSTANCES }}' and all its contents"
file:
path: "{{ PATH_DOCKER_COMPOSE_INSTANCES }}"
state: absent

View File

@ -1,16 +1,10 @@
- block: - block:
- include_role: - include_tasks: 01_core.yml
name: docker-container
when: run_once_docker_container is not defined
- include_tasks: utils/run_once.yml - include_tasks: utils/run_once.yml
when: run_once_docker_compose is not defined when: run_once_docker_compose is not defined
- name: "Load variables from {{ docker_compose_variable_file }} for whole play" - name: "Load variables from {{ DOCKER_COMPOSE_VARIABLE_FILE }} for whole play"
include_vars: "{{ docker_compose_variable_file }}" include_vars: "{{ DOCKER_COMPOSE_VARIABLE_FILE }}"
- name: "reset (if enabled)"
include_tasks: 01_reset.yml
when: MODE_RESET | bool
# This could lead to problems in docker-compose directories which are based on a git repository # This could lead to problems in docker-compose directories which are based on a git repository
# @todo Verify that this isn't the case. E.g. in accounting # @todo Verify that this isn't the case. E.g. in accounting
@ -21,15 +15,15 @@
mode: '0755' mode: '0755'
with_dict: "{{ docker_compose.directories }}" with_dict: "{{ docker_compose.directories }}"
- name: "Include routines to set up a git repository based installation for '{{application_id}}'." - name: "Include routines to set up a git repository based installation for '{{ application_id }}'."
include_tasks: "02_repository.yml" include_tasks: "03_repository.yml"
when: docker_pull_git_repository | bool when: docker_pull_git_repository | bool
- block: - block:
- name: "Include file management routines for '{{application_id}}'." - name: "Include file management routines for '{{ application_id }}'."
include_tasks: "03_files.yml" include_tasks: "04_files.yml"
- name: "Ensure that {{ docker_compose.directories.instance }} is up" - name: "Ensure that {{ docker_compose.directories.instance }} is up"
include_tasks: "04_ensure_up.yml" include_tasks: "05_ensure_up.yml"
when: not docker_compose_skipp_file_creation | bool when: not docker_compose_skipp_file_creation | bool
- name: "flush docker compose for '{{ application_id }}'" - name: "flush docker compose for '{{ application_id }}'"

View File

@@ -1,2 +1,2 @@
 # @See https://chatgpt.com/share/67a23d18-fb54-800f-983c-d6d00752b0b4
-docker_compose: "{{ application_id | get_docker_paths(path_docker_compose_instances) }}"
+docker_compose: "{{ application_id | get_docker_paths(PATH_DOCKER_COMPOSE_INSTANCES) }}"

View File

@@ -1 +1,2 @@
-docker_compose_variable_file: "{{ role_path }}/vars/docker-compose.yml"
+DOCKER_COMPOSE_VARIABLE_FILE: "{{ role_path }}/vars/docker-compose.yml"
+DOCKER_COMPOSE_DOWN_ALL_PACKAGE: "docodol"

View File

@@ -1,6 +1,6 @@
 - block:
     - include_role:
-        name: docker-core
-      when: run_once_docker_core is not defined
+        name: sys-svc-docker
+      when: run_once_sys_svc_docker is not defined
     - include_tasks: utils/run_once.yml
   when: run_once_docker_container is not defined

View File

@@ -1,6 +1,6 @@
 {# Base for docker services #}
-    restart: {{DOCKER_RESTART_POLICY}}
+    restart: {{ DOCKER_RESTART_POLICY }}
 {% if application_id | has_env %}
     env_file:
       - "{{docker_compose.files.env}}"

View File

@@ -16,7 +16,7 @@
         - CMD-SHELL
         - >
           if [ ! -f /tmp/email_sent ]; then
-            echo 'Subject: testmessage from {{domains | get_domain(application_id)}}\n\nSUCCESSFULL' | msmtp -t {{users.blackhole.email}} && touch /tmp/email_sent;
+            echo 'Subject: testmessage from {{ domains | get_domain(application_id) }}\n\nSUCCESSFULL' | msmtp -t {{ users.blackhole.email }} && touch /tmp/email_sent;
           fi &&
           curl -f http://localhost:80/ || exit 1
       interval: 1m

View File

@ -1,5 +0,0 @@
# Docker Server
This role is part of the [Infinito.Nexus Project](https://s.infinito.nexus/code), maintained and developed by [Kevin Veen-Birkenbach](https://www.veen.world/).
Enjoy using this role and happy containerizing! 🎉

View File

@ -1,2 +0,0 @@
# Todos
- Add cleanup service for docker system prune -f

View File

@ -1,3 +0,0 @@
---
- name: docker restart
service: name=docker.service state=restarted enabled=yes

View File

@ -1,26 +0,0 @@
- name: Include backup, repair, health and user dependencies
include_role:
name: "{{ item }}"
loop:
- sys-bkp-docker-2-loc
- user-administrator
- sys-hlth-docker-container
- sys-hlth-docker-volumes
- sys-rpr-docker-soft
- sys-rpr-docker-hard
- name: docker & docker compose install
community.general.pacman:
name:
- 'docker'
- 'docker-compose'
state: present
notify: docker restart
- name: "create {{path_docker_compose_instances}}"
file:
path: "{{path_docker_compose_instances}}"
state: directory
mode: 0700
owner: root
group: root

View File

@@ -30,7 +30,7 @@
   set_fact:
     service_name: "{{ role_name }}"
-- name: "include role for sys-timer for {{service_name}}"
+- name: "include role for sys-timer for {{ service_name }}"
   include_role:
     name: sys-timer
   vars:

View File

@@ -7,3 +7,5 @@
   shell: |
     source ~/.venvs/pkgmgr/bin/activate
     pkgmgr update pkgmgr
+  register: pkgmgr_update
+  changed_when: "'already up to date' not in (pkgmgr_update.stdout | lower)"

View File

@@ -5,11 +5,25 @@
   when: run_once_pkgmgr_install is not defined
 - name: update {{ package_name }}
-  shell: |
+  ansible.builtin.shell: |
     source ~/.venvs/pkgmgr/bin/activate
     pkgmgr update {{ package_name }} --dependencies --clone-mode https
+  args:
+    executable: /bin/bash
   notify: "{{ package_notify | default(omit,true) }}"
   register: pkgmgr_update_result
-  changed_when: "'No command defined and neither main.sh nor main.py found' not in pkgmgr_update_result.stdout"
-  failed_when: pkgmgr_update_result.rc != 0 and 'No command defined and neither main.sh nor main.py found' not in pkgmgr_update_result.stdout
+  # Mark changed only if it's not "already up to date" and not "no command defined..."
+  changed_when: >
+    ('already up to date' not in ((pkgmgr_update_result.stdout | default('') | lower)
+    ~ ' ' ~ (pkgmgr_update_result.stderr | default('') | lower)))
+    and
+    ('no command defined' not in ((pkgmgr_update_result.stdout | default('') | lower)
+    ~ ' ' ~ (pkgmgr_update_result.stderr | default('') | lower)))
+  # Fail only on real errors; allow the "no command defined..." case
+  failed_when: >
+    (pkgmgr_update_result.rc != 0)
+    and
+    ('no command defined' not in ((pkgmgr_update_result.stdout | default('') | lower)
+    ~ ' ' ~ (pkgmgr_update_result.stderr | default('') | lower)))
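
The net effect of the reworked conditions, illustrated with sample outputs (illustrative strings, not from this commit): the task only reports "changed" when neither "already up to date" nor "no command defined" appears in stdout/stderr, and only fails on a non-zero return code that is not the "no command defined" case.

# rc=0, stdout "... already up to date"                                    -> ok, not changed
# rc=0, stdout "updating {{ package_name }} ..."                           -> ok, changed
# rc=1, stdout "No command defined and neither main.sh nor main.py found"  -> ok, not changed, not failed
# rc=1, any other output                                                   -> failed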

View File

@@ -1 +1 @@
-configuration_destination: "{{nginx.directories.http.servers}}{{domain}}.conf"
+configuration_destination: "{{ NGINX.DIRECTORIES.HTTP.SERVERS }}{{ domain }}.conf"

View File

@@ -1,7 +1,7 @@
 ---
 - name: "restart srv-proxy-6-6-tls-deploy service"
   systemd:
-    name: srv-proxy-6-6-tls-deploy.{{application_id}}{{ SYS_SERVICE_SUFFIX }}
+    name: srv-proxy-6-6-tls-deploy.{{ application_id }}{{ SYS_SERVICE_SUFFIX }}
     state: restarted
     enabled: yes
     daemon_reload: yes

View File

@@ -14,14 +14,14 @@
 - name: configure srv-proxy-6-6-tls-deploy service
   template:
     src: "srv-proxy-6-6-tls-deploy.service.j2"
-    dest: "/etc/systemd/system/srv-proxy-6-6-tls-deploy.{{application_id}}{{ SYS_SERVICE_SUFFIX }}"
+    dest: "/etc/systemd/system/srv-proxy-6-6-tls-deploy.{{ application_id }}{{ SYS_SERVICE_SUFFIX }}"
   notify: restart srv-proxy-6-6-tls-deploy service
-- name: "include role for sys-timer for {{service_name}}"
+- name: "include role for sys-timer for {{ service_name }}"
   include_role:
     name: sys-timer
   vars:
     on_calendar: "{{on_calendar_deploy_certificates}}"
-    service_name: "srv-proxy-6-6-tls-deploy.{{application_id}}"
+    service_name: "srv-proxy-6-6-tls-deploy.{{ application_id }}"
     persistent: "true"

View File

@@ -4,4 +4,4 @@ OnFailure=sys-alm-compose.infinito@%n.service
 [Service]
 Type=oneshot
-ExecStart=/usr/bin/bash {{path_administrator_scripts}}/srv-proxy-6-6-tls-deploy.sh {{ssl_cert_folder}} {{docker_compose.directories.instance}}
+ExecStart=/usr/bin/bash {{ PATH_ADMINISTRATOR_SCRIPTS }}/srv-proxy-6-6-tls-deploy.sh {{ssl_cert_folder}} {{docker_compose.directories.instance}}

View File

@@ -1 +1 @@
-nginx_docker_cert_deploy_script: "{{path_administrator_scripts}}srv-proxy-6-6-tls-deploy.sh"
+nginx_docker_cert_deploy_script: "{{ PATH_ADMINISTRATOR_SCRIPTS }}srv-proxy-6-6-tls-deploy.sh"

View File

@@ -21,7 +21,7 @@ location {{location}}
     # WebSocket specific header
     proxy_http_version 1.1;
     proxy_set_header Upgrade $http_upgrade;
-    proxy_set_header Connection $connection_upgrade;
+    proxy_set_header Connection "upgrade";
     # timeouts
     proxy_connect_timeout 5s;

View File

@@ -1,6 +1,6 @@
 server
 {
-    server_name {{domain}};
+    server_name {{ domain }};
 {% if applications | get_app_conf(application_id, 'features.oauth2', False) %}
     {% include 'roles/web-app-oauth2-proxy/templates/endpoint.conf.j2'%}

View File

@@ -2,7 +2,7 @@
   include_role:
     name: '{{ item }}'
   loop:
-    - srv-web-7-7-certbot
+    - sys-svc-certbot
     - srv-web-7-4-core
     - sys-alm-compose
@@ -22,7 +22,7 @@
   set_fact:
     service_name: "{{ role_name }}"
-- name: "include role for sys-timer for {{service_name}}"
+- name: "include role for sys-timer for {{ service_name }}"
   include_role:
     name: sys-timer
   vars:

View File

@ -0,0 +1,2 @@
# To-dos
- It could make sense to merge this role with svc-prx-openresty

View File

@ -1,3 +1,27 @@
- name: "cleanup (if enabled)"
include_tasks: 02_cleanup.yml
when: >
MODE_CLEANUP | bool or
MODE_RESET | bool
- name: "reset (if enabled)"
include_tasks: 03_reset.yml
when: MODE_RESET | bool
- name: "Load docker compose handlers"
include_tasks: "{{ playbook_dir }}/tasks/utils/load_handlers.yml"
vars:
handler_role_name: "docker-compose"
- name: "Include tasks to create directories"
include_tasks: 04_directories.yml
- name: create nginx config file
template:
src: nginx.conf.j2
dest: "{{ NGINX.FILES.CONFIGURATION }}"
notify: docker compose up
- name: Include health dependencies - name: Include health dependencies
include_role: include_role:
name: "{{ item }}" name: "{{ item }}"
@ -5,6 +29,7 @@
- sys-hlth-webserver - sys-hlth-webserver
- sys-hlth-csp - sys-hlth-csp
vars: vars:
# Extra flush is for performance reasons not necessary
flush_handlers: false flush_handlers: false
- name: Include openresty - name: Include openresty
@ -13,50 +38,12 @@
# Inside openresty their is a validation that it doesn't run multiple times # Inside openresty their is a validation that it doesn't run multiple times
include_role: include_role:
name: svc-prx-openresty name: svc-prx-openresty
public: false
# Explicit set to guaranty that application_id will not be overwritten. # Explicit set to guaranty that application_id will not be overwritten.
# Should be anyhow the default case # Should be anyhow the default case
public: false
vars:
# Flush openresty handler on first run, so that openresty is up, before openresty related handlers are triggered
flush_handlers: true
when: run_once_svc_prx_openresty is not defined when: run_once_svc_prx_openresty is not defined
- name: "reset (if enabled)"
include_tasks: 02_reset.yml
when: MODE_RESET | bool
- name: Ensure nginx configuration directories are present
file:
path: "{{ item }}"
state: directory
owner: "{{nginx.user}}"
group: "{{nginx.user}}"
mode: '0755'
recurse: yes
loop: >
{{
[ nginx.directories.configuration ] +
( nginx.directories.http.values() | list ) +
[ nginx.directories.streams ]
}}
- name: Ensure nginx data storage directories are present
file:
path: "{{ item }}"
state: directory
recurse: yes
owner: "{{nginx.user}}"
group: "{{nginx.user}}"
mode: '0755'
loop: >
{{ nginx.directories.data.values() | list }}
async: "{{ ASYNC_TIME if ASYNC_ENABLED | bool else omit }}"
poll: "{{ ASYNC_POLL if ASYNC_ENABLED | bool else omit }}"
- name: "Include tasks to create cache directories"
include_tasks: 03_cache_directories.yml
when: run_once_nginx_reverse_proxy is not defined
- name: create nginx config file
template:
src: nginx.conf.j2
dest: "{{ nginx.files.configuration }}"
notify: restart openresty

View File

@ -0,0 +1,8 @@
- name: Cleanup all NGINX cache directories
become: true
ansible.builtin.file:
path: "{{ item.value }}"
state: absent
loop: "{{ NGINX.DIRECTORIES.CACHE | dict2items }}"
loop_control:
label: "{{ item.key }}"

View File

@ -1,4 +0,0 @@
- name: "Delete {{nginx.directories.configuration}} directory, when MODE_RESET"
file:
path: "{{ nginx.directories.configuration }}"
state: absent

View File

@ -1,28 +0,0 @@
- name: Cleanup all NGINX cache directories
become: true
ansible.builtin.file:
path: "{{ item.value }}"
state: absent
when:
- MODE_CLEANUP | bool
loop: "{{ nginx.directories.cache | dict2items }}"
loop_control:
label: "{{ item.key }}"
- name: Ensure all NGINX cache directories exist
become: true
ansible.builtin.file:
path: "{{ item.value }}"
state: directory
owner: "{{ nginx.user }}"
group: "{{ nginx.user }}"
mode: '0700'
loop: "{{ nginx.directories.cache | dict2items }}"
loop_control:
label: "{{ item.key }}"
async: "{{ ASYNC_TIME if ASYNC_ENABLED | bool else omit }}"
poll: "{{ ASYNC_POLL if ASYNC_ENABLED | bool else omit }}"
- name: run the nginx_reverse_proxy tasks once
set_fact:
run_once_nginx_reverse_proxy: true

View File

@ -0,0 +1,9 @@
- name: Delete NGINX config paths
file:
path: "{{ item }}"
state: absent
loop:
- "{{ NGINX.DIRECTORIES.CONFIGURATION }}"
- "{{ NGINX.FILES.CONFIGURATION }}"
loop_control:
label: "{{ item }}"

View File

@ -0,0 +1,36 @@
- name: Ensure nginx configuration directories are present
file:
path: "{{ item }}"
state: directory
owner: "{{ NGINX.USER }}"
group: "{{ NGINX.USER }}"
mode: '0755'
recurse: yes
loop: >
{{
( NGINX.DIRECTORIES.HTTP.values() | list ) +
[ NGINX.DIRECTORIES.STREAMS ]
}}
- name: Ensure all NGINX cache directories exist
become: true
ansible.builtin.file:
path: "{{ item.value }}"
state: directory
owner: "{{ NGINX.USER }}"
group: "{{ NGINX.USER }}"
mode: '0700'
loop: "{{ NGINX.DIRECTORIES.CACHE | dict2items }}"
loop_control:
label: "{{ item.key }}"
- name: Ensure nginx data storage directories are present
file:
path: "{{ item }}"
state: directory
recurse: yes
owner: "{{ NGINX.USER }}"
group: "{{ NGINX.USER }}"
mode: '0755'
loop: >
{{ NGINX.DIRECTORIES.DATA.values() | list }}

View File

@@ -14,8 +14,8 @@ http
     default_type text/html;
 {# caching #}
-    proxy_cache_path {{ nginx.directories.cache.general }} levels=1:2 keys_zone=cache:20m max_size=20g inactive=14d use_temp_path=off;
-    proxy_cache_path {{ nginx.directories.cache.image }} levels=1:2 keys_zone=imgcache:10m inactive=60m use_temp_path=off;
+    proxy_cache_path {{ NGINX.DIRECTORIES.CACHE.GENERAL }} levels=1:2 keys_zone=cache:20m max_size=20g inactive=14d use_temp_path=off;
+    proxy_cache_path {{ NGINX.DIRECTORIES.CACHE.IMAGE }} levels=1:2 keys_zone=imgcache:10m inactive=60m use_temp_path=off;
     # --------------------------------------------------------------------------------
     # Tweak the hash table used to store your server_name entries:
@@ -54,12 +54,12 @@ http
     gzip_types application/atom+xml application/javascript application/xml+rss application/x-javascript application/json application/ld+json application/manifest+json application/rss+xml application/vnd.geo+json application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-app-manifest+json application/xhtml+xml application/xml font/opentype image/bmp image/svg+xml image/x-icon text/cache-manifest text/css text/plain text/vcard text/vnd.rim.location.xloc text/vtt text/x-component text/x-cross-domain-policy text/javascript text/xml;
     types_hash_max_size 4096;
-{% for dir in nginx.directories.http.values() %}
+{% for dir in NGINX.DIRECTORIES.HTTP.values() %}
     include {{ dir }}*.conf;
 {% endfor %}
 }
 # For port proxies
 stream{
-    include {{nginx.directories.streams}}*.conf;
+    include {{NGINX.DIRECTORIES.STREAMS}}*.conf;
 }

View File

@@ -1,9 +1,9 @@
 # run_once_srv_web_7_6_composer: deactivated
-- name: "include role srv-web-7-7-inj-compose for {{domain}}"
+- name: "include role srv-web-7-7-inj-compose for {{ domain }}"
   include_role:
     name: srv-web-7-7-inj-compose
-- name: "include role srv-web-6-6-tls-core for {{domain}}"
+- name: "include role srv-web-6-6-tls-core for {{ domain }}"
   include_role:
     name: srv-web-6-6-tls-core

View File

@ -1,41 +0,0 @@
- name: install certbot
community.general.pacman:
name: certbot
state: present
when: run_once_srv_web_7_7_certbot is not defined
- name: install certbot DNS plugin
community.general.pacman:
name: "certbot-dns-{{ CERTBOT_ACME_CHALLENGE_METHOD }}"
state: present
when:
- run_once_srv_web_7_7_certbot is not defined
- CERTBOT_ACME_CHALLENGE_METHOD != 'webroot'
- name: Ensure /etc/certbot directory exists
file:
path: "{{ CERTBOT_CREDENTIALS_DIR }}"
state: directory
owner: root
group: root
mode: '0755'
when:
- run_once_srv_web_7_7_certbot is not defined
- CERTBOT_ACME_CHALLENGE_METHOD != 'webroot'
- name: Install plugin credentials file
copy:
dest: "{{ CERTBOT_CREDENTIALS_FILE }}"
content: |
dns_{{ CERTBOT_ACME_CHALLENGE_METHOD }}_api_token = {{ CERTBOT_DNS_API_TOKEN }}
owner: root
group: root
mode: '0600'
when:
- run_once_srv_web_7_7_certbot is not defined
- CERTBOT_ACME_CHALLENGE_METHOD != 'webroot'
- name: run the certbot role once
set_fact:
run_once_srv_web_7_7_certbot: true
when: run_once_srv_web_7_7_certbot is not defined

View File

@ -1,3 +1,12 @@
- name: Set inj_enabled dictionary
set_fact:
inj_enabled:
javascript: "{{ applications | get_app_conf(application_id, 'features.javascript', False) }}"
logout: "{{ (applications | get_app_conf(application_id, 'features.logout', False) or domain == PRIMARY_DOMAIN) }}"
css: "{{ applications | get_app_conf(application_id, 'features.css', False) }}"
matomo: "{{ applications | get_app_conf(application_id, 'features.matomo', False) }}"
desktop: "{{ applications | get_app_conf(application_id, 'features.desktop', False) }}"
- block: - block:
- name: Include dependency 'srv-web-7-4-core' - name: Include dependency 'srv-web-7-4-core'
include_role: include_role:
@ -6,28 +15,19 @@
- include_tasks: utils/run_once.yml - include_tasks: utils/run_once.yml
when: run_once_srv_web_7_7_inj_compose is not defined when: run_once_srv_web_7_7_inj_compose is not defined
- name: Set inj_enabled dictionary
set_fact:
inj_enabled:
javascript: "{{ applications | get_app_conf(application_id, 'features.javascript', False) }}"
logout: "{{ (applications | get_app_conf(application_id, 'features.logout', False) or domain == PRIMARY_DOMAIN) }}"
css: "{{ applications | get_app_conf(application_id, 'features.css', False) }}"
matomo: "{{ applications | get_app_conf(application_id, 'features.matomo', False) }}"
port_ui: "{{ applications | get_app_conf(application_id, 'features.desktop', False) }}"
- name: "Activate Portfolio iFrame notifier for {{ domain }}" - name: "Activate Portfolio iFrame notifier for {{ domain }}"
include_role: include_role:
name: srv-web-7-7-inj-desktop name: srv-web-7-7-inj-desktop
public: true # Expose variables so that they can be used in template public: true # Vars used in templates
when: inj_enabled.port_ui when: inj_enabled.desktop
- name: "Load CDN for {{domain}}" - name: "Load CDN for {{ domain }}"
include_role: include_role:
name: web-svc-cdn name: web-svc-cdn
public: false public: false
# ATM just the Logout is using the CDN.
when: when:
- inj_enabled.logout - inj_enabled.logout
- inj_enabled.desktop
- application_id != 'web-svc-cdn' - application_id != 'web-svc-cdn'
- run_once_web_svc_cdn is not defined - run_once_web_svc_cdn is not defined
@ -41,23 +41,14 @@
vars: vars:
handler_role_name: "{{ item }}" handler_role_name: "{{ item }}"
- name: Set inj_enabled dictionary - name: "Activate Corporate CSS for {{ domain }}"
set_fact:
inj_enabled:
javascript: "{{ applications | get_app_conf(application_id, 'features.javascript', False) }}"
logout: "{{ (applications | get_app_conf(application_id, 'features.logout', False) or domain == PRIMARY_DOMAIN) }}"
css: "{{ applications | get_app_conf(application_id, 'features.css', False) }}"
matomo: "{{ applications | get_app_conf(application_id, 'features.matomo', False) }}"
port_ui: "{{ applications | get_app_conf(application_id, 'features.desktop', False) }}"
- name: "Activate Corporate CSS for {{domain}}"
include_role: include_role:
name: srv-web-7-7-inj-css name: srv-web-7-7-inj-css
when: when:
- inj_enabled.css - inj_enabled.css
- run_once_srv_web_7_7_inj_css is not defined - run_once_srv_web_7_7_inj_css is not defined
- name: "Activate Matomo Tracking for {{domain}}" - name: "Activate Matomo Tracking for {{ domain }}"
include_role: include_role:
name: srv-web-7-7-inj-matomo name: srv-web-7-7-inj-matomo
when: inj_enabled.matomo when: inj_enabled.matomo
@ -70,4 +61,5 @@
- name: "Activate logout proxy for {{ domain }}" - name: "Activate logout proxy for {{ domain }}"
include_role: include_role:
name: srv-web-7-7-inj-logout name: srv-web-7-7-inj-logout
public: true # Vars used in templates
when: inj_enabled.logout when: inj_enabled.logout

View File

@@ -44,7 +44,7 @@ body_filter_by_lua_block {
     local head_snippets = {}
 {% for head_feature in ['css', 'matomo', 'desktop', 'javascript', 'logout' ] %}
-{% if applications | get_app_conf(application_id, 'features.' ~ head_feature, false) | bool %}
+{% if applications | get_app_conf(application_id, 'features.' ~ head_feature, false) %}
     head_snippets[#head_snippets + 1] = [=[
 {%- include "roles/srv-web-7-7-inj-" ~ head_feature ~ "/templates/head_sub.j2" -%}
     ]=]
@@ -59,7 +59,7 @@ body_filter_by_lua_block {
     local body_snippets = {}
 {% for body_feature in ['matomo', 'logout', 'desktop'] %}
-{% if applications | get_app_conf(application_id, 'features.' ~ body_feature, false) | bool %}
+{% if applications | get_app_conf(application_id, 'features.' ~ body_feature, false) %}
     body_snippets[#body_snippets + 1] = [=[
 {%- include "roles/srv-web-7-7-inj-" ~ body_feature ~ "/templates/body_sub.j2" -%}
     ]=]

View File

@@ -15,8 +15,8 @@
   template:
     src: global.css.j2
     dest: "{{ global_css_destination }}"
-    owner: "{{ nginx.user }}"
-    group: "{{ nginx.user }}"
+    owner: "{{ NGINX.USER }}"
+    group: "{{ NGINX.USER }}"
     mode: '0644'
 - name: Get stat for global.css

View File

@@ -1,3 +1,3 @@
 location = /global.css {
-    root {{nginx.directories.data.cdn}};
+    root {{NGINX.DIRECTORIES.DATA.CDN}};
 }

View File

@@ -1,4 +1,4 @@
-global_css_destination: "{{nginx.directories.data.cdn}}global.css"
+global_css_destination: "{{NGINX.DIRECTORIES.DATA.CDN}}global.css"
 global_css_base_color: "{{ design.css.colors.base }}"
 global_css_count: 7
 global_css_shades: 100

View File

@@ -1,14 +1,14 @@
 - name: Deploy iframe-handler.js
   template:
     src: iframe-handler.js.j2
-    dest: "{{ inj_port_ui_js_destination }}"
-    owner: "{{ nginx.user }}"
-    group: "{{ nginx.user }}"
+    dest: "{{ INJ_DESKTOP_JS_FILE_DESTINATION }}"
+    owner: "{{ NGINX.USER }}"
+    group: "{{ NGINX.USER }}"
     mode: '0644'
 - name: Get stat for iframe-handler.js
   stat:
-    path: "{{ inj_port_ui_js_destination }}"
+    path: "{{ INJ_DESKTOP_JS_FILE_DESTINATION }}"
   register: inj_port_ui_js_stat
 - name: Set inj_port_ui_js_version

View File

@@ -1 +1 @@
-<script src="{{ domains | get_url('web-svc-cdn', WEB_PROTOCOL) }}/{{ inj_port_ui_file_name }}?{{ inj_port_ui_js_version }}"></script>
+<script src="{{ domains | get_url('web-svc-cdn', WEB_PROTOCOL) }}/{{ INJ_DESKTOP_JS_FILE_NAME }}?{{ inj_port_ui_js_version }}"></script>

View File

@@ -1,2 +1,2 @@
-inj_port_ui_file_name: "iframe-handler.js"
-inj_port_ui_js_destination: "{{ [ nginx.directories.data.cdn, inj_port_ui_file_name ] | path_join }}"
+INJ_DESKTOP_JS_FILE_NAME: "iframe-handler.js"
+INJ_DESKTOP_JS_FILE_DESTINATION: "{{ [ NGINX.DIRECTORIES.DATA.CDN, INJ_DESKTOP_JS_FILE_NAME ] | path_join }}"

View File

@@ -11,11 +11,11 @@
   set_fact:
     javascript_code: "{{ lookup('template', modifier_javascript_template_file) }}"
-- name: "Collapse Javascript code into one-liner for '{{application_id}}'"
+- name: "Collapse Javascript code into one-liner for '{{ application_id }}'"
   set_fact:
     javascript_code_one_liner: "{{ javascript_code | to_one_liner }}"
-- name: "Append Javascript CSP hash for '{{application_id}}'"
+- name: "Append Javascript CSP hash for '{{ application_id }}'"
   set_fact:
     applications: "{{ applications | append_csp_hash(application_id, javascript_code_one_liner) }}"
   changed_when: false

View File

@@ -5,4 +5,4 @@
     - run_once_srv_web_7_4_core is not defined
 - name: "deploy the logout.js"
-  include_tasks: "deploy.yml"
+  include_tasks: "02_deploy.yml"

View File

@ -0,0 +1,16 @@
- name: Deploy logout.js
template:
src: logout.js.j2
dest: "{{ INJ_LOGOUT_JS_DESTINATION }}"
owner: "{{ NGINX.USER }}"
group: "{{ NGINX.USER }}"
mode: '0644'
- name: Get stat for logout.js
stat:
path: "{{ INJ_LOGOUT_JS_DESTINATION }}"
register: INJ_LOGOUT_JS_STAT
- name: Set INJ_LOGOUT_JS_VERSION
set_fact:
INJ_LOGOUT_JS_VERSION: "{{ INJ_LOGOUT_JS_STAT.stat.mtime }}"

View File

@ -1,16 +0,0 @@
- name: Deploy logout.js
template:
src: logout.js.j2
dest: "{{ inj_logout_js_destination }}"
owner: "{{ nginx.user }}"
group: "{{ nginx.user }}"
mode: '0644'
- name: Get stat for logout.js
stat:
path: "{{ inj_logout_js_destination }}"
register: inj_logout_js_stat
- name: Set inj_logout_js_version
set_fact:
inj_logout_js_version: "{{ inj_logout_js_stat.stat.mtime }}"

View File

@@ -8,11 +8,11 @@
   set_fact:
     logout_code: "{{ lookup('template', 'logout_one_liner.js.j2') }}"
-- name: "Collapse logout code into one-liner for '{{application_id}}'"
+- name: "Collapse logout code into one-liner for '{{ application_id }}'"
   set_fact:
     logout_code_one_liner: "{{ logout_code | to_one_liner }}"
-- name: "Append logout CSP hash for '{{application_id}}'"
+- name: "Append logout CSP hash for '{{ application_id }}'"
   set_fact:
     applications: "{{ applications | append_csp_hash(application_id, logout_code_one_liner) }}"
   changed_when: false

View File

@@ -1 +1 @@
-<script src="{{ domains | get_url('web-svc-cdn', WEB_PROTOCOL) }}/logout.js?{{ inj_logout_js_version }}"></script>
+<script src="{{ domains | get_url('web-svc-cdn', WEB_PROTOCOL) }}/{{ INJ_LOGOUT_JS_FILE_NAME }}?{{ INJ_LOGOUT_JS_VERSION }}"></script>

View File

@@ -1,2 +1,2 @@
-inj_logout_file_name: "logout.js"
-inj_logout_js_destination: "{{ [ nginx.directories.data.cdn, inj_logout_file_name ] | path_join }}"
+INJ_LOGOUT_JS_FILE_NAME: "logout.js"
+INJ_LOGOUT_JS_DESTINATION: "{{ [ NGINX.DIRECTORIES.DATA.CDN, INJ_LOGOUT_JS_FILE_NAME ] | path_join }}"

View File

@@ -6,7 +6,7 @@
 - name: create nginx letsencrypt config file
   template:
     src: "letsencrypt.conf.j2"
-    dest: "{{nginx.directories.http.global}}letsencrypt.conf"
+    dest: "{{NGINX.DIRECTORIES.HTTP.GLOBAL}}letsencrypt.conf"
   notify: restart openresty
 - name: "Set CAA records for all base domains"

View File

@@ -6,7 +6,6 @@
     - dev-git
     - sys-alm-compose
     - sys-lock
-    - user-root
     - sys-rst-daemon
 - include_tasks: utils/run_once.yml
   when: run_once_svc_bkp_rmt_2_loc is not defined
@@ -39,7 +38,7 @@
   set_fact:
     service_name: "{{ role_name }}"
-- name: "include role for sys-timer for {{service_name}}"
+- name: "include role for sys-timer for {{ service_name }}"
   include_role:
     name: sys-timer
   vars:

View File

@@ -4,5 +4,5 @@ OnFailure=sys-alm-compose.infinito@%n.service sys-cln-faild-bkps{{ SYS_SERVICE_S
 [Service]
 Type=oneshot
-ExecStartPre=/bin/sh -c '/usr/bin/python {{ path_system_lock_script }} {{ system_maintenance_services | join(' ') }} --ignore {{system_maintenance_backup_services| join(' ') }} --timeout "{{system_maintenance_lock_timeout_backup_services}}"'
+ExecStartPre=/bin/sh -c '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ system_maintenance_services | join(' ') }} --ignore {{system_maintenance_backup_services| join(' ') }} --timeout "{{system_maintenance_lock_timeout_backup_services}}"'
 ExecStart=/bin/sh -c '/usr/bin/bash {{docker_backup_remote_to_local_folder}}sys-bkp-rmt-2-loc-multi-provider.sh'

View File

@@ -1,3 +1,3 @@
 application_id: svc-bkp-rmt-2-loc
-docker_backup_remote_to_local_folder: '{{ path_administrator_scripts }}{{ application_id }}/'
+docker_backup_remote_to_local_folder: '{{ PATH_ADMINISTRATOR_SCRIPTS }}{{ application_id }}/'
 rmt2loc_backup_providers: "{{ applications | get_app_conf(application_id, 'backup_providers') }}"

View File

@@ -4,16 +4,16 @@
   include_role:
     name: docker-compose
-- name: Create {{domains | get_domain(application_id)}}.conf if LDAP is exposed to internet
+- name: Create {{ domains | get_domain(application_id) }}.conf if LDAP is exposed to internet
   template:
     src: "nginx.stream.conf.j2"
-    dest: "{{nginx.directories.streams}}{{domains | get_domain(application_id)}}.conf"
+    dest: "{{NGINX.DIRECTORIES.STREAMS}}{{ domains | get_domain(application_id) }}.conf"
   notify: restart openresty
   when: applications | get_app_conf(application_id, 'network.public', True) | bool
-- name: Remove {{domains | get_domain(application_id)}}.conf if LDAP is not exposed to internet
+- name: Remove {{ domains | get_domain(application_id) }}.conf if LDAP is not exposed to internet
   file:
-    path: "{{ nginx.directories.streams }}{{ domains | get_domain(application_id) }}.conf"
+    path: "{{ NGINX.DIRECTORIES.STREAMS }}{{ domains | get_domain(application_id) }}.conf"
     state: absent
   when: not applications | get_app_conf(application_id, 'network.public', True) | bool

View File

@@ -1,5 +1,5 @@
 credentials:
-  postgres_password:
+  POSTGRES_PASSWORD:
     description: "Password for the PostgreSQL superuser 'postgres'"
     algorithm: "bcrypt"
     validation: "^\\$2[aby]\\$.{56}$"

View File

@@ -1,26 +1,21 @@
-- name: Include dependency 'docker-core'
+- name: Include dependency 'sys-svc-docker'
   include_role:
-    name: docker-core
-  when: run_once_docker_core is not defined
+    name: sys-svc-docker
+  when: run_once_sys_svc_docker is not defined
 - name: Create Docker network for PostgreSQL
   community.docker.docker_network:
-    name: "{{ postgres_network_name }}"
+    name: "{{ POSTGRES_NETWORK_NAME }}"
     state: present
     ipam_config:
-      - subnet: "{{ postgres_subnet }}"
+      - subnet: "{{ POSTGRES_SUBNET }}"
 - name: "include docker-compose role"
   include_role:
     name: docker-compose
+  vars:
+    docker_compose_flush_handlers: true
-- name: Wait for Postgres inside the container
-  shell: "docker exec {{ postgres_name }} pg_isready -U postgres"
-  register: pg_ready
-  until: pg_ready.rc == 0
-  retries: 30
-  delay: 5
 - name: install python-psycopg2
   community.general.pacman:

View File

@ -1,11 +1,10 @@
--- ---
- name: "Wait until Postgres is listening on port {{ postgres_port }}" - name: Wait for Postgres inside the container
wait_for: shell: "docker exec {{ POSTGRES_CONTAINER }} pg_isready -U postgres"
host: "{{ postgres_local_host }}" register: pg_ready
port: "{{ postgres_port }}" until: pg_ready.rc == 0
retries: 30
delay: 5 delay: 5
timeout: 300
state: started
# 1) Create the database # 1) Create the database
- name: "Create database: {{ database_name }}" - name: "Create database: {{ database_name }}"
@ -13,13 +12,13 @@
name: "{{ database_name }}" name: "{{ database_name }}"
state: present state: present
login_user: postgres login_user: postgres
login_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}" login_password: "{{ applications | get_app_conf(application_id, 'credentials.POSTGRES_PASSWORD', True) }}"
login_host: "{{ postgres_local_host }}" login_host: "{{ POSTGRES_LOCAL_HOST }}"
login_port: "{{ postgres_port }}" login_port: "{{ POSTGRES_PORT }}"
register: postgresql_result register: postgresql_result
until: postgresql_result is succeeded until: postgresql_result is succeeded
retries: "{{ postgres_retry_retries }}" retries: "{{ POSTGRES_RETRIES }}"
delay: "{{ postgres_retry_delay }}" delay: "{{ POSTGRES_DELAY }}"
# 2) Create the database user (with password) # 2) Create the database user (with password)
- name: "Create database user: {{ database_username }}" - name: "Create database user: {{ database_username }}"
@ -29,29 +28,29 @@
db: "{{ database_name }}" db: "{{ database_name }}"
state: present state: present
login_user: postgres login_user: postgres
login_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}" login_password: "{{ applications | get_app_conf(application_id, 'credentials.POSTGRES_PASSWORD', True) }}"
login_host: "{{ postgres_local_host }}" login_host: "{{ POSTGRES_LOCAL_HOST }}"
login_port: "{{ postgres_port }}" login_port: "{{ POSTGRES_PORT }}"
register: postgresql_result register: postgresql_result
until: postgresql_result is succeeded until: postgresql_result is succeeded
retries: "{{ postgres_retry_retries }}" retries: "{{ POSTGRES_RETRIES }}"
delay: "{{ postgres_retry_delay }}" delay: "{{ POSTGRES_DELAY }}"
# 3) Enable LOGIN for the role (removes NOLOGIN) # 3) Enable LOGIN for the role (removes NOLOGIN)
- name: "Enable login for role {{ database_username }}" - name: "Enable login for role {{ database_username }}"
community.postgresql.postgresql_query: community.postgresql.postgresql_query:
db: postgres db: postgres
login_user: postgres login_user: postgres
login_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}" login_password: "{{ applications | get_app_conf(application_id, 'credentials.POSTGRES_PASSWORD', True) }}"
login_host: "{{ postgres_local_host }}" login_host: "{{ POSTGRES_LOCAL_HOST }}"
login_port: "{{ postgres_port }}" login_port: "{{ POSTGRES_PORT }}"
query: | query: |
ALTER ROLE "{{ database_username }}" ALTER ROLE "{{ database_username }}"
WITH LOGIN; WITH LOGIN;
register: postgresql_result register: postgresql_result
until: postgresql_result is succeeded until: postgresql_result is succeeded
retries: "{{ postgres_retry_retries }}" retries: "{{ POSTGRES_RETRIES }}"
delay: "{{ postgres_retry_delay }}" delay: "{{ POSTGRES_DELAY }}"
# 4) Grant ALL privileges on all tables in the public schema # 4) Grant ALL privileges on all tables in the public schema
- name: "Grant ALL privileges on tables in public schema to {{ database_username }}" - name: "Grant ALL privileges on tables in public schema to {{ database_username }}"
@ -64,13 +63,13 @@
schema: public schema: public
state: present state: present
login_user: postgres login_user: postgres
login_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}" login_password: "{{ applications | get_app_conf(application_id, 'credentials.POSTGRES_PASSWORD', True) }}"
login_host: "{{ postgres_local_host }}" login_host: "{{ POSTGRES_LOCAL_HOST }}"
login_port: "{{ postgres_port }}" login_port: "{{ POSTGRES_PORT }}"
register: postgresql_result register: postgresql_result
until: postgresql_result is succeeded until: postgresql_result is succeeded
retries: "{{ postgres_retry_retries }}" retries: "{{ POSTGRES_RETRIES }}"
delay: "{{ postgres_retry_delay }}" delay: "{{ POSTGRES_DELAY }}"
# 5) Grant ALL privileges at the database level # 5) Grant ALL privileges at the database level
- name: "Grant all privileges on database {{ database_name }} to {{ database_username }}" - name: "Grant all privileges on database {{ database_name }} to {{ database_username }}"
@ -81,22 +80,22 @@
privs: ALL privs: ALL
state: present state: present
login_user: postgres login_user: postgres
login_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}" login_password: "{{ applications | get_app_conf(application_id, 'credentials.POSTGRES_PASSWORD', True) }}"
login_host: "{{ postgres_local_host }}" login_host: "{{ POSTGRES_LOCAL_HOST }}"
login_port: "{{ postgres_port }}" login_port: "{{ POSTGRES_PORT }}"
register: postgresql_result register: postgresql_result
until: postgresql_result is succeeded until: postgresql_result is succeeded
retries: "{{ postgres_retry_retries }}" retries: "{{ POSTGRES_RETRIES }}"
delay: "{{ postgres_retry_delay }}" delay: "{{ POSTGRES_DELAY }}"
# 6) Grant USAGE/CREATE on schema and set default privileges # 6) Grant USAGE/CREATE on schema and set default privileges
- name: "Set comprehensive schema privileges for {{ database_username }}" - name: "Set comprehensive schema privileges for {{ database_username }}"
community.postgresql.postgresql_query: community.postgresql.postgresql_query:
db: "{{ database_name }}" db: "{{ database_name }}"
login_user: postgres login_user: postgres
login_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}" login_password: "{{ applications | get_app_conf(application_id, 'credentials.POSTGRES_PASSWORD', True) }}"
login_host: "{{ postgres_local_host }}" login_host: "{{ POSTGRES_LOCAL_HOST }}"
login_port: "{{ postgres_port }}" login_port: "{{ POSTGRES_PORT }}"
query: | query: |
GRANT USAGE ON SCHEMA public TO "{{ database_username }}"; GRANT USAGE ON SCHEMA public TO "{{ database_username }}";
GRANT CREATE ON SCHEMA public TO "{{ database_username }}"; GRANT CREATE ON SCHEMA public TO "{{ database_username }}";
@ -104,8 +103,8 @@
GRANT ALL PRIVILEGES ON TABLES TO "{{ database_username }}"; GRANT ALL PRIVILEGES ON TABLES TO "{{ database_username }}";
register: postgresql_result register: postgresql_result
until: postgresql_result is succeeded until: postgresql_result is succeeded
retries: "{{ postgres_retry_retries }}" retries: "{{ POSTGRES_RETRIES }}"
delay: "{{ postgres_retry_delay }}" delay: "{{ POSTGRES_DELAY }}"
# 7) Ensure PostGIS and related extensions are installed (if enabled) # 7) Ensure PostGIS and related extensions are installed (if enabled)
- name: "Ensure PostGIS-related extensions are installed" - name: "Ensure PostGIS-related extensions are installed"
@ -114,9 +113,9 @@
ext: "{{ item }}" ext: "{{ item }}"
state: present state: present
login_user: postgres login_user: postgres
login_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}" login_password: "{{ applications | get_app_conf(application_id, 'credentials.POSTGRES_PASSWORD', True) }}"
login_host: "{{ postgres_local_host }}" login_host: "{{ POSTGRES_LOCAL_HOST }}"
login_port: "{{ postgres_port }}" login_port: "{{ POSTGRES_PORT }}"
loop: loop:
- postgis - postgis
- pg_trgm - pg_trgm
@ -124,8 +123,8 @@
when: postgres_gis_enabled | bool when: postgres_gis_enabled | bool
register: postgresql_result register: postgresql_result
until: postgresql_result is succeeded until: postgresql_result is succeeded
retries: "{{ postgres_retry_retries }}" retries: "{{ POSTGRES_RETRIES }}"
delay: "{{ postgres_retry_delay }}" delay: "{{ POSTGRES_DELAY }}"
# 8) Ensure pgvector (vector) extension is installed (for DiscourseAI, pgvector, …) # 8) Ensure pgvector (vector) extension is installed (for DiscourseAI, pgvector, …)
- name: "Ensure pgvector (vector) extension is installed" - name: "Ensure pgvector (vector) extension is installed"
@ -134,10 +133,10 @@
ext: vector ext: vector
state: present state: present
login_user: postgres login_user: postgres
login_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}" login_password: "{{ applications | get_app_conf(application_id, 'credentials.POSTGRES_PASSWORD', True) }}"
login_host: "{{ postgres_local_host }}" login_host: "{{ POSTGRES_LOCAL_HOST }}"
login_port: "{{ postgres_port }}" login_port: "{{ POSTGRES_PORT }}"
register: postgresql_result register: postgresql_result
until: postgresql_result is succeeded until: postgresql_result is succeeded
retries: "{{ postgres_retry_retries }}" retries: "{{ POSTGRES_RETRIES }}"
delay: "{{ postgres_retry_delay }}" delay: "{{ POSTGRES_DELAY }}"

View File

@@ -1,6 +1,9 @@
 - block:
     - include_tasks: 01_core.yml
     - include_tasks: utils/run_once.yml
+  vars:
+    # Force the flush of the pg handler on the first run
+    flush_handlers: true
   when: run_once_svc_db_postgres is not defined
 - include_tasks: "{{ playbook_dir }}/tasks/utils/load_handlers.yml"
@@ -10,4 +13,4 @@
 - name: "Initialize database for '{{ database_name }}'"
   include_tasks: 02_init.yml
-  when: postgres_init | bool
+  when: POSTGRES_INIT | bool

View File

@ -1,6 +1,6 @@
FROM {{ postgres_image }}:{{ postgres_version }} FROM {{ POSTGRES_IMAGE }}:{{ POSTGRES_VERSION }}
{% if postgres_pg_vector_enabled %} {% if POSTGRES_VECTOR_ENABLED %}
RUN apt-get update \ RUN apt-get update \
&& apt-get install -y --no-install-recommends \ && apt-get install -y --no-install-recommends \
build-essential \ build-essential \

View File

@ -1,15 +1,15 @@
{% include 'roles/docker-compose/templates/base.yml.j2' %} {% include 'roles/docker-compose/templates/base.yml.j2' %}
postgres: postgres:
container_name: "{{ postgres_name }}" container_name: "{{ POSTGRES_CONTAINER }}"
image: "{{ postgres_custom_image_name }}" image: "{{ POSTGRES_CUSTOM_IMAGE_NAME }}"
build: build:
context: . context: .
dockerfile: Dockerfile dockerfile: Dockerfile
{% include 'roles/docker-container/templates/base.yml.j2' %} {% include 'roles/docker-container/templates/base.yml.j2' %}
{% if postgres_expose_local %} {% if POSTGRES_EXPOSE_LOCAL %}
ports: ports:
- "{{ postgres_local_host }}:{{ postgres_port }}:5432" - "{{ POSTGRES_LOCAL_HOST }}:{{ POSTGRES_PORT }}:5432"
{% endif %} {% endif %}
volumes: volumes:
- "data:/var/lib/postgresql/data" - "data:/var/lib/postgresql/data"
@ -17,6 +17,6 @@
{% include 'roles/docker-compose/templates/volumes.yml.j2' %} {% include 'roles/docker-compose/templates/volumes.yml.j2' %}
data: data:
name: "{{ postgres_volume }}" name: "{{ POSTGRES_VOLUME }}"
{% include 'roles/docker-compose/templates/networks.yml.j2' %} {% include 'roles/docker-compose/templates/networks.yml.j2' %}

View File

@ -1,3 +1,3 @@
POSTGRES_PASSWORD="{{ postgres_password }}" POSTGRES_PASSWORD="{{ POSTGRES_PASSWORD }}"
# Necessary for web-app-matrix # Necessary for web-app-matrix
POSTGRES_INITDB_ARGS="--encoding=UTF8 --locale=C" POSTGRES_INITDB_ARGS="--encoding=UTF8 --locale=C"

View File

@ -8,18 +8,18 @@ docker_compose_flush_handlers: true
database_type: "{{ application_id | get_entity_name }}" database_type: "{{ application_id | get_entity_name }}"
## Postgres ## Postgres
postgres_volume: "{{ applications | get_app_conf(application_id, 'docker.volumes.data', True) }}" POSTGRES_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.data', True) }}"
postgres_name: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.name', True) }}" POSTGRES_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.name', True) }}"
postgres_image: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.image', True) }}" POSTGRES_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.image', True) }}"
postgres_subnet: "{{ networks.local['svc-db-postgres'].subnet }}" POSTGRES_SUBNET: "{{ networks.local['svc-db-postgres'].subnet }}"
postgres_network_name: "{{ applications | get_app_conf(application_id, 'docker.network', True) }}" POSTGRES_NETWORK_NAME: "{{ applications | get_app_conf(application_id, 'docker.network', True) }}"
postgres_version: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.version', True) }}" POSTGRES_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.version', True) }}"
postgres_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}" POSTGRES_PASSWORD: "{{ applications | get_app_conf(application_id, 'credentials.POSTGRES_PASSWORD', True) }}"
postgres_port: "{{ database_port | default(ports.localhost.database[ application_id ]) }}" POSTGRES_PORT: "{{ database_port | default(ports.localhost.database[ application_id ]) }}"
postgres_init: "{{ database_username is defined and database_password is defined and database_name is defined }}" POSTGRES_INIT: "{{ database_username is defined and database_password is defined and database_name is defined }}"
postgres_expose_local: True # Exposes the DB to localhost; almost always necessary POSTGRES_EXPOSE_LOCAL: True # Exposes the DB to localhost; almost always necessary
postgres_custom_image_name: "postgres_custom" POSTGRES_CUSTOM_IMAGE_NAME: "postgres_custom"
postgres_local_host: "127.0.0.1" POSTGRES_LOCAL_HOST: "127.0.0.1"
postgres_pg_vector_enabled: True # Required by Discourse; it probably makes sense to turn this into a configuration option in config/main.yml later POSTGRES_VECTOR_ENABLED: True # Required by Discourse; it probably makes sense to turn this into a configuration option in config/main.yml later
postgres_retry_retries: 5 POSTGRES_RETRIES: 5
postgres_retry_delay: 2 POSTGRES_DELAY: 2

View File

@ -1,5 +1,5 @@
credentials: credentials:
postgres_password: POSTGRES_PASSWORD:
description: "Password for the PostgreSQL superuser 'postgres'" description: "Password for the PostgreSQL superuser 'postgres'"
algorithm: "bcrypt" algorithm: "bcrypt"
validation: "^\\$2[aby]\\$.{56}$" validation: "^\\$2[aby]\\$.{56}$"

View File

@ -4,5 +4,5 @@ OnFailure=sys-alm-compose.infinito@%n.service
[Service] [Service]
Type=oneshot Type=oneshot
ExecStartPre=/bin/sh -c '/usr/bin/python {{ path_system_lock_script }} {{ system_maintenance_services | join(' ') }} --ignore svc-opt-ssd-hdd svc-bkp-rmt-2-loc --timeout "{{system_maintenance_lock_timeout_storage_optimizer}}"' ExecStartPre=/bin/sh -c '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ system_maintenance_services | join(' ') }} --ignore svc-opt-ssd-hdd svc-bkp-rmt-2-loc --timeout "{{system_maintenance_lock_timeout_storage_optimizer}}"'
ExecStart=/bin/sh -c '/usr/bin/python {{storage_optimizer_script}} --rapid-storage-path {{path_rapid_storage}} --mass-storage-path {{path_mass_storage}}' ExecStart=/bin/sh -c '/usr/bin/python {{storage_optimizer_script}} --rapid-storage-path {{path_rapid_storage}} --mass-storage-path {{path_mass_storage}}'

View File

@ -1,5 +1,5 @@
application_id: svc-opt-ssd-hdd application_id: svc-opt-ssd-hdd
storage_optimizer_directory: '{{ path_administrator_scripts }}{{ application_id }}/' storage_optimizer_directory: '{{ PATH_ADMINISTRATOR_SCRIPTS }}{{ application_id }}/'
storage_optimizer_script: '{{ storage_optimizer_directory }}{{ application_id }}.py' storage_optimizer_script: '{{ storage_optimizer_directory }}{{ application_id }}.py'
path_rapid_storage: "{{ applications | get_app_conf(application_id, 'volumes.rapid_storage') }}" path_rapid_storage: "{{ applications | get_app_conf(application_id, 'volumes.rapid_storage') }}"
path_mass_storage: "{{ applications | get_app_conf(application_id, 'volumes.mass_storage') }}" path_mass_storage: "{{ applications | get_app_conf(application_id, 'volumes.mass_storage') }}"

View File

@ -2,3 +2,6 @@ docker:
services: services:
openresty: openresty:
name: "openresty" name: "openresty"
volumes:
www: "/var/www/"
nginx: "/etc/nginx/"

View File

@ -9,7 +9,7 @@ This document provides commands and tips to validate and inspect the OpenResty (
* **Quick syntax check (quiet):** * **Quick syntax check (quiet):**
```bash ```bash
docker exec {{ openresty_container }} openresty -t -q docker exec {{ OPENRESTY_CONTAINER }} openresty -t -q
``` ```
*Returns only errors.* *Returns only errors.*
@ -17,13 +17,13 @@ This document provides commands and tips to validate and inspect the OpenResty (
* **Detailed syntax check (show warnings):** * **Detailed syntax check (show warnings):**
```bash ```bash
docker exec {{ openresty_container }} openresty -t docker exec {{ OPENRESTY_CONTAINER }} openresty -t
``` ```
or: or:
```bash ```bash
docker exec {{ openresty_container }} nginx -t docker exec {{ OPENRESTY_CONTAINER }} nginx -t
``` ```
--- ---
@ -34,9 +34,9 @@ To see the full configuration after all `include` directives are processed:
```bash ```bash
# Within the running container # Within the running container
docker exec {{ openresty_container }} openresty -T docker exec {{ OPENRESTY_CONTAINER }} openresty -T
# or equivalently # or equivalently
docker exec {{ openresty_container }} nginx -T docker exec {{ OPENRESTY_CONTAINER }} nginx -T
``` ```
This outputs every directive from `nginx.conf` and all files in `conf.d` in the order Nginx will use them. This outputs every directive from `nginx.conf` and all files in `conf.d` in the order Nginx will use them.
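
When the dump is long, filtering it is often the fastest way to answer a specific question — for example, which `server_name` entries are currently active. A small sketch, not part of the role itself, assuming the container name renders from `OPENRESTY_CONTAINER` as in the commands above:

```bash
# Dump the effective configuration and keep only the server_name directives
docker exec {{ OPENRESTY_CONTAINER }} openresty -T 2>/dev/null \
  | grep -E '^[[:space:]]*server_name' \
  | sort -u
```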
@ -103,5 +103,5 @@ That way you'll see exactly which domains your server is serving and which nam * After fixing issues, reload without downtime: * After fixing issues, reload without downtime:
* After fixing issues, reload without downtime: * After fixing issues, reload without downtime:
```bash ```bash
docker exec {{ openresty_container }} openresty -s reload docker exec {{ OPENRESTY_CONTAINER }} openresty -s reload
``` ```
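
If a reload appears to have no effect, checking the container logs right afterwards usually reveals why (a worker that failed to re-exec, an unreadable certificate, and so on). A hedged addition — this is plain Docker tooling rather than anything specific to this role:

```bash
# Show the most recent log lines after a reload attempt
docker logs --tail 50 {{ OPENRESTY_CONTAINER }}
```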

View File

@ -1,12 +1,15 @@
--- ---
- name: Validate OpenResty configuration - name: Validate OpenResty configuration
command: > command: >
docker exec {{ openresty_container }} openresty -t -q docker exec {{ OPENRESTY_CONTAINER }} openresty -t -q
register: openresty_test register: openresty_test
changed_when: false changed_when: false
failed_when: openresty_test.rc != 0 failed_when: >
openresty_test.rc != 0 and
('is not running' not in ((openresty_test.stderr | default('')) | lower)) and
('no such container' not in ((openresty_test.stderr | default('')) | lower))
listen: restart openresty listen: restart openresty
- name: Restart OpenResty container - name: Restart OpenResty container
command: docker restart {{ openresty_container }} command: docker restart {{ OPENRESTY_CONTAINER }}
listen: restart openresty listen: restart openresty

View File

@ -1,15 +1,15 @@
{% include 'roles/docker-compose/templates/base.yml.j2' %} {% include 'roles/docker-compose/templates/base.yml.j2' %}
openresty: openresty:
container_name: {{ openresty_container }} container_name: {{ OPENRESTY_CONTAINER }}
image: {{ openresty_image }}:{{ openresty_version }} image: {{ OPENRESTY_IMAGE }}:{{ OPENRESTY_VERSION }}
network_mode: "host" network_mode: "host"
volumes: volumes:
- {{ nginx.files.configuration }}:/usr/local/openresty/nginx/conf/nginx.conf:ro - {{ NGINX.FILES.CONFIGURATION }}:/usr/local/openresty/nginx/conf/nginx.conf:ro
- {{ nginx.directories.configuration }}:/usr/local/openresty/nginx/conf/conf.d:ro - {{ NGINX.DIRECTORIES.CONFIGURATION }}:/usr/local/openresty/nginx/conf/conf.d:ro
- {{ nginx.files.configuration }}:{{ nginx.files.configuration }}:ro - {{ NGINX.FILES.CONFIGURATION }}:{{ NGINX.FILES.CONFIGURATION }}:ro
- {{ nginx.directories.configuration }}:{{ nginx.directories.configuration }}:ro - {{ NGINX.DIRECTORIES.CONFIGURATION }}:{{ NGINX.DIRECTORIES.CONFIGURATION }}:ro
- {{ nginx.directories.data.www }}:{{ nginx.directories.data.www }}:ro - {{ NGINX.DIRECTORIES.DATA.WWW }}:{{ NGINX.DIRECTORIES.DATA.WWW }}:ro
- {{ nginx.directories.data.well_known }}:{{ nginx.directories.data.well_known }}:ro - {{ NGINX.DIRECTORIES.DATA.WELL_KNOWN }}:{{ NGINX.DIRECTORIES.DATA.WELL_KNOWN }}:ro
- {{ LETSENCRYPT_WEBROOT_PATH }}:{{ LETSENCRYPT_WEBROOT_PATH }}:ro - {{ LETSENCRYPT_WEBROOT_PATH }}:{{ LETSENCRYPT_WEBROOT_PATH }}:ro
- {{ LETSENCRYPT_BASE_PATH }}:{{ LETSENCRYPT_BASE_PATH }}:ro - {{ LETSENCRYPT_BASE_PATH }}:{{ LETSENCRYPT_BASE_PATH }}:ro
command: ["openresty", "-g", "daemon off;"] command: ["openresty", "-g", "daemon off;"]

View File

@ -5,6 +5,7 @@ application_id: "svc-prx-openresty"
database_type: "" database_type: ""
# Openresty # Openresty
openresty_image: "openresty/openresty" OPENRESTY_IMAGE: "openresty/openresty"
openresty_version: "alpine" OPENRESTY_VERSION: "alpine"
openresty_container: "{{ applications | get_app_conf(application_id, 'docker.services.openresty.name', True) }}" OPENRESTY_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.openresty.name', True) }}"

View File

@ -1 +1 @@
systemd_notifier_email_folder: '{{path_administrator_scripts}}sys-alm-email/' systemd_notifier_email_folder: '{{ PATH_ADMINISTRATOR_SCRIPTS }}sys-alm-email/'

View File

@ -30,7 +30,7 @@
set_fact: set_fact:
service_name: "{{ role_name }}" service_name: "{{ role_name }}"
- name: "include role for sys-timer for {{service_name}}" - name: "include role for sys-timer for {{ service_name }}"
include_role: include_role:
name: sys-timer name: sys-timer
vars: vars:

View File

@ -12,6 +12,7 @@
database_username: "{{ database_username | default('undefined') }}" database_username: "{{ database_username | default('undefined') }}"
database_password: "{{ database_password | default('undefined') }}" database_password: "{{ database_password | default('undefined') }}"
when: MODE_DEBUG | bool when: MODE_DEBUG | bool
no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"
- name: "fail if not all required database variables are defined" - name: "fail if not all required database variables are defined"
fail: fail:
@ -25,6 +26,7 @@
database_name is defined and database_name is defined and
database_username is defined and database_username is defined and
database_password is defined) database_password is defined)
no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"
- name: "seed database values in directory {{ backup_docker_to_local_folder }}" - name: "seed database values in directory {{ backup_docker_to_local_folder }}"
command: > command: >
@ -40,6 +42,7 @@
- database_name is defined - database_name is defined
- database_username is defined - database_username is defined
- database_password is defined - database_password is defined
no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"
- name: Set file permissions for databases.csv to be readable, writable, and executable by root only - name: Set file permissions for databases.csv to be readable, writable, and executable by root only
ansible.builtin.file: ansible.builtin.file:

View File

@ -4,6 +4,6 @@ OnFailure=sys-alm-compose.infinito@%n.service sys-cln-faild-bkps{{ SYS_SERVICE_S
[Service] [Service]
Type=oneshot Type=oneshot
ExecStartPre=/bin/sh -c '/usr/bin/python {{ path_system_lock_script }} {{ system_maintenance_services | join(' ') }} --ignore {{ system_maintenance_backup_services | reject('equalto', 'sys-bkp-docker-2-loc') | join(' ') }} --timeout "{{system_maintenance_lock_timeout_backup_services}}"' ExecStartPre=/bin/sh -c '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ system_maintenance_services | join(' ') }} --ignore {{ system_maintenance_backup_services | reject('equalto', 'sys-bkp-docker-2-loc') | join(' ') }} --timeout "{{system_maintenance_lock_timeout_backup_services}}"'
ExecStart=/bin/sh -c '{{ bkp_docker_2_loc_exec }} --everything' ExecStart=/bin/sh -c '{{ bkp_docker_2_loc_exec }} --everything'
ExecStartPost=/bin/sh -c '/bin/systemctl start sys-rpr-docker-soft{{ SYS_SERVICE_SUFFIX }} &' ExecStartPost=/bin/sh -c '/bin/systemctl start sys-rpr-docker-soft{{ SYS_SERVICE_SUFFIX }} &'

View File

@ -4,6 +4,6 @@ OnFailure=sys-alm-compose.infinito@%n.service sys-cln-faild-bkps{{ SYS_SERVICE_S
[Service] [Service]
Type=oneshot Type=oneshot
ExecStartPre=/bin/sh -c '/usr/bin/python {{ path_system_lock_script }} {{ system_maintenance_services | join(' ') }} --ignore {{ system_maintenance_backup_services | reject('equalto', 'sys-bkp-docker-2-loc-everything') | join(' ') }} --timeout "{{system_maintenance_lock_timeout_backup_services}}"' ExecStartPre=/bin/sh -c '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ system_maintenance_services | join(' ') }} --ignore {{ system_maintenance_backup_services | reject('equalto', 'sys-bkp-docker-2-loc-everything') | join(' ') }} --timeout "{{system_maintenance_lock_timeout_backup_services}}"'
ExecStart=/bin/sh -c '{{ bkp_docker_2_loc_exec }}' ExecStart=/bin/sh -c '{{ bkp_docker_2_loc_exec }}'
ExecStartPost=/bin/sh -c '/bin/systemctl start sys-rpr-docker-soft{{ SYS_SERVICE_SUFFIX }} &' ExecStartPost=/bin/sh -c '/bin/systemctl start sys-rpr-docker-soft{{ SYS_SERVICE_SUFFIX }} &'

View File

@ -42,5 +42,5 @@ bkp_docker_2_loc_cli_args_list:
bkp_docker_2_loc_exec: >- bkp_docker_2_loc_exec: >-
/usr/bin/python {{ backup_docker_to_local_folder }}backup-docker-to-local.py /usr/bin/python {{ backup_docker_to_local_folder }}backup-docker-to-local.py
--compose-dir {{ path_docker_compose_instances }} --compose-dir {{ PATH_DOCKER_COMPOSE_INSTANCES }}
{{ bkp_docker_2_loc_cli_args_list | select('string') | join(' ') }} {{ bkp_docker_2_loc_cli_args_list | select('string') | join(' ') }}

View File

@ -4,5 +4,5 @@ OnFailure=sys-alm-compose.infinito@%n.service
[Service] [Service]
Type=oneshot Type=oneshot
ExecStartPre=/bin/sh -c '/usr/bin/python {{ path_system_lock_script }} {{ system_maintenance_services | join(' ') }} --ignore {{system_maintenance_cleanup_services| join(' ') }} --timeout "{{system_maintenance_lock_timeout_backup_services}}"' ExecStartPre=/bin/sh -c '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ system_maintenance_services | join(' ') }} --ignore {{system_maintenance_cleanup_services| join(' ') }} --timeout "{{system_maintenance_lock_timeout_backup_services}}"'
ExecStart=/bin/sh -c '/usr/bin/python {{cleanup_backups_directory}}sys-cln-backups.py --backups-folder-path {{backups_folder_path}} --maximum-backup-size-percent {{size_percent_maximum_backup}}' ExecStart=/bin/sh -c '/usr/bin/python {{cleanup_backups_directory}}sys-cln-backups.py --backups-folder-path {{backups_folder_path}} --maximum-backup-size-percent {{size_percent_maximum_backup}}'

View File

@ -1,2 +1,2 @@
cleanup_backups_directory: '{{path_administrator_scripts}}sys-cln-backups/' cleanup_backups_directory: '{{ PATH_ADMINISTRATOR_SCRIPTS }}sys-cln-backups/'

View File

@ -9,7 +9,7 @@
set_fact: set_fact:
service_name: "sys-cln-backups" service_name: "sys-cln-backups"
- name: "include role for sys-timer for {{service_name}}" - name: "include role for sys-timer for {{ service_name }}"
include_role: include_role:
name: sys-timer name: sys-timer
vars: vars:

View File

@ -21,7 +21,7 @@
set_fact: set_fact:
service_name: "{{ role_name }}" service_name: "{{ role_name }}"
- name: "include role for sys-timer for {{service_name}}" - name: "include role for sys-timer for {{ service_name }}"
include_role: include_role:
name: sys-timer name: sys-timer
vars: vars:

View File

@ -30,7 +30,7 @@
set_fact: set_fact:
service_name: "{{ role_name }}" service_name: "{{ role_name }}"
- name: "include role for sys-timer for {{service_name}}" - name: "include role for sys-timer for {{ service_name }}"
include_role: include_role:
name: sys-timer name: sys-timer
vars: vars:

View File

@ -4,5 +4,5 @@ OnFailure=sys-alm-compose.infinito@%n.service
[Service] [Service]
Type=oneshot Type=oneshot
ExecStartPre=/bin/sh -c '/usr/bin/python {{ path_system_lock_script }} {{ system_maintenance_services | join(' ') }} --ignore {{system_maintenance_cleanup_services| join(' ') }} --timeout "{{system_maintenance_lock_timeout_backup_services}}"' ExecStartPre=/bin/sh -c '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ system_maintenance_services | join(' ') }} --ignore {{system_maintenance_cleanup_services| join(' ') }} --timeout "{{system_maintenance_lock_timeout_backup_services}}"'
ExecStart=/bin/sh -c '/bin/bash {{cleanup_disc_space_folder}}sys-cln-disc-space.sh {{size_percent_cleanup_disc_space}}' ExecStart=/bin/sh -c '/bin/bash {{cleanup_disc_space_folder}}sys-cln-disc-space.sh {{size_percent_cleanup_disc_space}}'

View File

@ -24,7 +24,7 @@ if [ "$force_freeing" = true ]; then
{% if backups_folder_path is defined and size_percent_maximum_backup is defined %} {% if backups_folder_path is defined and size_percent_maximum_backup is defined %}
echo "cleaning up backups" && echo "cleaning up backups" &&
python {{path_administrator_scripts}}sys-cln-backups/sys-cln-backups.py --backups-folder-path {{backups_folder_path}} --maximum-backup-size-percent {{size_percent_maximum_backup}} || exit 2 python {{ PATH_ADMINISTRATOR_SCRIPTS }}sys-cln-backups/sys-cln-backups.py --backups-folder-path {{backups_folder_path}} --maximum-backup-size-percent {{size_percent_maximum_backup}} || exit 2
{% endif %} {% endif %}
if pacman -Qs $package > /dev/null ; then if pacman -Qs $package > /dev/null ; then

View File

@ -1 +1 @@
cleanup_disc_space_folder: '{{ path_administrator_scripts }}sys-cln-disc-space/' cleanup_disc_space_folder: '{{ PATH_ADMINISTRATOR_SCRIPTS }}sys-cln-disc-space/'

View File

@ -1,7 +1,7 @@
--- ---
- name: Find matching nginx configs for {{ domain }} - name: Find matching nginx configs for {{ domain }}
ansible.builtin.find: ansible.builtin.find:
paths: "{{ nginx.directories.http.servers }}" paths: "{{ NGINX.DIRECTORIES.HTTP.SERVERS }}"
patterns: "*.{{ domain }}.conf" patterns: "*.{{ domain }}.conf"
register: find_result register: find_result
@ -15,6 +15,6 @@
- name: Remove exact nginx config for {{ domain }} - name: Remove exact nginx config for {{ domain }}
ansible.builtin.file: ansible.builtin.file:
path: "{{ nginx.directories.http.servers }}{{ domain }}.conf" path: "{{ NGINX.DIRECTORIES.HTTP.SERVERS }}{{ domain }}.conf"
state: absent state: absent
notify: restart openresty notify: restart openresty

View File

@ -33,7 +33,7 @@
set_fact: set_fact:
service_name: "{{ role_name }}" service_name: "{{ role_name }}"
- name: "include role for sys-timer for {{service_name}}" - name: "include role for sys-timer for {{ service_name }}"
include_role: include_role:
name: sys-timer name: sys-timer
vars: vars:

View File

@ -4,5 +4,5 @@ OnFailure=sys-alm-compose.infinito@%n.service
[Service] [Service]
Type=oneshot Type=oneshot
ExecStartPre=/bin/sh -c '/usr/bin/python {{ path_system_lock_script }} {{ system_maintenance_services | join(' ') }} --ignore {{system_maintenance_cleanup_services| join(' ') }} --timeout "{{system_maintenance_lock_timeout_cleanup_services}}"' ExecStartPre=/bin/sh -c '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ system_maintenance_services | join(' ') }} --ignore {{system_maintenance_cleanup_services| join(' ') }} --timeout "{{system_maintenance_lock_timeout_cleanup_services}}"'
ExecStart=/bin/sh -c '/usr/bin/yes | /usr/bin/bash {{backup_docker_to_local_cleanup_script}}' ExecStart=/bin/sh -c '/usr/bin/yes | /usr/bin/bash {{backup_docker_to_local_cleanup_script}}'

View File

@ -11,6 +11,6 @@ Checks the health of all mounted Btrfs filesystems by inspecting device error co
## Usage ## Usage
Just include this role in your playbook; it will: Just include this role in your playbook; it will:
1. Deploy a small shell script under `{{ path_administrator_scripts }}/sys-hlth-btrfs/`. 1. Deploy a small shell script under `{{ PATH_ADMINISTRATOR_SCRIPTS }}/sys-hlth-btrfs/`.
2. Install a `.service` and `.timer` unit. 2. Install a `.service` and `.timer` unit.
3. Send alerts via `sys-alm-compose` if any filesystem shows errors. 3. Send alerts via `sys-alm-compose` if any filesystem shows errors.
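
The script itself is not shown in this diff, but the check it performs can be reproduced by hand. A minimal sketch of the underlying idea, assuming a standard `btrfs-progs` installation (the deployed script may differ):

```bash
# Report per-device error counters for every mounted Btrfs filesystem;
# any non-zero counter is the condition the role would alert on.
for mnt in $(findmnt -t btrfs -n -o TARGET); do
  echo "== $mnt =="
  btrfs device stats "$mnt"
done
```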

View File

@ -29,7 +29,7 @@
set_fact: set_fact:
service_name: "{{ role_name }}" service_name: "{{ role_name }}"
- name: "include role for sys-timer for {{service_name}}" - name: "include role for sys-timer for {{ service_name }}"
include_role: include_role:
name: sys-timer name: sys-timer
vars: vars:

Some files were not shown because too many files have changed in this diff.