Mirror of https://github.com/kevinveenbirkenbach/computer-playbook.git
Synced 2025-08-15 16:40:45 +02:00
THE HUGE REFACTORING, CALENDAR WEEK 33: Optimized Matrix and updated variables along the way; implemented better reset and cleanup mode handling; also solved some initial setup bugs
This commit is contained in:
parent 0228014d34
commit 022800425d
@@ -29,7 +29,7 @@ WEB_PORT: "{{ 443 if WEB_PROTOCOL == 'https' else 80 }}" # Defaul

 # Domain
 PRIMARY_DOMAIN: "localhost"  # Primary Domain of the server
-PRIMARY_DOMAIN_tld: "{{ (PRIMARY_DOMAIN == 'localhost') | ternary('localhost', PRIMARY_DOMAIN.split('.')[-1]) }}"  # Top Level Domain of the server
+PRIMARY_DOMAIN_TLD: "{{ (PRIMARY_DOMAIN == 'localhost') | ternary('localhost', PRIMARY_DOMAIN.split('.')[-1]) }}"  # Top Level Domain of the server
 PRIMARY_DOMAIN_SLD: "{{ (PRIMARY_DOMAIN == 'localhost') | ternary('localhost', PRIMARY_DOMAIN.split('.')[-2]) }}"  # Second Level Domain of the server

 # Server Tact Variables
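For illustration (an editor's sketch, not part of the commit): with a non-localhost domain, the two `ternary` filters split the primary domain as follows, assuming `PRIMARY_DOMAIN: "example.org"`:

```yaml
# Hypothetical resolution for PRIMARY_DOMAIN: "example.org"
PRIMARY_DOMAIN_TLD: "org"      # split('.')[-1] — last dot-separated label
PRIMARY_DOMAIN_SLD: "example"  # split('.')[-2] — second-to-last label
```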
@@ -1,9 +1,9 @@
 # Mode

 # The following modes can be combined with each other
-MODE_RESET: false    # Cleans up all Infinito.Nexus files. It's necessary to run the whole playbook, not partial roles, when using this mode.
 MODE_TEST: false     # Executes test routines instead of productive routines
 MODE_UPDATE: true    # Executes updates
 MODE_BACKUP: true    # Activates the backup before the update procedure
 MODE_CLEANUP: true   # Cleans up unused files and configurations
 MODE_DEBUG: false    # Enables debugging in Ansible and in the apps. You SHOULD NOT enable this on production servers.
+MODE_RESET: false    # Cleans up all Infinito.Nexus files. It's necessary to run the whole playbook, not partial roles, when using this mode.
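A minimal sketch (editor's illustration; the file name is hypothetical) of how these flags combine for a destructive re-provisioning run:

```yaml
# group_vars/all/modes.yml — hypothetical override for a full re-provisioning run
MODE_RESET: true     # wipe previous Infinito.Nexus state first; run the whole playbook
MODE_CLEANUP: true   # keep cleanup enabled; reset additionally triggers it in the tasks below
MODE_BACKUP: true    # still back up before the update procedure
MODE_DEBUG: false    # keep disabled on production servers
```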
@@ -1,27 +1,32 @@
 # Webserver Configuration

 # Helper
-_nginx_www_dir: /var/www/
+_nginx_www_dir: "{{ applications | get_app_conf('svc-prx-openresty','docker.volumes.www') }}"
+_nginx_dir: "{{ applications | get_app_conf('svc-prx-openresty','docker.volumes.nginx') }}"
+_nginx_conf_dir: "{{ _nginx_dir }}conf.d/"
+_nginx_http_dir: "{{ _nginx_conf_dir }}http/"

 ## Nginx-Specific Path Configurations
-nginx:
-  files:
-    configuration: "/etc/nginx/nginx.conf"
-  directories:
-    configuration: "/etc/nginx/conf.d/"          # Configuration directory
-    http:
-      global: "/etc/nginx/conf.d/http/global/"   # Contains global configurations which will be loaded into the http block
-      servers: "/etc/nginx/conf.d/http/servers/" # Contains one configuration per domain
-      maps: "/etc/nginx/conf.d/http/maps/"       # Contains mappings
-    streams: "/etc/nginx/conf.d/streams/"        # Contains streams configuration, e.g. for ldaps
-    data:
-      www: "{{ _nginx_www_dir }}"
-      well_known: "/usr/share/nginx/well-known/" # Path where well-known files are stored
-      html: "{{ _nginx_www_dir }}public_html/"   # Path where the static homepage files are stored
-      files: "{{ _nginx_www_dir }}public_files/" # Path where the web-accessible files are stored
-      cdn: "{{ _nginx_www_dir }}public_cdn/"     # Contains files which will be accessible via the content delivery network
-      global: "{{ _nginx_www_dir }}global/"      # Directory containing files which will be globally accessible
-    cache:
-      general: "/tmp/cache_nginx_general/"       # Directory which nginx uses to cache general data
-      image: "/tmp/cache_nginx_image/"           # Directory which nginx uses to cache images
-  user: "http"                                   # Default nginx user in ArchLinux
+NGINX:
+  FILES:
+    CONFIGURATION: "{{ _nginx_dir }}nginx.conf"
+  DIRECTORIES:
+    CONFIGURATION: "{{ _nginx_conf_dir }}"         # Configuration directory
+    HTTP:
+      GLOBAL: "{{ _nginx_http_dir }}global/"       # Contains global configurations which will be loaded into the http block
+      SERVERS: "{{ _nginx_http_dir }}servers/"     # Contains one configuration per domain
+      MAPS: "{{ _nginx_http_dir }}maps/"           # Contains mappings
+    STREAMS: "{{ _nginx_conf_dir }}streams/"       # Contains streams configuration, e.g. for ldaps
+    DATA:
+      WWW: "{{ _nginx_www_dir }}"
+      WELL_KNOWN: "/usr/share/nginx/well-known/"   # Path where well-known files are stored
+      HTML: "{{ _nginx_www_dir }}public_html/"     # Path where the static homepage files are stored
+      FILES: "{{ _nginx_www_dir }}public_files/"   # Path where the web-accessible files are stored
+      CDN: "{{ _nginx_www_dir }}public_cdn/"       # Contains files which will be accessible via the content delivery network
+      GLOBAL: "{{ _nginx_www_dir }}global/"        # Directory containing files which will be globally accessible; @todo remove this once the CSS is migrated to the CDN
+    CACHE:
+      GENERAL: "/tmp/cache_nginx_general/"         # Directory which nginx uses to cache general data
+      IMAGE: "/tmp/cache_nginx_image/"             # Directory which nginx uses to cache images
+  USER: "http"                                     # Default nginx user in ArchLinux

 # @todo It probably makes sense to distinguish between target and source mount path, so that the config files can be stored in the openresty volumes folder
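For context (editor's sketch): combining these helpers with the svc-prx-openresty volume defaults that appear later in this diff (`www: "/var/www/"`, `nginx: "/etc/nginx/"`), the paths resolve to the same locations the old hard-coded block used:

```yaml
# Resolved helper values, assuming the openresty volume defaults from this diff
_nginx_www_dir: /var/www/
_nginx_dir: /etc/nginx/
_nginx_conf_dir: /etc/nginx/conf.d/
_nginx_http_dir: /etc/nginx/conf.d/http/
```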
@@ -1,6 +1,6 @@

 # Path Variables for Key Directories and Scripts
-path_administrator_home: "/home/administrator/"
-path_administrator_scripts: "/opt/scripts/"
-path_docker_compose_instances: "/opt/docker/"
-path_system_lock_script: "/opt/scripts/sys-lock.py"
+PATH_ADMINISTRATOR_HOME: "/home/administrator/"
+PATH_ADMINISTRATOR_SCRIPTS: "/opt/scripts/"
+PATH_DOCKER_COMPOSE_INSTANCES: "/opt/docker/"
+PATH_SYSTEM_LOCK_SCRIPT: "/opt/scripts/sys-lock.py"
@@ -5,7 +5,7 @@

 # Helper Variables:
 # Keep in mind to map these variables if there is ever the possibility for the user to define them in the inventory
-_ldap_dn_base: "dc={{PRIMARY_DOMAIN_SLD}},dc={{PRIMARY_DOMAIN_tld}}"
+LDAP_DN_BASE: "dc={{ PRIMARY_DOMAIN_SLD }},dc={{ PRIMARY_DOMAIN_TLD }}"
 _ldap_docker_network_enabled: "{{ applications | get_app_conf('svc-db-openldap', 'network.docker') }}"
 _ldap_protocol: "{{ 'ldap' if _ldap_docker_network_enabled else 'ldaps' }}"
 _ldap_server_port: "{{ ports.localhost[_ldap_protocol]['svc-db-openldap'] }}"
@@ -22,14 +22,14 @@ ldap:
   # This is the top-level naming context for your directory, used as the
   # default search base for most operations (e.g. adding users, groups).
   # Example: “dc=example,dc=com”
-  root: "{{_ldap_dn_base}}"
+  root: "{{ LDAP_DN_BASE }}"
   administrator:
     # -------------------------------------------------------------------------
     # Data-Tree Administrator Bind DN
     # The DN used to authenticate for regular directory operations under
     # the data tree (adding users, modifying attributes, creating OUs, etc.).
     # Typically: “cn=admin,dc=example,dc=com”
-    data: "cn={{ applications['svc-db-openldap'].users.administrator.username }},{{ _ldap_dn_base }}"
+    data: "cn={{ applications['svc-db-openldap'].users.administrator.username }},{{ LDAP_DN_BASE }}"

     # -------------------------------------------------------------------------
     # Config-Tree Administrator Bind DN
@@ -47,9 +47,9 @@ ldap:
   # – groups: Contains organizational or business groups (e.g., departments, teams).
   # – roles:  Contains application-specific RBAC roles
   #           (e.g., "cn=app1-user", "cn=yourls-admin").
-  users: "ou=users,{{ _ldap_dn_base }}"
-  groups: "ou=groups,{{ _ldap_dn_base }}"
-  roles: "ou=roles,{{ _ldap_dn_base }}"
+  users: "ou=users,{{ LDAP_DN_BASE }}"
+  groups: "ou=groups,{{ LDAP_DN_BASE }}"
+  roles: "ou=roles,{{ LDAP_DN_BASE }}"

   # -------------------------------------------------------------------------
   # Additional Notes
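A worked example (editor's sketch, not part of the commit) of how the DN pieces compose, assuming PRIMARY_DOMAIN is "example.com":

```yaml
# LDAP_DN_BASE: "dc={{ PRIMARY_DOMAIN_SLD }},dc={{ PRIMARY_DOMAIN_TLD }}" resolves to:
LDAP_DN_BASE: "dc=example,dc=com"
# and the organizational units beneath it:
users: "ou=users,dc=example,dc=com"
groups: "ou=groups,dc=example,dc=com"
roles: "ou=roles,dc=example,dc=com"
```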
@@ -10,7 +10,7 @@
 - name: "set oauth2_proxy_application_id (needed due to a lazy-loading issue)"
   set_fact:
     oauth2_proxy_application_id: "{{ application_id }}"

-- name: "include the web-app-oauth2-proxy role {{domain}}"
+- name: "include the web-app-oauth2-proxy role {{ domain }}"
   include_tasks: "{{ playbook_dir }}/roles/web-app-oauth2-proxy/tasks/main.yml"
   when: applications | get_app_conf(application_id, 'features.oauth2', False)
@@ -6,7 +6,7 @@
     logging:
       driver: journald
     image: mariadb
-    restart: {{DOCKER_RESTART_POLICY}}
+    restart: {{ DOCKER_RESTART_POLICY }}
     env_file:
      - {{database_env}}
     command: "--transaction-isolation=READ-COMMITTED --binlog-format=ROW"
@@ -6,7 +6,7 @@
     container_name: {{ application_id | get_entity_name }}-database
     env_file:
      - {{database_env}}
-    restart: {{DOCKER_RESTART_POLICY}}
+    restart: {{ DOCKER_RESTART_POLICY }}
     healthcheck:
       test: ["CMD-SHELL", "pg_isready -U {{database_name}}"]
       interval: 10s
roles/docker-compose/tasks/01_core.yml (new file, 16 lines)
@@ -0,0 +1,16 @@
+- name: "Load docker container role"
+  include_role:
+    name: docker-container
+  when: run_once_docker_container is not defined
+
+- name: "reset (if enabled)"
+  include_tasks: 02_reset.yml
+  when: MODE_RESET | bool
+
+- name: "create {{ PATH_DOCKER_COMPOSE_INSTANCES }}"
+  file:
+    path: "{{ PATH_DOCKER_COMPOSE_INSTANCES }}"
+    state: directory
+    mode: 0700
+    owner: root
+    group: root
@@ -1,11 +0,0 @@
-# It is necessary to shut the projects down when reset is activated.
-# Otherwise it can lead to this bug:
-# https://github.com/ansible/ansible/issues/10244
-- name: shut down docker compose project
-  command:
-    cmd: "docker-compose -p {{ application_id }} down"
-
-- name: "Remove {{ docker_compose.directories.instance }} and all its contents"
-  file:
-    path: "{{ docker_compose.directories.instance }}"
-    state: absent
roles/docker-compose/tasks/02_reset.yml (new file, 16 lines)
@@ -0,0 +1,16 @@
+# It is necessary to shut the projects down when reset is activated.
+# Otherwise it can lead to this bug:
+# https://github.com/ansible/ansible/issues/10244
+- name: "pkgmgr install '{{ DOCKER_COMPOSE_DOWN_ALL_PACKAGE }}'"
+  include_role:
+    name: pkgmgr-install
+  vars:
+    package_name: "{{ DOCKER_COMPOSE_DOWN_ALL_PACKAGE }}"
+
+- name: Shutdown all docker compose instances in '{{ PATH_DOCKER_COMPOSE_INSTANCES }}' with '{{ DOCKER_COMPOSE_DOWN_ALL_PACKAGE }}'
+  command: "{{ DOCKER_COMPOSE_DOWN_ALL_PACKAGE }} {{ PATH_DOCKER_COMPOSE_INSTANCES }}"
+
+- name: "Remove directory '{{ PATH_DOCKER_COMPOSE_INSTANCES }}' and all its contents"
+  file:
+    path: "{{ PATH_DOCKER_COMPOSE_INSTANCES }}"
+    state: absent
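With the defaults introduced later in this diff (`DOCKER_COMPOSE_DOWN_ALL_PACKAGE: "docodol"`, `PATH_DOCKER_COMPOSE_INSTANCES: "/opt/docker/"`), the shutdown task effectively expands as sketched below (editor's illustration):

```yaml
# The templated command the task runs, after variable substitution:
- name: Shutdown all docker compose instances
  command: "docodol /opt/docker/"
```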
@@ -1,16 +1,10 @@
 - block:
-    - include_role:
-        name: docker-container
-      when: run_once_docker_container is not defined
+    - include_tasks: 01_core.yml
     - include_tasks: utils/run_once.yml
   when: run_once_docker_compose is not defined

-- name: "Load variables from {{ docker_compose_variable_file }} for whole play"
-  include_vars: "{{ docker_compose_variable_file }}"
-
-- name: "reset (if enabled)"
-  include_tasks: 01_reset.yml
-  when: MODE_RESET | bool
+- name: "Load variables from {{ DOCKER_COMPOSE_VARIABLE_FILE }} for whole play"
+  include_vars: "{{ DOCKER_COMPOSE_VARIABLE_FILE }}"

 # This could lead to problems in docker-compose directories which are based on a git repository
 # @todo Verify that this isn't the case, e.g. in accounting
@@ -21,15 +15,15 @@
     mode: '0755'
   with_dict: "{{ docker_compose.directories }}"

-- name: "Include routines to set up a git repository based installation for '{{application_id}}'."
-  include_tasks: "02_repository.yml"
+- name: "Include routines to set up a git repository based installation for '{{ application_id }}'."
+  include_tasks: "03_repository.yml"
   when: docker_pull_git_repository | bool

 - block:
-    - name: "Include file management routines for '{{application_id}}'."
-      include_tasks: "03_files.yml"
+    - name: "Include file management routines for '{{ application_id }}'."
+      include_tasks: "04_files.yml"
     - name: "Ensure that {{ docker_compose.directories.instance }} is up"
-      include_tasks: "04_ensure_up.yml"
+      include_tasks: "05_ensure_up.yml"
   when: not docker_compose_skipp_file_creation | bool

 - name: "flush docker compose for '{{ application_id }}'"
@@ -1,2 +1,2 @@
 # @See https://chatgpt.com/share/67a23d18-fb54-800f-983c-d6d00752b0b4
-docker_compose: "{{ application_id | get_docker_paths(path_docker_compose_instances) }}"
+docker_compose: "{{ application_id | get_docker_paths(PATH_DOCKER_COMPOSE_INSTANCES) }}"
@@ -1 +1,2 @@
-docker_compose_variable_file: "{{ role_path }}/vars/docker-compose.yml"
+DOCKER_COMPOSE_VARIABLE_FILE: "{{ role_path }}/vars/docker-compose.yml"
+DOCKER_COMPOSE_DOWN_ALL_PACKAGE: "docodol"
@@ -1,6 +1,6 @@
 - block:
   - include_role:
-      name: docker-core
-    when: run_once_docker_core is not defined
+      name: sys-svc-docker
+    when: run_once_sys_svc_docker is not defined
   - include_tasks: utils/run_once.yml
   when: run_once_docker_container is not defined
@@ -1,6 +1,6 @@
 {# Base for docker services #}

-    restart: {{DOCKER_RESTART_POLICY}}
+    restart: {{ DOCKER_RESTART_POLICY }}
 {% if application_id | has_env %}
     env_file:
       - "{{docker_compose.files.env}}"
@@ -16,7 +16,7 @@
       - CMD-SHELL
       - >
         if [ ! -f /tmp/email_sent ]; then
-          echo 'Subject: testmessage from {{domains | get_domain(application_id)}}\n\nSUCCESSFULL' | msmtp -t {{users.blackhole.email}} && touch /tmp/email_sent;
+          echo 'Subject: testmessage from {{ domains | get_domain(application_id) }}\n\nSUCCESSFULL' | msmtp -t {{ users.blackhole.email }} && touch /tmp/email_sent;
         fi &&
         curl -f http://localhost:80/ || exit 1
     interval: 1m
@@ -1,5 +0,0 @@
-# Docker Server
-
-This role is part of the [Infinito.Nexus Project](https://s.infinito.nexus/code), maintained and developed by [Kevin Veen-Birkenbach](https://www.veen.world/).
-
-Enjoy using this role and happy containerizing! 🎉
@@ -1,2 +0,0 @@
-# Todos
-- Add cleanup service for docker system prune -f
@@ -1,3 +0,0 @@
----
-- name: docker restart
-  service: name=docker.service state=restarted enabled=yes
@@ -1,26 +0,0 @@
-- name: Include backup, repair, health and user dependencies
-  include_role:
-    name: "{{ item }}"
-  loop:
-    - sys-bkp-docker-2-loc
-    - user-administrator
-    - sys-hlth-docker-container
-    - sys-hlth-docker-volumes
-    - sys-rpr-docker-soft
-    - sys-rpr-docker-hard
-
-- name: docker & docker compose install
-  community.general.pacman:
-    name:
-      - 'docker'
-      - 'docker-compose'
-    state: present
-  notify: docker restart
-
-- name: "create {{path_docker_compose_instances}}"
-  file:
-    path: "{{path_docker_compose_instances}}"
-    state: directory
-    mode: 0700
-    owner: root
-    group: root
@@ -30,7 +30,7 @@
   set_fact:
     service_name: "{{ role_name }}"

-- name: "include role for sys-timer for {{service_name}}"
+- name: "include role for sys-timer for {{ service_name }}"
   include_role:
     name: sys-timer
   vars:
@@ -7,3 +7,5 @@
   shell: |
     source ~/.venvs/pkgmgr/bin/activate
     pkgmgr update pkgmgr
+  register: pkgmgr_update
+  changed_when: "'already up to date' not in (pkgmgr_update.stdout | lower)"
@@ -5,11 +5,25 @@
   when: run_once_pkgmgr_install is not defined

 - name: update {{ package_name }}
-  shell: |
+  ansible.builtin.shell: |
     source ~/.venvs/pkgmgr/bin/activate
     pkgmgr update {{ package_name }} --dependencies --clone-mode https
   args:
     executable: /bin/bash
   notify: "{{ package_notify | default(omit,true) }}"
   register: pkgmgr_update_result
-  changed_when: "'No command defined and neither main.sh nor main.py found' not in pkgmgr_update_result.stdout"
-  failed_when: pkgmgr_update_result.rc != 0 and 'No command defined and neither main.sh nor main.py found' not in pkgmgr_update_result.stdout
+  # Mark changed only if it's not "already up to date" and not "no command defined..."
+  changed_when: >
+    ('already up to date' not in ((pkgmgr_update_result.stdout | default('') | lower)
+      ~ ' ' ~ (pkgmgr_update_result.stderr | default('') | lower)))
+    and
+    ('no command defined' not in ((pkgmgr_update_result.stdout | default('') | lower)
+      ~ ' ' ~ (pkgmgr_update_result.stderr | default('') | lower)))
+
+  # Fail only on real errors; allow the "no command defined..." case
+  failed_when: >
+    (pkgmgr_update_result.rc != 0)
+    and
+    ('no command defined' not in ((pkgmgr_update_result.stdout | default('') | lower)
+      ~ ' ' ~ (pkgmgr_update_result.stderr | default('') | lower)))
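An editor's illustration of how the reworked conditions behave on the combined, lowercased stdout+stderr stream; the sample strings are hypothetical pkgmgr output:

```yaml
# rc=0, stdout "Already up to date."            -> changed_when: false, failed_when: false
# rc=1, stderr "No command defined and neither
#   main.sh nor main.py found"                  -> changed_when: false, failed_when: false (tolerated)
# rc=1, stderr "fatal: repository not found"    -> failed_when: true (a real error)
```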
@@ -1 +1 @@
-configuration_destination: "{{nginx.directories.http.servers}}{{domain}}.conf"
+configuration_destination: "{{ NGINX.DIRECTORIES.HTTP.SERVERS }}{{ domain }}.conf"
@@ -1,7 +1,7 @@
 ---
 - name: "restart srv-proxy-6-6-tls-deploy service"
   systemd:
-    name: srv-proxy-6-6-tls-deploy.{{application_id}}{{ SYS_SERVICE_SUFFIX }}
+    name: srv-proxy-6-6-tls-deploy.{{ application_id }}{{ SYS_SERVICE_SUFFIX }}
     state: restarted
     enabled: yes
     daemon_reload: yes
@@ -14,14 +14,14 @@
 - name: configure srv-proxy-6-6-tls-deploy service
   template:
     src: "srv-proxy-6-6-tls-deploy.service.j2"
-    dest: "/etc/systemd/system/srv-proxy-6-6-tls-deploy.{{application_id}}{{ SYS_SERVICE_SUFFIX }}"
+    dest: "/etc/systemd/system/srv-proxy-6-6-tls-deploy.{{ application_id }}{{ SYS_SERVICE_SUFFIX }}"
   notify: restart srv-proxy-6-6-tls-deploy service

-- name: "include role for sys-timer for {{service_name}}"
+- name: "include role for sys-timer for {{ service_name }}"
   include_role:
     name: sys-timer
   vars:
     on_calendar: "{{on_calendar_deploy_certificates}}"
-    service_name: "srv-proxy-6-6-tls-deploy.{{application_id}}"
+    service_name: "srv-proxy-6-6-tls-deploy.{{ application_id }}"
     persistent: "true"
@@ -4,4 +4,4 @@ OnFailure=sys-alm-compose.infinito@%n.service

 [Service]
 Type=oneshot
-ExecStart=/usr/bin/bash {{path_administrator_scripts}}/srv-proxy-6-6-tls-deploy.sh {{ssl_cert_folder}} {{docker_compose.directories.instance}}
+ExecStart=/usr/bin/bash {{ PATH_ADMINISTRATOR_SCRIPTS }}/srv-proxy-6-6-tls-deploy.sh {{ssl_cert_folder}} {{docker_compose.directories.instance}}
@@ -1 +1 @@
-nginx_docker_cert_deploy_script: "{{path_administrator_scripts}}srv-proxy-6-6-tls-deploy.sh"
+nginx_docker_cert_deploy_script: "{{ PATH_ADMINISTRATOR_SCRIPTS }}srv-proxy-6-6-tls-deploy.sh"
@@ -21,7 +21,7 @@ location {{location}}
     # WebSocket specific header
     proxy_http_version 1.1;
     proxy_set_header Upgrade $http_upgrade;
-    proxy_set_header Connection $connection_upgrade;
+    proxy_set_header Connection "upgrade";

     # timeouts
     proxy_connect_timeout 5s;
@@ -1,6 +1,6 @@
 server
 {
-  server_name {{domain}};
+  server_name {{ domain }};

 {% if applications | get_app_conf(application_id, 'features.oauth2', False) %}
 {% include 'roles/web-app-oauth2-proxy/templates/endpoint.conf.j2'%}
@@ -2,7 +2,7 @@
   include_role:
     name: '{{ item }}'
   loop:
-    - srv-web-7-7-certbot
+    - sys-svc-certbot
     - srv-web-7-4-core
     - sys-alm-compose
@@ -22,7 +22,7 @@
   set_fact:
     service_name: "{{ role_name }}"

-- name: "include role for sys-timer for {{service_name}}"
+- name: "include role for sys-timer for {{ service_name }}"
   include_role:
     name: sys-timer
   vars:
roles/srv-web-7-4-core/Todo.md (new file, 2 lines)
@@ -0,0 +1,2 @@
+# To-dos
+- It could make sense to merge this role with svc-prx-openresty
@@ -1,3 +1,27 @@
+- name: "cleanup (if enabled)"
+  include_tasks: 02_cleanup.yml
+  when: >
+    MODE_CLEANUP | bool or
+    MODE_RESET | bool
+
+- name: "reset (if enabled)"
+  include_tasks: 03_reset.yml
+  when: MODE_RESET | bool
+
+- name: "Load docker compose handlers"
+  include_tasks: "{{ playbook_dir }}/tasks/utils/load_handlers.yml"
+  vars:
+    handler_role_name: "docker-compose"
+
+- name: "Include tasks to create directories"
+  include_tasks: 04_directories.yml
+
+- name: create nginx config file
+  template:
+    src: nginx.conf.j2
+    dest: "{{ NGINX.FILES.CONFIGURATION }}"
+  notify: docker compose up
+
 - name: Include health dependencies
   include_role:
     name: "{{ item }}"
@@ -5,6 +29,7 @@
     - sys-hlth-webserver
     - sys-hlth-csp
   vars:
+    # An extra flush is, for performance reasons, not necessary
     flush_handlers: false

 - name: Include openresty
@@ -13,50 +38,12 @@
   # Inside openresty there is a validation that it doesn't run multiple times
   include_role:
     name: svc-prx-openresty
     public: false
-
-  # Explicitly set to guarantee that application_id will not be overwritten.
-  # Should be the default case anyhow.
-  when: run_once_svc_prx_openresty is not defined
-
-- name: "reset (if enabled)"
-  include_tasks: 02_reset.yml
-  when: MODE_RESET | bool
-
-- name: Ensure nginx configuration directories are present
-  file:
-    path: "{{ item }}"
-    state: directory
-    owner: "{{nginx.user}}"
-    group: "{{nginx.user}}"
-    mode: '0755'
-    recurse: yes
-  loop: >
-    {{
-      [ nginx.directories.configuration ] +
-      ( nginx.directories.http.values() | list ) +
-      [ nginx.directories.streams ]
-    }}
-
-- name: Ensure nginx data storage directories are present
-  file:
-    path: "{{ item }}"
-    state: directory
-    recurse: yes
-    owner: "{{nginx.user}}"
-    group: "{{nginx.user}}"
-    mode: '0755'
-  loop: >
-    {{ nginx.directories.data.values() | list }}
-  async: "{{ ASYNC_TIME if ASYNC_ENABLED | bool else omit }}"
-  poll: "{{ ASYNC_POLL if ASYNC_ENABLED | bool else omit }}"
-
-- name: "Include tasks to create cache directories"
-  include_tasks: 03_cache_directories.yml
-  when: run_once_nginx_reverse_proxy is not defined
-
-- name: create nginx config file
-  template:
-    src: nginx.conf.j2
-    dest: "{{ nginx.files.configuration }}"
-  notify: restart openresty
+  vars:
+    # Flush openresty handlers on the first run, so that openresty is up before openresty-related handlers are triggered
+    flush_handlers: true
+  when: run_once_svc_prx_openresty is not defined
roles/srv-web-7-4-core/tasks/02_cleanup.yml (new file, 8 lines)
@@ -0,0 +1,8 @@
+- name: Cleanup all NGINX cache directories
+  become: true
+  ansible.builtin.file:
+    path: "{{ item.value }}"
+    state: absent
+  loop: "{{ NGINX.DIRECTORIES.CACHE | dict2items }}"
+  loop_control:
+    label: "{{ item.key }}"
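For clarity (editor's sketch): `dict2items` turns the CACHE mapping from the vars file above into loopable key/value pairs, so the task removes both cache paths while logging only the short keys:

```yaml
# NGINX.DIRECTORIES.CACHE | dict2items yields:
# - { key: "GENERAL", value: "/tmp/cache_nginx_general/" }
# - { key: "IMAGE",   value: "/tmp/cache_nginx_image/" }
```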
@@ -1,4 +0,0 @@
-- name: "Delete {{nginx.directories.configuration}} directory, when MODE_RESET"
-  file:
-    path: "{{ nginx.directories.configuration }}"
-    state: absent
@@ -1,28 +0,0 @@
-- name: Cleanup all NGINX cache directories
-  become: true
-  ansible.builtin.file:
-    path: "{{ item.value }}"
-    state: absent
-  when:
-    - MODE_CLEANUP | bool
-  loop: "{{ nginx.directories.cache | dict2items }}"
-  loop_control:
-    label: "{{ item.key }}"
-
-- name: Ensure all NGINX cache directories exist
-  become: true
-  ansible.builtin.file:
-    path: "{{ item.value }}"
-    state: directory
-    owner: "{{ nginx.user }}"
-    group: "{{ nginx.user }}"
-    mode: '0700'
-  loop: "{{ nginx.directories.cache | dict2items }}"
-  loop_control:
-    label: "{{ item.key }}"
-  async: "{{ ASYNC_TIME if ASYNC_ENABLED | bool else omit }}"
-  poll: "{{ ASYNC_POLL if ASYNC_ENABLED | bool else omit }}"
-
-- name: run the nginx_reverse_proxy tasks once
-  set_fact:
-    run_once_nginx_reverse_proxy: true
roles/srv-web-7-4-core/tasks/03_reset.yml (new file, 9 lines)
@@ -0,0 +1,9 @@
+- name: Delete NGINX config paths
+  file:
+    path: "{{ item }}"
+    state: absent
+  loop:
+    - "{{ NGINX.DIRECTORIES.CONFIGURATION }}"
+    - "{{ NGINX.FILES.CONFIGURATION }}"
+  loop_control:
+    label: "{{ item }}"
roles/srv-web-7-4-core/tasks/04_directories.yml (new file, 36 lines)
@@ -0,0 +1,36 @@
+- name: Ensure nginx configuration directories are present
+  file:
+    path: "{{ item }}"
+    state: directory
+    owner: "{{ NGINX.USER }}"
+    group: "{{ NGINX.USER }}"
+    mode: '0755'
+    recurse: yes
+  loop: >
+    {{
+      ( NGINX.DIRECTORIES.HTTP.values() | list ) +
+      [ NGINX.DIRECTORIES.STREAMS ]
+    }}
+
+- name: Ensure all NGINX cache directories exist
+  become: true
+  ansible.builtin.file:
+    path: "{{ item.value }}"
+    state: directory
+    owner: "{{ NGINX.USER }}"
+    group: "{{ NGINX.USER }}"
+    mode: '0700'
+  loop: "{{ NGINX.DIRECTORIES.CACHE | dict2items }}"
+  loop_control:
+    label: "{{ item.key }}"
+
+- name: Ensure nginx data storage directories are present
+  file:
+    path: "{{ item }}"
+    state: directory
+    recurse: yes
+    owner: "{{ NGINX.USER }}"
+    group: "{{ NGINX.USER }}"
+    mode: '0755'
+  loop: >
+    {{ NGINX.DIRECTORIES.DATA.values() | list }}
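An editor's note on the first loop: with the helper paths resolved as sketched earlier, the directory list flattens to the three http/ subdirectories plus the streams directory:

```yaml
# ( NGINX.DIRECTORIES.HTTP.values() | list ) + [ NGINX.DIRECTORIES.STREAMS ] yields:
# - /etc/nginx/conf.d/http/global/
# - /etc/nginx/conf.d/http/servers/
# - /etc/nginx/conf.d/http/maps/
# - /etc/nginx/conf.d/streams/
```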
@@ -14,8 +14,8 @@ http
     default_type text/html;

 {# caching #}
-    proxy_cache_path {{ nginx.directories.cache.general }} levels=1:2 keys_zone=cache:20m max_size=20g inactive=14d use_temp_path=off;
-    proxy_cache_path {{ nginx.directories.cache.image }} levels=1:2 keys_zone=imgcache:10m inactive=60m use_temp_path=off;
+    proxy_cache_path {{ NGINX.DIRECTORIES.CACHE.GENERAL }} levels=1:2 keys_zone=cache:20m max_size=20g inactive=14d use_temp_path=off;
+    proxy_cache_path {{ NGINX.DIRECTORIES.CACHE.IMAGE }} levels=1:2 keys_zone=imgcache:10m inactive=60m use_temp_path=off;

     # --------------------------------------------------------------------------------
     # Tweak the hash table used to store your server_name entries:
@@ -54,12 +54,12 @@ http
     gzip_types application/atom+xml application/javascript application/xml+rss application/x-javascript application/json application/ld+json application/manifest+json application/rss+xml application/vnd.geo+json application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-app-manifest+json application/xhtml+xml application/xml font/opentype image/bmp image/svg+xml image/x-icon text/cache-manifest text/css text/plain text/vcard text/vnd.rim.location.xloc text/vtt text/x-component text/x-cross-domain-policy text/javascript text/xml;

     types_hash_max_size 4096;
-{% for dir in nginx.directories.http.values() %}
+{% for dir in NGINX.DIRECTORIES.HTTP.values() %}
     include {{ dir }}*.conf;
 {% endfor %}
 }

 # For port proxies
 stream{
-    include {{nginx.directories.streams}}*.conf;
+    include {{NGINX.DIRECTORIES.STREAMS}}*.conf;
 }
@@ -1,9 +1,9 @@
 # run_once_srv_web_7_6_composer: deactivated

-- name: "include role srv-web-7-7-inj-compose for {{domain}}"
+- name: "include role srv-web-7-7-inj-compose for {{ domain }}"
   include_role:
     name: srv-web-7-7-inj-compose

-- name: "include role srv-web-6-6-tls-core for {{domain}}"
+- name: "include role srv-web-6-6-tls-core for {{ domain }}"
   include_role:
     name: srv-web-6-6-tls-core
@@ -1,41 +0,0 @@
-- name: install certbot
-  community.general.pacman:
-    name: certbot
-    state: present
-  when: run_once_srv_web_7_7_certbot is not defined
-
-- name: install certbot DNS plugin
-  community.general.pacman:
-    name: "certbot-dns-{{ CERTBOT_ACME_CHALLENGE_METHOD }}"
-    state: present
-  when:
-    - run_once_srv_web_7_7_certbot is not defined
-    - CERTBOT_ACME_CHALLENGE_METHOD != 'webroot'
-
-- name: Ensure /etc/certbot directory exists
-  file:
-    path: "{{ CERTBOT_CREDENTIALS_DIR }}"
-    state: directory
-    owner: root
-    group: root
-    mode: '0755'
-  when:
-    - run_once_srv_web_7_7_certbot is not defined
-    - CERTBOT_ACME_CHALLENGE_METHOD != 'webroot'
-
-- name: Install plugin credentials file
-  copy:
-    dest: "{{ CERTBOT_CREDENTIALS_FILE }}"
-    content: |
-      dns_{{ CERTBOT_ACME_CHALLENGE_METHOD }}_api_token = {{ CERTBOT_DNS_API_TOKEN }}
-    owner: root
-    group: root
-    mode: '0600'
-  when:
-    - run_once_srv_web_7_7_certbot is not defined
-    - CERTBOT_ACME_CHALLENGE_METHOD != 'webroot'
-
-- name: run the certbot role once
-  set_fact:
-    run_once_srv_web_7_7_certbot: true
-  when: run_once_srv_web_7_7_certbot is not defined
@@ -1,3 +1,12 @@
+- name: Set inj_enabled dictionary
+  set_fact:
+    inj_enabled:
+      javascript: "{{ applications | get_app_conf(application_id, 'features.javascript', False) }}"
+      logout: "{{ (applications | get_app_conf(application_id, 'features.logout', False) or domain == PRIMARY_DOMAIN) }}"
+      css: "{{ applications | get_app_conf(application_id, 'features.css', False) }}"
+      matomo: "{{ applications | get_app_conf(application_id, 'features.matomo', False) }}"
+      desktop: "{{ applications | get_app_conf(application_id, 'features.desktop', False) }}"
+
 - block:
   - name: Include dependency 'srv-web-7-4-core'
     include_role:
@@ -6,28 +15,19 @@
   - include_tasks: utils/run_once.yml
   when: run_once_srv_web_7_7_inj_compose is not defined

-- name: Set inj_enabled dictionary
-  set_fact:
-    inj_enabled:
-      javascript: "{{ applications | get_app_conf(application_id, 'features.javascript', False) }}"
-      logout: "{{ (applications | get_app_conf(application_id, 'features.logout', False) or domain == PRIMARY_DOMAIN) }}"
-      css: "{{ applications | get_app_conf(application_id, 'features.css', False) }}"
-      matomo: "{{ applications | get_app_conf(application_id, 'features.matomo', False) }}"
-      port_ui: "{{ applications | get_app_conf(application_id, 'features.desktop', False) }}"
-
 - name: "Activate Portfolio iFrame notifier for {{ domain }}"
   include_role:
     name: srv-web-7-7-inj-desktop
-    public: true  # Expose variables so that they can be used in template
-  when: inj_enabled.port_ui
+    public: true  # Vars used in templates
+  when: inj_enabled.desktop

-- name: "Load CDN for {{domain}}"
+- name: "Load CDN for {{ domain }}"
   include_role:
     name: web-svc-cdn
     public: false
   # ATM just the Logout is using the CDN.
   when:
     - inj_enabled.logout
     - inj_enabled.desktop
     - application_id != 'web-svc-cdn'
     - run_once_web_svc_cdn is not defined
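An editor's sketch of what the moved set_fact produces for a hypothetical app whose config only enables CSS, served on the primary domain:

```yaml
# Hypothetical result of "Set inj_enabled dictionary"
inj_enabled:
  javascript: false   # features.javascript not set -> default False
  logout: true        # forced because domain == PRIMARY_DOMAIN
  css: true           # features.css: true in the app config
  matomo: false
  desktop: false
```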
@@ -41,23 +41,14 @@
   vars:
     handler_role_name: "{{ item }}"

-- name: Set inj_enabled dictionary
-  set_fact:
-    inj_enabled:
-      javascript: "{{ applications | get_app_conf(application_id, 'features.javascript', False) }}"
-      logout: "{{ (applications | get_app_conf(application_id, 'features.logout', False) or domain == PRIMARY_DOMAIN) }}"
-      css: "{{ applications | get_app_conf(application_id, 'features.css', False) }}"
-      matomo: "{{ applications | get_app_conf(application_id, 'features.matomo', False) }}"
-      port_ui: "{{ applications | get_app_conf(application_id, 'features.desktop', False) }}"
-
-- name: "Activate Corporate CSS for {{domain}}"
+- name: "Activate Corporate CSS for {{ domain }}"
   include_role:
     name: srv-web-7-7-inj-css
   when:
     - inj_enabled.css
     - run_once_srv_web_7_7_inj_css is not defined

-- name: "Activate Matomo Tracking for {{domain}}"
+- name: "Activate Matomo Tracking for {{ domain }}"
   include_role:
     name: srv-web-7-7-inj-matomo
   when: inj_enabled.matomo
@@ -70,4 +61,5 @@
 - name: "Activate logout proxy for {{ domain }}"
   include_role:
     name: srv-web-7-7-inj-logout
+    public: true  # Vars used in templates
   when: inj_enabled.logout
@@ -44,7 +44,7 @@ body_filter_by_lua_block {
     local head_snippets = {}

 {% for head_feature in ['css', 'matomo', 'desktop', 'javascript', 'logout' ] %}
-{% if applications | get_app_conf(application_id, 'features.' ~ head_feature, false) | bool %}
+{% if applications | get_app_conf(application_id, 'features.' ~ head_feature, false) %}
     head_snippets[#head_snippets + 1] = [=[
 {%- include "roles/srv-web-7-7-inj-" ~ head_feature ~ "/templates/head_sub.j2" -%}
     ]=]
@@ -59,7 +59,7 @@ body_filter_by_lua_block {
     local body_snippets = {}

 {% for body_feature in ['matomo', 'logout', 'desktop'] %}
-{% if applications | get_app_conf(application_id, 'features.' ~ body_feature, false) | bool %}
+{% if applications | get_app_conf(application_id, 'features.' ~ body_feature, false) %}
     body_snippets[#body_snippets + 1] = [=[
 {%- include "roles/srv-web-7-7-inj-" ~ body_feature ~ "/templates/body_sub.j2" -%}
     ]=]
@@ -15,8 +15,8 @@
   template:
     src: global.css.j2
     dest: "{{ global_css_destination }}"
-    owner: "{{ nginx.user }}"
-    group: "{{ nginx.user }}"
+    owner: "{{ NGINX.USER }}"
+    group: "{{ NGINX.USER }}"
     mode: '0644'

 - name: Get stat for global.css
@@ -1,3 +1,3 @@
 location = /global.css {
-    root {{nginx.directories.data.cdn}};
+    root {{NGINX.DIRECTORIES.DATA.CDN}};
 }
@@ -1,4 +1,4 @@
-global_css_destination: "{{nginx.directories.data.cdn}}global.css"
+global_css_destination: "{{NGINX.DIRECTORIES.DATA.CDN}}global.css"
 global_css_base_color: "{{ design.css.colors.base }}"
 global_css_count: 7
 global_css_shades: 100
@@ -1,14 +1,14 @@
 - name: Deploy iframe-handler.js
   template:
     src: iframe-handler.js.j2
-    dest: "{{ inj_port_ui_js_destination }}"
-    owner: "{{ nginx.user }}"
-    group: "{{ nginx.user }}"
+    dest: "{{ INJ_DESKTOP_JS_FILE_DESTINATION }}"
+    owner: "{{ NGINX.USER }}"
+    group: "{{ NGINX.USER }}"
     mode: '0644'

 - name: Get stat for iframe-handler.js
   stat:
-    path: "{{ inj_port_ui_js_destination }}"
+    path: "{{ INJ_DESKTOP_JS_FILE_DESTINATION }}"
   register: inj_port_ui_js_stat

 - name: Set inj_port_ui_js_version
@@ -1 +1 @@
-<script src="{{ domains | get_url('web-svc-cdn', WEB_PROTOCOL) }}/{{ inj_port_ui_file_name }}?{{ inj_port_ui_js_version }}"></script>
+<script src="{{ domains | get_url('web-svc-cdn', WEB_PROTOCOL) }}/{{ INJ_DESKTOP_JS_FILE_NAME }}?{{ inj_port_ui_js_version }}"></script>
@@ -1,2 +1,2 @@
-inj_port_ui_file_name: "iframe-handler.js"
-inj_port_ui_js_destination: "{{ [ nginx.directories.data.cdn, inj_port_ui_file_name ] | path_join }}"
+INJ_DESKTOP_JS_FILE_NAME: "iframe-handler.js"
+INJ_DESKTOP_JS_FILE_DESTINATION: "{{ [ NGINX.DIRECTORIES.DATA.CDN, INJ_DESKTOP_JS_FILE_NAME ] | path_join }}"
@@ -11,11 +11,11 @@
   set_fact:
     javascript_code: "{{ lookup('template', modifier_javascript_template_file) }}"

-- name: "Collapse Javascript code into one-liner for '{{application_id}}'"
+- name: "Collapse Javascript code into one-liner for '{{ application_id }}'"
   set_fact:
     javascript_code_one_liner: "{{ javascript_code | to_one_liner }}"

-- name: "Append Javascript CSP hash for '{{application_id}}'"
+- name: "Append Javascript CSP hash for '{{ application_id }}'"
   set_fact:
     applications: "{{ applications | append_csp_hash(application_id, javascript_code_one_liner) }}"
   changed_when: false
@@ -5,4 +5,4 @@
     - run_once_srv_web_7_4_core is not defined

 - name: "deploy the logout.js"
-  include_tasks: "deploy.yml"
+  include_tasks: "02_deploy.yml"
roles/srv-web-7-7-inj-logout/tasks/02_deploy.yml (new file, 16 lines)
@@ -0,0 +1,16 @@
+- name: Deploy logout.js
+  template:
+    src: logout.js.j2
+    dest: "{{ INJ_LOGOUT_JS_DESTINATION }}"
+    owner: "{{ NGINX.USER }}"
+    group: "{{ NGINX.USER }}"
+    mode: '0644'
+
+- name: Get stat for logout.js
+  stat:
+    path: "{{ INJ_LOGOUT_JS_DESTINATION }}"
+  register: INJ_LOGOUT_JS_STAT
+
+- name: Set INJ_LOGOUT_JS_VERSION
+  set_fact:
+    INJ_LOGOUT_JS_VERSION: "{{ INJ_LOGOUT_JS_STAT.stat.mtime }}"
@@ -1,16 +0,0 @@
-- name: Deploy logout.js
-  template:
-    src: logout.js.j2
-    dest: "{{ inj_logout_js_destination }}"
-    owner: "{{ nginx.user }}"
-    group: "{{ nginx.user }}"
-    mode: '0644'
-
-- name: Get stat for logout.js
-  stat:
-    path: "{{ inj_logout_js_destination }}"
-  register: inj_logout_js_stat
-
-- name: Set inj_logout_js_version
-  set_fact:
-    inj_logout_js_version: "{{ inj_logout_js_stat.stat.mtime }}"
@@ -8,11 +8,11 @@
   set_fact:
     logout_code: "{{ lookup('template', 'logout_one_liner.js.j2') }}"

-- name: "Collapse logout code into one-liner for '{{application_id}}'"
+- name: "Collapse logout code into one-liner for '{{ application_id }}'"
   set_fact:
     logout_code_one_liner: "{{ logout_code | to_one_liner }}"

-- name: "Append logout CSP hash for '{{application_id}}'"
+- name: "Append logout CSP hash for '{{ application_id }}'"
   set_fact:
     applications: "{{ applications | append_csp_hash(application_id, logout_code_one_liner) }}"
   changed_when: false
@@ -1 +1 @@
-<script src="{{ domains | get_url('web-svc-cdn', WEB_PROTOCOL) }}/logout.js?{{ inj_logout_js_version }}"></script>
+<script src="{{ domains | get_url('web-svc-cdn', WEB_PROTOCOL) }}/{{ INJ_LOGOUT_JS_FILE_NAME }}?{{ INJ_LOGOUT_JS_VERSION }}"></script>
@@ -1,2 +1,2 @@
-inj_logout_file_name: "logout.js"
-inj_logout_js_destination: "{{ [ nginx.directories.data.cdn, inj_logout_file_name ] | path_join }}"
+INJ_LOGOUT_JS_FILE_NAME: "logout.js"
+INJ_LOGOUT_JS_DESTINATION: "{{ [ NGINX.DIRECTORIES.DATA.CDN, INJ_LOGOUT_JS_FILE_NAME ] | path_join }}"
@@ -6,7 +6,7 @@
 - name: create nginx letsencrypt config file
   template:
     src: "letsencrypt.conf.j2"
-    dest: "{{nginx.directories.http.global}}letsencrypt.conf"
+    dest: "{{NGINX.DIRECTORIES.HTTP.GLOBAL}}letsencrypt.conf"
   notify: restart openresty

 - name: "Set CAA records for all base domains"
@@ -6,7 +6,6 @@
     - dev-git
     - sys-alm-compose
     - sys-lock
-    - user-root
     - sys-rst-daemon
 - include_tasks: utils/run_once.yml
   when: run_once_svc_bkp_rmt_2_loc is not defined
@@ -39,7 +38,7 @@
   set_fact:
     service_name: "{{ role_name }}"

-- name: "include role for sys-timer for {{service_name}}"
+- name: "include role for sys-timer for {{ service_name }}"
   include_role:
     name: sys-timer
   vars:
@@ -4,5 +4,5 @@ OnFailure=sys-alm-compose.infinito@%n.service sys-cln-faild-bkps{{ SYS_SERVICE_S

 [Service]
 Type=oneshot
-ExecStartPre=/bin/sh -c '/usr/bin/python {{ path_system_lock_script }} {{ system_maintenance_services | join(' ') }} --ignore {{system_maintenance_backup_services| join(' ') }} --timeout "{{system_maintenance_lock_timeout_backup_services}}"'
+ExecStartPre=/bin/sh -c '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ system_maintenance_services | join(' ') }} --ignore {{system_maintenance_backup_services| join(' ') }} --timeout "{{system_maintenance_lock_timeout_backup_services}}"'
 ExecStart=/bin/sh -c '/usr/bin/bash {{docker_backup_remote_to_local_folder}}sys-bkp-rmt-2-loc-multi-provider.sh'
@@ -1,3 +1,3 @@
 application_id: svc-bkp-rmt-2-loc
-docker_backup_remote_to_local_folder: '{{ path_administrator_scripts }}{{ application_id }}/'
+docker_backup_remote_to_local_folder: '{{ PATH_ADMINISTRATOR_SCRIPTS }}{{ application_id }}/'
 rmt2loc_backup_providers: "{{ applications | get_app_conf(application_id, 'backup_providers') }}"
@@ -4,16 +4,16 @@
   include_role:
     name: docker-compose

-- name: Create {{domains | get_domain(application_id)}}.conf if LDAP is exposed to internet
+- name: Create {{ domains | get_domain(application_id) }}.conf if LDAP is exposed to internet
   template:
     src: "nginx.stream.conf.j2"
-    dest: "{{nginx.directories.streams}}{{domains | get_domain(application_id)}}.conf"
+    dest: "{{NGINX.DIRECTORIES.STREAMS}}{{ domains | get_domain(application_id) }}.conf"
   notify: restart openresty
   when: applications | get_app_conf(application_id, 'network.public', True) | bool

-- name: Remove {{domains | get_domain(application_id)}}.conf if LDAP is not exposed to internet
+- name: Remove {{ domains | get_domain(application_id) }}.conf if LDAP is not exposed to internet
   file:
-    path: "{{ nginx.directories.streams }}{{ domains | get_domain(application_id) }}.conf"
+    path: "{{ NGINX.DIRECTORIES.STREAMS }}{{ domains | get_domain(application_id) }}.conf"
     state: absent
   when: not applications | get_app_conf(application_id, 'network.public', True) | bool
@@ -1,5 +1,5 @@
 credentials:
-  postgres_password:
+  POSTGRES_PASSWORD:
     description: "Password for the PostgreSQL superuser 'postgres'"
     algorithm: "bcrypt"
     validation: "^\\$2[aby]\\$.{56}$"
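For reference (editor's note): the validation pattern matches the standard bcrypt string layout — `$2a$`, `$2b$`, or `$2y$` followed by exactly 56 characters (two-digit cost, `$`, 22-character salt, 31-character digest). Illustrative shape, not a real credential:

```yaml
# A matching value looks like:
#   $2b$12$R9h/cIPz0gi.URNNX3kh2OPST9/PgBkqquzi.Ss7KIUgO2t0jWMUW
```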
@@ -1,26 +1,21 @@

-- name: Include dependency 'docker-core'
+- name: Include dependency 'sys-svc-docker'
   include_role:
-    name: docker-core
-  when: run_once_docker_core is not defined
+    name: sys-svc-docker
+  when: run_once_sys_svc_docker is not defined

 - name: Create Docker network for PostgreSQL
   community.docker.docker_network:
-    name: "{{ postgres_network_name }}"
+    name: "{{ POSTGRES_NETWORK_NAME }}"
     state: present
     ipam_config:
-      - subnet: "{{ postgres_subnet }}"
+      - subnet: "{{ POSTGRES_SUBNET }}"

 - name: "include docker-compose role"
   include_role:
     name: docker-compose
-
-- name: Wait for Postgres inside the container
-  shell: "docker exec {{ postgres_name }} pg_isready -U postgres"
-  register: pg_ready
-  until: pg_ready.rc == 0
-  retries: 30
-  delay: 5
+  vars:
+    docker_compose_flush_handlers: true

 - name: install python-psycopg2
   community.general.pacman:
@@ -1,11 +1,10 @@
 ---
-- name: "Wait until Postgres is listening on port {{ postgres_port }}"
-  wait_for:
-    host: "{{ postgres_local_host }}"
-    port: "{{ postgres_port }}"
-    delay: 5
-    timeout: 300
-    state: started
+- name: Wait for Postgres inside the container
+  shell: "docker exec {{ POSTGRES_CONTAINER }} pg_isready -U postgres"
+  register: pg_ready
+  until: pg_ready.rc == 0
+  retries: 30
+  delay: 5

 # 1) Create the database
 - name: "Create database: {{ database_name }}"
@@ -13,13 +12,13 @@
     name: "{{ database_name }}"
     state: present
     login_user: postgres
-    login_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}"
-    login_host: "{{ postgres_local_host }}"
-    login_port: "{{ postgres_port }}"
+    login_password: "{{ applications | get_app_conf(application_id, 'credentials.POSTGRES_PASSWORD', True) }}"
+    login_host: "{{ POSTGRES_LOCAL_HOST }}"
+    login_port: "{{ POSTGRES_PORT }}"
   register: postgresql_result
   until: postgresql_result is succeeded
-  retries: "{{ postgres_retry_retries }}"
-  delay: "{{ postgres_retry_delay }}"
+  retries: "{{ POSTGRES_RETRIES }}"
+  delay: "{{ POSTGRES_DELAY }}"

 # 2) Create the database user (with password)
 - name: "Create database user: {{ database_username }}"
@@ -29,29 +28,29 @@
     db: "{{ database_name }}"
     state: present
     login_user: postgres
-    login_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}"
-    login_host: "{{ postgres_local_host }}"
-    login_port: "{{ postgres_port }}"
+    login_password: "{{ applications | get_app_conf(application_id, 'credentials.POSTGRES_PASSWORD', True) }}"
+    login_host: "{{ POSTGRES_LOCAL_HOST }}"
+    login_port: "{{ POSTGRES_PORT }}"
   register: postgresql_result
   until: postgresql_result is succeeded
-  retries: "{{ postgres_retry_retries }}"
-  delay: "{{ postgres_retry_delay }}"
+  retries: "{{ POSTGRES_RETRIES }}"
+  delay: "{{ POSTGRES_DELAY }}"

 # 3) Enable LOGIN for the role (removes NOLOGIN)
 - name: "Enable login for role {{ database_username }}"
   community.postgresql.postgresql_query:
     db: postgres
     login_user: postgres
-    login_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}"
-    login_host: "{{ postgres_local_host }}"
-    login_port: "{{ postgres_port }}"
+    login_password: "{{ applications | get_app_conf(application_id, 'credentials.POSTGRES_PASSWORD', True) }}"
+    login_host: "{{ POSTGRES_LOCAL_HOST }}"
+    login_port: "{{ POSTGRES_PORT }}"
     query: |
       ALTER ROLE "{{ database_username }}"
      WITH LOGIN;
   register: postgresql_result
   until: postgresql_result is succeeded
-  retries: "{{ postgres_retry_retries }}"
-  delay: "{{ postgres_retry_delay }}"
+  retries: "{{ POSTGRES_RETRIES }}"
+  delay: "{{ POSTGRES_DELAY }}"

 # 4) Grant ALL privileges on all tables in the public schema
 - name: "Grant ALL privileges on tables in public schema to {{ database_username }}"
@@ -64,13 +63,13 @@
     schema: public
     state: present
     login_user: postgres
-    login_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}"
-    login_host: "{{ postgres_local_host }}"
-    login_port: "{{ postgres_port }}"
+    login_password: "{{ applications | get_app_conf(application_id, 'credentials.POSTGRES_PASSWORD', True) }}"
+    login_host: "{{ POSTGRES_LOCAL_HOST }}"
+    login_port: "{{ POSTGRES_PORT }}"
   register: postgresql_result
   until: postgresql_result is succeeded
-  retries: "{{ postgres_retry_retries }}"
-  delay: "{{ postgres_retry_delay }}"
+  retries: "{{ POSTGRES_RETRIES }}"
+  delay: "{{ POSTGRES_DELAY }}"

 # 5) Grant ALL privileges at the database level
 - name: "Grant all privileges on database {{ database_name }} to {{ database_username }}"
@@ -81,22 +80,22 @@
     privs: ALL
     state: present
     login_user: postgres
-    login_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}"
-    login_host: "{{ postgres_local_host }}"
-    login_port: "{{ postgres_port }}"
+    login_password: "{{ applications | get_app_conf(application_id, 'credentials.POSTGRES_PASSWORD', True) }}"
+    login_host: "{{ POSTGRES_LOCAL_HOST }}"
+    login_port: "{{ POSTGRES_PORT }}"
   register: postgresql_result
   until: postgresql_result is succeeded
-  retries: "{{ postgres_retry_retries }}"
-  delay: "{{ postgres_retry_delay }}"
+  retries: "{{ POSTGRES_RETRIES }}"
+  delay: "{{ POSTGRES_DELAY }}"

 # 6) Grant USAGE/CREATE on schema and set default privileges
 - name: "Set comprehensive schema privileges for {{ database_username }}"
   community.postgresql.postgresql_query:
     db: "{{ database_name }}"
     login_user: postgres
-    login_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}"
-    login_host: "{{ postgres_local_host }}"
-    login_port: "{{ postgres_port }}"
+    login_password: "{{ applications | get_app_conf(application_id, 'credentials.POSTGRES_PASSWORD', True) }}"
+    login_host: "{{ POSTGRES_LOCAL_HOST }}"
+    login_port: "{{ POSTGRES_PORT }}"
     query: |
       GRANT USAGE ON SCHEMA public TO "{{ database_username }}";
       GRANT CREATE ON SCHEMA public TO "{{ database_username }}";
@@ -104,8 +103,8 @@
       GRANT ALL PRIVILEGES ON TABLES TO "{{ database_username }}";
   register: postgresql_result
   until: postgresql_result is succeeded
-  retries: "{{ postgres_retry_retries }}"
-  delay: "{{ postgres_retry_delay }}"
+  retries: "{{ POSTGRES_RETRIES }}"
+  delay: "{{ POSTGRES_DELAY }}"

 # 7) Ensure PostGIS and related extensions are installed (if enabled)
 - name: "Ensure PostGIS-related extensions are installed"
@@ -114,9 +113,9 @@
     ext: "{{ item }}"
     state: present
     login_user: postgres
-    login_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}"
-    login_host: "{{ postgres_local_host }}"
-    login_port: "{{ postgres_port }}"
+    login_password: "{{ applications | get_app_conf(application_id, 'credentials.POSTGRES_PASSWORD', True) }}"
+    login_host: "{{ POSTGRES_LOCAL_HOST }}"
+    login_port: "{{ POSTGRES_PORT }}"
   loop:
     - postgis
     - pg_trgm
@@ -124,8 +123,8 @@
   when: postgres_gis_enabled | bool
   register: postgresql_result
   until: postgresql_result is succeeded
-  retries: "{{ postgres_retry_retries }}"
-  delay: "{{ postgres_retry_delay }}"
+  retries: "{{ POSTGRES_RETRIES }}"
+  delay: "{{ POSTGRES_DELAY }}"

 # 8) Ensure pgvector (vector) extension is installed (for Discourse‑AI, pgvector, …)
 - name: "Ensure pgvector (vector) extension is installed"
@@ -134,10 +133,10 @@
     ext: vector
     state: present
     login_user: postgres
-    login_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}"
-    login_host: "{{ postgres_local_host }}"
-    login_port: "{{ postgres_port }}"
+    login_password: "{{ applications | get_app_conf(application_id, 'credentials.POSTGRES_PASSWORD', True) }}"
+    login_host: "{{ POSTGRES_LOCAL_HOST }}"
+    login_port: "{{ POSTGRES_PORT }}"
   register: postgresql_result
   until: postgresql_result is succeeded
-  retries: "{{ postgres_retry_retries }}"
-  delay: "{{ postgres_retry_delay }}"
+  retries: "{{ POSTGRES_RETRIES }}"
+  delay: "{{ POSTGRES_DELAY }}"
@@ -1,6 +1,9 @@
 - block:
     - include_tasks: 01_core.yml
     - include_tasks: utils/run_once.yml
+  vars:
+    # Force the flush of the pg handler on the first run
+    flush_handlers: true
   when: run_once_svc_db_postgres is not defined

 - include_tasks: "{{ playbook_dir }}/tasks/utils/load_handlers.yml"
@@ -10,4 +13,4 @@

 - name: "Initialize database for '{{ database_name }}'"
   include_tasks: 02_init.yml
-  when: postgres_init | bool
+  when: POSTGRES_INIT | bool
@@ -1,6 +1,6 @@
-FROM {{ postgres_image }}:{{ postgres_version }}
+FROM {{ POSTGRES_IMAGE }}:{{ POSTGRES_VERSION }}

-{% if postgres_pg_vector_enabled %}
+{% if POSTGRES_VECTOR_ENABLED %}
 RUN apt-get update \
     && apt-get install -y --no-install-recommends \
        build-essential \
@@ -1,15 +1,15 @@
 {% include 'roles/docker-compose/templates/base.yml.j2' %}

   postgres:
-    container_name: "{{ postgres_name }}"
-    image: "{{ postgres_custom_image_name }}"
+    container_name: "{{ POSTGRES_CONTAINER }}"
+    image: "{{ POSTGRES_CUSTOM_IMAGE_NAME }}"
     build:
       context: .
       dockerfile: Dockerfile
 {% include 'roles/docker-container/templates/base.yml.j2' %}
-{% if postgres_expose_local %}
+{% if POSTGRES_EXPOSE_LOCAL %}
     ports:
-      - "{{ postgres_local_host }}:{{ postgres_port }}:5432"
+      - "{{ POSTGRES_LOCAL_HOST }}:{{ POSTGRES_PORT }}:5432"
 {% endif %}
     volumes:
       - "data:/var/lib/postgresql/data"
@@ -17,6 +17,6 @@

 {% include 'roles/docker-compose/templates/volumes.yml.j2' %}
   data:
-    name: "{{ postgres_volume }}"
+    name: "{{ POSTGRES_VOLUME }}"

 {% include 'roles/docker-compose/templates/networks.yml.j2' %}
@@ -1,3 +1,3 @@
-POSTGRES_PASSWORD="{{ postgres_password }}"
+POSTGRES_PASSWORD="{{ POSTGRES_PASSWORD }}"
 # Necessary for web-app-matrix
 POSTGRES_INITDB_ARGS="--encoding=UTF8 --locale=C"
@@ -8,18 +8,18 @@ docker_compose_flush_handlers: true
 database_type: "{{ application_id | get_entity_name }}"
 
 ## Postgres
-postgres_volume: "{{ applications | get_app_conf(application_id, 'docker.volumes.data', True) }}"
-postgres_name: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.name', True) }}"
-postgres_image: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.image', True) }}"
-postgres_subnet: "{{ networks.local['svc-db-postgres'].subnet }}"
-postgres_network_name: "{{ applications | get_app_conf(application_id, 'docker.network', True) }}"
-postgres_version: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.version', True) }}"
-postgres_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}"
-postgres_port: "{{ database_port | default(ports.localhost.database[ application_id ]) }}"
-postgres_init: "{{ database_username is defined and database_password is defined and database_name is defined }}"
-postgres_expose_local: True # Exposes the db to localhost, almost everytime neccessary
-postgres_custom_image_name: "postgres_custom"
-postgres_local_host: "127.0.0.1"
-postgres_pg_vector_enabled: True # Required by discourse, propably in a later step it makes sense to define this as a configuration option in config/main.yml
-postgres_retry_retries: 5
-postgres_retry_delay: 2
+POSTGRES_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.data', True) }}"
+POSTGRES_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.name', True) }}"
+POSTGRES_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.image', True) }}"
+POSTGRES_SUBNET: "{{ networks.local['svc-db-postgres'].subnet }}"
+POSTGRES_NETWORK_NAME: "{{ applications | get_app_conf(application_id, 'docker.network', True) }}"
+POSTGRES_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.version', True) }}"
+POSTGRES_PASSWORD: "{{ applications | get_app_conf(application_id, 'credentials.POSTGRES_PASSWORD', True) }}"
+POSTGRES_PORT: "{{ database_port | default(ports.localhost.database[ application_id ]) }}"
+POSTGRES_INIT: "{{ database_username is defined and database_password is defined and database_name is defined }}"
+POSTGRES_EXPOSE_LOCAL: True # Exposes the DB to localhost; almost always necessary
+POSTGRES_CUSTOM_IMAGE_NAME: "postgres_custom"
+POSTGRES_LOCAL_HOST: "127.0.0.1"
+POSTGRES_VECTOR_ENABLED: True # Required by Discourse; probably it makes sense to define this as a configuration option in config/main.yml in a later step
+POSTGRES_RETRIES: 5
+POSTGRES_DELAY: 2

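For orientation, the `get_app_conf` lookups above resolve against the per-application configuration tree. A minimal sketch of the `config/main.yml` fragment they would read — only the key paths are taken from the lookups above, all values here are hypothetical:

```yaml
# Hypothetical config fragment for svc-db-postgres (illustrative values)
docker:
  network: "svc-db-postgres"        # read via get_app_conf(..., 'docker.network')
  volumes:
    data: "svc-db-postgres_data"    # read via get_app_conf(..., 'docker.volumes.data')
  services:
    postgres:
      name: "postgres"              # read via get_app_conf(..., 'docker.services.postgres.name')
      image: "postgres"             # read via get_app_conf(..., 'docker.services.postgres.image')
      version: "16"                 # read via get_app_conf(..., 'docker.services.postgres.version')
```
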
@@ -1,5 +1,5 @@
 credentials:
-  postgres_password:
+  POSTGRES_PASSWORD:
     description: "Password for the PostgreSQL superuser 'postgres'"
     algorithm: "bcrypt"
     validation: "^\\$2[aby]\\$.{56}$"

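The validation pattern accepts the modular-crypt form of a bcrypt hash: a `$2a$`, `$2b$`, or `$2y$` prefix followed by exactly 56 characters (two-digit cost factor, `$`, 22-character salt, 31-character digest). A sketch of a value that would pass — the hash below is a dummy of the right shape, not a real credential:

```yaml
# Dummy bcrypt hash purely to illustrate the expected shape
POSTGRES_PASSWORD: "$2b$12$C6UzMDM.H6dfI/f/IKcEeO6a8B1Zr9jX0pWqQy3uYwVtK5mN7sP2e"
```
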
@@ -4,5 +4,5 @@ OnFailure=sys-alm-compose.infinito@%n.service
 
 [Service]
 Type=oneshot
-ExecStartPre=/bin/sh -c '/usr/bin/python {{ path_system_lock_script }} {{ system_maintenance_services | join(' ') }} --ignore svc-opt-ssd-hdd svc-bkp-rmt-2-loc --timeout "{{system_maintenance_lock_timeout_storage_optimizer}}"'
+ExecStartPre=/bin/sh -c '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ system_maintenance_services | join(' ') }} --ignore svc-opt-ssd-hdd svc-bkp-rmt-2-loc --timeout "{{system_maintenance_lock_timeout_storage_optimizer}}"'
 ExecStart=/bin/sh -c '/usr/bin/python {{storage_optimizer_script}} --rapid-storage-path {{path_rapid_storage}} --mass-storage-path {{path_mass_storage}}'

@@ -1,5 +1,5 @@
 application_id: svc-opt-ssd-hdd
-storage_optimizer_directory: '{{ path_administrator_scripts }}{{ application_id }}/'
+storage_optimizer_directory: '{{ PATH_ADMINISTRATOR_SCRIPTS }}{{ application_id }}/'
 storage_optimizer_script: '{{ storage_optimizer_directory }}{{ application_id }}.py'
 path_rapid_storage: "{{ applications | get_app_conf(application_id, 'volumes.rapid_storage') }}"
 path_mass_storage: "{{ applications | get_app_conf(application_id, 'volumes.mass_storage') }}"

@@ -1,4 +1,7 @@
 docker:
   services:
     openresty:
       name: "openresty"
+  volumes:
+    www: "/var/www/"
+    nginx: "/etc/nginx/"

@@ -9,7 +9,7 @@ This document provides commands and tips to validate and inspect the OpenResty (
 * **Quick syntax check (quiet):**
 
   ```bash
-  docker exec {{ openresty_container }} openresty -t -q
+  docker exec {{ OPENRESTY_CONTAINER }} openresty -t -q
   ```
 
   *Returns only errors.*

@@ -17,13 +17,13 @@ This document provides commands and tips to validate and inspect the OpenResty (
 * **Detailed syntax check (show warnings):**
 
   ```bash
-  docker exec {{ openresty_container }} openresty -t
+  docker exec {{ OPENRESTY_CONTAINER }} openresty -t
   ```
 
   or:
 
   ```bash
-  docker exec {{ openresty_container }} nginx -t
+  docker exec {{ OPENRESTY_CONTAINER }} nginx -t
   ```
 
 ---

@@ -34,9 +34,9 @@ To see the full configuration after all `include` directives are processed:
 
 ```bash
 # Within the running container
-docker exec {{ openresty_container }} openresty -T
+docker exec {{ OPENRESTY_CONTAINER }} openresty -T
 # or equivalently
-docker exec {{ openresty_container }} nginx -T
+docker exec {{ OPENRESTY_CONTAINER }} nginx -T
 ```
 
 This outputs every directive from `nginx.conf` and all files in `conf.d` in the order Nginx will use them.

@@ -103,5 +103,5 @@ That way you’ll see exactly which domains your server is serving and which nam
 * After fixing issues, reload without downtime:
 
   ```bash
-  docker exec {{ openresty_container }} openresty -s reload
+  docker exec {{ OPENRESTY_CONTAINER }} openresty -s reload
   ```

@@ -1,12 +1,15 @@
 ---
 - name: Validate OpenResty configuration
   command: >
-    docker exec {{ openresty_container }} openresty -t -q
+    docker exec {{ OPENRESTY_CONTAINER }} openresty -t -q
   register: openresty_test
   changed_when: false
-  failed_when: openresty_test.rc != 0
+  failed_when: >
+    openresty_test.rc != 0 and
+    ('is not running' not in ((openresty_test.stderr | default('')) | lower)) and
+    ('no such container' not in ((openresty_test.stderr | default('')) | lower))
   listen: restart openresty
 
 - name: Restart OpenResty container
-  command: docker restart {{ openresty_container }}
+  command: docker restart {{ OPENRESTY_CONTAINER }}
   listen: restart openresty

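The widened `failed_when` lets the validation handler pass when the container does not exist yet (first deployment) or has already been removed (reset and cleanup runs), where a plain `rc != 0` check would abort the whole play. Any task that touches the nginx configuration can then notify it as usual; a minimal sketch, where the template and file names are illustrative:

```yaml
# Illustrative task: deploying a server config triggers validate + restart above
- name: Deploy a server configuration
  ansible.builtin.template:
    src: example.conf.j2          # hypothetical template name
    dest: "{{ NGINX.DIRECTORIES.HTTP.SERVERS }}example.conf"
  notify: restart openresty
```
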
@@ -1,15 +1,15 @@
 {% include 'roles/docker-compose/templates/base.yml.j2' %}
   openresty:
-    container_name: {{ openresty_container }}
-    image: {{ openresty_image }}:{{ openresty_version }}
+    container_name: {{ OPENRESTY_CONTAINER }}
+    image: {{ OPENRESTY_IMAGE }}:{{ OPENRESTY_VERSION }}
     network_mode: "host"
     volumes:
-      - {{ nginx.files.configuration }}:/usr/local/openresty/nginx/conf/nginx.conf:ro
-      - {{ nginx.directories.configuration }}:/usr/local/openresty/nginx/conf/conf.d:ro
-      - {{ nginx.files.configuration }}:{{ nginx.files.configuration }}:ro
-      - {{ nginx.directories.configuration }}:{{ nginx.directories.configuration }}:ro
-      - {{ nginx.directories.data.www }}:{{ nginx.directories.data.www }}:ro
-      - {{ nginx.directories.data.well_known }}:{{ nginx.directories.data.well_known }}:ro
+      - {{ NGINX.FILES.CONFIGURATION }}:/usr/local/openresty/nginx/conf/nginx.conf:ro
+      - {{ NGINX.DIRECTORIES.CONFIGURATION }}:/usr/local/openresty/nginx/conf/conf.d:ro
+      - {{ NGINX.FILES.CONFIGURATION }}:{{ NGINX.FILES.CONFIGURATION }}:ro
+      - {{ NGINX.DIRECTORIES.CONFIGURATION }}:{{ NGINX.DIRECTORIES.CONFIGURATION }}:ro
+      - {{ NGINX.DIRECTORIES.DATA.WWW }}:{{ NGINX.DIRECTORIES.DATA.WWW }}:ro
+      - {{ NGINX.DIRECTORIES.DATA.WELL_KNOWN }}:{{ NGINX.DIRECTORIES.DATA.WELL_KNOWN }}:ro
       - {{ LETSENCRYPT_WEBROOT_PATH }}:{{ LETSENCRYPT_WEBROOT_PATH }}:ro
       - {{ LETSENCRYPT_BASE_PATH }}:{{ LETSENCRYPT_BASE_PATH }}:ro
     command: ["openresty", "-g", "daemon off;"]

@@ -5,6 +5,7 @@ application_id: "svc-prx-openresty"
 database_type: ""
 
 # Openresty
-openresty_image: "openresty/openresty"
-openresty_version: "alpine"
-openresty_container: "{{ applications | get_app_conf(application_id, 'docker.services.openresty.name', True) }}"
+OPENRESTY_IMAGE: "openresty/openresty"
+OPENRESTY_VERSION: "alpine"
+OPENRESTY_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.openresty.name', True) }}"

@@ -1 +1 @@
-systemd_notifier_email_folder: '{{path_administrator_scripts}}sys-alm-email/'
+systemd_notifier_email_folder: '{{ PATH_ADMINISTRATOR_SCRIPTS }}sys-alm-email/'

@@ -30,7 +30,7 @@
   set_fact:
     service_name: "{{ role_name }}"
 
-- name: "include role for sys-timer for {{service_name}}"
+- name: "include role for sys-timer for {{ service_name }}"
   include_role:
     name: sys-timer
   vars:

@@ -12,6 +12,7 @@
     database_username: "{{ database_username | default('undefined') }}"
     database_password: "{{ database_password | default('undefined') }}"
   when: MODE_DEBUG | bool
+  no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"
 
 - name: "fail if not all required database variables are defined"
   fail:

@@ -25,6 +26,7 @@
       database_name is defined and
       database_username is defined and
       database_password is defined)
+  no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"
 
 - name: "seed database values in directory {{ backup_docker_to_local_folder }}"
   command: >

@@ -40,6 +42,7 @@
     - database_name is defined
     - database_username is defined
    - database_password is defined
+  no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"
 
 - name: Set file permissions for databases.csv to be readable, writable, and executable by root only
   ansible.builtin.file:

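The `no_log` guard keeps credential values out of Ansible's output even when `MODE_DEBUG` is enabled. The pattern generalizes to any task that interpolates secrets; a minimal sketch with an illustrative task name:

```yaml
# Illustrative debug task: output is masked whenever credential masking is on
- name: Show resolved database settings
  ansible.builtin.debug:
    msg: "user={{ database_username }} db={{ database_name }}"
  when: MODE_DEBUG | bool
  no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"
```
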
@@ -4,6 +4,6 @@ OnFailure=sys-alm-compose.infinito@%n.service sys-cln-faild-bkps{{ SYS_SERVICE_SUFFIX }}
 
 [Service]
 Type=oneshot
-ExecStartPre=/bin/sh -c '/usr/bin/python {{ path_system_lock_script }} {{ system_maintenance_services | join(' ') }} --ignore {{ system_maintenance_backup_services | reject('equalto', 'sys-bkp-docker-2-loc') | join(' ') }} --timeout "{{system_maintenance_lock_timeout_backup_services}}"'
+ExecStartPre=/bin/sh -c '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ system_maintenance_services | join(' ') }} --ignore {{ system_maintenance_backup_services | reject('equalto', 'sys-bkp-docker-2-loc') | join(' ') }} --timeout "{{system_maintenance_lock_timeout_backup_services}}"'
 ExecStart=/bin/sh -c '{{ bkp_docker_2_loc_exec }} --everything'
 ExecStartPost=/bin/sh -c '/bin/systemctl start sys-rpr-docker-soft{{ SYS_SERVICE_SUFFIX }} &'

@@ -4,6 +4,6 @@ OnFailure=sys-alm-compose.infinito@%n.service sys-cln-faild-bkps{{ SYS_SERVICE_SUFFIX }}
 
 [Service]
 Type=oneshot
-ExecStartPre=/bin/sh -c '/usr/bin/python {{ path_system_lock_script }} {{ system_maintenance_services | join(' ') }} --ignore {{ system_maintenance_backup_services | reject('equalto', 'sys-bkp-docker-2-loc-everything') | join(' ') }} --timeout "{{system_maintenance_lock_timeout_backup_services}}"'
+ExecStartPre=/bin/sh -c '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ system_maintenance_services | join(' ') }} --ignore {{ system_maintenance_backup_services | reject('equalto', 'sys-bkp-docker-2-loc-everything') | join(' ') }} --timeout "{{system_maintenance_lock_timeout_backup_services}}"'
 ExecStart=/bin/sh -c '{{ bkp_docker_2_loc_exec }}'
 ExecStartPost=/bin/sh -c '/bin/systemctl start sys-rpr-docker-soft{{ SYS_SERVICE_SUFFIX }} &'

@@ -42,5 +42,5 @@ bkp_docker_2_loc_cli_args_list:
 
 bkp_docker_2_loc_exec: >-
   /usr/bin/python {{ backup_docker_to_local_folder }}backup-docker-to-local.py
-  --compose-dir {{ path_docker_compose_instances }}
+  --compose-dir {{ PATH_DOCKER_COMPOSE_INSTANCES }}
   {{ bkp_docker_2_loc_cli_args_list | select('string') | join(' ') }}

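The `>-` folded scalar collapses these lines into a single command string, and `select('string')` keeps only string items before joining, so non-string entries in the args list are silently dropped. A minimal sketch of the same composition pattern, with illustrative paths and flags:

```yaml
# Illustrative: building one CLI invocation from a list of flags
example_cli_args_list:
  - "--verbose"
  - "--limit 10"

example_exec: >-
  /usr/bin/python /opt/scripts/example.py
  {{ example_cli_args_list | select('string') | join(' ') }}
# Renders to: /usr/bin/python /opt/scripts/example.py --verbose --limit 10
```
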
@@ -4,5 +4,5 @@ OnFailure=sys-alm-compose.infinito@%n.service
 
 [Service]
 Type=oneshot
-ExecStartPre=/bin/sh -c '/usr/bin/python {{ path_system_lock_script }} {{ system_maintenance_services | join(' ') }} --ignore {{system_maintenance_cleanup_services| join(' ') }} --timeout "{{system_maintenance_lock_timeout_backup_services}}"'
+ExecStartPre=/bin/sh -c '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ system_maintenance_services | join(' ') }} --ignore {{system_maintenance_cleanup_services| join(' ') }} --timeout "{{system_maintenance_lock_timeout_backup_services}}"'
 ExecStart=/bin/sh -c '/usr/bin/python {{cleanup_backups_directory}}sys-cln-backups.py --backups-folder-path {{backups_folder_path}} --maximum-backup-size-percent {{size_percent_maximum_backup}}'

@@ -1,2 +1,2 @@
-cleanup_backups_directory: '{{path_administrator_scripts}}sys-cln-backups/'
+cleanup_backups_directory: '{{ PATH_ADMINISTRATOR_SCRIPTS }}sys-cln-backups/'

@@ -9,7 +9,7 @@
   set_fact:
     service_name: "sys-cln-backups"
 
-- name: "include role for sys-timer for {{service_name}}"
+- name: "include role for sys-timer for {{ service_name }}"
   include_role:
     name: sys-timer
   vars:

@@ -21,7 +21,7 @@
   set_fact:
     service_name: "{{ role_name }}"
 
-- name: "include role for sys-timer for {{service_name}}"
+- name: "include role for sys-timer for {{ service_name }}"
   include_role:
     name: sys-timer
   vars:

@@ -30,7 +30,7 @@
   set_fact:
     service_name: "{{ role_name }}"
 
-- name: "include role for sys-timer for {{service_name}}"
+- name: "include role for sys-timer for {{ service_name }}"
   include_role:
     name: sys-timer
   vars:

@@ -4,5 +4,5 @@ OnFailure=sys-alm-compose.infinito@%n.service
 
 [Service]
 Type=oneshot
-ExecStartPre=/bin/sh -c '/usr/bin/python {{ path_system_lock_script }} {{ system_maintenance_services | join(' ') }} --ignore {{system_maintenance_cleanup_services| join(' ') }} --timeout "{{system_maintenance_lock_timeout_backup_services}}"'
+ExecStartPre=/bin/sh -c '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ system_maintenance_services | join(' ') }} --ignore {{system_maintenance_cleanup_services| join(' ') }} --timeout "{{system_maintenance_lock_timeout_backup_services}}"'
 ExecStart=/bin/sh -c '/bin/bash {{cleanup_disc_space_folder}}sys-cln-disc-space.sh {{size_percent_cleanup_disc_space}}'

@ -24,7 +24,7 @@ if [ "$force_freeing" = true ]; then
|
||||
|
||||
{% if backups_folder_path is defined and size_percent_maximum_backup is defined %}
|
||||
echo "cleaning up backups" &&
|
||||
python {{path_administrator_scripts}}sys-cln-backups/sys-cln-backups.py --backups-folder-path {{backups_folder_path}} --maximum-backup-size-percent {{size_percent_maximum_backup}} || exit 2
|
||||
python {{ PATH_ADMINISTRATOR_SCRIPTS }}sys-cln-backups/sys-cln-backups.py --backups-folder-path {{backups_folder_path}} --maximum-backup-size-percent {{size_percent_maximum_backup}} || exit 2
|
||||
{% endif %}
|
||||
|
||||
if pacman -Qs $package > /dev/null ; then
|
||||
|
@@ -1 +1 @@
-cleanup_disc_space_folder: '{{ path_administrator_scripts }}sys-cln-disc-space/'
+cleanup_disc_space_folder: '{{ PATH_ADMINISTRATOR_SCRIPTS }}sys-cln-disc-space/'

@@ -1,7 +1,7 @@
 ---
 - name: Find matching nginx configs for {{ domain }}
   ansible.builtin.find:
-    paths: "{{ nginx.directories.http.servers }}"
+    paths: "{{ NGINX.DIRECTORIES.HTTP.SERVERS }}"
     patterns: "*.{{ domain }}.conf"
   register: find_result

@@ -15,6 +15,6 @@
 
 - name: Remove exact nginx config for {{ domain }}
   ansible.builtin.file:
-    path: "{{ nginx.directories.http.servers }}{{ domain }}.conf"
+    path: "{{ NGINX.DIRECTORIES.HTTP.SERVERS }}{{ domain }}.conf"
     state: absent
   notify: restart openresty

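Since the `find` task registers its matches, looping over `find_result.files` is the natural way to remove the wildcard matches as well. The tasks between these two hunks are not shown in this diff, so the following is only an illustrative sketch of that step:

```yaml
# Illustrative: remove every config the find task matched
- name: Remove all matching nginx configs for {{ domain }}
  ansible.builtin.file:
    path: "{{ item.path }}"
    state: absent
  loop: "{{ find_result.files }}"
  notify: restart openresty
```
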
@@ -33,7 +33,7 @@
   set_fact:
     service_name: "{{ role_name }}"
 
-- name: "include role for sys-timer for {{service_name}}"
+- name: "include role for sys-timer for {{ service_name }}"
   include_role:
     name: sys-timer
   vars:

@@ -4,5 +4,5 @@ OnFailure=sys-alm-compose.infinito@%n.service
 
 [Service]
 Type=oneshot
-ExecStartPre=/bin/sh -c '/usr/bin/python {{ path_system_lock_script }} {{ system_maintenance_services | join(' ') }} --ignore {{system_maintenance_cleanup_services| join(' ') }} --timeout "{{system_maintenance_lock_timeout_cleanup_services}}"'
+ExecStartPre=/bin/sh -c '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ system_maintenance_services | join(' ') }} --ignore {{system_maintenance_cleanup_services| join(' ') }} --timeout "{{system_maintenance_lock_timeout_cleanup_services}}"'
 ExecStart=/bin/sh -c '/usr/bin/yes | /usr/bin/bash {{backup_docker_to_local_cleanup_script}}'

@@ -11,6 +11,6 @@ Checks the health of all mounted Btrfs filesystems by inspecting device error co
 
 ## Usage
 Just include this role in your playbook; it will:
-1. Deploy a small shell script under `{{ path_administrator_scripts }}/sys-hlth-btrfs/`.
+1. Deploy a small shell script under `{{ PATH_ADMINISTRATOR_SCRIPTS }}/sys-hlth-btrfs/`.
 2. Install a `.service` and `.timer` unit.
 3. Send alerts via `sys-alm-compose` if any filesystem shows errors.

@@ -29,7 +29,7 @@
   set_fact:
     service_name: "{{ role_name }}"
 
-- name: "include role for sys-timer for {{service_name}}"
+- name: "include role for sys-timer for {{ service_name }}"
   include_role:
     name: sys-timer
   vars:

Some files were not shown because too many files have changed in this diff.