Compare commits

...

11 Commits

Author SHA1 Message Date
c9a7830953 Renamed cmp_db_docker_vars_file_db to the constant DATABASE_VARS_FILE 2025-08-12 17:45:19 +02:00
53e5c563ae Refactor MIG build process to run asynchronously with optional wait control
- Moved MIG data build commands into a dedicated 02_build_data.yml task file.
- Added async execution (async: 3600, poll: 0) for non-blocking build.
- Introduced mig_wait_for_build variable to optionally wait for completion.
- Added a debug message explaining how to disable waiting via build_data.wait_for=false for better performance.
- Updated config to use nested build_data.enabled and build_data.wait_for structure.
- Adjusted variable lookups accordingly.

https://chatgpt.com/share/689b54d2-e3b0-800f-91df-939ebc5e12ef
2025-08-12 16:51:24 +02:00
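A minimal sketch of the fire-and-forget pattern this commit introduces, assuming a hypothetical build command and a wait_for_build toggle; the real task file (02_build_data.yml) appears in the file diffs below:

- name: Start long-running build without blocking
  ansible.builtin.shell: ./run_build.sh   # hypothetical command, stands in for the real build steps
  async: 3600                             # allow the job to run up to one hour in the background
  poll: 0                                 # return immediately, keeping only the job ID
  register: build_job

- name: Optionally wait for the background job to finish
  ansible.builtin.async_status:
    jid: "{{ build_job.ansible_job_id }}"
  register: build_result
  until: build_result.finished
  retries: 360
  delay: 10
  when: wait_for_build | bool             # set to false to skip waiting and improve performance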
0b3b3a810a Solved bug which prevented backup2loc from being activated 2025-08-12 15:42:26 +02:00
6d14f16dfd Optimized sys-timer 2025-08-12 15:00:12 +02:00
632d922977 Solved discourse flush handlers bug 2025-08-12 14:59:00 +02:00
26b29debc0 Add integration test to ensure no Jinja variables are used in handler names
This test scans roles/*/handlers/main.yml and fails if a handler's 'name' contains a Jinja variable ({{ ... }}).
Reason:
- Handler names must be static to ensure reliable 'notify' resolution.
- Dynamic names can break handler matching, cause undefined-variable errors, and produce unstable logs.
Recommendation:
- Keep handler names static and, if dynamic behavior is needed, use a static 'listen:' key.

https://chatgpt.com/share/689b37dc-e1e4-800f-bd56-00b43c7701f6
2025-08-12 14:48:43 +02:00
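A minimal sketch of the recommended pattern, using illustrative role, file, and handler names that are not taken from this repository:

# roles/example/handlers/main.yml -- the handler name stays a static string
- name: Restart example service
  ansible.builtin.systemd:
    name: example.service               # illustrative unit name
    state: restarted
  listen: restart_example               # static trigger key; tasks notify this instead of a templated name

# roles/example/tasks/main.yml -- notify the static listen key
- name: Deploy example configuration
  ansible.builtin.template:
    src: example.conf.j2
    dest: /etc/example/example.conf
  notify: restart_example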
0c4cd283c4 Optimized CDN variables during bug research 2025-08-12 14:31:24 +02:00
5d36a806ff svc-db-postgres: add retry mechanism to all PostgreSQL tasks and fix condition handling
- Added register, until, retries, and delay to all PostgreSQL-related tasks
  in 02_init.yml to handle transient 'tuple concurrently updated' and similar errors.
- Changed 'when: "{{ postgres_init }}"' to 'when: postgres_init | bool' in main.yml
  for correct boolean evaluation.
- Switched 'role' to 'roles' in postgresql_privs tasks for forward compatibility.
- Added postgres_retry_retries and postgres_retry_delay defaults in vars/main.yml
  to centralize retry configuration.

  https://chatgpt.com/share/689b2360-a8a4-800f-9acb-6d88d6aa5cb7
2025-08-12 13:20:30 +02:00
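A minimal sketch of the retry pattern described above, with module arguments abbreviated; the complete changes to 02_init.yml appear in the file diffs below:

- name: "Create database: {{ database_name }}"
  community.postgresql.postgresql_db:
    name: "{{ database_name }}"
    login_host: "{{ postgres_local_host }}"
    login_port: "{{ postgres_port }}"
  register: postgresql_result
  until: postgresql_result is succeeded     # retry transient errors such as 'tuple concurrently updated'
  retries: "{{ postgres_retry_retries }}"   # defaults to 5 (vars/main.yml)
  delay: "{{ postgres_retry_delay }}"       # defaults to 2 seconds (vars/main.yml)

- name: "Initialize database for '{{ database_name }}'"
  include_tasks: 02_init.yml
  when: postgres_init | bool                # evaluated as a boolean, not as a templated string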
84de85d905 Solved matrix flush handler bug 2025-08-12 12:54:27 +02:00
457f3659fa Solved mobilizon flush docker handler bug 2025-08-12 12:03:53 +02:00
4c7ee0441e Solved baserow variable bugs 2025-08-12 11:23:56 +02:00
33 changed files with 296 additions and 90 deletions

View File

@@ -7,8 +7,8 @@
- name: "For '{{ application_id }}': Load database variables" - name: "For '{{ application_id }}': Load database variables"
include_vars: "{{ item }}" include_vars: "{{ item }}"
loop: loop:
- "{{ DOCKER_VARS_FILE }}" # Important to load docker variables first so that database can use them - "{{ DOCKER_VARS_FILE }}" # Important to load docker variables first so that database can use them
- "{{ cmp_db_docker_vars_file_db }}" # Important to load them before docker role so that backup can use them - "{{ DATABASE_VARS_FILE }}" # Important to load them before docker role so that backup can use them
- name: "For '{{ application_id }}': Load central RDBMS" - name: "For '{{ application_id }}': Load central RDBMS"
include_role: include_role:

View File

@@ -1 +1 @@
-cmp_db_docker_vars_file_db: "{{ playbook_dir }}/roles/cmp-rdbms/vars/database.yml"
+DATABASE_VARS_FILE: "{{ playbook_dir }}/roles/cmp-rdbms/vars/database.yml"

View File

@@ -16,6 +16,10 @@
login_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}" login_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}"
login_host: "{{ postgres_local_host }}" login_host: "{{ postgres_local_host }}"
login_port: "{{ postgres_port }}" login_port: "{{ postgres_port }}"
register: postgresql_result
until: postgresql_result is succeeded
retries: "{{ postgres_retry_retries }}"
delay: "{{ postgres_retry_delay }}"
# 2) Create the database user (with password) # 2) Create the database user (with password)
- name: "Create database user: {{ database_username }}" - name: "Create database user: {{ database_username }}"
@@ -28,6 +32,10 @@
login_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}" login_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}"
login_host: "{{ postgres_local_host }}" login_host: "{{ postgres_local_host }}"
login_port: "{{ postgres_port }}" login_port: "{{ postgres_port }}"
register: postgresql_result
until: postgresql_result is succeeded
retries: "{{ postgres_retry_retries }}"
delay: "{{ postgres_retry_delay }}"
# 3) Enable LOGIN for the role (removes NOLOGIN) # 3) Enable LOGIN for the role (removes NOLOGIN)
- name: "Enable login for role {{ database_username }}" - name: "Enable login for role {{ database_username }}"
@@ -40,12 +48,16 @@
     query: |
       ALTER ROLE "{{ database_username }}"
       WITH LOGIN;
+  register: postgresql_result
+  until: postgresql_result is succeeded
+  retries: "{{ postgres_retry_retries }}"
+  delay: "{{ postgres_retry_delay }}"

 # 4) Grant ALL privileges on all tables in the public schema
 - name: "Grant ALL privileges on tables in public schema to {{ database_username }}"
   community.postgresql.postgresql_privs:
     db: "{{ database_name }}"
-    role: "{{ database_username }}"
+    roles: "{{ database_username }}"
     objs: ALL_IN_SCHEMA
     privs: ALL
     type: table
@@ -55,12 +67,16 @@
login_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}" login_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}"
login_host: "{{ postgres_local_host }}" login_host: "{{ postgres_local_host }}"
login_port: "{{ postgres_port }}" login_port: "{{ postgres_port }}"
register: postgresql_result
until: postgresql_result is succeeded
retries: "{{ postgres_retry_retries }}"
delay: "{{ postgres_retry_delay }}"
# 5) Grant ALL privileges at the database level # 5) Grant ALL privileges at the database level
- name: "Grant all privileges on database {{ database_name }} to {{ database_username }}" - name: "Grant all privileges on database {{ database_name }} to {{ database_username }}"
community.postgresql.postgresql_privs: community.postgresql.postgresql_privs:
db: "{{ database_name }}" db: "{{ database_name }}"
role: "{{ database_username }}" roles: "{{ database_username }}"
type: database type: database
privs: ALL privs: ALL
state: present state: present
@@ -68,6 +84,10 @@
login_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}" login_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}"
login_host: "{{ postgres_local_host }}" login_host: "{{ postgres_local_host }}"
login_port: "{{ postgres_port }}" login_port: "{{ postgres_port }}"
register: postgresql_result
until: postgresql_result is succeeded
retries: "{{ postgres_retry_retries }}"
delay: "{{ postgres_retry_delay }}"
# 6) Grant USAGE/CREATE on schema and set default privileges # 6) Grant USAGE/CREATE on schema and set default privileges
- name: "Set comprehensive schema privileges for {{ database_username }}" - name: "Set comprehensive schema privileges for {{ database_username }}"
@@ -82,6 +102,10 @@
       GRANT CREATE ON SCHEMA public TO "{{ database_username }}";
       ALTER DEFAULT PRIVILEGES IN SCHEMA public
       GRANT ALL PRIVILEGES ON TABLES TO "{{ database_username }}";
+  register: postgresql_result
+  until: postgresql_result is succeeded
+  retries: "{{ postgres_retry_retries }}"
+  delay: "{{ postgres_retry_delay }}"

 # 7) Ensure PostGIS and related extensions are installed (if enabled)
 - name: "Ensure PostGIS-related extensions are installed"
@@ -98,6 +122,10 @@
     - pg_trgm
     - unaccent
   when: postgres_gis_enabled | bool
+  register: postgresql_result
+  until: postgresql_result is succeeded
+  retries: "{{ postgres_retry_retries }}"
+  delay: "{{ postgres_retry_delay }}"

 # 8) Ensure pgvector (vector) extension is installed (for DiscourseAI, pgvector, …)
 - name: "Ensure pgvector (vector) extension is installed"
@@ -109,3 +137,7 @@
login_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}" login_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}"
login_host: "{{ postgres_local_host }}" login_host: "{{ postgres_local_host }}"
login_port: "{{ postgres_port }}" login_port: "{{ postgres_port }}"
register: postgresql_result
until: postgresql_result is succeeded
retries: "{{ postgres_retry_retries }}"
delay: "{{ postgres_retry_delay }}"

View File

@@ -10,4 +10,4 @@
- name: "Initialize database for '{{ database_name }}'" - name: "Initialize database for '{{ database_name }}'"
include_tasks: 02_init.yml include_tasks: 02_init.yml
when: "{{ postgres_init }}" when: postgres_init | bool

View File

@@ -21,3 +21,5 @@ postgres_expose_local: True # Exposes the db to localhost, almost every
 postgres_custom_image_name: "postgres_custom"
 postgres_local_host: "127.0.0.1"
 postgres_pg_vector_enabled: True # Required by discourse, propably in a later step it makes sense to define this as a configuration option in config/main.yml
+postgres_retry_retries: 5
+postgres_retry_delay: 2

View File

@@ -2,7 +2,7 @@
application_id: "svc-prx-openresty" application_id: "svc-prx-openresty"
# Deactivate Database for openresty # Deactivate Database for openresty
database_enabled: false database_type: ""
# Openresty # Openresty
openresty_image: "openresty/openresty" openresty_image: "openresty/openresty"

View File

@@ -1 +0,0 @@
database_enabled: "{{ database_type | default('') | bool }}" # Enables the database backup

View File

@@ -1,12 +1,12 @@
 - block:
-    - name: "pkgmgr install {{ bkp_docker_to_local_pkg }}"
+    - name: "pkgmgr install {{ bkp_docker_2_loc_pkg }}"
       include_role:
         name: pkgmgr-install
       vars:
-        package_name: "{{ bkp_docker_to_local_pkg }}"
+        package_name: "{{ bkp_docker_2_loc_pkg }}"

-    - name: "Retrieve {{ bkp_docker_to_local_pkg }} path from pkgmgr"
-      command: "pkgmgr path {{ bkp_docker_to_local_pkg }}"
+    - name: "Retrieve {{ bkp_docker_2_loc_pkg }} path from pkgmgr"
+      command: "pkgmgr path {{ bkp_docker_2_loc_pkg }}"
       register: pkgmgr_output
       changed_when: false
@@ -16,4 +16,4 @@
       changed_when: false
   when: backup_docker_to_local_folder is not defined
   vars:
-    bkp_docker_to_local_pkg: backup-docker-to-local
+    bkp_docker_2_loc_pkg: backup-docker-to-local

View File

@@ -52,10 +52,10 @@
     database_name is defined and
     database_username is defined and
     database_password is defined) and
-    run_once_bkp_docker_to_local_file_permission is not defined
+    run_once_bkp_docker_2_loc_file_permission is not defined
   register: file_permission_result

 - name: run the backup_docker_to_local_file_permission tasks once
   set_fact:
-    run_once_bkp_docker_to_local_file_permission: true
-  when: run_once_bkp_docker_to_local_file_permission is not defined and file_permission_result is defined and file_permission_result.changed
+    run_once_bkp_docker_2_loc_file_permission: true
+  when: run_once_bkp_docker_2_loc_file_permission is not defined and file_permission_result is defined and file_permission_result.changed

View File

@@ -3,9 +3,8 @@
 - include_tasks: utils/run_once.yml
   when:
     - run_once_sys_bkp_docker_2_loc is not defined
-    - database_enabled | bool

 - name: "include 04_seed-database-to-backup.yml"
   include_tasks: 04_seed-database-to-backup.yml
   when:
-    - database_enabled | bool
+    - bkp_docker_2_loc_db_enabled | bool

View File

@@ -5,5 +5,5 @@ OnFailure=sys-alm-compose.infinito@%n.service sys-cln-faild-bkps.infinito.servic
 [Service]
 Type=oneshot
 ExecStartPre=/bin/sh -c '/usr/bin/python {{ path_system_lock_script }} {{ system_maintenance_services | join(' ') }} --ignore {{ system_maintenance_backup_services | reject('equalto', 'sys-bkp-docker-2-loc') | join(' ') }} --timeout "{{system_maintenance_lock_timeout_backup_services}}"'
-ExecStart=/bin/sh -c '{{ bkp_docker_to_local_exec }} --everything'
+ExecStart=/bin/sh -c '{{ bkp_docker_2_loc_exec }} --everything'
 ExecStartPost=/bin/sh -c '/bin/systemctl start sys-rpr-docker-soft.infinito.service &'

View File

@@ -5,5 +5,5 @@ OnFailure=sys-alm-compose.infinito@%n.service sys-cln-faild-bkps.infinito.servic
 [Service]
 Type=oneshot
 ExecStartPre=/bin/sh -c '/usr/bin/python {{ path_system_lock_script }} {{ system_maintenance_services | join(' ') }} --ignore {{ system_maintenance_backup_services | reject('equalto', 'sys-bkp-docker-2-loc-everything') | join(' ') }} --timeout "{{system_maintenance_lock_timeout_backup_services}}"'
-ExecStart=/bin/sh -c '{{ bkp_docker_to_local_exec }}'
+ExecStart=/bin/sh -c '{{ bkp_docker_2_loc_exec }}'
 ExecStartPost=/bin/sh -c '/bin/systemctl start sys-rpr-docker-soft.infinito.service &'

View File

@@ -1,43 +1,46 @@
 # Mapping logic for backup-docker-to-local CLI arguments
 #
-# - bkp_docker_to_local_database_routine: All service names where backup.database_routine is set (for --database-containers)
-# - bkp_docker_to_local_no_stop_required: All images where backup.no_stop_required is set (for --images-no-stop-required)
-# - bkp_docker_to_local_disabled: All images where backup.disabled is set (for --images-no-backup-required)
+# - bkp_docker_2_loc_database_routine: All service names where backup.database_routine is set (for --database-containers)
+# - bkp_docker_2_loc_no_stop_required: All images where backup.no_stop_required is set (for --images-no-stop-required)
+# - bkp_docker_2_loc_disabled: All images where backup.disabled is set (for --images-no-backup-required)
 # CLI-ready variables render these lists as argument strings.

+# Verify if DB is enabled
+bkp_docker_2_loc_db_enabled: "{{ database_type | default('') | bool }}"
+
 # Gather mapped values as lists
-bkp_docker_to_local_database_routine: >-
+bkp_docker_2_loc_database_routine: >-
   {{ applications | find_dock_val_by_bkp_entr('database_routine', 'name') | list }}
-bkp_docker_to_local_no_stop_required: >-
+bkp_docker_2_loc_no_stop_required: >-
   {{ applications | find_dock_val_by_bkp_entr('no_stop_required', 'image') | list }}
-bkp_docker_to_local_disabled: >-
+bkp_docker_2_loc_disabled: >-
   {{ applications | find_dock_val_by_bkp_entr('disabled', 'image') | list }}

 # CLI argument strings (only set if list not empty)
-bkp_docker_to_local_database_routine_cli: >-
-  {% if bkp_docker_to_local_database_routine | length > 0 -%}
-    --database-containers {{ bkp_docker_to_local_database_routine | join(' ') }}
+bkp_docker_2_loc_database_routine_cli: >-
+  {% if bkp_docker_2_loc_database_routine | length > 0 -%}
+    --database-containers {{ bkp_docker_2_loc_database_routine | join(' ') }}
   {%- endif %}
-bkp_docker_to_local_no_stop_required_cli: >-
-  {% if bkp_docker_to_local_no_stop_required | length > 0 -%}
-    --images-no-stop-required {{ bkp_docker_to_local_no_stop_required | join(' ') }}
+bkp_docker_2_loc_no_stop_required_cli: >-
+  {% if bkp_docker_2_loc_no_stop_required | length > 0 -%}
+    --images-no-stop-required {{ bkp_docker_2_loc_no_stop_required | join(' ') }}
   {%- endif %}
-bkp_docker_to_local_disabled_cli: >-
-  {% if bkp_docker_to_local_disabled | length > 0 -%}
-    --images-no-backup-required {{ bkp_docker_to_local_disabled | join(' ') }}
+bkp_docker_2_loc_disabled_cli: >-
+  {% if bkp_docker_2_loc_disabled | length > 0 -%}
+    --images-no-backup-required {{ bkp_docker_2_loc_disabled | join(' ') }}
   {%- endif %}

 # List of CLI args for convenience (e.g. for looping or joining)
-bkp_docker_to_local_cli_args_list:
-  - "{{ bkp_docker_to_local_database_routine_cli }}"
-  - "{{ bkp_docker_to_local_no_stop_required_cli }}"
-  - "{{ bkp_docker_to_local_disabled_cli }}"
+bkp_docker_2_loc_cli_args_list:
+  - "{{ bkp_docker_2_loc_database_routine_cli }}"
+  - "{{ bkp_docker_2_loc_no_stop_required_cli }}"
+  - "{{ bkp_docker_2_loc_disabled_cli }}"

-bkp_docker_to_local_exec: >-
+bkp_docker_2_loc_exec: >-
   /usr/bin/python {{ backup_docker_to_local_folder }}backup-docker-to-local.py
   --compose-dir {{ path_docker_compose_instances }}
-  {{ bkp_docker_to_local_cli_args_list | select('string') | join(' ') }}
+  {{ bkp_docker_2_loc_cli_args_list | select('string') | join(' ') }}

View File

@@ -1,23 +1,24 @@
- name: "reset (if enabled)" - name: "reset (if enabled)"
include_tasks: reset.yml include_tasks: 01_reset.yml
when: mode_reset | bool and run_once_sys_timer is not defined when: mode_reset | bool and run_once_sys_timer is not defined
- name: create {{service_name}}.infinito.timer
template:
src: dummy.timer.j2
dest: "/etc/systemd/system/{{service_name}}.infinito.timer"
register: dummy_timer
- name: "restart timer"
systemd:
daemon_reload: yes
name: "{{service_name}}.infinito.timer"
state: restarted
enabled: yes
when: dummy_timer.changed or activate_all_timers | bool
- name: run {{ role_name }} once - name: run {{ role_name }} once
set_fact: set_fact:
run_once_sys_timer: true run_once_sys_timer: true
when: run_once_sys_timer is not defined when: run_once_sys_timer is not defined
- name: create {{ sys_timer_file }}
template:
src: dummy.timer.j2
dest: "/etc/systemd/system/{{ sys_timer_file }}"
register: dummy_timer
- name: "restart timer"
systemd:
daemon_reload: yes
name: "{{ sys_timer_file }}"
state: restarted
enabled: yes
when: dummy_timer.changed or activate_all_timers | bool

View File

@@ -0,0 +1 @@
sys_timer_file: "{{service_name}}.infinito.timer"

View File

@@ -16,8 +16,8 @@ docker:
image: "baserow/baserow" image: "baserow/baserow"
version: "latest" version: "latest"
name: "baserow" name: "baserow"
volumes: volumes:
data: "baserow_data" data: "baserow_data"
server: server:
domains: domains:
canonical: canonical:

View File

@@ -1,5 +1,5 @@
 # Public URL
-BASEROW_PUBLIC_URL=https://{{ domain }}
+BASEROW_PUBLIC_URL={{ domains | get_url(application_id, WEB_PROTOCOL) }}

 # Email Server Configuration
 EMAIL_SMTP={{ system_email.smtp | upper }}
@@ -12,7 +12,7 @@ EMAIL_SMTP_USE_TLS={{ system_email.tls | upper }}
 DATABASE_USER={{ database_username }}
 DATABASE_NAME={{ database_name }}
 DATABASE_HOST={{ database_host }}
-DATABASE_PORT={{database_port}}
+DATABASE_PORT={{ database_port }}
 DATABASE_PASSWORD={{ database_password }}
 REDIS_URL=redis://redis:6379

View File

@@ -15,4 +15,4 @@ discourse_redis_host: "{{ application_id |get_entity_name }}-r
 # General Docker Configuration
 docker_repository_directory : "{{ docker_compose.directories.services}}{{applications | get_app_conf( application_id, 'repository') }}/"
-docker_compose_flush_handlers: false
+docker_compose_flush_handlers: true

View File

@@ -3,7 +3,7 @@ database_type: "postgres"
container_port: "{{ applications | get_app_conf(application_id, 'docker.services.listmonk.port', True) }}" container_port: "{{ applications | get_app_conf(application_id, 'docker.services.listmonk.port', True) }}"
# Docker specific # Docker
docker_compose_flush_handlers: false docker_compose_flush_handlers: false
# Listmonk Specific # Listmonk Specific

View File

@@ -1,5 +1,8 @@
+# General
 application_id: "web-app-mastodon"
 database_type: "postgres"

+# Mastodon Specific
 mastodon_version: "{{ applications | get_app_conf(application_id, 'docker.services.mastodon.version', True) }}"
 mastodon_image: "{{ applications | get_app_conf(application_id, 'docker.services.mastodon.image', True) }}"
 mastodon_name: "{{ applications | get_app_conf(application_id, 'docker.services.mastodon.name', True) }}"

View File

@@ -4,7 +4,7 @@ application_id: "web-app-matrix"
database_type: "postgres" database_type: "postgres"
registration_file_folder: "/data/" registration_file_folder: "/data/"
# Matrix specific # Matrix
matrix_synapse_version: "{{ applications | get_app_conf(application_id, 'docker.services.synapse.version', True) }}" matrix_synapse_version: "{{ applications | get_app_conf(application_id, 'docker.services.synapse.version', True) }}"
matrix_synapse_image: "{{ applications | get_app_conf(application_id, 'docker.services.synapse.image', True) }}" matrix_synapse_image: "{{ applications | get_app_conf(application_id, 'docker.services.synapse.image', True) }}"
matrix_synapse_name: "{{ applications | get_app_conf(application_id, 'docker.services.synapse.name', True) }}" matrix_synapse_name: "{{ applications | get_app_conf(application_id, 'docker.services.synapse.name', True) }}"
@@ -18,3 +18,6 @@ matrix_project: "{{ application_id | get_entity_name }}"
 well_known_directory: "{{nginx.directories.data.well_known}}/matrix/"
 location_upload: "~ ^/_matrix/media/v3/"
 client_max_body_size: "{{ applications | get_app_conf(application_id, 'server.client_max_body_size') }}"
+
+# Docker
+docker_compose_flush_handlers: false

View File

@@ -36,4 +36,11 @@ server:
- "mig.{{ primary_domain }}" - "mig.{{ primary_domain }}"
aliases: aliases:
- "meta-infinite-graph.{{ primary_domain }}" - "meta-infinite-graph.{{ primary_domain }}"
build_data: true # Enables the building of the meta data which the graph requiers
build_data:
# This shouldn't be relevant anymore, because the data is anyhow build async in background
# Enables the building of the meta data which the graph requiers
enabled: true
# Recommended to set this to false in inventory to optimize speed
wait_for: true

View File

@@ -20,12 +20,6 @@
   include_role:
     name: cmp-docker-proxy

-- name: Create tree
-  command: "infinito build tree --no-signal --alarm-timeout 0 -s {{ mig_roles_meta_volume }}"
-  when:
-    - mig_build_data
-
-- name: Create roles list
-  command: "infinito build roles_list --no-signal --alarm-timeout 0 -o {{ mig_roles_meta_list }}"
-  when:
-    - mig_build_data
+- name: Build data (single async task)
+  include_tasks: 02_build_data.yml
+  when: mig_build_data | bool

View File

@@ -0,0 +1,38 @@
- name: Build data (single async task)
  shell: |
    set -euo pipefail
    infinito build tree --no-signal --alarm-timeout 0 -s {{ mig_roles_meta_volume }}
    infinito build roles_list --no-signal --alarm-timeout 0 -o {{ mig_roles_meta_list }}
  async: 3600
  poll: 0
  register: mig_build_job

- name: Fail if MIG build job did not start
  fail:
    msg: >
      MIG build job failed to start. No job ID returned.
  when: mig_build_job.ansible_job_id is not defined

- name: Debug MIG build job ID
  debug:
    msg: "MIG build job started with ID: {{ mig_build_job.ansible_job_id }}"
  when: enable_debug | bool

- debug:
    msg: "Waiting for MIG build job to finish. Set 'build_data.wait_for=false' in the application config to skip waiting and improve performance."
  when: mig_wait_for_build | bool

- name: Wait for MIG build job to finish (enforce failure)
  async_status:
    jid: "{{ mig_build_job.ansible_job_id }}"
  register: mig_build_result
  until: mig_build_result.finished
  retries: 360
  delay: 10
  when:
    - mig_wait_for_build | bool
  failed_when:
    - mig_build_result.result is defined
    - mig_build_result.result.rc is defined
    - mig_build_result.result.rc != 0

View File

@@ -1,12 +1,13 @@
 # General
-application_id: web-app-mig # ID of the application, should be the name of the role folder
+application_id: web-app-mig

 # Docker
 docker_compose_flush_handlers: true
 docker_pull_git_repository: true
 docker_repository_address: "https://github.com/kevinveenbirkenbach/meta-infinite-graph"

 # Helper variables
 mig_image: "mig:latest"
 mig_container: "mig"
-mig_build_data: "{{ applications | get_app_conf(application_id, 'build_data') }}"
+mig_build_data: "{{ applications | get_app_conf(application_id, 'build_data.enabled') }}"
+mig_wait_for_build: "{{ applications | get_app_conf(application_id, 'build_data.wait_for') }}"

View File

@@ -1,9 +1,15 @@
+# General
 application_id: web-app-mobilizon
+container_port: 4000
+
+# Database
 database_type: "postgres"
 postgres_gis_enabled: true
-container_port: 4000
+
+# Docker
+docker_compose_flush_handlers: false
+
+# Mobilizon
 mobilizon_host_conf_exs_file: "{{docker_compose.directories.config}}config.exs"
 mobilizon_version: "{{ applications | get_app_conf(application_id, 'docker.services.mobilizon.version', True) }}"
 mobilizon_image: "{{ applications | get_app_conf(application_id, 'docker.services.mobilizon.image', True) }}"

View File

@@ -2,7 +2,7 @@
application_id: "web-app-peertube" application_id: "web-app-peertube"
database_type: "postgres" database_type: "postgres"
# Docker Specific # Docker
docker_compose_flush_handlers: true docker_compose_flush_handlers: true
# Role variables # Role variables

View File

@@ -1,5 +1,5 @@
- name: "load variables from {{ cmp_db_docker_vars_file_db }}" - name: "load variables from {{ DATABASE_VARS_FILE }}"
include_vars: "{{ cmp_db_docker_vars_file_db }}" include_vars: "{{ DATABASE_VARS_FILE }}"
- name: "loading database configuration variables" - name: "loading database configuration variables"
include_vars: include_vars:

View File

@@ -9,11 +9,10 @@
   include_role:
     name: srv-web-7-6-composer
   vars:
-    domain: "{{ domains | get_domain(application_id) }}"
     http_port: "{{ ports.localhost.http[application_id] }}"

-- name: "generate {{domains | get_domain(application_id)}}.conf"
+- name: "generate '{{ CDN_NGINX_FILE }}'"
   template:
     src: "nginx.conf.j2"
-    dest: "{{ nginx.directories.http.servers }}{{ domains | get_domain(application_id) }}.conf"
+    dest: "{{ nginx.directories.http.servers }}{{ CDN_NGINX_FILE }}"
   notify: restart openresty

View File

@@ -1,6 +1,6 @@
 server
 {
-    server_name {{domains | get_domain(application_id)}};
+    server_name {{ domains | get_domain(application_id) }};

     {% include 'roles/srv-web-7-7-letsencrypt/templates/ssl_header.j2' %}

View File

@@ -1,2 +1,6 @@
+# General
 application_id: "web-svc-cdn"
 domain: "{{ domains | get_domain(application_id) }}"
+
+# CDN
+CDN_NGINX_FILE: "{{ domain }}.conf"

View File

@@ -0,0 +1,114 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Integration test: ensure no Jinja variables are used in handler *names*.

Why this policy?
- Handler identifiers should be stable strings. If you ever notify by handler
  name (instead of a dedicated `listen:` key), a templated name can fail to
  resolve or silently not match what `notify` referenced.
- Even when `listen:` is used (recommended), variable-laden names make logs and
  tooling brittle and can trigger undefined-variable errors at parse/run time.
- Keeping handler names static improves reliability, debuggability, and
  compatibility with analysis tools.

Allowed:
- You may still template other fields or use `listen:` for dynamic trigger
  routing; just keep the handlers `name` static text.

This test scans: roles/*/handlers/main.yml
"""

import os
import glob
import re
import unittest

try:
    import yaml  # PyYAML
except ImportError as exc:
    raise SystemExit(
        "PyYAML is required to run this test. Install with: pip install pyyaml"
    ) from exc

JINJA_VAR_PATTERN = re.compile(r"{{.*?}}")  # minimal check for any templating


def _iter_tasks(node):
    """
    Yield all task-like dicts from a loaded YAML node, descending into common
    task containers (`block`, `rescue`, `always`), just in case.
    """
    if isinstance(node, dict):
        # If this dict looks like a task (has 'name' or a module key), yield it.
        if any(k in node for k in ("name", "action")):
            yield node
        # Dive into known task containers (handlers can include blocks too).
        for key in ("block", "rescue", "always"):
            if key in node and isinstance(node[key], list):
                for item in node[key]:
                    yield from _iter_tasks(item)
    elif isinstance(node, list):
        for item in node:
            yield from _iter_tasks(item)


class StaticHandlerNamesTest(unittest.TestCase):
    """
    Ensures handler names are static strings (no Jinja variables like {{ ... }}).
    """

    def test_no_templated_names_in_handlers(self):
        project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
        pattern = os.path.join(project_root, "roles", "*", "handlers", "main.yml")

        violations = []

        for handler_path in sorted(glob.glob(pattern)):
            # Load possibly multi-document YAML safely
            try:
                with open(handler_path, "r", encoding="utf-8") as f:
                    docs = list(yaml.safe_load_all(f))
            except FileNotFoundError:
                continue
            except yaml.YAMLError as e:
                violations.append(
                    f"{handler_path} -> YAML parse error: {e}"
                )
                continue

            for doc in docs:
                for task in _iter_tasks(doc):
                    name = task.get("name")
                    if not isinstance(name, str):
                        # ignore unnamed or non-string names
                        continue
                    if JINJA_VAR_PATTERN.search(name):
                        # Compose a clear, actionable message
                        listen = task.get("listen")
                        listen_hint = (
                            ""
                            if listen
                            else " Consider using a static handler name and, if you need flexible triggers, add a static `listen:` key that your tasks `notify`."
                        )
                        violations.append(
                            f"{handler_path} -> Handler name contains variables: {name!r}\n"
                            "Reason: Handler names must be static. Using Jinja variables in the name "
                            "can break handler resolution (when notified by name), produces unstable logs, "
                            "and may cause undefined-variable errors. Keep the handler `name` constant."
                            f"{listen_hint}"
                        )

        if violations:
            self.fail(
                "Templated handler names are not allowed.\n\n"
                + "\n\n".join(violations)
            )


if __name__ == "__main__":
    unittest.main()