Compare commits

...

9 Commits

34 changed files with 428 additions and 162 deletions

View File

@@ -6,7 +6,6 @@ import os
import datetime
import sys
def run_ansible_playbook(
inventory,
modes,
@@ -81,6 +80,24 @@ def run_ansible_playbook(
duration = end_time - start_time
print(f"⏱️ Total execution time: {duration}\n")
def validate_application_ids(inventory, app_ids):
"""
Abort the script if any application IDs are invalid, with detailed reasons.
"""
from utils.valid_deploy_id import ValidDeployId
validator = ValidDeployId()
invalid = validator.validate(inventory, app_ids)
if invalid:
print("\n❌ Detected invalid application_id(s):\n")
for app_id, status in invalid.items():
reasons = []
if not status['in_roles']:
reasons.append("not defined in roles (cymais)")
if not status['in_inventory']:
reasons.append("not found in inventory file")
print(f" - {app_id}: " + ", ".join(reasons))
sys.exit(1)
def main():
parser = argparse.ArgumentParser(
@@ -150,6 +167,7 @@ def main():
)
args = parser.parse_args()
validate_application_ids(args.inventory, args.id)
modes = {
"mode_reset": args.reset,

View File

@@ -1,6 +1,6 @@
database_id: "svc-db-{{ database_type }}"
database_instance: "{{ applications | get_app_conf(database_application_id, 'hostname', false) if applications | get_app_conf(database_application_id, 'features.central_database', False) else database_application_id }}"
database_host: "{{ applications | get_app_conf(database_application_id, 'hostname', false) if applications | get_app_conf(database_application_id, 'features.central_database', False) else 'database' }}"
database_instance: "{{ applications | get_app_conf(database_id, 'hostname', True) if applications | get_app_conf(database_application_id, 'features.central_database', False) else database_application_id }}"
database_host: "{{ applications | get_app_conf(database_id, 'hostname', True) if applications | get_app_conf(database_application_id, 'features.central_database', False) else 'database' }}"
database_name: "{{ applications | get_app_conf(database_application_id, 'database.name', false, database_application_id) }}" # The overwritten configuration is needed by BigBlueButton
database_username: "{{ applications | get_app_conf(database_application_id, 'database.username', false, database_application_id) }}" # The overwritten configuration is needed by BigBlueButton
database_password: "{{ applications | get_app_conf(database_application_id, 'credentials.database_password', true) }}"
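
These lookups all go through the repo's get_app_conf filter, whose implementation is not part of this compare. Below is a minimal Python sketch of the apparent semantics, assuming positional arguments (app_id, dotted_path, strict, default); note that call sites in this compare disagree on the order of the last two arguments (compare the database_name lookup above with the mariadb_image lookup later), so treat this as illustrative only:

    # Sketch only: the real filter lives in the repo's filter_plugins and may differ.
    def get_app_conf(applications, app_id, path, strict=True, default=None):
        node = applications.get(app_id, {})
        for part in path.split("."):  # walk dotted paths like 'features.central_database'
            if isinstance(node, dict) and part in node:
                node = node[part]
            elif strict:
                raise KeyError(f"{app_id}: missing config key '{path}'")
            else:
                return default  # non-strict lookups fall back to the supplied default
        return node

Read this way, switching the hostname lookups from false to True makes a missing svc-db-* hostname fail loudly instead of silently yielding a fallback.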

View File

@@ -11,12 +11,11 @@ networks:
%}
svc-db-openldap:
external: true
{% endif %}
{% else %}
default:
{% if
application_id in networks.local and
networks.local[application_id].subnet is defined and
application_id != 'svc-db-openldap'
networks.local[application_id].subnet is defined
%}
name: {{ application_id }}
driver: bridge
@@ -25,4 +24,5 @@ networks:
config:
- subnet: {{networks.local[application_id].subnet}}
{% endif %}
{% endif %}
{{ "\n" }}

View File

@@ -1,4 +1,8 @@
version: "latest"
hostname: "svc-db-mariadb"
network: "<< defaults_applications[svc-db-mariadb].hostname >>"
volume: "<< defaults_applications[svc-db-mariadb].hostname >>_data"
docker:
services:
mariadb:
version: "latest"
image: "mariadb"
volume: "<< defaults_applications[svc-db-mariadb].hostname >>_data"

View File

@@ -0,0 +1,30 @@
- name: "Create database: {{ database_name }}"
mysql_db:
name: "{{ database_name }}"
state: present
login_user: root
login_password: "{{ mariadb_root_pwd }}"
login_host: 127.0.0.1
login_port: "{{ database_port }}"
encoding: "{{ database_encoding }}"
collation: "{{ database_collation }}"
- name: "Create database user: {{ database_username }}"
mysql_user:
name: "{{database_username}}"
password: "{{database_password}}"
host: "%"
priv: '{{database_name}}.*:ALL'
state: present
login_user: root
login_password: "{{mariadb_root_pwd}}"
login_host: 127.0.0.1
login_port: "{{database_port}}"
# Deactivated due to https://chatgpt.com/share/683ba14b-0e74-800f-9ad1-a8979bc77093
# @todo Remove if this works fine in the future.
#- name: Grant database privileges
# ansible.builtin.shell:
# cmd: "docker exec {{mariadb_hostname }} mariadb -u root -p{{ mariadb_root_pwd }} -e \"GRANT ALL PRIVILEGES ON `{{database_name}}`.* TO '{{database_username}}'@'%';\""
# args:
# executable: /bin/bash
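
Worth noting: the mysql_user task's priv: '{{database_name}}.*:ALL' already issues the corresponding GRANT, which is presumably why the shell-based grant above stays deactivated.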

View File

@@ -1,29 +1,29 @@
- name: Create Docker network for MariaDB
docker_network:
name: "{{ applications['svc-db-mariadb'].network }}"
name: "{{ mariadb_network_name }}"
state: present
ipam_config:
- subnet: "{{ networks.local['svc-db-mariadb'].subnet }}"
- subnet: "{{ mariadb_subnet }}"
when: run_once_docker_mariadb is not defined
- name: install MariaDB
docker_container:
name: "{{ mariadb_hostname }}"
image: "mariadb:{{applications['svc-db-mariadb'].version}}"
image: "{{ mariadb_image }}:{{ mariadb_version}}"
detach: yes
env:
MARIADB_ROOT_PASSWORD: "{{mariadb_root_pwd}}"
MARIADB_ROOT_PASSWORD: "{{ mariadb_root_pwd }}"
MARIADB_AUTO_UPGRADE: "1"
networks:
- name: "{{ applications['svc-db-mariadb'].network }}"
- name: "{{ mariadb_network_name }}"
volumes:
- "{{ applications['svc-db-mariadb'].volume }}:/var/lib/mysql"
- "{{ mariadb_volume }}:/var/lib/mysql"
published_ports:
- "127.0.0.1:{{database_port}}:3306" # can be that this will be removed if all applications use sockets
- "127.0.0.1:{{ mariadb_port }}:3306" # can be that this will be removed if all applications use sockets
command: "--transaction-isolation=READ-COMMITTED --binlog-format=ROW" #for nextcloud
restart_policy: "{{docker_restart_policy}}"
restart_policy: "{{ docker_restart_policy }}"
healthcheck:
test: "/usr/bin/mariadb --user=root --password={{mariadb_root_pwd}} --execute \"SHOW DATABASES;\""
test: "/usr/bin/mariadb --user=root --password={{ mariadb_root_pwd }} --execute \"SHOW DATABASES;\""
interval: 3s
timeout: 1s
retries: 5
@@ -51,36 +51,9 @@
- setup_mariadb_container_result.changed
- run_once_docker_mariadb is not defined
- name: "Create database: {{ database_name }}"
mysql_db:
name: "{{ database_name }}"
state: present
login_user: root
login_password: "{{ mariadb_root_pwd }}"
login_host: 127.0.0.1
login_port: "{{ database_port }}"
encoding: "{{ database_encoding }}"
collation: "{{ database_collation }}"
- name: "Create database user: {{ database_username }}"
mysql_user:
name: "{{database_username}}"
password: "{{database_password}}"
host: "%"
priv: '{{database_name}}.*:ALL'
state: present
login_user: root
login_password: "{{mariadb_root_pwd}}"
login_host: 127.0.0.1
login_port: "{{database_port}}"
# Deactivated due to https://chatgpt.com/share/683ba14b-0e74-800f-9ad1-a8979bc77093
# @todo Remove if this works fine in the future.
#- name: Grant database privileges
# ansible.builtin.shell:
# cmd: "docker exec {{mariadb_hostname }} mariadb -u root -p{{ mariadb_root_pwd }} -e \"GRANT ALL PRIVILEGES ON `{{database_name}}`.* TO '{{database_username}}'@'%';\""
# args:
# executable: /bin/bash
- name: "Initialize database for '{{ database_name }}'"
include_tasks: init.yml
when: "{{ mariadb_init }}"
- name: run the docker_mariadb tasks once
set_fact:

View File

@@ -1,3 +1,11 @@
application_id: svc-db-mariadb
mariadb_hostname: "{{ applications | get_app_conf(application_id, 'hostname', True) }}"
mariadb_root_pwd: "{{ applications['svc-db-mariadb'].credentials.root_password }}"
application_id: svc-db-mariadb
mariadb_hostname: "{{ applications | get_app_conf(application_id,'hostname', True) }}"
mariadb_root_pwd: "{{ applications | get_app_conf(application_id,'credentials.root_password', True) }}"
mariadb_init: "{{ database_username is defined and database_password is defined and database_name is defined }}"
mariadb_subnet: "{{ networks.local['svc-db-mariadb'].subnet }}"
mariadb_network_name: "{{ applications | get_app_conf(application_id,'network', True) }}"
mariadb_volume: "{{ applications | get_app_conf(application_id,'docker.services.mariadb.volume', True) }}"
mariadb_image: "{{ applications | get_app_conf(application_id,'docker.services.mariadb.image','mariadb', True) }}"
mariadb_version: "{{ applications | get_app_conf(application_id,'docker.services.mariadb.version', True) }}"
mariadb_port: "{{ database_port | default(ports.localhost.database[ application_id ]) }}"

View File

@@ -4,15 +4,21 @@ network:
local: True # Activates local network. Necessary for LDIF import routines
docker: True # Activates docker network to allow other docker containers to connect
public: False # Set to true in inventory file if you want to expose the LDAP port to the internet
images:
openldap: "bitnami/openldap:latest"
docker:
services:
openldap:
image: "bitnami/openldap"
version: "latest"
container: "<< defaults_applications[svc-db-openldap].hostname >>"
webinterface: "lam" # The webinterface which should be used. Possible: lam and phpldapadmin
features:
ldap: true
import:
# Here it's possible to define what can be imported.
provisioning:
# Here it's possible to define what should be imported and updated.
# It doesn't make sense to run the import every time because it's very time-consuming
credentials: true
schemas: true
entries: true
users: true
configuration: true # E.g. MemberOf and Hashed Password Configuration
credentials: true # Administrator Password
schemas: true # E.g. Nextcloud, Openssl
users: true # E.g. User, group and role entries
groups: true # Roles and Groups import
update: true # User Class updates

View File

@@ -1,6 +1,6 @@
- name: Load memberof module from file in OpenLDAP container
shell: >
docker exec -i {{ applications | get_app_conf(application_id, 'hostname', True) }} ldapmodify -Y EXTERNAL -H ldapi:/// -f {{ldif_docker_path}}configuration/01_member_of_configuration.ldif
docker exec -i {{ applications | get_app_conf(application_id, 'hostname', True) }} ldapmodify -Y EXTERNAL -H ldapi:/// -f {{openldap_ldif_docker_path}}configuration/01_member_of_configuration.ldif
listen:
- "Import configuration LDIF files"
- "Import all LDIF files"
@@ -10,7 +10,7 @@
- name: Refint Module Activation for OpenLDAP
shell: >
docker exec -i {{ applications | get_app_conf(application_id, 'hostname', True) }} ldapadd -Y EXTERNAL -H ldapi:/// -f {{ldif_docker_path}}configuration/02_member_of_configuration.ldif
docker exec -i {{ applications | get_app_conf(application_id, 'hostname', True) }} ldapadd -Y EXTERNAL -H ldapi:/// -f {{openldap_ldif_docker_path}}configuration/02_member_of_configuration.ldif
listen:
- "Import configuration LDIF files"
- "Import all LDIF files"
@@ -22,7 +22,7 @@
- name: "Import schemas"
shell: >
docker exec -i {{ applications | get_app_conf(application_id, 'hostname', True) }} ldapadd -Y EXTERNAL -H ldapi:/// -f "{{ldif_docker_path}}schema/{{ item | basename | regex_replace('\.j2$', '') }}"
docker exec -i {{ applications | get_app_conf(application_id, 'hostname', True) }} ldapadd -Y EXTERNAL -H ldapi:/// -f "{{openldap_ldif_docker_path}}schema/{{ item | basename | regex_replace('\.j2$', '') }}"
register: ldapadd_result
changed_when: "'adding new entry' in ldapadd_result.stdout"
failed_when: ldapadd_result.rc not in [0, 80]
@@ -33,7 +33,7 @@
- name: Refint Overlay Configuration for OpenLDAP
shell: >
docker exec -i {{ applications | get_app_conf(application_id, 'hostname', True) }} ldapmodify -Y EXTERNAL -H ldapi:/// -f {{ldif_docker_path}}configuration/03_member_of_configuration.ldif
docker exec -i {{ applications | get_app_conf(application_id, 'hostname', True) }} ldapmodify -Y EXTERNAL -H ldapi:/// -f {{openldap_ldif_docker_path}}configuration/03_member_of_configuration.ldif
listen:
- "Import configuration LDIF files"
- "Import all LDIF files"
@@ -45,7 +45,7 @@
- name: "Import users, groups, etc. to LDAP"
shell: >
docker exec -i {{ applications | get_app_conf(application_id, 'hostname', True) }} ldapadd -x -D "{{ldap.dn.administrator.data}}" -w "{{ldap.bind_credential}}" -c -f "{{ldif_docker_path}}data/{{ item | basename | regex_replace('\.j2$', '') }}"
docker exec -i {{ applications | get_app_conf(application_id, 'hostname', True) }} ldapadd -x -D "{{ldap.dn.administrator.data}}" -w "{{ldap.bind_credential}}" -c -f "{{openldap_ldif_docker_path}}data/{{ item | basename | regex_replace('\.j2$', '') }}"
register: ldapadd_result
changed_when: "'adding new entry' in ldapadd_result.stdout"
failed_when: ldapadd_result.rc not in [0, 20, 68, 65]

View File

@@ -4,7 +4,7 @@
- name: Ensure LDAP users exist
community.general.ldap_entry:
dn: "{{ ldap.user.attributes.id }}={{ item.key }},{{ ldap.dn.ou.users }}"
server_uri: "{{ ldap_server_uri }}"
server_uri: "{{ openldap_server_uri }}"
bind_dn: "{{ ldap.dn.administrator.data }}"
bind_pw: "{{ ldap.bind_credential }}"
objectClass: "{{ ldap.user.objects.structural }}"
@@ -30,7 +30,7 @@
- name: Ensure required objectClass values and mail address are present
community.general.ldap_attrs:
dn: "{{ ldap.user.attributes.id }}={{ item.key }},{{ ldap.dn.ou.users }}"
server_uri: "{{ ldap_server_uri }}"
server_uri: "{{ openldap_server_uri }}"
bind_dn: "{{ ldap.dn.administrator.data }}"
bind_pw: "{{ ldap.bind_credential }}"
attributes:
@@ -46,7 +46,7 @@
- name: "Ensure container for application roles exists"
community.general.ldap_entry:
dn: "{{ ldap.dn.ou.roles }}"
server_uri: "{{ ldap_server_uri }}"
server_uri: "{{ openldap_server_uri }}"
bind_dn: "{{ ldap.dn.administrator.data }}"
bind_pw: "{{ ldap.bind_credential }}"
objectClass: organizationalUnit

View File

@@ -1,6 +1,6 @@
- name: Gather all users with their current objectClass list
community.general.ldap_search:
server_uri: "{{ ldap_server_uri }}"
server_uri: "{{ openldap_server_uri }}"
bind_dn: "{{ ldap.dn.administrator.data }}"
bind_pw: "{{ ldap.bind_credential }}"
dn: "{{ ldap.dn.ou.users }}"
@@ -14,7 +14,7 @@
- name: Add only missing auxiliary classes
community.general.ldap_attrs:
server_uri: "{{ ldap_server_uri }}"
server_uri: "{{ openldap_server_uri }}"
bind_dn: "{{ ldap.dn.administrator.data }}"
bind_pw: "{{ ldap.bind_credential }}"
dn: "{{ item.dn }}"

View File

@@ -1,7 +1,7 @@
- name: "Create LDIF files at {{ ldif_host_path }}{{ folder }}"
- name: "Create LDIF files at {{ openldap_ldif_host_path }}{{ folder }}"
template:
src: "{{ item }}"
dest: "{{ ldif_host_path }}{{ folder }}/{{ item | basename | regex_replace('\\.j2$', '') }}"
dest: "{{ openldap_ldif_host_path }}{{ folder }}/{{ item | basename | regex_replace('\\.j2$', '') }}"
mode: '770'
loop: >-
{{

View File

@@ -22,28 +22,30 @@
name: "{{ applications | get_app_conf(application_id, 'network.name', True) }}"
state: present
ipam_config:
- subnet: "{{ networks.local['svc-db-openldap'].subnet }}"
- subnet: "{{ networks.local[application_id].subnet }}"
- meta: flush_handlers
- name: "Wait for LDAP to be available"
wait_for:
host: "127.0.0.1"
port: "{{ ports.localhost.ldap['svc-db-openldap'] }}"
port: "{{ ports.localhost.ldap[application_id] }}"
delay: 5
timeout: 120
state: started
- name: "Reset LDAP Credentials"
include_tasks: 01_credentials.yml
when: applications | get_app_conf(application_id, 'network.local', True)
when:
- applications | get_app_conf(application_id, 'network.local', True)
- applications | get_app_conf(application_id, 'provisioning.credentials', True)
- name: "create directory {{ldif_host_path}}{{item}}"
- name: "create directory {{openldap_ldif_host_path}}{{item}}"
file:
path: "{{ldif_host_path}}{{item}}"
path: "{{openldap_ldif_host_path}}{{item}}"
state: directory
mode: 0755
loop: "{{ldif_types}}"
loop: "{{openldap_ldif_types}}"
- name: "Import LDIF Configuration"
include_tasks: ldifs_creation.yml
@@ -51,6 +53,7 @@
- configuration
loop_control:
loop_var: folder
when: applications | get_app_conf(application_id, 'provisioning.configuration', True)
- name: flush LDIF handlers
meta: flush_handlers
@@ -63,16 +66,20 @@
- name: "Include Schemas (if enabled)"
include_tasks: 02_schemas.yml
when: applications | get_app_conf(application_id, 'provisioning.schemas', True)
- name: "Import LDAP Entries (if enabled)"
include_tasks: 03_entries.yml
include_tasks: 03_users.yml
when: applications | get_app_conf(application_id, 'provisioning.users', True)
- name: "Import LDIF Data (if enabled)"
include_tasks: ldifs_creation.yml
loop:
- data
- groups
loop_control:
loop_var: folder
when: applications | get_app_conf(application_id, 'provisioning.groups', True)
- name: "Add Objects to all users"
include_tasks: 04_user_updates.yml
include_tasks: 04_update.yml
when: applications | get_app_conf(application_id, 'provisioning.update', True)

View File

@@ -13,9 +13,9 @@
- "( 1.3.6.1.4.1.99999.2 NAME 'nextcloudUser' DESC 'Auxiliary class for Nextcloud attributes' AUXILIARY MAY ( {{ ldap.user.attributes.nextcloud_quota }} ) )"
command: >
ldapsm
-s {{ ldap_server_uri }}
-D '{{ ldap_bind_dn }}'
-W '{{ ldap_bind_pw }}'
-s {{ openldap_server_uri }}
-D '{{ openldap_bind_dn }}'
-W '{{ openldap_bind_pw }}'
-n {{ schema_name }}
{% for at in attribute_defs %}
-a "{{ at }}"

View File

@@ -21,9 +21,9 @@
command: >
ldapsm
-s {{ ldap_server_uri }}
-D '{{ ldap_bind_dn }}'
-W '{{ ldap_bind_pw }}'
-s {{ openldap_server_uri }}
-D '{{ openldap_bind_dn }}'
-W '{{ openldap_bind_pw }}'
-n {{ schema_name }}
{% for at in attribute_defs %}
-a "{{ at }}"

View File

@@ -1,20 +1,20 @@
{% include 'roles/docker-compose/templates/base.yml.j2' %}
application:
image: "{{ applications | get_app_conf(application_id, 'images.openldap', True) }}"
container_name: {{ applications | get_app_conf(application_id, 'hostname', True) }}
image: "{{ openldap_image }}:{{ openldap_version }}"
container_name: "{{ openldap_container }}"
{% include 'roles/docker-container/templates/base.yml.j2' %}
{% if applications | get_app_conf(application_id, 'network.public', True) | bool or applications | get_app_conf(application_id, 'network.local', True) | bool %}
{% if openldap_network_expose_local %}
ports:
- 127.0.0.1:{{ports.localhost.ldap['svc-db-openldap']}}:{{ldap_docker_port}}
- 127.0.0.1:{{ports.localhost.ldap['svc-db-openldap']}}:{{openldap_docker_port_open}}
{% endif %}
volumes:
- 'data:/bitnami/openldap'
- '{{ldif_host_path}}:{{ldif_docker_path}}:ro'
- '{{openldap_ldif_host_path}}:{{openldap_ldif_docker_path}}:ro'
healthcheck:
test: >
bash -c '
ldapsearch -x -H ldap://localhost:{{ ldap_docker_port }} \
ldapsearch -x -H ldap://localhost:{{ openldap_docker_port_open }} \
-D "{{ ldap.dn.administrator.data }}" -w "{{ ldap.bind_credential }}" -b "{{ ldap.dn.root }}" > /dev/null \
&& ldapsearch -Y EXTERNAL -H ldapi:/// \
-b cn=config "(&(objectClass=olcOverlayConfig)(olcOverlay=memberof))" \
@@ -24,5 +24,6 @@
{% include 'roles/docker-compose/templates/volumes.yml.j2' %}
data:
name: "{{ openldap_volume }}"
{% include 'roles/docker-compose/templates/networks.yml.j2' %}

View File

@@ -18,9 +18,9 @@ LDAP_CONFIG_ADMIN_USERNAME= {{applications | get_app_conf(application_id, 'users
LDAP_CONFIG_ADMIN_PASSWORD= {{applications | get_app_conf(application_id, 'credentials.administrator_password', True)}}
# Network
LDAP_PORT_NUMBER= {{ldap_docker_port}} # Route to default port
LDAP_PORT_NUMBER= {{openldap_docker_port_open}} # Route to default port
LDAP_ENABLE_TLS= no # Using nginx proxy for tls
LDAP_LDAPS_PORT_NUMBER= {{ldaps_docker_port}} # Port used for TLS secure traffic. Privileged ports are supported (e.g. 636). Default: 1636 (non-privileged port).
LDAP_LDAPS_PORT_NUMBER= {{openldap_docker_port_secure}} # Port used for TLS secure traffic. Privileged ports are supported (e.g. 636). Default: 1636 (non-privileged port).
# Security
LDAP_ALLOW_ANON_BINDING= no # Allow anonymous bindings to the LDAP server. Default: yes.

View File

@@ -1,17 +1,24 @@
application_id: "svc-db-openldap"
application_id: "svc-db-openldap"
# LDAP Variables
ldaps_docker_port: 636
ldap_docker_port: 389
ldap_server_uri: "ldap://127.0.0.1:{{ ports.localhost.ldap['svc-db-openldap'] }}"
ldap_hostname: "{{ applications | get_app_conf(application_id, 'hostname', True) }}"
ldap_bind_dn: "{{ ldap.dn.administrator.configuration }}"
ldap_bind_pw: "{{ applications | get_app_conf(application_id, 'credentials.administrator_password', True) }}"
openldap_docker_port_secure: 636
openldap_docker_port_open: 389
openldap_server_uri: "ldap://127.0.0.1:{{ ports.localhost.ldap[application_id] }}"
openldap_hostname: "{{ applications | get_app_conf(application_id, 'hostname', True) }}"
openldap_bind_dn: "{{ ldap.dn.administrator.configuration }}"
openldap_bind_pw: "{{ applications | get_app_conf(application_id, 'credentials.administrator_password', True) }}"
# LDIF Variables
ldif_host_path: "{{docker_compose.directories.volumes}}ldif/"
ldif_docker_path: "/tmp/ldif/"
ldif_types:
openldap_ldif_host_path: "{{docker_compose.directories.volumes}}ldif/"
openldap_ldif_docker_path: "/tmp/ldif/"
openldap_ldif_types:
- configuration
- data
- schema # Don't know if this is still needed; it's now set up via tasks
- schema # Don't know if this is still needed; it's now set up via tasks
openldap_container: "{{ applications | get_app_conf(application_id, 'docker.services.openldap.container', True) }}"
openldap_image: "{{ applications | get_app_conf(application_id, 'docker.services.openldap.image', True) }}"
openldap_version: "{{ applications | get_app_conf(application_id, 'docker.services.openldap.version', True) }}"
openldap_volume: "{{ application_id }}_data"
openldap_network_expose_local: "{{ applications | get_app_conf(application_id, 'network.public', True) | bool or applications | get_app_conf(application_id, 'network.local', True) | bool }}"

View File

@@ -1,6 +1,5 @@
hostname: "svc-db-postgres"
network: "<< defaults_applications[svc-db-postgres].hostname >>"
volume: "<< defaults_applications[svc-db-postgres].hostname >>"
docker:
services:
postgres:
@@ -8,4 +7,5 @@ docker:
image: postgis/postgis
# Please set a version in your inventory file!
# Rolling release isn't recommended
version: "latest"
version: "latest"
volume: "<< defaults_applications[svc-db-postgres].hostname >>_data"

View File

@@ -1,25 +1,25 @@
- name: Create Docker network for PostgreSQL
docker_network:
name: "{{ applications | get_app_conf(application_id, 'network', True) }}"
name: "{{ postgres_network_name }}"
state: present
ipam_config:
- subnet: "{{ networks.local['svc-db-postgres'].subnet }}"
- subnet: "{{ postgres_subnet }}"
when: run_once_docker_postgres is not defined
- name: Install PostgreSQL
docker_container:
name: "{{ applications | get_app_conf(application_id, 'hostname', True) }}"
image: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.image', True) }}:{{ applications | get_app_conf(application_id, 'docker.services.postgres.version', True) }}"
name: "{{ postgres_hostname }}"
image: "{{ postgres_image }}:{{ postgres_version }}"
detach: yes
env:
POSTGRES_PASSWORD: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}"
POSTGRES_PASSWORD: "{{ postgres_password }}"
POSTGRES_INITDB_ARGS: "--encoding=UTF8 --locale=C" # Necessary for web-app-matrix
networks:
- name: "{{ applications | get_app_conf(application_id, 'network', True) }}"
- name: "{{ postgres_network_name }}"
published_ports:
- "127.0.0.1:{{ database_port }}:5432"
- "127.0.0.1:{{ postgres_port }}:5432"
volumes:
- "{{ applications['svc-db-postgres'].volume }}:/var/lib/postgresql/data"
- "{{ postgres_volume }}:/var/lib/postgresql/data"
restart_policy: "{{ docker_restart_policy }}"
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres"]
@@ -31,7 +31,7 @@
when: run_once_docker_postgres is not defined
- name: Wait for Postgres inside the container
shell: "docker exec {{ applications | get_app_conf(application_id, 'hostname', True) }} pg_isready -U postgres"
shell: "docker exec {{ postgres_hostname }} pg_isready -U postgres"
register: pg_ready
until: pg_ready.rc == 0
retries: 30
@@ -47,12 +47,9 @@
state: present
when: run_once_docker_postgres is not defined
- name: Load database initialization tasks dynamically
include_tasks: init_database.yml
when:
- database_username is defined
- database_password is defined
- database_name is defined
- name: "Initialize database for '{{ database_name }}'"
include_tasks: init.yml
when: "{{ postgres_init }}"
- name: Run the docker_postgres tasks once
set_fact:

View File

@@ -1 +1,10 @@
application_id: svc-db-postgres
application_id: svc-db-postgres
postgres_volume: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.volume', True) }}"
postgres_hostname: "{{ applications | get_app_conf(application_id, 'hostname', True) }}"
postgres_image: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.image', True) }}"
postgres_subnet: "{{ networks.local['svc-db-postgres'].subnet }}"
postgres_network_name: "{{ applications | get_app_conf(application_id, 'network', True) }}"
postgres_version: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.version', True) }}"
postgres_password: "{{ applications | get_app_conf(application_id, 'credentials.postgres_password', True) }}"
postgres_port: "{{ database_port | default(ports.localhost.database[ application_id ]) }}"
postgres_init: "{{ database_username is defined and database_password is defined and database_name is defined }}"

View File

@@ -1,10 +1,8 @@
images:
keycloak: "quay.io/keycloak/keycloak:latest"
import_realm: True # If True, the realm will be imported; if False, it is skipped.
features:
matomo: true
css: false
port-ui-desktop: true
port-ui-desktop: true
ldap: true
central_database: true
recaptcha: true
@@ -26,6 +24,9 @@ scopes:
rbac_groups: "/rbac"
docker:
services:
keycloak:
image: "quay.io/keycloak/keycloak"
version: "latest"
database:
enabled: true

View File

@@ -72,11 +72,11 @@
- name: Render user-profile JSON for SSH key
template:
src: import/user-profile.json.j2
dest: "{{ import_directory_host }}/user-profile.json"
dest: "{{ keycloak_host_import_directory }}/user-profile.json"
mode: '0644'
notify: docker compose up
- name: Apply SSH Public Key to user-profile via kcadm
shell: |
docker exec -i {{ container_name }} \
/opt/keycloak/bin/kcadm.sh update realms/{{ keycloak_realm }} -f {{ import_directory_docker }}user-profile.json
/opt/keycloak/bin/kcadm.sh update realms/{{ keycloak_realm }} -f {{ keycloak_docker_import_directory }}user-profile.json

View File

@@ -3,16 +3,16 @@
include_role:
name: cmp-db-docker-proxy
- name: "create directory {{import_directory_host}}"
- name: "create directory {{keycloak_host_import_directory}}"
file:
path: "{{import_directory_host}}"
path: "{{keycloak_host_import_directory}}"
state: directory
mode: 0755
- name: "Copy import files to {{ import_directory_host }}"
- name: "Copy import files to {{ keycloak_host_import_directory }}"
template:
src: "{{ item }}"
dest: "{{ import_directory_host }}/{{ item | basename | regex_replace('\\.j2$', '') }}"
dest: "{{ keycloak_host_import_directory }}/{{ item | basename | regex_replace('\\.j2$', '') }}"
mode: '770'
loop: "{{ lookup('fileglob', '{{ role_path }}/templates/import/*.j2', wantlist=True) }}"
notify: docker compose up

View File

@@ -1,14 +1,14 @@
{% include 'roles/docker-compose/templates/base.yml.j2' %}
application:
image: "{{ applications | get_app_conf(application_id, 'images.keycloak', True) }}"
container_name: {{container_name}}
command: start {% if applications | get_app_conf(application_id, 'import_realm', True) | bool %}--import-realm{% endif %}
image: "{{ keycloak_image }}:{{ keycloak_version }}"
container_name: {{ keycloak_container }}
command: start{% if keycloak_import_realm %} --import-realm{% endif %}{% if keycloak_debug_enabled %} --verbose{% endif %}
{% include 'roles/docker-container/templates/base.yml.j2' %}
ports:
- "{{ keycloak_server_host }}:8080"
volumes:
- "{{import_directory_host}}:{{import_directory_docker}}"
- "{{keycloak_host_import_directory}}:{{keycloak_docker_import_directory}}"
{% include 'roles/docker-container/templates/depends_on/dmbs_excl.yml.j2' %}
{% include 'roles/docker-container/templates/networks.yml.j2' %}
{% set container_port = 9000 %}
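
With keycloak_import_realm and keycloak_debug_enabled both true, the command above renders as start --import-realm --verbose; with both false it collapses to a bare start.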

View File

@@ -1,14 +1,17 @@
application_id: "keycloak"
database_type: "postgres"
container_name: "{{application_id}}_application"
import_directory_host: "{{docker_compose.directories.volumes}}import/" # Directory in which Keycloak import files are placed on the host
import_directory_docker: "/opt/keycloak/data/import/" # Directory in which Keycloak import files are placed in the running Docker container
keycloak_realm: "{{ primary_domain}}" # This is the name of the default realm which is used by the applications
keycloak_administrator: "{{ applications | get_app_conf(application_id, 'users.administrator', True) }}" # Master Administrator
keycloak_administrator_username: "{{ keycloak_administrator.username}}" # Master Administrator Username
keycloak_administrator_password: "{{ keycloak_administrator.password}}" # Master Administrator Password
keycloak_kcadm_path: "docker exec -i {{ container_name }} /opt/keycloak/bin/kcadm.sh"
application_id: "keycloak" # Internal CyMaIS application id
database_type: "postgres" # Database which will be used
keycloak_container: "{{ application_id }}_application" # Name of the Keycloak Docker container
keycloak_host_import_directory: "{{ docker_compose.directories.volumes }}import/" # Directory in which Keycloak import files are placed on the host
keycloak_docker_import_directory: "/opt/keycloak/data/import/" # Directory in which Keycloak import files are placed in the running Docker container
keycloak_realm: "{{ primary_domain }}" # Name of the default realm used by the applications
keycloak_administrator: "{{ applications | get_app_conf(application_id, 'users.administrator', True) }}" # Master Administrator
keycloak_administrator_username: "{{ keycloak_administrator.username }}" # Master Administrator Username
keycloak_administrator_password: "{{ keycloak_administrator.password }}" # Master Administrator Password
keycloak_kcadm_path: "docker exec -i {{ keycloak_container }} /opt/keycloak/bin/kcadm.sh" # kcadm.sh invocation prefix for the Keycloak container
keycloak_server_internal_url: "http://127.0.0.1:8080"
keycloak_server_host: "127.0.0.1:{{ports.localhost.http[application_id]}}"
keycloak_server_host: "127.0.0.1:{{ ports.localhost.http[application_id] }}"
keycloak_server_host_url: "http://{{ keycloak_server_host }}"
keycloak_image: "{{ applications | get_app_conf(application_id, 'docker.services.keycloak.image', True) }}" # Keycloak Docker image
keycloak_version: "{{ applications | get_app_conf(application_id, 'docker.services.keycloak.version', True) }}" # Keycloak Docker version
keycloak_import_realm: "{{ applications | get_app_conf(application_id, 'import_realm', True, True) }}" # Activate realm import
keycloak_debug_enabled: "{{ enable_debug }}"

View File

@@ -33,12 +33,12 @@ class TestDeprecatedVersionKey(unittest.TestCase):
if uses_version:
warnings.append(
f"[DEPRECATION WARNING] {role_path.name}/config/main.yml: "
f"'version' is deprecated. Replace it by docker.versions[version]."
f"'version' is deprecated. Replace it by docker.services[service].version."
)
if uses_images:
warnings.append(
f"[DEPRECATION WARNING] {role_path.name}/config/main.yml: "
f"'images' is deprecated. Replace it by docker.images[image]."
f"'images' is deprecated. Replace it by docker.services[service].image."
)
if warnings:

View File

@@ -3,14 +3,11 @@ import yaml
from pathlib import Path
import re
class TestDockerRoleImagesConfiguration(unittest.TestCase):
def test_images_keys_and_templates(self):
class TestDockerRoleServicesConfiguration(unittest.TestCase):
def test_services_keys_and_templates(self):
"""
For each web-app-* role, check that:
- roles/web-app-*/config/main.yml contains 'images' as a dict with keys/values
- Each image key is referenced as:
image: "{{ applications[application_id].images.<key> }}"
in either roles/web-app-*/templates/docker-compose.yml.j2 or env.j2
- roles/web-app-*/config/main.yml contains 'services' as a dict with keys/values
"""
repo_root = Path(__file__).resolve().parent.parent.parent
roles_dir = repo_root / "roles"
@@ -33,13 +30,13 @@ class TestDockerRoleImagesConfiguration(unittest.TestCase):
errors.append(f"{role_path.name}: YAML parse error: {e}")
continue
images = config.get("docker",{}).get("images")
if not images:
warnings.append(f"[WARNING] {role_path.name}: No 'docker.images' key in config/main.yml")
services = config.get("docker",{}).get("services",{})
if not services:
warnings.append(f"[WARNING] {role_path.name}: No 'docker.services' key in config/main.yml")
continue
if not isinstance(images, dict):
errors.append(f"{role_path.name}: 'images' must be a dict in config/main.yml")
if not isinstance(services, dict):
errors.append(f"{role_path.name}: 'services' must be a dict in config/main.yml")
continue
# OPTIONAL: Check if the image is available locally via docker images
@@ -55,9 +52,9 @@ class TestDockerRoleImagesConfiguration(unittest.TestCase):
# except Exception as e:
# errors.append(f"{role_path.name}: Error running 'docker images' (optional): {e}")
if warnings:
print("\nWarnings in docker role images configuration:\n" + "\n".join(warnings))
print("\nWarnings in docker role services configuration:\n" + "\n".join(warnings))
if errors:
self.fail("Errors in docker role images configuration:\n" + "\n".join(errors))
self.fail("Errors in docker role services configuration:\n" + "\n".join(errors))
if __name__ == "__main__":
unittest.main()

View File

@@ -0,0 +1,116 @@
# File: tests/unit/utils/test_valid_deploy_id.py
import os
import tempfile
import unittest
import yaml
from utils.valid_deploy_id import ValidDeployId
class TestValidDeployId(unittest.TestCase):
def setUp(self):
# Create a temporary directory for roles
self.temp_dir = tempfile.TemporaryDirectory()
self.roles_dir = os.path.join(self.temp_dir.name, 'roles')
os.makedirs(self.roles_dir)
# Create a dummy role with application_id 'app1'
role_path = os.path.join(self.roles_dir, 'role1', 'vars')
os.makedirs(role_path)
with open(os.path.join(role_path, 'main.yml'), 'w', encoding='utf-8') as f:
yaml.safe_dump({'application_id': 'app1'}, f)
# Initialize validator with our temp roles_dir
self.validator = ValidDeployId(roles_dir=self.roles_dir)
def tearDown(self):
self.temp_dir.cleanup()
def _write_ini_inventory(self, content):
fd, path = tempfile.mkstemp(suffix='.ini')
os.close(fd)
with open(path, 'w', encoding='utf-8') as f:
f.write(content)
return path
def _write_yaml_inventory(self, data):
fd, path = tempfile.mkstemp(suffix='.yml')
os.close(fd)
with open(path, 'w', encoding='utf-8') as f:
yaml.safe_dump(data, f)
return path
def test_valid_in_roles_and_ini_inventory(self):
# Inventory contains app1 as a host
ini_content = """
[servers]
app1,otherhost
"""
inv = self._write_ini_inventory(ini_content)
result = self.validator.validate(inv, ['app1'])
self.assertEqual(result, {}, "app1 should be valid when in roles and ini inventory")
def test_missing_in_roles(self):
# Inventory contains app2 but roles only have app1
ini_content = """
[servers]
app2
"""
inv = self._write_ini_inventory(ini_content)
result = self.validator.validate(inv, ['app2'])
# app2 not in roles, but in inventory
expected = {'app2': {'in_roles': False, 'in_inventory': True}}
self.assertEqual(result, expected)
def test_missing_in_inventory_ini(self):
# Roles have app1 but inventory does not mention it
ini_content = """
[servers]
otherhost
"""
inv = self._write_ini_inventory(ini_content)
result = self.validator.validate(inv, ['app1'])
expected = {'app1': {'in_roles': True, 'in_inventory': False}}
self.assertEqual(result, expected)
def test_missing_both_ini(self):
# Neither roles nor inventory have appX
ini_content = """
[servers]
otherhost
"""
inv = self._write_ini_inventory(ini_content)
result = self.validator.validate(inv, ['appX'])
expected = {'appX': {'in_roles': False, 'in_inventory': False}}
self.assertEqual(result, expected)
def test_valid_in_roles_and_yaml_inventory(self):
# YAML inventory with app1 as a dict key
data = {'app1': {'hosts': ['app1']}, 'group': {'app1': {}}}
inv = self._write_yaml_inventory(data)
result = self.validator.validate(inv, ['app1'])
self.assertEqual(result, {}, "app1 should be valid in roles and yaml inventory")
def test_missing_in_roles_yaml(self):
# YAML inventory has app2 key but roles only have app1
data = {'app2': {}}
inv = self._write_yaml_inventory(data)
result = self.validator.validate(inv, ['app2'])
expected = {'app2': {'in_roles': False, 'in_inventory': True}}
self.assertEqual(result, expected)
def test_missing_in_inventory_yaml(self):
# Roles have app1 but YAML inventory has no app1
data = {'group': {'other': {}}}
inv = self._write_yaml_inventory(data)
result = self.validator.validate(inv, ['app1'])
expected = {'app1': {'in_roles': True, 'in_inventory': False}}
self.assertEqual(result, expected)
def test_missing_both_yaml(self):
data = {}
inv = self._write_yaml_inventory(data)
result = self.validator.validate(inv, ['unknown'])
expected = {'unknown': {'in_roles': False, 'in_inventory': False}}
self.assertEqual(result, expected)
if __name__ == '__main__':
unittest.main()

utils/valid_deploy_id.py (new file, +89 lines)
View File

@@ -0,0 +1,89 @@
# File: utils/valid_deploy_id.py
"""
Utility for validating deployment application IDs against defined roles and inventory.
"""
import os
import yaml
from filter_plugins.get_all_application_ids import get_all_application_ids
class ValidDeployId:
def __init__(self, roles_dir='roles'):
# Load all known application IDs from roles
self.valid_ids = set(get_all_application_ids(roles_dir))
def validate(self, inventory_path, ids):
"""
Validate a list of application IDs against both role definitions and inventory.
Returns a dict mapping invalid IDs to their presence status.
Example:
{
"app1": {"in_roles": False, "in_inventory": True},
"app2": {"in_roles": True, "in_inventory": False}
}
"""
invalid = {}
for app_id in ids:
in_roles = app_id in self.valid_ids
in_inventory = self._exists_in_inventory(inventory_path, app_id)
if not (in_roles and in_inventory):
invalid[app_id] = {
'in_roles': in_roles,
'in_inventory': in_inventory
}
return invalid
def _exists_in_inventory(self, inventory_path, app_id):
_, ext = os.path.splitext(inventory_path)
if ext in ('.yml', '.yaml'):
return self._search_yaml_keys(inventory_path, app_id)
else:
return self._search_ini_sections(inventory_path, app_id)
def _search_ini_sections(self, inventory_path, app_id):
"""
Manually parse INI inventory for sections and host lists.
Returns True if app_id matches a section name or a host in a section.
"""
present = False
with open(inventory_path, 'r', encoding='utf-8') as f:
current_section = None
for raw in f:
line = raw.strip()
# Skip blanks and comments
if not line or line.startswith(('#', ';')):
continue
# Section header
if line.startswith('[') and line.endswith(']'):
current_section = line[1:-1].strip()
if current_section == app_id:
return True
continue
# Host or variable line under a section
if current_section:
# Split on commas or whitespace
for part in [p.strip() for p in line.replace(',', ' ').split()]:
if part == app_id:
return True
return False
def _search_yaml_keys(self, inventory_path, app_id):
with open(inventory_path, 'r', encoding='utf-8') as f:
data = yaml.safe_load(f)
return self._find_key(data, app_id)
def _find_key(self, node, key): # recursive search
if isinstance(node, dict):
for k, v in node.items():
# If key matches and maps to a dict or list, consider it present
if k == key and isinstance(v, (dict, list)):
return True
if self._find_key(v, key):
return True
elif isinstance(node, list):
for item in node:
if self._find_key(item, key):
return True
return False
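
One subtlety in the YAML lookup worth illustrating: _find_key treats an ID as present only when it appears as a mapping key whose value is a dict or list, so a scalar occurrence does not count. A quick sketch (calling the private method directly, for illustration only):

    from utils.valid_deploy_id import ValidDeployId

    v = ValidDeployId(roles_dir="roles")  # assumes a roles/ tree as in the unit tests
    v._find_key({"group": {"app1": {"hosts": ["h1"]}}}, "app1")  # True: value is a dict
    v._find_key({"app1": "just-a-value"}, "app1")                # False: value is a scalar
    v._find_key([{"nested": {"app1": []}}], "app1")              # True: found recursively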