Optimized role structure in preparation for new backup script

Kevin Veen-Birkenbach 2025-07-16 12:31:01 +02:00
parent e56c960900
commit f9426cfb74
GPG Key ID: 44D8F11FD62F878E
29 changed files with 217 additions and 82 deletions

View File

@@ -0,0 +1,11 @@
# Memcached

## Description

This Ansible role provides a Jinja2 snippet to inject a Memcached service definition into your Docker Compose setup.

## Further Resources

- [Official Memcached Docker image on Docker Hub](https://hub.docker.com/_/memcached)
- [Memcached official documentation](https://memcached.org/)
- [Docker Compose reference](https://docs.docker.com/compose/compose-file/)
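
The description above says the role ships a Jinja2 snippet meant to be pulled into a consuming service's Docker Compose template. A minimal sketch of what that inclusion could look like — the include path and template filename are assumptions, not taken from this commit, and indentation handling depends on how the snippet itself is written:

```yaml
{# docker-compose.yml.j2 of a consuming role -- hypothetical example #}
services:
  # ... application services of the consuming role ...
{# Template path below is an assumption; use whatever file this role actually ships. #}
{% include 'roles/svc-db-memcached/templates/docker-compose.yml.j2' %}
```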

View File

@@ -0,0 +1,7 @@
docker:
  services:
    memcached:
      image: memcached
      version: latest
      backup:
        enabled: false

View File

@@ -0,0 +1,17 @@
galaxy_info:
  author: "Kevin Veen-Birkenbach"
  description: "Provides a Docker Compose snippet for a Memcached service (`memcached`) with optional volume, healthcheck, and logging."
  license: "CyMaIS NonCommercial License (CNCL)"
  license_url: "https://s.veen.world/cncl"
  company: |
    Kevin Veen-Birkenbach
    Consulting & Coaching Solutions
    https://www.veen.world
  galaxy_tags:
    - memcached
    - docker
    - cache
  repository: "https://github.com/kevinveenbirkenbach/cymais"
  issue_tracker_url: "https://github.com/kevinveenbirkenbach/cymais/issues"
  documentation: "https://github.com/kevinveenbirkenbach/cymais/tree/main/roles/svc-db-memcached"
dependencies: []

View File

@@ -0,0 +1 @@
application_id: svc-db-memcached

View File

@@ -1,4 +1,4 @@
-# Role: svc-db-redis
+# Redis
 ## Description

View File

@@ -0,0 +1,7 @@
docker:
  services:
    redis:
      image: redis
      version: alpine
      backup:
        enabled: false

View File

@@ -1,8 +1,10 @@
 # This template needs to be included in docker-compose.yml, which depend on redis
+{% set redis_image = applications | get_app_conf('svc-db-redis', 'docker.services.redis.image') %}
+{% set redis_version = applications | get_app_conf('svc-db-redis', 'docker.services.redis.version') %}
 redis:
-  image: redis:alpine
-  container_name: {{application_id}}-redis
-  restart: {{docker_restart_policy}}
+  image: "{{ redis_image }}:{{ redis_version }}"
+  container_name: {{ application_id }}-redis
+  restart: {{ docker_restart_policy }}
   logging:
     driver: journald
   volumes:

View File

@@ -1 +1 @@
-application_id: redis
+application_id: svc-db-redis

View File

@@ -15,6 +15,7 @@ docker:
     database:
       enabled: true
     akaunting:
-      no_stop_required: true
+      backup:
+        no_stop_required: true
       image: docker.io/akaunting/akaunting
       version: latest

View File

@@ -10,6 +10,7 @@ docker:
     database:
       enabled: true
     baserow:
-      no_stop_required: true
+      backup:
+        no_stop_required: true
       image: "baserow/baserow"
       version: "latest"

View File

@@ -29,6 +29,8 @@ docker:
     # @todo check this out and repair it if necessary
     discourse:
       name: "discourse"
-      no_stop_required: true
+      image: "local_discourse/discourse_application" # Necessary to define this for the docker 2 loc backup
+      backup:
+        no_stop_required: true
   volumes:
     data: discourse_data

View File

@@ -42,6 +42,7 @@ docker:
     gitea:
       image: "gitea/gitea"
       version: "latest"
-      no_stop_required: true
+      backup:
+        no_stop_required: true
       port: 3000
       name: "gitea"

View File

@@ -15,6 +15,7 @@ docker:
     listmonk:
       image: listmonk/listmonk
       version: latest
-      no_stop_required: true
+      backup:
+        no_stop_required: true
       name: listmonk
       port: 9000

View File

@@ -22,6 +22,7 @@ docker:
     mastodon:
       image: "ghcr.io/mastodon/mastodon"
       version: latest
-      no_stop_required: true
+      backup:
+        no_stop_required: true
       name: "mastodon"
     streaming:

View File

@@ -36,6 +36,7 @@ docker:
       image: "matomo"
       version: "latest"
       name: "matomo"
-      no_stop_required: true
+      backup:
+        no_stop_required: true
     database:
       enabled: true

View File

@@ -6,6 +6,7 @@ docker:
       version: latest
      image: matrixdotorg/synapse
       name: matrix-synapse
-      no_stop_required: true
+      backup:
+        no_stop_required: true
     element:
       version: latest

View File

@@ -6,6 +6,7 @@ docker:
     mediawiki:
       image: mediawiki
       version: latest
-      no_stop_required: true
+      backup:
+        no_stop_required: true
       name: mediawiki
   volumes:

View File

@@ -22,6 +22,7 @@ docker:
       name: "nextcloud"
       image: "nextcloud"
       version: "latest-fpm-alpine"
-      no_stop_required: true
+      backup:
+        no_stop_required: true
     proxy:
       name: "nextcloud-proxy"

View File

@@ -34,7 +34,8 @@ docker:
     web:
       name: openproject-web
       image: openproject/community
-      version: "13" # Update when available. Sadly no rolling release implemented
-      no_stop_required: true
+      version: "13" # Update when available. No rolling release implemented
+      backup:
+        no_stop_required: true
     seeder:
       name: openproject-seeder
@@ -44,6 +45,10 @@ docker:
       name: openproject-worker
     proxy:
       name: openproject-proxy
+    cache:
+      name: openproject-cache
+      image: ""   # If you need a specific memcached image, define it here; otherwise the image from svc-db-memcached is used
+      version: "" # If you need a specific memcached version, define it here; otherwise the version from svc-db-memcached is used
   volumes:
     data: "openproject_data"

View File

@@ -10,8 +10,8 @@ x-op-app: &app
 {% include 'roles/docker-compose/templates/base.yml.j2' %}
   cache:
-    image: memcached
-    container_name: openproject-memcached
+    image: "{{ openproject_cache_image }}:{{ openproject_cache_version }}"
+    container_name: {{ openproject_cache_name }}
 {% include 'roles/docker-container/templates/base.yml.j2' %}
   proxy:

View File

@@ -11,6 +11,22 @@ openproject_cron_name: "{{ applications | get_app_conf(application_id, 'd
 openproject_proxy_name: "{{ applications | get_app_conf(application_id, 'docker.services.proxy.name', True) }}"
 openproject_worker_name: "{{ applications | get_app_conf(application_id, 'docker.services.worker.name', True) }}"
+openproject_cache_name: "{{ applications | get_app_conf(application_id, 'docker.services.cache.name', True) }}"
+openproject_cache_image: >-
+  {{ applications
+     | get_app_conf(application_id, 'docker.services.cache.image')
+     or applications
+     | get_app_conf('svc-db-memcached', 'docker.services.memcached.image')
+  }}
+openproject_cache_version: >-
+  {{ applications
+     | get_app_conf(application_id, 'docker.services.cache.version')
+     or applications
+     | get_app_conf('svc-db-memcached', 'docker.services.memcached.version')
+  }}
 openproject_plugins_folder: "{{docker_compose.directories.volumes}}plugins/"
 custom_openproject_image: "custom_openproject"

View File

@@ -34,6 +34,7 @@ docker:
       name: "peertube"
       version: "production-bookworm"
       image: "chocobozzz/peertube"
-      no_stop_required: true
+      backup:
+        no_stop_required: true
   volumes:
     data: peertube_data

View File

@@ -30,6 +30,7 @@ docker:
       image: "zknt/pixelfed"
       version: "latest"
       name: "pixelfed"
-      no_stop_required: true
+      backup:
+        no_stop_required: true
     worker:
       name: "pixelfed_worker"

View File

@@ -46,6 +46,7 @@ docker:
       version: latest
       image: wordpress
       name: wordpress
-      no_stop_required: true
+      backup:
+        no_stop_required: true
   volumes:
     data: wordpress_data

View File

@@ -6,6 +6,7 @@ docker:
     database:
       enabled: false # Enable the database
     {{ application_id }}:
-      no_stop_required: true
+      backup:
+        no_stop_required: true
       image: ""
       version: "latest"

View File

View File

@@ -0,0 +1,51 @@
import unittest
import os
import yaml


class TestBackupsEnabledIntegrity(unittest.TestCase):

    def setUp(self):
        # Path to the roles directory
        self.roles_dir = os.path.abspath(
            os.path.join(os.path.dirname(__file__), '../../../roles')
        )

    def test_backups_enabled_image_consistency(self):
        """
        Ensure that if `backups.enabled` is set for any docker.services[*]:
        - it's a boolean value
        - the containing service dict has an `image` entry at the same level
        """
        for role in os.listdir(self.roles_dir):
            docker_config_path = os.path.join(
                self.roles_dir, role, 'config', 'main.yml'
            )
            if not os.path.isfile(docker_config_path):
                continue

            with open(docker_config_path, 'r') as f:
                try:
                    config = yaml.safe_load(f) or {}
                except yaml.YAMLError as e:
                    self.fail(f"YAML parsing failed for {docker_config_path}: {e}")
                    continue

            services = (config.get('docker', {}) or {}).get('services', {}) or {}
            for service_key, service in services.items():
                if not isinstance(service, dict):
                    continue

                backups_cfg = service.get('backups', {}) or {}
                if 'enabled' in backups_cfg:
                    with self.subTest(role=role, service=service_key):
                        self.assertIsInstance(
                            backups_cfg['enabled'], bool,
                            f"`backups.enabled` in role '{role}', service '{service_key}' must be a boolean."
                        )
                        self.assertIn(
                            'image', service,
                            f"`image` is required in role '{role}', service '{service_key}' when `backups.enabled` is defined."
                        )


if __name__ == '__main__':
    unittest.main()
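
For orientation, a role config shape this check would accept — a sketch only; the role and service names are hypothetical, and note that the test reads the plural key `backups`:

```yaml
# roles/web-app-example/config/main.yml  (hypothetical role)
docker:
  services:
    example:
      image: example/image   # required whenever `backups.enabled` is present
      version: latest
      backups:
        enabled: true        # must be a boolean
```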

View File

@@ -0,0 +1,55 @@
import unittest
import os
import yaml


class TestNoStopRequiredIntegrity(unittest.TestCase):

    def setUp(self):
        # Path to the roles directory
        self.roles_dir = os.path.abspath(
            os.path.join(os.path.dirname(__file__), '../../../roles')
        )

    def test_backup_no_stop_required_consistency(self):
        """
        Ensure that if `backup.no_stop_required: true` is set for any docker.services[*]:
        - it's a boolean value
        - the containing service dict has an `image` entry at the same level
        """
        for role in os.listdir(self.roles_dir):
            docker_config_path = os.path.join(
                self.roles_dir, role, 'config', 'main.yml'
            )
            if not os.path.isfile(docker_config_path):
                continue

            with open(docker_config_path, 'r') as f:
                try:
                    # Ensure config is at least an empty dict if YAML is empty or null
                    config = yaml.safe_load(f) or {}
                except yaml.YAMLError as e:
                    self.fail(f"YAML parsing failed for {docker_config_path}: {e}")
                    continue

            # Safely get services dict
            services = (config.get('docker', {}) or {}).get('services', {}) or {}
            for service_key, service in services.items():
                if not isinstance(service, dict):
                    continue

                backup_cfg = service.get('backup', {}) or {}
                # Check if no_stop_required is explicitly True
                if backup_cfg.get('no_stop_required') is True:
                    with self.subTest(role=role, service=service_key):
                        # Must be a boolean
                        self.assertIsInstance(
                            backup_cfg['no_stop_required'], bool,
                            f"`backup.no_stop_required` in role '{role}', service '{service_key}' must be a boolean."
                        )
                        # Must have `image` defined at the service level
                        self.assertIn(
                            'image', service,
                            f"`image` is required in role '{role}', service '{service_key}' when `backup.no_stop_required` is set to True."
                        )


if __name__ == '__main__':
    unittest.main()
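
And the corresponding shape for this second check — again a sketch with hypothetical names, matching the `backup.no_stop_required` nesting the role configs above adopt:

```yaml
# roles/web-app-example/config/main.yml  (hypothetical role)
docker:
  services:
    example:
      image: example/image   # required once no_stop_required is set to true
      backup:
        no_stop_required: true
```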

View File

@@ -1,52 +0,0 @@
import unittest
import os
import yaml


class TestNoStopRequiredIntegrity(unittest.TestCase):

    def setUp(self):
        self.roles_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../roles'))

    def test_no_stop_required_consistency(self):
        """
        This test ensures that if 'no_stop_required' is defined in any
        docker.services[*] entry, it must:
        - be a boolean value (True/False)
        - have a 'name' entry defined on the same level

        This is critical for the role 'sys-bkp-docker-2-loc', which uses the
        'no_stop_required' flag to determine which container names should be excluded
        from stopping during backup operations.

        The logic for processing this flag is implemented in:
        https://github.com/kevinveenbirkenbach/backup-docker-to-local
        """
        for role in os.listdir(self.roles_dir):
            docker_config_path = os.path.join(self.roles_dir, role, 'config', 'main.yml')
            if not os.path.isfile(docker_config_path):
                continue

            with open(docker_config_path, 'r') as f:
                try:
                    config = yaml.safe_load(f)
                except yaml.YAMLError as e:
                    self.fail(f"YAML parsing failed for {docker_config_path}: {e}")
                    continue

            docker_services = (
                config.get('docker', {}).get('services', {}) if config else {}
            )
            for service_key, service in docker_services.items():
                if isinstance(service, dict) and 'no_stop_required' in service:
                    with self.subTest(role=role, service=service_key):
                        self.assertIsInstance(
                            service['no_stop_required'], bool,
                            f"'no_stop_required' in role '{role}', service '{service_key}' must be a boolean."
                        )
                        self.assertIn(
                            'name', service,
                            f"'name' is required in role '{role}', service '{service_key}' when 'no_stop_required' is set."
                        )


if __name__ == '__main__':
    unittest.main()