Compare commits

...

6 Commits

80 changed files with 938 additions and 311 deletions

View File

@@ -45,6 +45,12 @@ def main():
action='store_true',
help="Preview graphs to console instead of writing files"
)
parser.add_argument(
'-s', '--shadow-folder',
type=str,
default=None,
help="If set, writes tree.json to this shadow folder instead of the role's actual meta/ folder"
)
parser.add_argument(
'-v', '--verbose',
action='store_true',
@@ -57,6 +63,7 @@ def main():
print(f"Max depth: {args.depth}")
print(f"Output format: {args.output}")
print(f"Preview mode: {args.preview}")
print(f"Shadow folder: {args.shadow_folder}")
for role_name, role_path in find_roles(args.role_dir):
if args.verbose:
@@ -74,7 +81,13 @@ def main():
print(f"Previewing graph '{key}' for role '{role_name}'")
output_graph(data, 'console', role_name, key)
else:
tree_file = os.path.join(role_path, 'meta', 'tree.json')
# Decide on output folder
if args.shadow_folder:
tree_file = os.path.join(
args.shadow_folder, role_name, 'meta', 'tree.json'
)
else:
tree_file = os.path.join(role_path, 'meta', 'tree.json')
os.makedirs(os.path.dirname(tree_file), exist_ok=True)
with open(tree_file, 'w') as f:
json.dump(graphs, f, indent=2)

View File

@@ -16,11 +16,12 @@ def run_ansible_playbook(
skip_tests=False,
skip_validation=False,
skip_build=False,
cleanup=False
):
start_time = datetime.datetime.now()
print(f"\n▶️ Script started at: {start_time.isoformat()}\n")
if not skip_build:
if cleanup:
print("\n🧹 Cleaning up project (make clean)...\n")
subprocess.run(["make", "clean"], check=True)
else:
@@ -202,7 +203,8 @@ def main():
verbose=args.verbose,
skip_tests=args.skip_tests,
skip_validation=args.skip_validation,
skip_build=args.skip_build # Pass the new param
skip_build=args.skip_build,
cleanup=args.cleanup
)

View File

@@ -3,6 +3,12 @@ import re
import yaml
from ansible.errors import AnsibleFilterError
from ansible.errors import AnsibleUndefinedVariable
try:
from ansible.utils.unsafe_proxy import AnsibleUndefined
except ImportError:
class AnsibleUndefined: pass
class AppConfigKeyError(AnsibleFilterError, ValueError):
"""
Raised when a required application config key is missing (strict mode).
@@ -50,6 +56,16 @@ def get_app_conf(applications, application_id, config_path, strict=True, default
)
k, idx = m.group(1), m.group(2)
if (hasattr(obj, '__class__') and obj.__class__.__name__ == 'AnsibleUndefined') \
or isinstance(obj, AnsibleUndefinedVariable):
if not strict:
return default if default is not None else False
raise AppConfigKeyError(
f"Key '{k}' is undefined at '{'.'.join(path_trace)}'\n"
f"application_id: {application_id}\n"
f"config_path: {config_path}"
)
# Access dict key
if isinstance(obj, dict):
if k not in obj:
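For orientation, a minimal sketch of how the strict/default behaviour is meant to be consumed (the module path and the fallback for plain missing keys are assumptions; the non-strict call shape mirrors the bbb plugin_configuration entries later in this changeset):

```python
from filter_plugins.get_app_conf import get_app_conf, AppConfigKeyError  # assumed module path

apps = {"web-app-bigbluebutton": {"setup": True}}

# Non-strict lookup with a default: a missing or undefined value is not fatal.
secret = get_app_conf(apps, "web-app-bigbluebutton",
                      "credentials.shared_secret", False, "")
print(repr(secret))  # expected: '' (the supplied default)

# Strict lookup (the default): the same miss raises AppConfigKeyError,
# which is both an AnsibleFilterError and a ValueError.
try:
    get_app_conf(apps, "web-app-bigbluebutton", "credentials.shared_secret")
except AppConfigKeyError as err:
    print(err)
```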

14
filter_plugins/has_env.py Normal file
View File

@@ -0,0 +1,14 @@
import os
def has_env(application_id, base_dir='.'):
"""
Check if env.j2 exists under roles/{{ application_id }}/templates/env.j2
"""
path = os.path.join(base_dir, 'roles', application_id, 'templates', 'env.j2')
return os.path.isfile(path)
class FilterModule(object):
def filters(self):
return {
'has_env': has_env,
}
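A quick way to exercise the new filter outside of Ansible (the role name is illustrative; the `env_file` behaviour it gates is shown in the docker-container base template further down):

```python
from filter_plugins.has_env import has_env

# True only if roles/web-app-nextcloud/templates/env.j2 exists below base_dir.
if has_env('web-app-nextcloud', base_dir='.'):
    print("env.j2 found - the compose service will get an env_file entry")
```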

View File

@@ -2,8 +2,8 @@ ports:
# Ports which are exposed to localhost
localhost:
database:
svc-db-postgres: 5432
svc-db-mariadb: 3306
svc-db-postgres: 5432
svc-db-mariadb: 3306
# https://developer.mozilla.org/de/docs/Web/API/WebSockets_API
websocket:
web-app-mastodon: 4001
@@ -21,6 +21,7 @@ ports:
ldap:
svc-db-openldap: 389
http:
# Ports which are exposed to the World Wide Web
web-app-nextcloud: 8001
web-app-gitea: 8002
web-app-wordpress: 8003
@@ -65,11 +66,10 @@ ports:
collabora: 8042
mobilizon: 8043
simpleicons: 8044
libretranslate: 8055
pretix: 8056
libretranslate: 8045
pretix: 8046
web-app-mig: 8047
web-app-bigbluebutton: 48087 # This port is predefined by bbb. @todo Try to change this to a 8XXX port
# Ports which are exposed to the World Wide Web
public:
# The following ports should be changed to 22 on the subdomain via stream mapping
ssh:
@@ -79,7 +79,7 @@ ports:
svc-db-openldap: 636
stun:
web-app-bigbluebutton: 3478 # Not sure if it's right placed here or if it should be moved to localhost section
web-app-nextcloud: 3479
web-app-nextcloud: 3479
turn:
web-app-bigbluebutton: 5349 # Not sure if it's right placed here or if it should be moved to localhost section
web-app-nextcloud: 5350 # Not used yet
web-app-nextcloud: 5350 # Not used yet

View File

@@ -92,10 +92,14 @@ defaults_networks:
subnet: 192.168.103.128/28
pretix:
subnet: 192.168.103.144/28
web-app-mig:
subnet: 192.168.103.160/28
# /24 Networks / 254 Usable Clients
web-app-bigbluebutton:
subnet: 10.7.7.0/24 # This variable does not have an impact. It's just there for documentation reasons, because this network is used in bbb
# This network variable isn't used.
# It's registered here to make it transparent which network bbb uses and to avoid conflicts.
subnet: 10.7.7.0/24
svc-db-postgres:
subnet: 192.168.200.0/24
svc-db-mariadb:

5
group_vars/all/Todo.md Normal file
View File

@@ -0,0 +1,5 @@
# Todos
- Remove *_users.yml, *_applications.yml, *_ports.yml and *_networks.yml
- Move their values to the roles/*/config/main.yml file of each role
- Implement validators (e.g. no overlapping networks, unique ports) for all of these configurations (see the sketch below)
- Keep these design decisions in mind during new feature implementations
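A minimal sketch of what the unique-ports validator could look like, assuming the nested exposure → category → role structure used in the ports file above (not part of this changeset):

```python
from collections import defaultdict

def find_duplicate_ports(ports: dict) -> dict:
    """Return {port: [(exposure, category, role), ...]} for every port assigned more than once."""
    usage = defaultdict(list)
    for exposure, categories in ports.items():            # e.g. 'localhost', 'public'
        for category, assignments in categories.items():  # e.g. 'http', 'ldap', 'stun'
            for role, port in assignments.items():
                usage[port].append((exposure, category, role))
    return {port: users for port, users in usage.items() if len(users) > 1}

# Example with the renumbered ports from this changeset: no conflicts expected.
ports = {"localhost": {"http": {"libretranslate": 8045, "pretix": 8046, "web-app-mig": 8047}}}
assert find_duplicate_ports(ports) == {}
```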

49
main.py
View File

@@ -8,6 +8,8 @@ import threading
import signal
from datetime import datetime
import pty
from module_utils.sounds import Sound
import time
# Color support
try:
@@ -18,25 +20,6 @@ except ImportError:
def __getattr__(self, name): return ''
Fore = Back = Style = Dummy()
_IN_DOCKER = os.path.exists('/.dockerenv')
if _IN_DOCKER:
class Quiet:
@staticmethod
def play_start_sound(): pass
@staticmethod
def play_cymais_intro_sound(): pass
@staticmethod
def play_finished_successfully_sound(): pass
@staticmethod
def play_finished_failed_sound(): pass
@staticmethod
def play_warning_sound(): pass
Sound = Quiet
else:
from module_utils.sounds import Sound
def color_text(text, color):
return f"{color}{text}{Style.RESET_ALL}"
@@ -113,18 +96,29 @@ def play_start_intro():
Sound.play_cymais_intro_sound()
def failure_with_warning_loop(no_signal, sound_enabled):
import time
def failure_with_warning_loop(no_signal, sound_enabled, alarm_timeout=60):
"""
On failure: Plays warning sound in a loop.
Aborts after alarm_timeout seconds and exits with code 1.
"""
if not no_signal:
Sound.play_finished_failed_sound()
print(color_text("Warning: command failed. Press Ctrl+C to stop warnings.", Fore.RED))
start = time.monotonic()
try:
while True:
if not no_signal:
Sound.play_warning_sound()
if time.monotonic() - start > alarm_timeout:
print(color_text(f"Alarm aborted after {alarm_timeout} seconds.", Fore.RED))
sys.exit(1)
except KeyboardInterrupt:
print(color_text("Warnings stopped by user.", Fore.YELLOW))
if __name__ == "__main__":
# Parse flags
sound_enabled = '--sound' in sys.argv and (sys.argv.remove('--sound') or True)
@@ -132,7 +126,16 @@ if __name__ == "__main__":
log_enabled = '--log' in sys.argv and (sys.argv.remove('--log') or True)
git_clean = '--git-clean' in sys.argv and (sys.argv.remove('--git-clean') or True)
infinite = '--infinite' in sys.argv and (sys.argv.remove('--infinite') or True)
alarm_timeout = 60
if '--alarm-timeout' in sys.argv:
i = sys.argv.index('--alarm-timeout')
try:
alarm_timeout = int(sys.argv[i+1])
del sys.argv[i:i+2]
except Exception:
print(color_text("Invalid --alarm-timeout value!", Fore.RED))
sys.exit(1)
# Segfault handler
def segv_handler(signum, frame):
if not no_signal:
@@ -317,7 +320,7 @@ if __name__ == "__main__":
log_file.close()
if rc != 0:
failure_with_warning_loop(no_signal, sound_enabled)
failure_with_warning_loop(no_signal, sound_enabled, alarm_timeout)
sys.exit(rc)
else:
if not no_signal:
@@ -325,7 +328,7 @@ if __name__ == "__main__":
return True
except Exception as e:
print(color_text(f"Exception running command: {e}", Fore.RED))
failure_with_warning_loop(no_signal, sound_enabled)
failure_with_warning_loop(no_signal, sound_enabled, alarm_timeout)
sys.exit(1)
if infinite:

View File

@@ -1,124 +1,148 @@
import numpy as np
import simpleaudio as sa
class Sound:
"""
Sound effects for the application with enhanced complexity.
Each sound uses at least 6 distinct tones and lasts no more than max_length seconds,
except the intro sound which is a detailed 26-second Berlin techno-style build-up, 12-second celebration with a descending-fifth chord sequence of 7 chords, and breakdown with melodic background.
Transitions between phases now crossfade over 3 seconds for smoother flow.
"""
fs = 44100 # Sampling rate (samples per second)
complexity_factor = 10 # Number of harmonics to sum for richer timbres
max_length = 2.0 # Maximum total duration of any sound in seconds
import os
import warnings
class DummySound:
@staticmethod
def _generate_complex_wave(frequency: float, duration: float, harmonics: int = None) -> np.ndarray:
if harmonics is None:
harmonics = Sound.complexity_factor
t = np.linspace(0, duration, int(Sound.fs * duration), False)
wave = np.zeros_like(t)
for n in range(1, harmonics + 1):
wave += (1 / n) * np.sin(2 * np.pi * frequency * n * t)
# ADSR envelope
attack = int(0.02 * Sound.fs)
release = int(0.05 * Sound.fs)
env = np.ones_like(wave)
env[:attack] = np.linspace(0, 1, attack)
env[-release:] = np.linspace(1, 0, release)
wave *= env
wave /= np.max(np.abs(wave))
return (wave * (2**15 - 1)).astype(np.int16)
def play_start_sound(): pass
@staticmethod
def _crossfade(w1: np.ndarray, w2: np.ndarray, fade_len: int) -> np.ndarray:
# Ensure fade_len less than each
fade_len = min(fade_len, len(w1), len(w2))
fade_out = np.linspace(1, 0, fade_len)
fade_in = np.linspace(0, 1, fade_len)
w1_end = w1[-fade_len:] * fade_out
w2_start = w2[:fade_len] * fade_in
middle = (w1_end + w2_start).astype(np.int16)
return np.concatenate([w1[:-fade_len], middle, w2[fade_len:]])
def play_cymais_intro_sound(): pass
@staticmethod
def _play(wave: np.ndarray):
play_obj = sa.play_buffer(wave, 1, 2, Sound.fs)
play_obj.wait_done()
def play_finished_successfully_sound(): pass
@staticmethod
def play_finished_failed_sound(): pass
@staticmethod
def play_warning_sound(): pass
@classmethod
def play_cymais_intro_sound(cls):
# Phase durations
build_time = 10.0
celebr_time = 12.0
breakdown_time = 10.0
overlap = 3.0 # seconds of crossfade
bass_seg = 0.125 # 1/8s kick
melody_seg = 0.25 # 2/8s melody
bass_freq = 65.41 # C2 kick
melody_freqs = [261.63, 293.66, 329.63, 392.00, 440.00, 523.25]
_IN_DOCKER = os.path.exists('/.dockerenv')
# Build-up phase
steps = int(build_time / (bass_seg + melody_seg))
build_seq = []
for i in range(steps):
amp = (i + 1) / steps
b = cls._generate_complex_wave(bass_freq, bass_seg).astype(np.float32) * amp
m = cls._generate_complex_wave(melody_freqs[i % len(melody_freqs)], melody_seg).astype(np.float32) * amp
build_seq.append(b.astype(np.int16))
build_seq.append(m.astype(np.int16))
build_wave = np.concatenate(build_seq)
if _IN_DOCKER:
warnings.warn("Sound support disabled: running inside Docker.", RuntimeWarning)
Sound = DummySound
else:
try:
import numpy as np
import simpleaudio as sa
class Sound:
"""
Sound effects for the application with enhanced complexity.
Each sound uses at least 6 distinct tones and lasts no more than max_length seconds,
except the intro sound which is a detailed 26-second Berlin techno-style build-up, 12-second celebration with a descending-fifth chord sequence of 7 chords, and breakdown with melodic background.
Transitions between phases now crossfade over 3 seconds for smoother flow.
"""
# Celebration phase: 7 descending-fifth chords
roots = [523.25, 349.23, 233.08, 155.56, 103.83, 69.30, 46.25]
chord_time = celebr_time / len(roots)
celebr_seq = []
for root in roots:
t = np.linspace(0, chord_time, int(cls.fs * chord_time), False)
chord = sum(np.sin(2 * np.pi * f * t) for f in [root, root * 5/4, root * 3/2])
chord /= np.max(np.abs(chord))
celebr_seq.append((chord * (2**15 - 1)).astype(np.int16))
celebr_wave = np.concatenate(celebr_seq)
fs = 44100 # Sampling rate (samples per second)
complexity_factor = 10 # Number of harmonics to sum for richer timbres
max_length = 2.0 # Maximum total duration of any sound in seconds
# Breakdown phase (mirror of build-up)
breakdown_wave = np.concatenate(list(reversed(build_seq)))
@staticmethod
def _generate_complex_wave(frequency: float, duration: float, harmonics: int = None) -> np.ndarray:
if harmonics is None:
harmonics = Sound.complexity_factor
t = np.linspace(0, duration, int(Sound.fs * duration), False)
wave = np.zeros_like(t)
for n in range(1, harmonics + 1):
wave += (1 / n) * np.sin(2 * np.pi * frequency * n * t)
# ADSR envelope
attack = int(0.02 * Sound.fs)
release = int(0.05 * Sound.fs)
env = np.ones_like(wave)
env[:attack] = np.linspace(0, 1, attack)
env[-release:] = np.linspace(1, 0, release)
wave *= env
wave /= np.max(np.abs(wave))
return (wave * (2**15 - 1)).astype(np.int16)
# Crossfade transitions
fade_samples = int(overlap * cls.fs)
bc = cls._crossfade(build_wave, celebr_wave, fade_samples)
full = cls._crossfade(bc, breakdown_wave, fade_samples)
@staticmethod
def _crossfade(w1: np.ndarray, w2: np.ndarray, fade_len: int) -> np.ndarray:
# Ensure fade_len less than each
fade_len = min(fade_len, len(w1), len(w2))
fade_out = np.linspace(1, 0, fade_len)
fade_in = np.linspace(0, 1, fade_len)
w1_end = w1[-fade_len:] * fade_out
w2_start = w2[:fade_len] * fade_in
middle = (w1_end + w2_start).astype(np.int16)
return np.concatenate([w1[:-fade_len], middle, w2[fade_len:]])
cls._play(full)
@staticmethod
def _play(wave: np.ndarray):
play_obj = sa.play_buffer(wave, 1, 2, Sound.fs)
play_obj.wait_done()
@classmethod
def play_start_sound(cls):
freqs = [523.25, 659.26, 783.99, 880.00, 1046.50, 1174.66]
cls._prepare_and_play(freqs)
@classmethod
def play_cymais_intro_sound(cls):
# Phase durations
build_time = 10.0
celebr_time = 12.0
breakdown_time = 10.0
overlap = 3.0 # seconds of crossfade
bass_seg = 0.125 # 1/8s kick
melody_seg = 0.25 # 2/8s melody
bass_freq = 65.41 # C2 kick
melody_freqs = [261.63, 293.66, 329.63, 392.00, 440.00, 523.25]
@classmethod
def play_finished_successfully_sound(cls):
freqs = [523.25, 587.33, 659.26, 783.99, 880.00, 987.77]
cls._prepare_and_play(freqs)
# Build-up phase
steps = int(build_time / (bass_seg + melody_seg))
build_seq = []
for i in range(steps):
amp = (i + 1) / steps
b = cls._generate_complex_wave(bass_freq, bass_seg).astype(np.float32) * amp
m = cls._generate_complex_wave(melody_freqs[i % len(melody_freqs)], melody_seg).astype(np.float32) * amp
build_seq.append(b.astype(np.int16))
build_seq.append(m.astype(np.int16))
build_wave = np.concatenate(build_seq)
@classmethod
def play_finished_failed_sound(cls):
freqs = [880.00, 830.61, 783.99, 659.26, 622.25, 523.25]
durations = [0.4, 0.3, 0.25, 0.25, 0.25, 0.25]
cls._prepare_and_play(freqs, durations)
# Celebration phase: 7 descending-fifth chords
roots = [523.25, 349.23, 233.08, 155.56, 103.83, 69.30, 46.25]
chord_time = celebr_time / len(roots)
celebr_seq = []
for root in roots:
t = np.linspace(0, chord_time, int(cls.fs * chord_time), False)
chord = sum(np.sin(2 * np.pi * f * t) for f in [root, root * 5/4, root * 3/2])
chord /= np.max(np.abs(chord))
celebr_seq.append((chord * (2**15 - 1)).astype(np.int16))
celebr_wave = np.concatenate(celebr_seq)
@classmethod
def play_warning_sound(cls):
freqs = [700.00, 550.00, 750.00, 500.00, 800.00, 450.00]
cls._prepare_and_play(freqs)
# Breakdown phase (mirror of build-up)
breakdown_wave = np.concatenate(list(reversed(build_seq)))
@classmethod
def _prepare_and_play(cls, freqs, durations=None):
count = len(freqs)
if durations is None:
durations = [cls.max_length / count] * count
else:
total = sum(durations)
durations = [d * cls.max_length / total for d in durations]
waves = [cls._generate_complex_wave(f, d) for f, d in zip(freqs, durations)]
cls._play(np.concatenate(waves))
# Crossfade transitions
fade_samples = int(overlap * cls.fs)
bc = cls._crossfade(build_wave, celebr_wave, fade_samples)
full = cls._crossfade(bc, breakdown_wave, fade_samples)
cls._play(full)
@classmethod
def play_start_sound(cls):
freqs = [523.25, 659.26, 783.99, 880.00, 1046.50, 1174.66]
cls._prepare_and_play(freqs)
@classmethod
def play_finished_successfully_sound(cls):
freqs = [523.25, 587.33, 659.26, 783.99, 880.00, 987.77]
cls._prepare_and_play(freqs)
@classmethod
def play_finished_failed_sound(cls):
freqs = [880.00, 830.61, 783.99, 659.26, 622.25, 523.25]
durations = [0.4, 0.3, 0.25, 0.25, 0.25, 0.25]
cls._prepare_and_play(freqs, durations)
@classmethod
def play_warning_sound(cls):
freqs = [700.00, 550.00, 750.00, 500.00, 800.00, 450.00]
cls._prepare_and_play(freqs)
@classmethod
def _prepare_and_play(cls, freqs, durations=None):
count = len(freqs)
if durations is None:
durations = [cls.max_length / count] * count
else:
total = sum(durations)
durations = [d * cls.max_length / total for d in durations]
waves = [cls._generate_complex_wave(f, d) for f, d in zip(freqs, durations)]
cls._play(np.concatenate(waves))
except Exception:
warnings.warn("Sound support disabled: numpy or simpleaudio could not be imported", RuntimeWarning)
Sound = DummySound
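With this restructuring the caller side stays unchanged: whether the real Sound class or the DummySound fallback is active, the same import works (a minimal sketch):

```python
from module_utils.sounds import Sound

# Inside Docker, or when numpy/simpleaudio are unavailable, these are silent no-ops;
# otherwise the synthesized sounds are played.
Sound.play_start_sound()
Sound.play_finished_successfully_sound()
```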

View File

@@ -1,3 +1,5 @@
# run_once_cmp_db_docker_proxy: deactivated
- name: "For '{{ application_id }}': load docker and db"
include_role:
name: cmp-db-docker

View File

@@ -14,4 +14,4 @@ galaxy_info:
- autostart
- archlinux
dependencies:
- sys-pgm-aur
- dev-yay

View File

@@ -11,7 +11,7 @@ The `main.yml` file in the `desk-qbittorrent` role includes the following task:
## Dependencies
This role depends on:
- **sys-pgm-aur**: Ensures that an Arch User Repository (AUR) helper is installed, which is necessary for installing packages like `qbittorrent` that are not available in the standard repositories.
- **dev-yay**: Ensures that an Arch User Repository (AUR) helper is installed, which is necessary for installing packages like `qbittorrent` that are not available in the standard repositories.
## Purpose and Usage
The `desk-qbittorrent` role is tailored for users who require a reliable and user-friendly torrent client for downloading and sharing files via the BitTorrent protocol. qBittorrent is known for its balance of features, simplicity, and minimal impact on system resources.

View File

@@ -19,5 +19,5 @@ galaxy_info:
- name: Archlinux
versions: [ all ]
dependencies:
- sys-pgm-aur
- dev-yay

View File

@@ -16,18 +16,18 @@ To automate the installation of Spotify on Arch-based systems while ensuring pro
- 🎧 Installs the official [Spotify AUR package](https://aur.archlinux.org/packages/spotify)
- 🛠 Uses `yay` (or other helper) via [`kewlfft.aur`](https://github.com/kewlfft/ansible-aur) Ansible module
- 🔗 Declares dependency on `sys-pgm-aur` for seamless integration
- 🔗 Declares dependency on `dev-yay` for seamless integration
## Requirements
- The `sys-pgm-aur` role must be applied before using this role.
- The `dev-yay` role must be applied before using this role.
- An AUR helper like `yay` must be available on the system.
## Dependencies
This role depends on:
- [`sys-pgm-aur`](../sys-pgm-aur) provides and configures an AUR helper like `yay`
- [`dev-yay`](../dev-yay) provides and configures an AUR helper like `yay`
## Credits 📝

View File

@@ -24,4 +24,4 @@ galaxy_info:
issue_tracker_url: https://s.veen.world/cymaisissues
documentation: https://s.veen.world/cymais
dependencies:
- sys-pgm-aur
- dev-yay

View File

@@ -14,7 +14,7 @@ The `main.yml` file in the `desk-zoom` role includes tasks for setting up video
## Dependencies
This role relies on:
- **sys-pgm-aur**: Ensures that an Arch User Repository (AUR) helper is installed, necessary for installing software like Zoom which may not be available in standard repositories.
- **dev-yay**: Ensures that an Arch User Repository (AUR) helper is installed, necessary for installing software like Zoom which may not be available in standard repositories.
## Purpose and Usage
The `desk-zoom` role is particularly useful for professionals, educators, and anyone who needs reliable video conferencing capabilities on their Linux system. With the increasing demand for remote communication, this role provides an efficient way to set up key video conferencing tools.

View File

@@ -20,5 +20,5 @@ galaxy_info:
- name: Archlinux
versions: [ all ]
dependencies:
- sys-pgm-aur
- dev-yay

View File

@@ -0,0 +1,20 @@
# dev-base-devel Role
This Ansible role installs the essential `base-devel` package group on Arch Linux systems.
## Description
The `base-devel` package group contains all fundamental development tools required for building and compiling software from source on Arch Linux and compatible distributions.
## Usage
After deploying this role, all common build dependencies will be available on the system, allowing you to compile and install software packages that require development tools.
## Features
- Installs all packages from the `base-devel` group
- Ensures your system is ready for software compilation and development
## Further Resources
- [Arch Wiki: base-devel](https://wiki.archlinux.org/title/Development_packages)

View File

@@ -0,0 +1,24 @@
---
galaxy_info:
author: "Kevin Veen-Birkenbach"
description: >
This role installs the base-devel package group, providing all core development tools needed for building software on Arch Linux systems.
license: "CyMaIS NonCommercial License (CNCL)"
license_url: "https://s.veen.world/cncl"
company: |
Kevin Veen-Birkenbach
Consulting & Coaching Solutions
https://www.veen.world
galaxy_tags:
- archlinux
- development
- base-devel
- build
- tools
repository: "https://github.com/kevinveenbirkenbach/cymais"
issue_tracker_url: "https://github.com/kevinveenbirkenbach/cymais/issues"
documentation: "https://wiki.archlinux.org/title/Development_packages"
logo:
class: "fas fa-tools"
run_after: []
dependencies: []

View File

@@ -0,0 +1,5 @@
- name: install base-devel
community.general.pacman:
name: base-devel
state: present
become: true

View File

@@ -21,5 +21,9 @@ galaxy_info:
repository: "https://s.veen.world/cymais"
issue_tracker_url: "https://s.veen.world/cymaisissues"
documentation: "https://s.veen.world/cymais"
logo:
class: "fas fa-archive"
dependencies:
- dev-fakeroot
- dev-git
- dev-base-devel

View File

@@ -1,9 +1,8 @@
- name: install yay
community.general.pacman:
name:
- yay
- patch
name:
- base-devel
- patch
state: present
- name: Create the `aur_builder` user
@@ -19,4 +18,22 @@
path: /etc/sudoers.d/11-install-aur_builder
line: 'aur_builder ALL=(ALL) NOPASSWD: /usr/bin/pacman'
create: yes
validate: 'visudo -cf %s'
validate: 'visudo -cf %s'
- name: Clone yay from AUR
become: yes
become_user: aur_builder
git:
repo: https://aur.archlinux.org/yay.git
dest: /home/aur_builder/yay
clone: yes
update: yes
- name: Build and install yay
become: yes
become_user: aur_builder
shell: |
cd /home/aur_builder/yay
makepkg -si --noconfirm
args:
creates: /usr/bin/yay

View File

@@ -0,0 +1,2 @@
# Todos
- Make network name equal to get_entity_name

View File

@@ -1,3 +1,3 @@
docker_compose_skipp_file_creation: false # If set to true the file creation will be skipped
docker_repository: false # Activates docker repository download and routine
docker_pull_git_repository: false # Activates docker repository download and routine
docker_compose_flush_handlers: false # Set to true in the vars/main.yml of the including role to autoflush after docker compose routine

View File

@@ -14,10 +14,12 @@
mode: '0755'
with_dict: "{{ docker_compose.directories }}"
- include_tasks: "repository.yml"
when: docker_repository | bool
- name: "Include routines to set up a git repository based installaion for '{{application_id}}'."
include_tasks: "repository.yml"
when: docker_pull_git_repository | bool
- include_tasks: "files.yml"
- name: "Include routines file management routines for '{{application_id}}'."
include_tasks: "files.yml"
when: not docker_compose_skipp_file_creation | bool
- name: "flush database, docker and proxy for '{{ application_id }}'"

View File

@@ -1,8 +1,10 @@
{# Base for docker services #}
restart: {{docker_restart_policy}}
{% if application_id | has_env %}
env_file:
- "{{docker_compose.files.env}}"
{% endif %}
logging:
driver: journald

View File

@@ -20,5 +20,5 @@ galaxy_info:
- name: Archlinux
versions: [ all ]
dependencies:
- sys-pgm-aur
- dev-yay

View File

@@ -8,5 +8,5 @@ galaxy_info:
versions:
- all
dependencies:
- sys-pgm-aur
- dev-yay
- sys-alm-compose

View File

@@ -0,0 +1,2 @@
# Todos
- Store which applications have already been installed during a play, so that the installation process isn't triggered twice

View File

@@ -1,7 +1,9 @@
# run_once_srv_web_7_7_inj_compose: deactivated
- name: "include role srv-web-7-7-inj-compose for {{domain}}"
include_role:
name: srv-web-7-7-inj-compose
- name: "include role srv-web-6-6-tls-core for {{domain}}"
include_role:
name: srv-web-6-6-tls-core
name: srv-web-6-6-tls-core

View File

@@ -1,3 +1,5 @@
# run_once_srv_web_7_7_dns_records: deactivated
- name: Create or update Cloudflare A-record for {{ item }}
community.general.cloudflare_dns:
api_token: "{{ cloudflare_api_token }}"

29
roles/sys-cli/README.md Normal file
View File

@@ -0,0 +1,29 @@
# CyMaIS CLI
This Ansible role installs and makes the CyMaIS CLI available on your system.
## Description
After deploying this role, you will have access to the `cymais` command-line interface (CLI), which is the central tool for managing and operating all aspects of your CyMaIS environment.
## Usage
Once this role has been applied, you can run all CLI commands using:
```
cymais --help
```
to get a list of available commands and options.
## Features
- Installs the CyMaIS CLI automatically
- Ensures the CLI is available system-wide
- All commands accessible via `cymais --help`
## Further Resources
- [CyMaIS Documentation](https://github.com/kevinveenbirkenbach/cymais/)

View File

@@ -0,0 +1,24 @@
galaxy_info:
author: "Kevin Veen-Birkenbach"
description: >
This role installs and provides the CyMaIS CLI, enabling you to manage your entire CyMaIS environment from the command line. After deployment, the `cymais` command is available.
license: "CyMaIS NonCommercial License (CNCL)"
license_url: "https://s.veen.world/cncl"
company: |
Kevin Veen-Birkenbach
Consulting & Coaching Solutions
https://www.veen.world
galaxy_tags:
- cymais
- cli
- management
- automation
repository: "https://github.com/kevinveenbirkenbach/cymais"
issue_tracker_url: "https://github.com/kevinveenbirkenbach/cymais/issues"
documentation: "https://github.com/kevinveenbirkenbach/cymais/"
logo:
class: "fa-solid fa-terminal"
run_after: []
dependencies:
- dev-yay

View File

@@ -0,0 +1,10 @@
- name: "pkgmgr install cymais"
include_role:
name: pkgmgr-install
vars:
package_name: cymais
- name: Get cymais base path
command: pkgmgr path cymais
register: cymais_path_cmd
changed_when: false

View File

@@ -1,6 +1,7 @@
- name: "reset (if enabled)"
include_tasks: reset.yml
when: mode_reset | bool and run_once_gen_timer is not defined
when: mode_reset | bool and run_once_sys_timer is not defined
- name: create {{service_name}}.cymais.timer
template:
@@ -18,5 +19,5 @@
- name: run {{ role_name }} once
set_fact:
run_once_gen_timer: true
when: run_once_gen_timer is not defined
run_once_sys_timer: true
when: run_once_sys_timer is not defined

View File

@@ -24,4 +24,4 @@ galaxy_info:
issue_tracker_url: "https://s.veen.world/cymaisissues"
documentation: "https://s.veen.world/cymais"
dependencies:
- sys-pgm-aur
- dev-yay

View File

@@ -21,5 +21,5 @@ galaxy_info:
- name: Archlinux
versions: [ all ]
dependencies:
- sys-pgm-aur
- dev-yay

View File

@@ -25,4 +25,5 @@ galaxy_info:
dependencies:
- desk-git
- dev-make
- dev-gcc
- dev-gcc
- dev-yay

View File

@@ -1,8 +1,7 @@
- name: install administration tools
community.general.pacman:
pacman:
name:
- base-devel
- yay
- cmake
- fdupes
- p7zip

View File

@@ -15,8 +15,4 @@ bbb_env_file_origine: "{{ bbb_repository_directory }}.env"
docker_compose_skipp_file_creation: true # Skipp creation of docker-compose.yml file
# Setup
bigbluebutton_setup: "{{ applications | get_app_conf(application_id, 'setup') }}"
# Credentials
bigbluebutton_shared_secret: "{{ applications | get_app_conf(application_id, 'credentials.shared_secret') }}"
bigbluebutton_api_suffix: "{{ applications | get_app_conf(application_id, 'api_suffix') }}"
bigbluebutton_setup: "{{ applications | get_app_conf(application_id, 'setup') }}"

View File

@@ -1,4 +1,4 @@
repository: "discourse_repository" # Name of the repository folder
repository: "discourse_repository" # Name of the repository folder
features:
matomo: true
css: true
@@ -24,12 +24,9 @@ docker:
enabled: true
redis:
enabled: true
# This container is propably wrong name.
# Chance is high that the name is discourse_application.
# @todo check this out and repair it if necessary
discourse:
name: "discourse"
image: "local_discourse/discourse_application" # Necessary to define this for the docker 2 loc backup
image: "local_discourse/<< defaults_applications[web-app-discourse].docker.services.discourse.name >>" # Necessary to define this for the docker 2 loc backup
backup:
no_stop_required: true
volumes:

View File

@@ -1,14 +1,14 @@
---
- name: "stop and remove discourse container if it exist"
docker_container:
name: "{{ discourse_name }}"
name: "{{ discourse_container }}"
state: absent
register: container_action
failed_when: container_action.failed and 'No such container' not in container_action.msg
listen: recreate discourse
- name: "add central database temporary to {{application_id}}_default"
command: docker network connect {{applications | get_app_conf(application_id, 'network', True)}} {{ database_host }}
- name: "add central database temporary to {{ discourse_network }}"
command: "docker network connect {{ discourse_network }} {{ database_host }}"
failed_when: >
result.rc != 0 and
'already exists in network' not in result.stderr
@@ -17,7 +17,7 @@
listen: recreate discourse
- name: rebuild discourse
shell: ./launcher rebuild {{ discourse_name }}
shell: ./launcher rebuild {{ discourse_container }}
args:
executable: /bin/bash
chdir: "{{docker_repository_directory }}"

View File

@@ -1,74 +1,79 @@
---
- name: "reset (if enabled)"
include_tasks: reset.yml
when: mode_reset | bool and run_once_docker_discourse is not defined
- name: "Setup {{ application_id }}"
when: run_once_web_app_discourse is not defined
block:
- name: "reset (if enabled)"
include_tasks: reset.yml
when: mode_reset | bool
# Necessary for building: https://chat.openai.com/share/99d258cc-294b-4924-8eef-02fe419bb838
- name: install which
pacman:
name: which
state: present
when: run_once_docker_discourse is not defined
# Necessary for building: https://chat.openai.com/share/99d258cc-294b-4924-8eef-02fe419bb838
- name: install which
pacman:
name: which
state: present
- name: "load docker, db and proxy for {{application_id}}"
include_role:
name: cmp-db-docker-proxy
when: run_once_docker_discourse is not defined
- name: "load docker, db and proxy for {{application_id}}"
include_role:
name: cmp-db-docker-proxy
- name: pull docker repository
git:
repo: "https://github.com/discourse/discourse_docker.git"
dest: "{{docker_repository_directory }}"
update: yes
notify: recreate discourse
become: true
ignore_errors: true
when: run_once_docker_discourse is not defined
- name: pull docker repository
git:
repo: "https://github.com/discourse/discourse_docker.git"
dest: "{{docker_repository_directory }}"
update: yes
notify: recreate discourse
become: true
ignore_errors: true
- name: set chmod 700 for {{docker_repository_directory }}containers
ansible.builtin.file:
path: "{{docker_repository_directory }}/containers"
mode: '700'
state: directory
when: run_once_docker_discourse is not defined
- name: set chmod 700 for {{docker_repository_directory }}containers
ansible.builtin.file:
path: "{{docker_repository_directory }}/containers"
mode: '700'
state: directory
- name: "copy configuration to {{discourse_application_yml_destination}}"
template:
src: discourse_application.yml.j2
dest: "{{discourse_application_yml_destination}}"
notify: recreate discourse
when: run_once_docker_discourse is not defined
- name: "copy configuration to {{discourse_application_yml_destination}}"
template:
src: config.yml.j2
dest: "{{ discourse_application_yml_destination }}"
notify: recreate discourse
- name: flush, to recreate discourse app
meta: flush_handlers
when: run_once_docker_discourse is not defined
- name: "Verify that {{ discourse_container }} is running"
shell: docker compose ps --filter status=running --format '{{"{{"}}.Name{{"}}"}}' | grep -x {{ discourse_container }}
register: docker_ps
changed_when: docker_ps.rc == 1
failed_when: docker_ps.rc not in [0, 1]
notify: recreate discourse
- name: "Connect {{ discourse_name }} to network {{ applications | get_app_conf('svc-db-postgres', 'docker.network' ) }}"
command: >
docker network connect {{ applications | get_app_conf('svc-db-postgres', 'docker.network' ) }} {{ discourse_name }}
register: network_connect
failed_when: >
network_connect.rc != 0 and
"Error response from daemon: endpoint with name {{ discourse_name }} already exists in network {{ applications | get_app_conf('svc-db-postgres', 'docker.network' ) }}"
not in network_connect.stderr
changed_when: network_connect.rc == 0
when:
- applications | get_app_conf(application_id, 'features.central_database', False)
- run_once_docker_discourse is not defined
- name: flush, to recreate discourse app
meta: flush_handlers
- name: "Remove {{ discourse_network }} from {{ database_host }}"
command: >
docker network disconnect {{ discourse_network }} {{ database_host }}
register: network_disconnect
failed_when: >
network_disconnect.rc != 0 and
'is not connected to network {{ discourse_network }}' not in network_disconnect.stderr
changed_when: network_disconnect.rc == 0
when:
- applications | get_app_conf(application_id, 'features.central_database', False)
- run_once_docker_discourse is not defined
- name: Set error string for network already exists
set_fact:
docker_discourse_already_in_net: "Error response from daemon: endpoint with name {{ discourse_container }} already exists in network {{ discourse_pg_network }}"
- name: run the docker_discourse tasks once
set_fact:
run_once_docker_discourse: true
when: run_once_docker_discourse is not defined
- name: "Connect {{ discourse_container }} to network {{ discourse_pg_network }}"
command: >
docker network connect {{ discourse_pg_network }} {{ discourse_container }}
register: network_connect
failed_when: >
network_connect.rc != 0 and
docker_discourse_already_in_net not in network_connect.stderr
changed_when: network_connect.rc == 0
when:
- applications | get_app_conf(application_id, 'features.central_database', False)
- name: "Remove {{ discourse_network }} from {{ database_host }}"
command: >
docker network disconnect {{ discourse_network }} {{ database_host }}
register: network_disconnect
failed_when: >
network_disconnect.rc != 0 and
'is not connected to network {{ discourse_network }}' not in network_disconnect.stderr
changed_when: network_disconnect.rc == 0
when:
- applications | get_app_conf(application_id, 'features.central_database', False)
- name: run the docker_discourse tasks once
set_fact:
run_once_web_app_discourse: true
when: run_once_web_app_discourse is not defined

View File

@@ -6,9 +6,9 @@
cmd: "docker network disconnect {{applications | get_app_conf(application_id, 'network', True)}} {{ database_host }}"
ignore_errors: true
- name: "destroy container discourse_application"
- name: "destroy container {{ discourse_container }}"
command:
cmd: "./launcher destroy discourse_application"
cmd: "./launcher destroy {{ discourse_container }}"
chdir: "{{ docker_repository_directory }}"
ignore_errors: true
notify: recreate discourse

View File

@@ -41,7 +41,7 @@ env:
UNICORN_WORKERS: 8
## Required. Discourse will not work with a bare IP number.
DISCOURSE_HOSTNAME: {{domains | get_domain(application_id)}}
DISCOURSE_HOSTNAME: {{ domains | get_domain(application_id) }}
## Uncomment if you want the container to be started with the same
## hostname (-h option) as specified above (default "$hostname-$config")
@@ -139,7 +139,7 @@ run:
#- exec: rails r "User.find_by_email('{{ users.administrator.email }}').update(username: '{{users.administrator.username}}')"
# The following code is just an inspiration, how to connect with the oidc account. as long as this is not set the admini account needs to be manually connected with oidc
# docker exec -it discourse_application rails runner "user = User.find_by_email('test@cymais.cloud'); UserAuth.create(user_id: user.id, provider: 'oidc', uid: 'eindeutige_oidc_id', info: { name: user.username, email: user.email })"
# docker exec -it {{ discourse_container }} rails runner "user = User.find_by_email('test@cymais.cloud'); UserAuth.create(user_id: user.id, provider: 'oidc', uid: 'eindeutige_oidc_id', info: { name: user.username, email: user.email })"
# OIDC Activation
- exec: rails r "SiteSetting.openid_connect_enabled = true"
@@ -178,4 +178,4 @@ run:
docker_args:
- --network={{application_id}}_default
- --name={{ discourse_name }}
- --name={{ discourse_container }}

View File

@@ -1,15 +1,16 @@
application_id: "web-app-discourse"
# Database
database_password: "{{ applications | get_app_conf(application_id, 'credentials.database_password', True) }}"
database_password: "{{ applications | get_app_conf(application_id, 'credentials.database_password') }}"
database_type: "postgres"
# Discourse
discourse_name: "{{ applications | get_app_conf(application_id, 'docker.services.discourse.name', True) }}"
discourse_application_yml_destination: "{{ docker_repository_directory }}containers/{{discourse_name }}.yml"
discourse_network: "{{ applications | get_app_conf(application_id, 'docker.network', True) }}"
discourse_volume: "{{ applications | get_app_conf(application_id, 'docker.volumes.data', True) }}"
discourse_container: "{{ applications | get_app_conf(application_id, 'docker.services.discourse.name') }}"
discourse_application_yml_destination: "{{ docker_repository_directory }}containers/{{ discourse_container }}.yml"
discourse_network: "{{ applications | get_app_conf(application_id, 'docker.network') }}"
discourse_volume: "{{ applications | get_app_conf(application_id, 'docker.volumes.data') }}"
discourse_pg_network: "{{ applications | get_app_conf('svc-db-postgres', 'docker.network' ) }}"
# General Docker Configuration
docker_repository_directory : "{{ docker_compose.directories.services}}{{applications | get_app_conf(application_id, 'repository', True) }}/"
docker_repository_directory : "{{ docker_compose.directories.services}}{{applications | get_app_conf( application_id, 'repository') }}/"
docker_compose_flush_handlers: false

View File

@@ -34,6 +34,9 @@
loop_control:
label: "{{ item.key }}"
- name: flush handlers to ensure that friendica is up before friendica addon configuration
meta: flush_handlers
- name: Ensure Friendica addons are in sync
command: >
docker compose exec --user www-data

View File

@@ -0,0 +1,21 @@
# MIG
This folder contains the Ansible role to deploy the Meta Infinite Graph for CyMaIS.
## Description
This role sets up the [Ansible Meta Infinite Graph](https://github.com/kevinveenbirkenbach/meta-infinite-graph) for CyMaIS. The Meta Infinite Graph visualizes all dependencies and relationships between CyMaIS roles, making the overall infrastructure structure transparent and easy to understand.
## Overview
The Meta Infinite Graph is an essential tool for analyzing, auditing, and maintaining the modular structure of the CyMaIS ecosystem. It provides a clear overview of all roles and how they are interconnected.
## Features
- Automatic deployment of the Meta Infinite Graph web application
- Shows all dependencies and connections between CyMaIS roles
- Useful for documentation and architecture transparency
## Further Resources
- [Meta Infinite Graph Homepage](https://github.com/kevinveenbirkenbach/meta-infinite-graph)

View File

@@ -0,0 +1,2 @@
# Todos
- Use svc-meta-creator to speed up deployment

View File

@@ -0,0 +1,37 @@
docker:
services:
redis:
enabled: false # No redis needed
database:
enabled: false # No database needed
features:
matomo: true # activate tracking
css: true # use custom cymais style
port-ui-desktop: true # Enable in port-ui
csp:
whitelist:
script-src-elem:
- https://cdn.jsdelivr.net
- https://kit.fontawesome.com
- https://code.jquery.com/
- https://unpkg.com/
style-src:
- https://cdn.jsdelivr.net
- https://cdnjs.cloudflare.com
font-src:
- https://cdnjs.cloudflare.com
- https://ka-f.fontawesome.com
- https://cdn.jsdelivr.net
connect-src:
- https://ka-f.fontawesome.com
#frame-src:
# - "{{ web_protocol }}://*.{{primary_domain}}"
flags:
style-src:
unsafe-inline: true
domains:
canonical:
- "mig.{{ primary_domain }}"
aliases:
- "meta-infinite-graph.{{ primary_domain }}"
build_data: true # Enables building of the metadata which the graph requires

View File

@@ -0,0 +1,25 @@
---
galaxy_info:
author: "Kevin Veen-Birkenbach"
description: >
The Meta Infinite Graph offers you an interactive, visual map of all CyMaIS roles and their dependencies—making it easy to explore, understand, and navigate the complete structure of your infrastructure.
license: "CyMaIS NonCommercial License (CNCL)"
license_url: "https://s.veen.world/cncl"
company: |
Kevin Veen-Birkenbach
Consulting & Coaching Solutions
https://www.veen.world
galaxy_tags:
- cymais
- meta
- visualization
- dependencies
- graph
repository: "https://github.com/kevinveenbirkenbach/meta-infinite-graph"
issue_tracker_url: "https://github.com/kevinveenbirkenbach/meta-infinite-graph/issues"
documentation: "https://github.com/kevinveenbirkenbach/meta-infinite-graph/"
logo:
class: ""
run_after: []
dependencies:
- sys-cli

View File

@@ -0,0 +1,35 @@
---
- block:
- name: Load docker compose vars
include_vars:
file: roles/docker-compose/vars/docker-compose.yml
name: mig_docker_compose
- name: Set roles volume variable
set_fact:
mig_roles_meta_volume: "{{ mig_docker_compose.docker_compose.directories.volumes }}/roles/"
- name: Set roles list variable
set_fact:
mig_roles_meta_list: "{{ mig_roles_meta_volume }}list.json"
- name: "load docker, proxy for '{{application_id}}'"
include_role:
name: cmp-docker-proxy
- name: Create tree
command: "cymais build tree --no-signal --alarm-timeout 0 -s {{ mig_roles_meta_volume }}"
when:
- mig_build_data
- name: Create roles list
command: "cymais build roles_list --no-signal --alarm-timeout 0 -o {{ mig_roles_meta_list }}"
when:
- mig_build_data
- name: run the web-app-mig tasks once
set_fact:
run_once_docker_web_app_mig: true
name: "Setup Meta Infinite Graph"
when: run_once_docker_web_app_mig is not defined

View File

@@ -0,0 +1,21 @@
{% include 'roles/docker-compose/templates/base.yml.j2' %}
{{ application_id | get_entity_name }}:
{% set container_port = 80 %}
{% include 'roles/docker-container/templates/base.yml.j2' %}
image: "{{ mig_image }}"
container_name: "{{ mig_container }}"
ports:
- 127.0.0.1:{{ports.localhost.http[application_id]}}:{{ container_port }}
build:
context: "{{docker_repository_path}}"
dockerfile: Dockerfile
volumes:
- "{{ mig_roles_meta_volume }}:/usr/share/nginx/html/roles:ro"
- "{{ docker_repository_path }}:/usr/share/nginx/html"
{% include 'roles/docker-container/templates/networks.yml.j2' %}
{% include 'roles/docker-container/templates/healthcheck/curl.yml.j2' %}
{% include 'roles/docker-compose/templates/networks.yml.j2' %}

View File

@@ -0,0 +1,12 @@
# General
application_id: web-app-mig # ID of the application, should be the name of the role folder
# Docker
docker_compose_flush_handlers: true
docker_pull_git_repository: true
docker_repository_address: "https://github.com/kevinveenbirkenbach/meta-infinite-graph"
# Helper variables
mig_image: "mig:latest"
mig_container: "mig"
mig_build_data: "{{ applications | get_app_conf(application_id, 'build_data') }}"

View File

@@ -23,12 +23,13 @@
include_tasks: oidc.yml
when: applications | get_app_conf(application_id, 'features.oidc', False)
- name: Run Moodle system check
command: >
docker exec --user {{ bitnami_user }} {{ moodle_container }}
php /opt/bitnami/moodle/admin/cli/checks.php
register: moodle_checks
changed_when: false
failed_when: >
moodle_checks.rc != 0 or
"OK: All" not in moodle_checks.stdout
# Deactivated because it doesn't return helpful warnings
#- name: Run Moodle system check
# command: >
# docker exec --user {{ bitnami_user }} {{ moodle_container }}
# php /opt/bitnami/moodle/admin/cli/checks.php
# register: moodle_checks
# changed_when: false
# failed_when: >
# moodle_checks.rc != 0 or
# "OK: All" not in moodle_checks.stdout

View File

@@ -1,7 +1,7 @@
plugin_configuration:
- appid: "bbb"
configkey: "api.secret"
configvalue: "{{ bigbluebutton_shared_secret }}"
configvalue: "{{ applications | get_app_conf('web-app-bigbluebutton', 'credentials.shared_secret', False,'') }}"
- appid: "bbb"
configkey: "api.url"
configvalue: "{{ domains | get_url('web-app-bigbluebutton', web_protocol) }}{{ bigbluebutton_api_suffix }}"
configvalue: "{{ domains | get_url('web-app-bigbluebutton', web_protocol) }}{{ applications | get_app_conf('web-app-bigbluebutton', 'api_suffix', False,'') }}"

View File

@@ -1,6 +1,6 @@
- name: "Transfering oauth2-proxy-keycloak.cfg.j2 to {{( path_docker_compose_instances | get_docker_paths(application_id)).directories.volumes }}"
- name: "Transfering oauth2-proxy-keycloak.cfg.j2 to {{( application_id | get_docker_paths(path_docker_compose_instances) ).directories.volumes }}"
template:
src: "{{ playbook_dir }}/roles/web-app-oauth2-proxy/templates/oauth2-proxy-keycloak.cfg.j2"
dest: "{{( path_docker_compose_instances | get_docker_paths(application_id)).directories.volumes }}{{applications | get_app_conf('oauth2-proxy','configuration_file')}}"
dest: "{{( application_id | get_docker_paths(path_docker_compose_instances) ).directories.volumes }}{{applications | get_app_conf('oauth2-proxy','configuration_file')}}"
notify:
- docker compose up

View File

@@ -1,7 +1,7 @@
application_id: "web-app-openproject"
docker_repository_address: "https://github.com/opf/openproject-deploy"
database_type: "postgres"
docker_repository: true
docker_pull_git_repository: true
openproject_version: "{{ applications | get_app_conf(application_id, 'docker.services.web.version', True) }}"
openproject_image: "{{ applications | get_app_conf(application_id, 'docker.services.web.image', True) }}"
openproject_volume: "{{ applications | get_app_conf(application_id, 'docker.volumes.data', True) }}"

View File

@@ -5,7 +5,7 @@
- name: "include create-domains.yml for peertube"
include_tasks: create-domains.yml
loop: "{{ domains.peertube }}"
loop: "{{ domains['web-app-peertube'] }}"
loop_control:
loop_var: domain
vars:

View File

@@ -1,7 +1,15 @@
application_id: "web-app-peertube"
database_type: "postgres"
oidc_plugin: "peertube-plugin-auth-openid-connect"
peertube_version: "{{ applications | get_app_conf(application_id, 'docker.services.peertube.version', True) }}"
peertube_image: "{{ applications | get_app_conf(application_id, 'docker.services.peertube.image', True) }}"
peertube_name: "{{ applications | get_app_conf(application_id, 'docker.services.peertube.name', True) }}"
peertube_volume: "{{ applications | get_app_conf(application_id, 'docker.volumes.data', True) }}"
# General
application_id: "web-app-peertube"
database_type: "postgres"
# Docker Specific
docker_compose_flush_handlers: true
# Role variables
peertube_version: "{{ applications | get_app_conf(application_id, 'docker.services.peertube.version', True) }}"
peertube_image: "{{ applications | get_app_conf(application_id, 'docker.services.peertube.image', True) }}"
peertube_name: "{{ applications | get_app_conf(application_id, 'docker.services.peertube.name', True) }}"
peertube_volume: "{{ applications | get_app_conf(application_id, 'docker.volumes.data', True) }}"
# OIDC
oidc_plugin: "peertube-plugin-auth-openid-connect"

View File

@@ -0,0 +1,2 @@
# Todos
- Solve OAuth2 Login Issue - See https://chatgpt.com/c/687a50b4-8d78-800f-a202-1631aa05fd4f

View File

@@ -1,7 +1,7 @@
features:
matomo: true
css: true
port-ui-desktop: false
port-ui-desktop: false
simpleicons: true # Activate Brand Icons for your groups
javascript: true # Necessary for URL sync
csp:

View File

@@ -1,4 +1,4 @@
application_id: "web-app-port-ui"
docker_repository_address: "https://github.com/kevinveenbirkenbach/port-ui"
config_inventory_path: "{{ inventory_dir }}/files/{{ inventory_hostname }}/docker/web-app-port-ui/config.yaml.j2"
docker_repository: true
docker_pull_git_repository: true

View File

@@ -10,7 +10,7 @@ taiga_image_frontend: >-
{{ 'robrotheram/taiga-front-openid' if applications | get_app_conf(application_id, 'features.oidc', True) and applications | get_app_conf(application_id, 'oidc.flavor', True) == 'robrotheram'
else 'taigaio/taiga-front' }}
taiga_frontend_conf_path: "{{docker_compose.directories.config}}conf.json"
docker_repository: true
docker_pull_git_repository: true
settings_files:
- urls
- local

View File

@@ -1,4 +1,6 @@
---
# run_once_web_opt_rdr_domains: deactivated
- name: "Include domains redirects"
include_tasks: redirect-domain.yml
vars:

View File

@@ -5,16 +5,16 @@ docker:
enabled: false # Enable Redis
database:
enabled: false # Enable the database
{{ application_id }}:
{{ application_id | get_entity_name }}:
backup:
no_stop_required: true # The images that don't need to stop
disabled: true # Disables the image
database_routine: true # Instead of copying a database routine will be triggered for this container
image: ""
version: "latest"
name: "web-app-{{ application_id }}"
image: "" # The docker image of the software you want to use
version: "latest" # The docker version of the software you want to use
container: "{{ application_id | get_entity_name }}" # The container name
volumes:
data: "web-app-{{ application_id }}_data"
data: "{{ application_id | get_entity_name }}_data"
features:
matomo: true # Enable Matomo Tracking
css: true # Enable Global CSS Styling
@@ -23,10 +23,22 @@ features:
central_database: false # Enable Central Database Network
recaptcha: false # Enable ReCaptcha
oauth2: false # Enable the OAuth2-Proy
javascript: false # Enables the custom JS in the javascript.js.j2 file
csp:
whitelist: {} # URL's which should be whitelisted
flags: {} # Flags which should be set
javascript: false # Enables the custom JS in the javascript.js.j2 file
csp:
whitelist: # URLs which should be whitelisted
script-src-elem: []
style-src: []
font-src: []
connect-src: []
frame-src: []
flags: # Flags which should be set
style-src:
unsafe-inline: false
script-src:
unsafe-inline: false
script-src-elem:
unsafe-inline: false
domains:
domains:
canonical: {} # Urls under which the domain should be directly accessible
aliases: [] # Alias redirections to the first element of the canonical domains

View File

@@ -5,19 +5,19 @@
- name: "load docker, db and proxy for '{{application_id}}'"
include_role:
name: cmp-db-docker-proxy
when: run_once_docker_{% endraw %}{{ application_id }}{% raw %} is not defined
when: run_once_docker_{% endraw %}{{ application_id | replace("_", "-") }}{% raw %} is not defined
{% endraw %}
{% else %}
{% raw %}
- name: "load docker, proxy for '{{application_id}}'"
include_role:
name: cmp-db-docker-proxy
when: run_once_docker_{% endraw %}{{ application_id }}{% raw %} is not defined
when: run_once_docker_{% endraw %}{{ application_id | replace("_", "-") }}{% raw %} is not defined
{% endraw %}
{% endif %}
{% raw %}
- name: run the {% endraw %}{{ application_id }}{% raw %} tasks once
- name: run the {% endraw %}{{ application_id | replace("_", "-") }}{% raw %} tasks once
set_fact:
run_once_docker_{% endraw %}{{ application_id }}{% raw %}: true
when: run_once_docker_{% endraw %}{{ application_id }}{% raw %} is not defined
run_once_docker_{% endraw %}{{ application_id | replace("_", "-") }}{% raw %}: true
when: run_once_docker_{% endraw %}{{ application_id | replace("_", "-") }}{% raw %} is not defined
{% endraw %}

View File

@@ -2,19 +2,25 @@
# All configuration possibilities are available in the config/main.yml file.
# General
application_id: {{ application_id }} # ID of the application, should be the name of the role folder
application_id: {{ application_id }} # ID of the application, should be the name of the role folder
# Database
database_type: 0 # Database type [postgres, mariadb]
database_type: 0 # Database type [postgres, mariadb]
# Docker
docker_compose_flush_handlers: true # When this is set to true an auto-flush after the docker-compose.yml, and env deploy is triggered, otherwise you have todo it manual.
docker_compose_skipp_file_creation: false # Skipp creation of docker-compose.yml file
# Checkout roles/docker-compose/defaults/main.yml for all configuration options
docker_compose_flush_handlers: true # When this is set to true an auto-flush after the docker-compose.yml and env deploy is triggered, otherwise you have to do it manually.
docker_compose_skipp_file_creation: false # Skip creation of the docker-compose.yml file
# The following variable mapping is optional, but imt makes it easier to read the code.
# I recommend, to use this mappings, but you can skipp it and access the config entries direct via get_app_conf
{{ application_id | get_cymais_dir }}_version: "{% raw %}{{ applications | get_app_conf(application_id, 'docker.services.{% endraw %}{{ application_id | get_cymais_dir }}{% raw %}.version', True) }}"{% endraw %}
{{ application_id | get_cymais_dir }}_image: "{% raw %}{{ applications | get_app_conf(application_id, 'docker.services.{% endraw %}{{ application_id | get_cymais_dir }}{% raw %}.image', True) }}"{% endraw %}
{{ application_id | get_cymais_dir }}_name: "{% raw %}{{ applications | get_app_conf(application_id, 'docker.services.{% endraw %}{{ application_id | get_cymais_dir }}{% raw %}.name', True) }}"{% endraw %}
{{ application_id | get_cymais_dir }}_volume: "{% raw %}{{ applications | get_app_conf(application_id, 'docker.volumes.data', True) }}"{% endraw %}
docker_pull_git_repository: true # This will automatically pull a repository from the 'docker_repository_address'
docker_repository_address: "" # The address of a repository which should be pulled
{# The following variable mapping is optional, but it makes it easier to read the code.#}
{# I recommend using these mappings, but you can skip them and access the config entries directly via 'get_app_conf'. #}
# These variables were autocreated. For a detailed explanation check out the config/main.yml file
{{ application_id | get_entity_name }}_version: "{% raw %}{{ applications | get_app_conf(application_id, 'docker.services.{% endraw %}{{ application_id | get_entity_name }}{% raw %}.version') }}"{% endraw %}
{{ application_id | get_entity_name }}_image: "{% raw %}{{ applications | get_app_conf(application_id, 'docker.services.{% endraw %}{{ application_id | get_entity_name }}{% raw %}.image') }}"{% endraw %}
{{ application_id | get_entity_name }}_container: "{% raw %}{{ applications | get_app_conf(application_id, 'docker.services.{% endraw %}{{ application_id | get_entity_name }}{% raw %}.name') }}"{% endraw %}
{{ application_id | get_entity_name }}_volume: "{% raw %}{{ applications | get_app_conf(application_id, 'docker.volumes.data') }}"{% endraw %}
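The get_entity_name filter itself is not part of this changeset; judging from its usage here (e.g. web-app-mig → mig for the container name and variable prefixes), it is assumed to strip the category prefix from an application id, roughly like this hypothetical sketch:

```python
def get_entity_name(application_id: str) -> str:
    # Assumed behaviour: drop the leading category prefix, keep the entity part.
    for prefix in ("web-app-", "svc-db-", "svc-", "desk-", "dev-", "sys-"):
        if application_id.startswith(prefix):
            return application_id[len(prefix):]
    return application_id

print(get_entity_name("web-app-mig"))        # -> "mig"
print(get_entity_name("web-app-discourse"))  # -> "discourse"
```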

View File

@@ -0,0 +1,22 @@
import os
import unittest
ROLES_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../roles'))
class TestRolesHaveMetaMain(unittest.TestCase):
def test_each_role_has_meta_main(self):
missing_meta = []
for role in os.listdir(ROLES_DIR):
role_path = os.path.join(ROLES_DIR, role)
if os.path.isdir(role_path):
meta_main = os.path.join(role_path, 'meta', 'main.yml')
if not os.path.isfile(meta_main):
missing_meta.append(role)
if missing_meta:
self.fail(
"The following roles are missing meta/main.yml:\n" +
"\n".join(missing_meta)
)
if __name__ == '__main__':
unittest.main()

View File

@@ -0,0 +1,89 @@
import os
import re
import unittest
from collections import defaultdict
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))
ROLES_DIR = os.path.join(PROJECT_ROOT, 'roles')
ROOT_TASKS_DIR = os.path.join(PROJECT_ROOT, 'tasks')
def is_under_root_tasks(fpath):
abs_path = os.path.abspath(fpath)
return abs_path.startswith(os.path.abspath(ROOT_TASKS_DIR) + os.sep)
def find_role_includes(roles_dir):
"""
Yields (filepath, line_number, role_name) for each import_role/include_role usage in roles/,
but ignores anything under the root-level tasks/ dir.
"""
for dirpath, _, filenames in os.walk(roles_dir):
for fname in filenames:
if not fname.endswith(('.yml', '.yaml')):
continue
fpath = os.path.join(dirpath, fname)
if is_under_root_tasks(fpath):
continue # Skip root-level tasks dir completely
try:
with open(fpath, 'r', encoding='utf-8') as f:
lines = f.readlines()
except Exception:
continue # Ignore unreadable files
for idx, line in enumerate(lines):
if 'import_role' in line or 'include_role' in line:
block = line + ''.join(lines[idx+1:idx+5])
match = re.search(r'name:\s*[\'"]?([\w\-]+)[\'"]?', block)
if match:
role_name = match.group(1)
yield fpath, idx + 1, role_name
def check_run_once_tag(content, role_name):
"""
Checks for run_once_{role_name} or # run_once_{role_name}: deactivated in content.
"""
pattern = (
rf'(run_once_{role_name.replace("-", "_")})'
rf'|(#\s*run_once_{role_name.replace("-", "_")}: deactivated)'
)
return re.search(pattern, content, re.IGNORECASE)
class TestRunOnceTag(unittest.TestCase):
def test_all_roles_have_run_once_tag(self):
role_to_locations = defaultdict(list)
role_to_first_missing = {}
# Collect all places where roles are included/imported
for fpath, line, role_name in find_role_includes(ROLES_DIR):
key = role_name.replace("-", "_")
role_to_locations[key].append((fpath, line, role_name))
# Now check only ONCE per role if the tag exists somewhere (the first location), and record missing
errors = {}
for key, usages in role_to_locations.items():
# Just pick the first usage for checking
fpath, line, role_name = usages[0]
try:
with open(fpath, 'r', encoding='utf-8') as f:
content = f.read()
except Exception:
continue
if not check_run_once_tag(content, role_name):
error_msg = (
f'Role "{role_name}" is imported/included but no "run_once_{key}" tag or deactivation comment found.\n'
f'First found at: {fpath}, line {line}\n'
f' → Add a line "run_once_{key}" to this file to prevent double execution.\n'
f' → To deliberately disable this warning for this role, add:\n'
f' # run_once_{key}: deactivated\n'
f'All occurrences:\n' +
''.join([f' - {fp}, line {ln}\n' for fp, ln, _ in usages])
)
errors[key] = error_msg
if errors:
msg = (
"Some included/imported roles in 'roles/' are missing a run_once tag or deactivation comment:\n\n"
+ "\n".join(errors.values())
)
self.fail(msg)
if __name__ == '__main__':
unittest.main()

View File

@@ -0,0 +1,66 @@
import os
import sys
import json
import tempfile
import shutil
import unittest
from unittest.mock import patch
# Import the script as a module (assumes the script is named tree.py)
SCRIPT_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), "../../../../cli/build/tree.py")
)
class TestTreeShadowFolder(unittest.TestCase):
def setUp(self):
# Create temp roles dir and a dummy role
self.roles_dir = tempfile.mkdtemp()
self.role_name = "dummyrole"
self.role_path = os.path.join(self.roles_dir, self.role_name)
os.makedirs(os.path.join(self.role_path, "meta"))
# Prepare shadow dir
self.shadow_dir = tempfile.mkdtemp()
# Patch sys.argv for the script
self.orig_argv = sys.argv[:]
sys.argv = [
SCRIPT_PATH,
"-d", self.roles_dir,
"-s", self.shadow_dir,
"-o", "json"
]
def tearDown(self):
sys.argv = self.orig_argv
shutil.rmtree(self.roles_dir)
shutil.rmtree(self.shadow_dir)
@patch("cli.build.tree.build_mappings")
@patch("cli.build.tree.output_graph")
def test_tree_json_written_to_shadow_folder(self, mock_output_graph, mock_build_mappings):
# Prepare dummy graph
dummy_graph = {"dummy": {"test": 42}}
mock_build_mappings.return_value = dummy_graph
# Run the script (as __main__)
import runpy
runpy.run_path(SCRIPT_PATH, run_name="__main__")
# Check file in shadow folder
expected_tree_path = os.path.join(
self.shadow_dir, self.role_name, "meta", "tree.json"
)
self.assertTrue(os.path.isfile(expected_tree_path), "tree.json not found in shadow folder")
# Check contents
with open(expected_tree_path) as f:
data = json.load(f)
self.assertEqual(data, dummy_graph, "tree.json content mismatch")
# Ensure nothing was written to original meta/
original_tree_path = os.path.join(self.role_path, "meta", "tree.json")
self.assertFalse(os.path.isfile(original_tree_path), "tree.json should NOT be in role's meta/")
if __name__ == "__main__":
unittest.main()

View File

@@ -0,0 +1,35 @@
import unittest
import os
import shutil
# Import the filter directly
from filter_plugins.has_env import has_env
class TestHasEnvFilter(unittest.TestCase):
def setUp(self):
# Create a test directory structure
self.base_dir = './testdata'
self.app_with_env = 'app_with_env'
self.app_without_env = 'app_without_env'
os.makedirs(os.path.join(self.base_dir, 'roles', self.app_with_env, 'templates'), exist_ok=True)
os.makedirs(os.path.join(self.base_dir, 'roles', self.app_without_env, 'templates'), exist_ok=True)
# Create an empty env.j2 file
with open(os.path.join(self.base_dir, 'roles', self.app_with_env, 'templates', 'env.j2'), 'w') as f:
f.write('')
def tearDown(self):
# Clean up the test data
if os.path.exists(self.base_dir):
shutil.rmtree(self.base_dir)
def test_env_exists(self):
"""Test that has_env returns True if env.j2 exists."""
self.assertTrue(has_env(self.app_with_env, base_dir=self.base_dir))
def test_env_not_exists(self):
"""Test that has_env returns False if env.j2 does not exist."""
self.assertFalse(has_env(self.app_without_env, base_dir=self.base_dir))
if __name__ == '__main__':
unittest.main()