Compare commits

...

15 Commits

Author SHA1 Message Date
944707ec41 Solved dependency bug which appeared due to autogeneration of meta/main.yml files 2025-07-09 18:01:58 +02:00
0b80ba6f54 Added auto generated meta/main.yml #1 for sphinx run 2025-07-09 17:47:43 +02:00
22049cd1ca Optimized READMEs and meta/main.yml files for Sphinx build 2025-07-09 17:42:28 +02:00
b610d211c5 Updated portfolio categories 2025-07-09 17:16:11 +02:00
da0b339995 Solved bug which appeared during refactoring in EspoCRM 2025-07-09 16:41:51 +02:00
5adcc5b931 Moved build before validation, because otherwise it will fail 2025-07-09 16:23:58 +02:00
338b09b755 Added flush 2025-07-09 15:48:59 +02:00
f3939661e4 Implemented filter functions to get roles by application_id 2025-07-09 14:52:51 +02:00
c9c73cbdb2 Decoupled database, Docker and proxy 2025-07-09 14:21:30 +02:00
73329506a9 Solved bug which existed due to user structure refactoring 2025-07-09 12:31:37 +02:00
e7322a239e Solved different bugs 2025-07-09 11:51:42 +02:00
a026681553 Task include tests implemented, nginx restart, etc. 2025-07-09 11:41:17 +02:00
46cf65f296 Optimized Matomo role 2025-07-09 10:59:36 +02:00
af3767fdfa General optimizations 2025-07-09 10:17:32 +02:00
a69b2c9cb2 Solved run_after dependency bug 2025-07-09 06:47:10 +02:00
196 changed files with 2072 additions and 971 deletions

View File

@@ -3,13 +3,14 @@ APPLICATIONS_OUT := ./group_vars/all/04_applications.yml
APPLICATIONS_SCRIPT := ./cli/generate_applications.py APPLICATIONS_SCRIPT := ./cli/generate_applications.py
USERS_OUT := ./group_vars/all/03_users.yml USERS_OUT := ./group_vars/all/03_users.yml
USERS_SCRIPT := ./cli/generate_users.py USERS_SCRIPT := ./cli/generate_users.py
INCLUDES_OUT := ./tasks/utils/web-app-roles.yml INCLUDES_OUT := ./tasks/utils/server-roles.yml
INCLUDES_SCRIPT := ./cli/generate_playbook.py INCLUDES_SCRIPT := ./cli/generate_playbook.py
EXTRA_USERS := $(shell \ EXTRA_USERS := $(shell \
find $(ROLES_DIR) -maxdepth 1 -type d -name 'docker*' -printf '%f\n' \ find $(ROLES_DIR) -maxdepth 1 -type d -name '*' -printf '%f\n' \
| sed -E 's/^docker[_-]?//' \ | sed -E 's/.*-//' \
| grep -E -x '[a-z0-9]+' \ | grep -E -x '[a-z0-9]+' \
| sort -u \
| paste -sd, - \ | paste -sd, - \
) )
@@ -24,12 +25,19 @@ build:
@echo "🔧 Generating users defaults → $(USERS_OUT) from roles in $(ROLES_DIR)" @echo "🔧 Generating users defaults → $(USERS_OUT) from roles in $(ROLES_DIR)"
@echo "🔧 Generating Docker role includes → $(INCLUDES_OUT)" @echo "🔧 Generating Docker role includes → $(INCLUDES_OUT)"
@mkdir -p $(dir $(INCLUDES_OUT)) @mkdir -p $(dir $(INCLUDES_OUT))
python3 $(INCLUDES_SCRIPT) $(ROLES_DIR) -o $(INCLUDES_OUT) -p web-app- python3 $(INCLUDES_SCRIPT) $(ROLES_DIR) -o $(INCLUDES_OUT) \
-p web-app \
-p web-svc \
-p svc-openldap \
-p svc-rdbms-postgres \
-p svc-rdbms-mariadb
@echo "✅ Docker role includes written to $(INCLUDES_OUT)" @echo "✅ Docker role includes written to $(INCLUDES_OUT)"
install: build install: build
@echo "⚙️ Install complete." @echo "⚙️ Install complete."
test: test:
@echo "🧪 Running Tests..." @echo "🧪 Running Python Tests..."
python -m unittest discover -s tests python -m unittest discover -s tests
@echo "📑 Syntax Checking Ansible Playbook..."
ansible-playbook playbook.yml --syntax-check

View File

@@ -49,7 +49,7 @@ More informations about the features you will find [here](docs/overview/Features
### Use it online 🌐 ### Use it online 🌐
Give CyMaIS a spin at cymais.cloud sign up in seconds, click around, and see how easy infra magic can be! 🚀🔧✨ Give CyMaIS a spin at [CyMaIS.cloud](https://cymais.cloud): sign up in seconds, click around, and see how easy infra magic can be! 🚀🔧✨
### Install locally 💻 ### Install locally 💻
1. **Install CyMaIS** via [Kevin's Package Manager](https://github.com/kevinveenbirkenbach/package-manager) 1. **Install CyMaIS** via [Kevin's Package Manager](https://github.com/kevinveenbirkenbach/package-manager)

View File

@@ -7,12 +7,26 @@ import datetime
import sys import sys
def run_ansible_playbook(inventory, playbook, modes, limit=None, allowed_applications=None, password_file=None, verbose=0, skip_tests=False): def run_ansible_playbook(inventory, modes, limit=None, allowed_applications=None, password_file=None, verbose=0, skip_tests=False, skip_validation=False):
start_time = datetime.datetime.now() start_time = datetime.datetime.now()
print(f"\n▶️ Script started at: {start_time.isoformat()}\n") print(f"\n▶️ Script started at: {start_time.isoformat()}\n")
print("\n🛠️ Building project (make build)...\n") print("\n🛠️ Building project (make build)...\n")
subprocess.run(["make", "build"], check=True) subprocess.run(["make", "build"], check=True)
script_dir = os.path.dirname(os.path.realpath(__file__))
playbook = os.path.join(os.path.dirname(script_dir), "playbook.yml")
if not skip_validation:
print("\n🔍 Validating inventory before deployment...\n")
try:
subprocess.run(
[sys.executable, os.path.join(script_dir, "validate_inventory.py"), os.path.dirname(inventory)],
check=True
)
except subprocess.CalledProcessError:
print("\n❌ Inventory validation failed. Deployment aborted.\n", file=sys.stderr)
sys.exit(1)
if not skip_tests: if not skip_tests:
print("\n🧪 Running tests (make test)...\n") print("\n🧪 Running tests (make test)...\n")
@@ -52,7 +66,6 @@ def run_ansible_playbook(inventory, playbook, modes, limit=None, allowed_applica
def main(): def main():
script_dir = os.path.dirname(os.path.realpath(__file__))
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(
description="Run the central Ansible deployment script to manage infrastructure, updates, and tests." description="Run the central Ansible deployment script to manage infrastructure, updates, and tests."
) )
@@ -120,17 +133,6 @@ def main():
args = parser.parse_args() args = parser.parse_args()
if not args.skip_validation:
print("\n🔍 Validating inventory before deployment...\n")
try:
subprocess.run(
[sys.executable, os.path.join(script_dir, "validate_inventory.py"), os.path.dirname(args.inventory)],
check=True
)
except subprocess.CalledProcessError:
print("\n❌ Inventory validation failed. Deployment aborted.\n", file=sys.stderr)
sys.exit(1)
modes = { modes = {
"mode_reset": args.reset, "mode_reset": args.reset,
"mode_test": args.test, "mode_test": args.test,
@@ -141,17 +143,16 @@ def main():
"host_type": args.host_type "host_type": args.host_type
} }
playbook_file = os.path.join(os.path.dirname(script_dir), "playbook.yml")
run_ansible_playbook( run_ansible_playbook(
inventory=args.inventory, inventory=args.inventory,
playbook=playbook_file,
modes=modes, modes=modes,
limit=args.limit, limit=args.limit,
allowed_applications=args.id, allowed_applications=args.id,
password_file=args.password_file, password_file=args.password_file,
verbose=args.verbose, verbose=args.verbose,
skip_tests=args.skip_tests skip_tests=args.skip_tests,
skip_validation=args.skip_validation
) )
if __name__ == "__main__": if __name__ == "__main__":

View File

@@ -80,7 +80,7 @@ def main():
sys.exit(1) sys.exit(1)
config_data["group_id"] = gid_number config_data["group_id"] = gid_number
result["defaults_applications"][application_id] = config_data result["defaults_applications"][application_id] = config_data
users_meta_file = role_dir / "meta" / "users.yml" users_meta_file = role_dir / "users" / "main.yml"
transformed_users = {} transformed_users = {}
if users_meta_file.exists(): if users_meta_file.exists():
users_meta = load_yaml_file(users_meta_file) users_meta = load_yaml_file(users_meta_file)

View File

@@ -1,13 +1,21 @@
#!/usr/bin/env python3
import os import os
import sys
import yaml import yaml
import argparse import argparse
from collections import defaultdict, deque from collections import defaultdict, deque
def find_roles(roles_dir, prefix=None): def find_roles(roles_dir, prefixes=None):
"""Find all roles in the given directory.""" """
Find all roles in the given directory whose names start with
any of the provided prefixes. If prefixes is empty or None,
include all roles.
"""
for entry in os.listdir(roles_dir): for entry in os.listdir(roles_dir):
if prefix and not entry.startswith(prefix): if prefixes:
continue if not any(entry.startswith(pref) for pref in prefixes):
continue
path = os.path.join(roles_dir, entry) path = os.path.join(roles_dir, entry)
meta_file = os.path.join(path, 'meta', 'main.yml') meta_file = os.path.join(path, 'meta', 'main.yml')
if os.path.isdir(path) and os.path.isfile(meta_file): if os.path.isdir(path) and os.path.isfile(meta_file):
@@ -28,16 +36,21 @@ def load_application_id(role_path):
return data.get('application_id') return data.get('application_id')
return None return None
def build_dependency_graph(roles_dir, prefix=None): def build_dependency_graph(roles_dir, prefixes=None):
"""Build a dependency graph where each role points to the roles it depends on.""" """
Build a dependency graph where each key is a role name and
its value is a list of roles that depend on it.
Also return in_degree counts and the roles metadata map.
"""
graph = defaultdict(list) graph = defaultdict(list)
in_degree = defaultdict(int) in_degree = defaultdict(int)
roles = {} roles = {}
for role_path, meta_file in find_roles(roles_dir, prefix): for role_path, meta_file in find_roles(roles_dir, prefixes):
run_after = load_run_after(meta_file) run_after = load_run_after(meta_file)
application_id = load_application_id(role_path) application_id = load_application_id(role_path)
role_name = os.path.basename(role_path) role_name = os.path.basename(role_path)
roles[role_name] = { roles[role_name] = {
'role_name': role_name, 'role_name': role_name,
'run_after': run_after, 'run_after': run_after,
@@ -45,37 +58,87 @@ def build_dependency_graph(roles_dir, prefix=None):
'path': role_path 'path': role_path
} }
# If the role has dependencies, build the graph
for dependency in run_after: for dependency in run_after:
graph[dependency].append(role_name) graph[dependency].append(role_name)
in_degree[role_name] += 1 in_degree[role_name] += 1
# Ensure roles with no dependencies have an in-degree of 0
if role_name not in in_degree: if role_name not in in_degree:
in_degree[role_name] = 0 in_degree[role_name] = 0
return graph, in_degree, roles return graph, in_degree, roles
def topological_sort(graph, in_degree): def find_cycle(roles):
"""Perform topological sort on the dependency graph.""" """
# Queue for roles with no incoming dependencies (in_degree == 0) Detect a cycle in the run_after relations:
queue = deque([role for role, degree in in_degree.items() if degree == 0]) roles: dict mapping role_name -> { 'run_after': [...], ... }
Returns a list of role_names forming the cycle (with the start repeated at end), or None.
"""
visited = set()
stack = set()
def dfs(node, path):
visited.add(node)
stack.add(node)
path.append(node)
for dep in roles.get(node, {}).get('run_after', []):
if dep not in visited:
res = dfs(dep, path)
if res:
return res
elif dep in stack:
idx = path.index(dep)
return path[idx:] + [dep]
stack.remove(node)
path.pop()
return None
for role in roles:
if role not in visited:
cycle = dfs(role, [])
if cycle:
return cycle
return None
def topological_sort(graph, in_degree, roles=None):
"""
Perform topological sort on the dependency graph.
If `roles` is provided, on error it will include detailed debug info.
"""
queue = deque([r for r, d in in_degree.items() if d == 0])
sorted_roles = [] sorted_roles = []
local_in = dict(in_degree)
while queue: while queue:
role = queue.popleft() role = queue.popleft()
sorted_roles.append(role) sorted_roles.append(role)
for nbr in graph.get(role, []):
# Reduce in-degree for roles dependent on the current role local_in[nbr] -= 1
for neighbor in graph[role]: if local_in[nbr] == 0:
in_degree[neighbor] -= 1 queue.append(nbr)
if in_degree[neighbor] == 0:
queue.append(neighbor)
if len(sorted_roles) != len(in_degree): if len(sorted_roles) != len(in_degree):
# If the number of sorted roles doesn't match the number of roles, cycle = find_cycle(roles or {})
# there was a cycle in the graph (not all roles could be sorted) if roles is not None:
raise Exception("Circular dependency detected among the roles!") if cycle:
header = f"Circular dependency detected: {' -> '.join(cycle)}"
else:
header = "Circular dependency detected among the roles!"
unsorted = [r for r in in_degree if r not in sorted_roles]
detail_lines = ["Unsorted roles and their dependencies:"]
for r in unsorted:
deps = roles.get(r, {}).get('run_after', [])
detail_lines.append(f" - {r} depends on {deps!r}")
detail_lines.append("Full dependency graph:")
detail_lines.append(f" {dict(graph)!r}")
raise Exception("\n".join([header] + detail_lines))
else:
if cycle:
raise Exception(f"Circular dependency detected: {' -> '.join(cycle)}")
else:
raise Exception("Circular dependency detected among the roles!")
return sorted_roles return sorted_roles
@@ -83,48 +146,38 @@ def print_dependency_tree(graph):
"""Print the dependency tree visually on the console.""" """Print the dependency tree visually on the console."""
def print_node(role, indent=0): def print_node(role, indent=0):
print(" " * indent + role) print(" " * indent + role)
for dependency in graph[role]: for dep in graph.get(role, []):
print_node(dependency, indent + 1) print_node(dep, indent + 1)
# Print the tree starting from roles with no dependencies
all_roles = set(graph.keys()) all_roles = set(graph.keys())
dependent_roles = {role for dependencies in graph.values() for role in dependencies} dependent = {r for deps in graph.values() for r in deps}
root_roles = all_roles - dependent_roles roots = all_roles - dependent
printed_roles = [] for root in roots:
print_node(root)
def collect_roles(role, indent=0): def generate_playbook_entries(roles_dir, prefixes=None):
printed_roles.append(role) """
for dependency in graph[role]: Generate playbook entries based on the sorted order.
collect_roles(dependency, indent + 1) Raises a ValueError if application_id is missing.
"""
for root in root_roles: graph, in_degree, roles = build_dependency_graph(roles_dir, prefixes)
collect_roles(root) sorted_names = topological_sort(graph, in_degree, roles)
return printed_roles
def generate_playbook_entries(roles_dir, prefix=None):
"""Generate playbook entries based on the sorted order."""
graph, in_degree, roles = build_dependency_graph(roles_dir, prefix)
# Detect cycles and get correct topological order
sorted_role_names = topological_sort(graph, in_degree)
entries = [] entries = []
for role_name in sorted_role_names: for role_name in sorted_names:
role = roles[role_name] role = roles[role_name]
# --- new validation block ---
if role.get('application_id') is None: if role.get('application_id') is None:
raise ValueError(f"Role '{role_name}' is missing an application_id") vars_file = os.path.join(role['path'], 'vars', 'main.yml')
# ---------------------------- raise ValueError(f"'application_id' missing in {vars_file}")
app_id = role['application_id'] app_id = role['application_id']
entries.append( entries.append(
f"- name: setup {app_id}\n" f"- name: setup {app_id}\n"
f" when: ('{app_id}' | application_allowed(group_names, allowed_applications))\n" f" when: ('{app_id}' | application_allowed(group_names, allowed_applications))\n"
f" include_role:\n" f" include_role:\n"
f" name: {role['role_name']}\n" f" name: {role_name}\n"
) )
entries.append( entries.append(
f"- name: flush handlers after {app_id}\n" f"- name: flush handlers after {app_id}\n"
@@ -137,32 +190,30 @@ def main():
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(
description='Generate an Ansible playbook include file from Docker roles, sorted by run_after order.' description='Generate an Ansible playbook include file from Docker roles, sorted by run_after order.'
) )
parser.add_argument( parser.add_argument('roles_dir', help='Path to directory containing role folders')
'roles_dir',
help='Path to directory containing role folders'
)
parser.add_argument( parser.add_argument(
'-p', '--prefix', '-p', '--prefix',
help='Only include roles whose names start with this prefix (e.g. web-app-, desk-)', action='append',
default=None help='Only include roles whose names start with any of these prefixes; can be specified multiple times'
) )
parser.add_argument( parser.add_argument('-o', '--output', default=None,
'-o', '--output', help='Output file path (default: stdout)')
help='Output file path (default: stdout)', parser.add_argument('-t', '--tree', action='store_true',
default=None help='Display the dependency tree of roles and exit')
)
parser.add_argument(
'-t', '--tree',
action='store_true',
help='Display the dependency tree of roles visually'
)
args = parser.parse_args()
# Generate and output the playbook entries args = parser.parse_args()
entries = generate_playbook_entries(args.roles_dir, args.prefix) prefixes = args.prefix or []
if args.tree:
graph, _, _ = build_dependency_graph(args.roles_dir, prefixes)
print_dependency_tree(graph)
sys.exit(0)
entries = generate_playbook_entries(args.roles_dir, prefixes)
output = ''.join(entries) output = ''.join(entries)
if args.output: if args.output:
os.makedirs(os.path.dirname(args.output), exist_ok=True)
with open(args.output, 'w') as f: with open(args.output, 'w') as f:
f.write(output) f.write(output)
print(f"Playbook entries written to {args.output}") print(f"Playbook entries written to {args.output}")

View File

@@ -1,182 +0,0 @@
### Guide to Create a New Docker Role for CyMaIS
This guide will walk you through the steps to add a new Docker role for a service (in this case, `my_service`) in **CyMaIS**. We will cover where to add the application settings, domain, and other required configuration to ensure that your new service is correctly integrated into the CyMaIS environment.
---
### **1. Define the Application Configuration in `templates/vars/applications.yml.j2`**
First, you'll need to add the default configuration for your new service under the `defaults_applications` section in `templates/vars/applications.yml.j2`.
#### **Steps:**
- Open `templates/vars/applications.yml.j2`
- Add the configuration for `my_service` under the `defaults_applications` section.
```yaml
defaults_applications:
## My Service Configuration
my_service:
version: "latest"
features: # Version of the service
matomo: true # Enable Matomo tracking for analytics
css: true # Enable or disable global CSS styling
portfolio_iframe: false # Allow embedding the landing page in an iframe (if true)
database: true # Enable central database integration
ldap: true # Enable ldap integration
oauth2: true # Enable oauth2 proxy
oidc: true # Enable oidc
```
---
### **2. Add the Domain for `my_service` in `group_vars/all/03_domains.yml`**
Next, define the domain for your service in the `group_vars/all/03_domains.yml` file. The domain should be dynamic, using the `{{ primary_domain }}` placeholder, which will automatically resolve to the correct domain based on the primary domain used for your environment.
#### **Steps:**
- Open `group_vars/all/03_domains.yml`
- Add the domain for `my_service`.
```yaml
defaults_domains:
# Other services...
my_service: "slides.{{ primary_domain }}" # Domain for the new service
```
---
### **3. Set the Application ID in `vars/main.yml`**
In the `vars/main.yml` file, set the `application_id` to `my_service`. This step is essential as it allows CyMaIS to correctly reference and configure the new service when deploying it via Docker.
#### **Steps:**
- Open `vars/main.yml`
- Add the `application_id` for the new service.
```yaml
application_id: "my_service" # Set the application ID for the service
```
---
### **4. Create the Docker Role for the New Service**
Now that you have defined the application settings, domain, and application ID, you need to create a Docker role that will build and run the containerized version of `my_service`.
#### **Steps:**
- Create a new directory under the `roles` directory, e.g., `roles/web-app-my_service`.
- Inside the `web-app-my_service` role, create the following files:
1. **`README.md`**:
- Provide documentation on the new service and how it works within CyMaIS.
2. **`tasks/main.yml`**:
- Define the tasks for building and running the Docker container for `my_service`.
Example `tasks/main.yml`:
```yaml
---
# Docker Routines for my_service
- name: "include docker-compose role"
include_role:
name: docker-compose
- name: install cymais-my_service
command:
cmd: "pkgmgr install cymais-my_service --clone-mode https"
notify: docker compose project build and setup
- name: Get path of cymais-my_service using pkgmgr
command: pkgmgr path cymais-my_service
register: path_cymais_my_service_output
- name: "include role srv-web-proxy-domain for {{ application_id }}"
include_role:
name: srv-web-proxy-domain
vars:
domain: "{{ domains | get_domain(application_id) }}"
http_port: "{{ ports.localhost.http[application_id] }}"
```
3. **`docker-compose.yml.j2`**:
- Define the `docker-compose.yml` template for building and running the Docker container for the new service.
Example `docker-compose.yml.j2`:
```yaml
services:
my_service:
build:
context: {{ path_cymais_my_service_output.stdout }}
dockerfile: {{ path_cymais_my_service_output.stdout }}/Dockerfile
ports:
- "127.0.0.1:{{ ports.localhost.http[application_id] }}:5000"
volumes:
- {{ path_cymais_my_service_output.stdout }}:/app
- {{ path_cymais_output.stdout }}:/source
```
4. **`vars/main.yml`**:
- Define any specific variables for `my_service`.
Example `vars/main.yml`:
```yaml
application_id: "my_service"
```
5. **`meta/main.yml`**:
- Add metadata for your new role.
Example `meta/main.yml`:
```yaml
galaxy_info:
author: "Your Name"
description: "Docker role to deploy and manage my_service within CyMaIS."
license: "CyMaIS NonCommercial License (CNCL)"
company: "Your Company"
min_ansible_version: "2.9"
platforms:
- name: Docker
versions:
- all
- name: Linux
versions:
- all
repository: "https://github.com/yourrepo/my_service"
documentation: "https://yourdocumentationlink"
```
---
### **5. Test the Configuration**
Once you have defined the Docker role, configuration settings, and other necessary files, it is essential to test your changes:
#### **Steps:**
- Run the Ansible playbook for deploying your new service (a minimal sketch follows this list).
- Check if `my_service` is correctly deployed and if the domain is resolving as expected.
- Verify that the application is accessible via the assigned port (e.g., `http://slides.{{ primary_domain }}:5000`).
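For example, a test run can be limited to the new service by supplying only its `application_id` in `allowed_applications`, the variable the generated playbook entries check via the `application_allowed` filter. A minimal sketch, assuming the list is passed as extra-vars; the file name and invocation details are assumptions:

```yaml
# Hypothetical extra-vars file (e.g. test-vars.yml) to deploy only my_service.
# Only the variable name 'allowed_applications' is taken from the generated playbook entries.
allowed_applications:
  - my_service
```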
---
### **6. Additional Steps for Integration**
- You can add additional configurations or adjust existing settings based on the requirements for `my_service`. For instance:
- Modify the health check settings in the `docker-compose.yml` template (see the sketch after this list).
- Update Nginx or other web servers to properly route traffic to your new service.
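A minimal sketch of such a health check, assuming `curl` is available inside the image and the service answers on the internal port 5000 used in the example above; the endpoint and timings are assumptions:

```yaml
# Hypothetical healthcheck addition for docker-compose.yml.j2.
services:
  my_service:
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:5000/"]
      interval: 30s
      timeout: 5s
      retries: 3
```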
---
### **Conclusion**
By following this guide, you have successfully added a new Dockerized service (`my_service`) to the CyMaIS platform. You have:
- Configured the service settings in `templates/vars/applications.yml.j2`
- Added the domain for the service in `group_vars/all/03_domains.yml`
- Set the `application_id` in `vars/main.yml`
- Created the necessary Docker role for managing `my_service`.
This process allows you to extend the functionality of CyMaIS with new services while maintaining a consistent and reproducible deployment workflow.
---
For any further details or troubleshooting, please consult the official CyMaIS documentation or reach out to the CyMaIS community for assistance.

View File

@@ -0,0 +1,88 @@
# filter_plugins/role_path_by_app_id.py
import os
import glob
import yaml
from ansible.errors import AnsibleFilterError
def abs_role_path_by_application_id(application_id):
"""
Searches all roles/*/vars/main.yml for application_id and returns
the absolute path of the role that matches. Raises an error if
zero or more than one match is found.
"""
base_dir = os.getcwd()
pattern = os.path.join(base_dir, 'roles', '*', 'vars', 'main.yml')
matches = []
for filepath in glob.glob(pattern):
try:
with open(filepath, 'r') as f:
data = yaml.safe_load(f) or {}
except Exception:
continue
if data.get('application_id') == application_id:
role_dir = os.path.dirname(os.path.dirname(filepath))
abs_path = os.path.abspath(role_dir)
matches.append(abs_path)
if len(matches) > 1:
raise AnsibleFilterError(
f"Multiple roles found with application_id='{application_id}': {matches}. "
"The application_id must be unique."
)
if not matches:
raise AnsibleFilterError(
f"No role found with application_id='{application_id}'."
)
return matches[0]
def rel_role_path_by_application_id(application_id):
"""
Searches all roles/*/vars/main.yml for application_id and returns
the relative path (from the project root) of the role that matches.
Raises an error if zero or more than one match is found.
"""
base_dir = os.getcwd()
pattern = os.path.join(base_dir, 'roles', '*', 'vars', 'main.yml')
matches = []
for filepath in glob.glob(pattern):
try:
with open(filepath, 'r') as f:
data = yaml.safe_load(f) or {}
except Exception:
continue
if data.get('application_id') == application_id:
role_dir = os.path.dirname(os.path.dirname(filepath))
rel_path = os.path.relpath(role_dir, base_dir)
matches.append(rel_path)
if len(matches) > 1:
raise AnsibleFilterError(
f"Multiple roles found with application_id='{application_id}': {matches}. "
"The application_id must be unique."
)
if not matches:
raise AnsibleFilterError(
f"No role found with application_id='{application_id}'."
)
return matches[0]
class FilterModule(object):
"""
Provides the filters `abs_role_path_by_application_id` and
`rel_role_path_by_application_id`.
"""
def filters(self):
return {
'abs_role_path_by_application_id': abs_role_path_by_application_id,
'rel_role_path_by_application_id': rel_role_path_by_application_id,
}
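A hedged usage sketch for these filters from an Ansible task; `my_service` is a placeholder `application_id` and the surrounding task is illustrative only:

```yaml
# Illustrative task: resolve the role directory that declares a given application_id.
- name: Show role path for my_service
  debug:
    msg: "{{ 'my_service' | rel_role_path_by_application_id }}"
```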

View File

@@ -17,6 +17,7 @@ HOST_DECIMAL_MARK: ","
deployment_mode: "single" # Use single, if you deploy on one server. Use cluster if you setup in cluster mode. deployment_mode: "single" # Use single, if you deploy on one server. Use cluster if you setup in cluster mode.
web_protocol: "https" # Web protocol type. Use https or http. If you run local you need to change it to http web_protocol: "https" # Web protocol type. Use https or http. If you run local you need to change it to http
web_port: "{{ 443 if web_protocol == 'https' else 80 }}" # Default port web applications will listen to
## Domain ## Domain
primary_domain_tld: "localhost" # Top Level Domain of the server primary_domain_tld: "localhost" # Top Level Domain of the server

View File

@@ -16,7 +16,7 @@ ports:
gitea: 4188 gitea: 4188
snipe-it: 4189 snipe-it: 4189
ldap: ldap:
ldap: 389 openldap: 389
http: http:
nextcloud: 8001 nextcloud: 8001
gitea: 8002 gitea: 8002

View File

@@ -30,7 +30,7 @@ defaults_networks:
subnet: 192.168.101.144/28 subnet: 192.168.101.144/28
keycloak: keycloak:
subnet: 192.168.101.160/28 subnet: 192.168.101.160/28
ldap: openldap:
subnet: 192.168.101.176/28 subnet: 192.168.101.176/28
listmonk: listmonk:
subnet: 192.168.101.192/28 subnet: 192.168.101.192/28

View File

@@ -6,7 +6,7 @@
# Helper Variables: # Helper Variables:
# Keep in mind to map these variables if there is ever the possibility for the user to define them in the inventory # Keep in mind to map these variables if there is ever the possibility for the user to define them in the inventory
_ldap_dn_base: "dc={{primary_domain_sld}},dc={{primary_domain_tld}}" _ldap_dn_base: "dc={{primary_domain_sld}},dc={{primary_domain_tld}}"
_ldap_server_port: "{% if applications.ldap.network.docker | bool %}{{ ports.localhost.ldap.ldap }}{% else %}{{ ports.localhost.ldaps.ldap }}{% endif %}" _ldap_server_port: "{% if applications.openldap.network.docker | bool %}{{ ports.localhost.ldap.openldap }}{% else %}{{ ports.localhost.ldaps.ldap }}{% endif %}"
_ldap_user_id: "uid" _ldap_user_id: "uid"
_ldap_filters_users_all: "(|(objectclass=inetOrgPerson))" _ldap_filters_users_all: "(|(objectclass=inetOrgPerson))"
@@ -25,7 +25,7 @@ ldap:
# The DN used to authenticate for regular directory operations under # The DN used to authenticate for regular directory operations under
# the data tree (adding users, modifying attributes, creating OUs, etc.). # the data tree (adding users, modifying attributes, creating OUs, etc.).
# Typically: “cn=admin,dc=example,dc=com” # Typically: “cn=admin,dc=example,dc=com”
data: "cn={{ applications.ldap.users.administrator.username }},{{ _ldap_dn_base }}" data: "cn={{ applications.openldap.users.administrator.username }},{{ _ldap_dn_base }}"
# ------------------------------------------------------------------------- # -------------------------------------------------------------------------
# Config-Tree Administrator Bind DN # Config-Tree Administrator Bind DN
@@ -33,7 +33,7 @@ ldap:
# need to load or modify schema, overlays, modules, or other server- # need to load or modify schema, overlays, modules, or other server-
# level settings. # level settings.
# Typically: “cn=admin,cn=config” # Typically: “cn=admin,cn=config”
configuration: "cn={{ applications.ldap.users.administrator.username }},cn=config" configuration: "cn={{ applications.openldap.users.administrator.username }},cn=config"
ou: ou:
# ------------------------------------------------------------------------- # -------------------------------------------------------------------------
@@ -55,14 +55,14 @@ ldap:
# for ordinary user/group operations, and vice versa. # for ordinary user/group operations, and vice versa.
# Password to access dn.bind # Password to access dn.bind
bind_credential: "{{ applications.ldap.credentials.administrator_database_password }}" bind_credential: "{{ applications.openldap.credentials.administrator_database_password }}"
server: server:
domain: "{{applications.ldap.hostname if applications.ldap.network.docker | bool else domains.ldap}}" # Mapping for public or locale access domain: "{{applications.openldap.hostname if applications.openldap.network.docker | bool else domains.openldap}}" # Mapping for public or locale access
port: "{{_ldap_server_port}}" port: "{{_ldap_server_port}}"
uri: "{% if applications.ldap.network.docker | bool %}ldap://{{ applications.ldap.hostname }}{% else %}ldaps://{{ domains.ldap }}{% endif %}:{{ _ldap_server_port }}" uri: "{% if applications.openldap.network.docker | bool %}ldap://{{ applications.openldap.hostname }}{% else %}ldaps://{{ domains.openldap }}{% endif %}:{{ _ldap_server_port }}"
security: "" #TLS, SSL - Leave empty for none security: "" #TLS, SSL - Leave empty for none
network: network:
local: "{{applications.ldap.network.docker}}" # Uses the application configuration to define if local network should be available or not local: "{{applications.openldap.network.docker}}" # Uses the application configuration to define if local network should be available or not
user: user:
objects: objects:
structural: structural:

View File

@@ -18,7 +18,7 @@ For a complete list of role categories and detailed definitions, see:
Generic helpers and language/tool installers (e.g. `gen-git`, `gen-locales`, `gen-timer`) Generic helpers and language/tool installers (e.g. `gen-git`, `gen-locales`, `gen-timer`)
- **desk-*** - **desk-***
Desktop environment and application roles (e.g. `desk-gnome`, `utils-desk-browser`, `desk-libreoffice`) Desktop environment and application roles (e.g. `desk-gnome`, `desk-browser`, `desk-libreoffice`)
--- ---
@@ -28,7 +28,7 @@ For a complete list of role categories and detailed definitions, see:
Installs and configures the base Nginx server. Installs and configures the base Nginx server.
- **srv-web-tls-*** - **srv-web-tls-***
Manages TLS certificates and renewal (formerly “https”). Manages TLS certificates and renewal (formerly “https”; e.g. `srv-web-tls-deploy`, `srv-web-tls-renew`).
- **srv-web-proxy-*** - **srv-web-proxy-***
Proxy and vhost orchestration roles (domain setup, OAuth2 proxy, etc.) Proxy and vhost orchestration roles (domain setup, OAuth2 proxy, etc.)
@@ -43,43 +43,43 @@ For a complete list of role categories and detailed definitions, see:
Static-content servers (assets, HTML, legal pages, file hosting). Static-content servers (assets, HTML, legal pages, file hosting).
- **web-app-*** - **web-app-***
Application-specific Docker/Compose roles (e.g. GitLab, Nextcloud, Mastodon). Application-specific Docker/Compose roles (e.g. GitLab, Nextcloud, Mastodon, Redis).
--- ---
## Network ## Network
- **net-*** - **net-***
Network infrastructure (DNS records, WireGuard, Let's Encrypt entrypoints). Network infrastructure (DNS records, Let's Encrypt HTTP entrypoints, WireGuard, etc.)
- **svc-*** - **svc-***
Dockerdeployed services that aren't “apps” (RDBMS, LDAP, Redis, OpenLDAP). Docker-deployed services that aren't “apps” (RDBMS, LDAP, Redis, OpenLDAP).
--- ---
## Monitoring & Alerting ## Monitoring & Alerting
- **mon-bot-*** - **mon-bot-***
“Bot”-style health checks with alerts via Telegram, email, etc. “Bot”-style health checks (Btrfs, diskspace, Docker, journalctl, CSP crawler, webserver) with alerts.
- **monitor-core-*** - **monitor-core-***
Low-level system monitors (journalctl, Docker containers, disk space). Low-level system monitors (journalctl, Docker containers, disk space, etc.)
- **alert-*** - **alert-***
Failure or status notification handlers (core, email, Telegram). Notification handlers for failures (core, email, Telegram).
--- ---
## Maintenance & Healing ## Maintenance & Healing
- **maint-*** - **maint-***
Periodic maintenance tasks (Btrfs balancing, swapfile management). Periodic maintenance tasks (Btrfs balancing, swapfile management, etc.)
- **maint-docker-*** - **maint-docker-***
Automated recovery and restarts for Docker Compose workloads. Automated recovery and restarts for Docker Compose workloads.
- **cln-*** - **cln-***
Housekeeping tasks (old backups, certs, log rotation). Housekeeping tasks (old backups, expired certs, log rotation).
--- ---
@@ -96,7 +96,7 @@ For a complete list of role categories and detailed definitions, see:
Keeps OS and language packages up to date (`update-apt`, `update-docker`, `update-pip`, etc.) Keeps OS and language packages up to date (`update-apt`, `update-docker`, `update-pip`, etc.)
- **pkgmgr-*** - **pkgmgr-***
Language or platform package managers (npm, pip, AUR helper). Language or platform package managers (npm, pip, AUR helper, etc.)
--- ---
@@ -106,14 +106,15 @@ For a complete list of role categories and detailed definitions, see:
Creates user accounts and SSH keys. Creates user accounts and SSH keys.
- **user-administrator**, **user-root** - **user-administrator**, **user-root**
Specialized account configurations for privileged users. Specialized configurations for privileged users.
--- ---
> **Tip:** To find a role quickly, search for its prefix: > **Tip:** To find a role quickly, search for its prefix:
> `core-`, `gen-`, `desk-`, `srv-web-`, `web-svc-`, `web-app-`, > `core-`, `gen-`, `desk-`, `srv-web-`, `web-svc-`, `web-app-`,
> `net-`, `svc-`, `monitor-`, `alert-`, `maint-`, `cln-`, > `net-`, `svc-`, `mon-bot-`, `monitor-core-`, `alert-`,
> `bkp-`, `update-`, `pkgmgr-`, `user-`. > `maint-`, `maint-docker-`, `cln-`, `bkp-`, `update-`,
> `pkgmgr-`, `user-`.
--- ---

View File

@@ -24,3 +24,4 @@ galaxy_info:
documentation: "https://s.veen.world/cymais" documentation: "https://s.veen.world/cymais"
dependencies: dependencies:
- gen-msmtp - gen-msmtp
- core-daemon

View File

@@ -22,4 +22,5 @@ galaxy_info:
repository: "https://s.veen.world/cymais" repository: "https://s.veen.world/cymais"
issue_tracker_url: "https://s.veen.world/cymaisissues" issue_tracker_url: "https://s.veen.world/cymaisissues"
documentation: "https://s.veen.world/cymais" documentation: "https://s.veen.world/cymais"
dependencies: [] dependencies:
- core-daemon

View File

@@ -2,57 +2,55 @@
include_role: include_role:
name: pkgmgr-install name: pkgmgr-install
vars: vars:
package_name: bkp-docker-to-local package_name: "{{ bkp_docker_to_local_pkg }}"
when: run_once_backup_docker_to_local is not defined when: run_once_bkp_docker_to_local is not defined
- name: Retrieve bkp-docker-to-local path from pkgmgr - name: "Retrieve {{ bkp_docker_to_local_pkg }} path from pkgmgr"
command: pkgmgr path bkp-docker-to-local command: "pkgmgr path {{ bkp_docker_to_local_pkg }}"
register: pkgmgr_output register: pkgmgr_output
changed_when: false changed_when: false
when: run_once_backup_docker_to_local is not defined when: run_once_bkp_docker_to_local is not defined
- name: Set fact for backup_docker_to_local_folder - name: Set fact for backup_docker_to_local_folder
set_fact: set_fact:
backup_docker_to_local_folder: "{{ pkgmgr_output.stdout }}/" backup_docker_to_local_folder: "{{ pkgmgr_output.stdout }}/"
changed_when: false changed_when: false
when: run_once_backup_docker_to_local is not defined when: run_once_bkp_docker_to_local is not defined
- name: "reset (if enabled)"
include_tasks: reset.yml
when: mode_reset | bool and run_once_bkp_docker_to_local is not defined
- name: configure bkp-docker-to-local-everything.cymais.service - name: configure bkp-docker-to-local-everything.cymais.service
template: template:
src: bkp-docker-to-local-everything.service.j2 src: bkp-docker-to-local-everything.service.j2
dest: /etc/systemd/system/bkp-docker-to-local-everything.cymais.service dest: /etc/systemd/system/bkp-docker-to-local-everything.cymais.service
notify: reload bkp-docker-to-local-everything.cymais.service notify: reload bkp-docker-to-local-everything.cymais.service
when: run_once_backup_docker_to_local is not defined when: run_once_bkp_docker_to_local is not defined
- name: configure bkp-docker-to-local.cymais.service - name: configure bkp-docker-to-local.cymais.service
template: template:
src: bkp-docker-to-local.service.j2 src: bkp-docker-to-local.service.j2
dest: /etc/systemd/system/bkp-docker-to-local.cymais.service dest: /etc/systemd/system/bkp-docker-to-local.cymais.service
notify: reload bkp-docker-to-local.cymais.service notify: reload bkp-docker-to-local.cymais.service
when: run_once_backup_docker_to_local is not defined when: run_once_bkp_docker_to_local is not defined
- name: set service_name to the name of the current role - name: "set 'service_name' to '{{ role_name }}'"
set_fact: set_fact:
service_name: "{{ role_name }}" service_name: "{{ role_name }}"
when: run_once_backup_docker_to_local is not defined when: run_once_bkp_docker_to_local is not defined
- name: "include role for gen-timer for {{service_name}}" - name: "include role for gen-timer for {{service_name}}"
include_role: include_role:
name: gen-timer name: gen-timer
vars: vars:
on_calendar: "{{on_calendar_backup_docker_to_local}}" on_calendar: "{{on_calendar_backup_docker_to_local}}"
when: run_once_backup_docker_to_local is not defined when: run_once_bkp_docker_to_local is not defined
- name: "reset {{ backup_docker_to_local_folder }}databases.csv"
file:
path: "{{ backup_docker_to_local_folder }}databases.csv"
state: absent
when: mode_reset | bool and run_once_backup_docker_to_local is not defined
- name: "include seed-database-to-backup.yml" - name: "include seed-database-to-backup.yml"
include_tasks: seed-database-to-backup.yml include_tasks: seed-database-to-backup.yml
- name: run the backup_docker_to_local tasks once - name: run the backup_docker_to_local tasks once
set_fact: set_fact:
run_once_backup_docker_to_local: true run_once_bkp_docker_to_local: true
when: run_once_backup_docker_to_local is not defined when: run_once_bkp_docker_to_local is not defined

View File

@@ -0,0 +1,4 @@
- name: "reset {{ backup_docker_to_local_folder }}databases.csv"
file:
path: "{{ backup_docker_to_local_folder }}databases.csv"
state: absent

View File

@@ -51,10 +51,10 @@
database_name is defined and database_name is defined and
database_username is defined and database_username is defined and
database_password is defined) and database_password is defined) and
run_once_backup_docker_to_local_file_permission is not defined run_once_bkp_docker_to_local_file_permission is not defined
register: file_permission_result register: file_permission_result
- name: run the backup_docker_to_local_file_permission tasks once - name: run the backup_docker_to_local_file_permission tasks once
set_fact: set_fact:
run_once_backup_docker_to_local_file_permission: true run_once_bkp_docker_to_local_file_permission: true
when: run_once_backup_docker_to_local_file_permission is not defined and file_permission_result is defined and file_permission_result.changed when: run_once_bkp_docker_to_local_file_permission is not defined and file_permission_result is defined and file_permission_result.changed

View File

@@ -0,0 +1 @@
bkp_docker_to_local_pkg: backup-docker-to-local

View File

@@ -31,3 +31,4 @@ dependencies:
- cln-failed-docker-backups - cln-failed-docker-backups
- maint-lock - maint-lock
- user-root - user-root
- core-daemon

View File

@@ -22,7 +22,7 @@
dest: "{{docker_backup_remote_to_local_folder}}backups-remote-to-local.sh" dest: "{{docker_backup_remote_to_local_folder}}backups-remote-to-local.sh"
mode: 0755 mode: 0755
- name: set service_name to the name of the current role - name: "set 'service_name' to '{{ role_name }}'"
set_fact: set_fact:
service_name: "{{ role_name }}" service_name: "{{ role_name }}"

roles/categories.yml (new file, 184 additions)
View File

@@ -0,0 +1,184 @@
categories:
core:
title: "Core & System"
description: "Fundamental system configuration"
icon: "fas fa-cogs"
gen:
title: "gen-*"
description: "Helper roles & installers (git, locales, timer, etc.)"
icon: "fas fa-wrench"
desk:
title: "desk-*"
description: "Desktop environment roles & apps (GNOME, browser, LibreOffice, etc.)"
icon: "fas fa-desktop"
applications:
title: "Desktop Applications"
description: "Setup & utilities for desktop apps"
icon: "fas fa-desktop"
utils:
title: "utils-desk-*"
description: "Utility roles for desktop tools & development"
icon: "fas fa-tools"
srv:
web:
core:
title: "srv-web-core"
description: "Install & configure base Nginx server"
icon: "fas fa-server"
tls:
title: "srv-web-tls-*"
description: "Deploy & renew TLS certificates"
icon: "fas fa-lock"
proxy:
title: "srv-web-proxy-*"
description: "Proxy & vhost orchestration"
icon: "fas fa-project-diagram"
injector:
core:
title: "srv-web-injector-compose"
description: "Inject core HTML modifiers"
icon: "fas fa-code"
css:
title: "srv-web-injector-css"
description: "Inject CSS into responses"
icon: "fas fa-paint-brush"
iframe:
title: "srv-web-injector-iframe"
description: "Inject iframe notifier"
icon: "fas fa-window-maximize"
javascript:
title: "srv-web-injector-javascript"
description: "Inject JS into responses"
icon: "fas fa-code"
matomo:
title: "srv-web-injector-matomo"
description: "Inject Matomo tracking code"
icon: "fas fa-chart-pie"
composer:
title: "srv-web-composer"
description: "Compose multiple filters into one include"
icon: "fas fa-layer-group"
web:
svc:
title: "web-svc-*"
description: "Static content servers (assets, HTML, legal, files)"
icon: "fas fa-file"
app:
title: "web-app-*"
description: "Deployable web applications (GitLab, Nextcloud, Mastodon, etc.)"
icon: "fas fa-docker"
net:
general:
title: "net-*"
description: "Network setup (DNS, Let's Encrypt HTTP, WireGuard, etc.)"
icon: "fas fa-globe"
svc:
title: "svc-*"
description: "Docker infrastructure services (DBMS, LDAP, Redis, etc.)"
icon: "fas fa-database"
wireguard:
core:
title: "net-wireguard-core"
description: "Core WireGuard configuration"
icon: "fas fa-network-wired"
firewalled:
title: "net-wireguard-firewalled"
description: "WireGuard with firewall rules"
icon: "fas fa-shield-alt"
plain:
title: "net-wireguard-plain"
description: "WireGuard without extra firewall"
icon: "fas fa-network-wired"
monitoring:
bot:
title: "mon-bot-*"
description: "Bot-style health checks (disk, Docker, webserver, etc.)"
icon: "fas fa-robot"
core:
title: "monitor-core-*"
description: "Low-level monitors (journalctl, containers, disk space, etc.)"
icon: "fas fa-chart-area"
alerting:
title: "Alerting"
description: "Notification handlers for system events"
icon: "fas fa-bell"
subcategories:
email:
title: "alert-email"
description: "Send alerts via email"
icon: "fas fa-envelope"
telegram:
title: "alert-telegram"
description: "Send alerts via Telegram"
icon: "fab fa-telegram-plane"
compose:
title: "alert-compose"
description: "Compose multiple alert handlers"
icon: "fas fa-project-diagram"
maintenance:
title: "Maintenance & Healing"
description: "Periodic maintenance & auto-recovery"
icon: "fas fa-tools"
subcategories:
general:
title: "maint-*"
description: "Periodic tasks (Btrfs balancing, swapfile, etc.)"
icon: "fas fa-sync-alt"
docker:
title: "maint-docker-*"
description: "Automated Docker recovery & restarts"
icon: "fas fa-docker"
cleanup:
title: "cln-*"
description: "Housekeeping tasks (backups, certs, logs, etc.)"
icon: "fas fa-broom"
backup:
title: "Backup & Restore"
description: "Backup strategies & restore procedures"
icon: "fas fa-hdd"
subcategories:
general:
title: "bkp-*"
description: "Local & remote backups (files, volumes, DBs)"
icon: "fas fa-cloud-upload-alt"
updates:
title: "Updates & Package Management"
description: "OS & package updates"
icon: "fas fa-sync"
subcategories:
os:
title: "update-*"
description: "Automatic OS & package updates (apt, Docker, pip, etc.)"
icon: "fas fa-download"
pkgmgr:
title: "pkgmgr-*"
description: "Language/platform package managers (npm, pip, AUR, etc.)"
icon: "fas fa-box-open"
users:
title: "Users & Access"
description: "User accounts & access control"
icon: "fas fa-users"
subcategories:
general:
title: "user-*"
description: "Create user accounts & SSH keys"
icon: "fas fa-user"
administrator:
title: "user-administrator"
description: "Config for admin users"
icon: "fas fa-user-shield"
root:
title: "user-root"
description: "Config for root user"
icon: "fas fa-user-shield"

View File

@@ -25,3 +25,4 @@ dependencies:
- gen-python-pip - gen-python-pip
- alert-compose - alert-compose
- maint-lock - maint-lock
- core-daemon

View File

@@ -23,3 +23,4 @@ galaxy_info:
documentation: "https://s.veen.world/cymais" documentation: "https://s.veen.world/cymais"
dependencies: dependencies:
- cln-backups-service - cln-backups-service
- core-daemon

View File

@@ -25,3 +25,4 @@ galaxy_info:
dependencies: dependencies:
- alert-compose - alert-compose
- core-daemon

View File

@@ -12,7 +12,7 @@
notify: Reload and restart cln-certs.cymais.service notify: Reload and restart cln-certs.cymais.service
when: run_once_cleanup_certs is not defined when: run_once_cleanup_certs is not defined
- name: set service_name to the name of the current role - name: "set 'service_name' to '{{ role_name }}'"
set_fact: set_fact:
service_name: "{{ role_name }}" service_name: "{{ role_name }}"
when: run_once_cleanup_certs is not defined when: run_once_cleanup_certs is not defined

View File

@@ -24,3 +24,4 @@ galaxy_info:
dependencies: dependencies:
- alert-compose - alert-compose
- maint-lock - maint-lock
- core-daemon

View File

@@ -15,7 +15,7 @@
dest: /etc/systemd/system/cln-disc-space.cymais.service dest: /etc/systemd/system/cln-disc-space.cymais.service
notify: reload cln-disc-space.cymais.service notify: reload cln-disc-space.cymais.service
- name: set service_name to the name of the current role - name: "set 'service_name' to '{{ role_name }}'"
set_fact: set_fact:
service_name: "{{ role_name }}" service_name: "{{ role_name }}"

View File

@@ -22,4 +22,6 @@ galaxy_info:
- pkgmgr - pkgmgr
repository: "https://github.com/kevinveenbirkenbach/web-app-volume-cleaner" repository: "https://github.com/kevinveenbirkenbach/web-app-volume-cleaner"
issue_tracker_url: "https://github.com/kevinveenbirkenbach/web-app-volume-cleaner/issues" issue_tracker_url: "https://github.com/kevinveenbirkenbach/web-app-volume-cleaner/issues"
documentation: "https://github.com/kevinveenbirkenbach/web-app-volume-cleaner#readme" documentation: "https://github.com/kevinveenbirkenbach/web-app-volume-cleaner"
dependencies:
- core-daemon

View File

@@ -21,4 +21,5 @@ galaxy_info:
issue_tracker_url: "https://s.veen.world/cymaisissues" issue_tracker_url: "https://s.veen.world/cymaisissues"
documentation: "https://s.veen.world/cymais" documentation: "https://s.veen.world/cymais"
dependencies: dependencies:
- srv-web-core - srv-web-core
- core-daemon

View File

@@ -25,3 +25,4 @@ dependencies:
- alert-compose - alert-compose
- maint-lock - maint-lock
- bkp-directory-validator - bkp-directory-validator
- core-daemon

View File

@@ -2,41 +2,41 @@
include_role: include_role:
name: pkgmgr-install name: pkgmgr-install
vars: vars:
package_name: cln-failed-docker-backups package_name: "{{ cln_failed_docker_backups_pkg }}"
when: run_once_cleanup_failed_docker_backups is not defined when: run_once_cln_failed_docker_backups is not defined
- name: Retrieve bkp-docker-to-local path from pkgmgr - name: "Retrieve {{ cln_failed_docker_backups_pkg }} path from pkgmgr"
command: pkgmgr path cln-failed-docker-backups command: "pkgmgr path {{ cln_failed_docker_backups_pkg }}"
register: pkgmgr_output register: pkgmgr_output
changed_when: false changed_when: false
when: run_once_cleanup_failed_docker_backups is not defined when: run_once_cln_failed_docker_backups is not defined
- name: Set fact for backup_docker_to_local_cleanup_script - name: Set fact for backup_docker_to_local_cleanup_script
set_fact: set_fact:
backup_docker_to_local_cleanup_script: "{{ pkgmgr_output.stdout.rstrip('/') ~ '/cln-all.sh' }}" backup_docker_to_local_cleanup_script: "{{ pkgmgr_output.stdout.rstrip('/') ~ '/cln-all.sh' }}"
changed_when: false changed_when: false
when: run_once_cleanup_failed_docker_backups is not defined when: run_once_cln_failed_docker_backups is not defined
- name: configure cln-failed-docker-backups.cymais.service - name: configure cln-failed-docker-backups.cymais.service
template: template:
src: cln-failed-docker-backups.service.j2 src: cln-failed-docker-backups.service.j2
dest: /etc/systemd/system/cln-failed-docker-backups.cymais.service dest: /etc/systemd/system/cln-failed-docker-backups.cymais.service
notify: Reload cln-failed-docker-backups.cymais.service notify: Reload cln-failed-docker-backups.cymais.service
when: run_once_cleanup_failed_docker_backups is not defined when: run_once_cln_failed_docker_backups is not defined
- name: set service_name to the name of the current role - name: "set 'service_name' to '{{ role_name }}'"
set_fact: set_fact:
service_name: "{{ role_name }}" service_name: "{{ role_name }}"
when: run_once_cleanup_failed_docker_backups is not defined when: run_once_cln_failed_docker_backups is not defined
- name: "include role for gen-timer for {{service_name}}" - name: "include role for gen-timer for {{service_name}}"
include_role: include_role:
name: gen-timer name: gen-timer
vars: vars:
on_calendar: "{{on_calendar_cleanup_failed_docker}}" on_calendar: "{{on_calendar_cleanup_failed_docker}}"
when: run_once_cleanup_failed_docker_backups is not defined when: run_once_cln_failed_docker_backups is not defined
- name: run the cleanup_failed_docker_backups tasks once - name: run the cleanup_failed_docker_backups tasks once
set_fact: set_fact:
run_once_cleanup_failed_docker_backups: true run_once_cln_failed_docker_backups: true
when: run_once_cleanup_failed_docker_backups is not defined when: run_once_cln_failed_docker_backups is not defined

View File

@@ -0,0 +1 @@
cln_failed_docker_backups_pkg: cleanup-failed-docker-backups

View File

@@ -0,0 +1,25 @@
# Database Docker Composition
This role combines the central RDBMS role (`svc-rdbms-central`) with Docker Compose to deliver a ready-to-use containerized database environment.
## Features
- **Central RDBMS Integration**
Includes the `svc-rdbms-central` role, which handles backups, restores, user and permission management for your relational database system (PostgreSQL, MariaDB, etc.).
- **Docker Compose**
Utilizes the standalone `docker-compose` role to define and bring up containers, networks, and volumes automatically.
- **Variable Load Order**
1. Docker Compose variables (`roles/docker-compose/vars/docker-compose.yml`)
2. Database variables (`roles/svc-rdbms-central/vars/database.yml`)
Ensures compose ports and volumes are defined before the database role consumes them.
The role will load both sub-roles and satisfy all dependencies transparently.
## Task Breakdown
1. **Set Fact** `database_application_id` to work around a lazy-loading ordering issue.
2. **Include Vars** in the specified order.
3. **Invoke** `docker-compose` role to create containers, networks, and volumes.
4. **Invoke** `svc-rdbms-central` role to provision the database, backups, and users.
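A minimal usage sketch, assuming the role is named `cmp-db-docker` (inferred from its variable names) and is included from an application role after `application_id` has been set; everything else is an assumption:

```yaml
# Hypothetical include from an application role's tasks/main.yml.
- name: "Provision database and compose stack for {{ application_id }}"
  include_role:
    name: cmp-db-docker
```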

View File

@@ -0,0 +1,17 @@
galaxy_info:
author: "Kevin Veen-Birkenbach"
description: >
Combines Docker Compose with a central RDBMS role to automatically
provision database containers with backup, user, and permission management.
company: |
Kevin Veen-Birkenbach
Consulting & Coaching Solutions
https://www.veen.world
license: "CyMaIS NonCommercial License (CNCL)"
license_url: "https://s.veen.world/cncl"
galaxy_tags:
- docker
- database
- rdbms
- backup
- compose

View File

@@ -0,0 +1,17 @@
- name: "set database_application_id (Needed due to lazzy loading issue)"
set_fact:
database_application_id: "{{ application_id }}"
- name: "Load database variables"
include_vars: "{{ item }}"
loop:
- "{{ cmp_db_docker_vars_file_docker }}" # Important to load docker variables first so that database can use them
- "{{ cmp_db_docker_vars_file_db }}" # Important to load them before docker role so that backup can use them
- name: "Load docker-compose for {{ application_id }}"
include_role:
name: docker-compose
- name: "Load central rdbms for {{ application_id }}"
include_role:
name: svc-rdbms-central

View File

@@ -0,0 +1,2 @@
cmp_db_docker_vars_file_db: "{{ playbook_dir }}/roles/svc-rdbms-central/vars/database.yml"
cmp_db_docker_vars_file_docker: "{{ playbook_dir }}/roles/docker-compose/vars/docker-compose.yml"

View File

@@ -0,0 +1,24 @@
# Core Daemon Role
This Ansible role handles resetting and cleaning up “CyMaIS” systemd service units for the core daemon.
## Description
When enabled via the `mode_reset` flag, this role will:
1. Run its reset tasks exactly once per play (`run_once_core_daemon` guard).
2. Find all `/etc/systemd/system/*.cymais.service` units.
3. Stop and disable each unit.
4. Remove the unit files.
5. Reload the systemd daemon.
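A minimal sketch of triggering the reset from a play; the role name and `mode_reset` flag match this repository, but in practice the flag is usually supplied via the inventory or extra-vars rather than hard-coded:

```yaml
# Illustrative play snippet; hard-coding mode_reset here is for demonstration only.
- name: Reset CyMaIS systemd service units
  include_role:
    name: core-daemon
  vars:
    mode_reset: true
```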
## License
This role is released under the CyMaIS NonCommercial License (CNCL).
See [license details](https://s.veen.world/cncl)
## Author Information
Kevin Veen-Birkenbach
Consulting & Coaching Solutions
[https://www.veen.world](https://www.veen.world)

View File

@@ -0,0 +1,19 @@
galaxy_info:
author: "Kevin Veen-Birkenbach"
description: "Role to reset and clean up CyMaIS systemd service units for the core daemon."
company: |
Kevin Veen-Birkenbach
Consulting & Coaching Solutions
https://www.veen.world
license: "CyMaIS NonCommercial License (CNCL)"
license_url: "https://s.veen.world/cncl"
min_ansible_version: "2.9"
galaxy_tags:
- systemd
- cleanup
- cymais
repository: "https://github.com/kevinveenbirkenbach/cymais"
issue_tracker_url: "https://github.com/kevinveenbirkenbach/cymais/issues"
documentation: "https://github.com/kevinveenbirkenbach/cymais/#core-daemon-role"
dependencies: []

View File

@@ -0,0 +1,8 @@
- name: "reset (if enabled)"
include_tasks: reset.yml
when: mode_reset | bool and run_once_core_daemon is not defined
- name: run {{ role_name }} once
set_fact:
run_once_core_daemon: true
when: run_once_core_daemon is not defined

View File

@@ -0,0 +1,28 @@
- name: Find all cymais.service units
find:
paths: /etc/systemd/system
patterns: '*.cymais.service'
register: cymais_services
- name: Disable and stop each cymais service
become: true
systemd:
name: "{{ item.path | basename }}"
enabled: no
state: stopped
loop: "{{ cymais_services.files }}"
loop_control:
label: "{{ item.path | basename }}"
- name: Remove all cymais.service files
become: true
file:
path: "{{ item.path }}"
state: absent
loop: "{{ cymais_services.files }}"
loop_control:
label: "{{ item.path | basename }}"
- name: Reload systemd daemon
become: true
command: systemctl daemon-reload

View File

@@ -0,0 +1,22 @@
---
galaxy_info:
author: "Kevin Veen-Birchenbach"
description: "Installs Docker and Docker Compose, and adds a user to the Docker group for non-root usage on development machines."
license: "CyMaIS NonCommercial License (CNCL)"
license_url: "https://s.veen.world/cncl"
company: |
Kevin Veen-Birkenbach
Consulting & Coaching Solutions
https://www.veen.world
galaxy_tags:
- docker
- development
repository: "https://github.com/kevinveenbirkenbach/cymais"
issue_tracker_url: "https://github.com/kevinveenbirkenbach/cymais/issues"
documentation: "https://github.com/kevinveenbirkenbach/cymais/tree/main/roles/desk-docker"
min_ansible_version: "2.9"
platforms:
- name: Archlinux
versions:
- all
dependencies: []

View File

@@ -0,0 +1,23 @@
---
galaxy_info:
author: "Kevin Veen-Birchenbach"
description: "Installs GnuCash finance management software on Pacman-based systems, ensuring the latest version is present."
license: "CyMaIS NonCommercial License (CNCL)"
license_url: "https://s.veen.world/cncl"
company: |
Kevin Veen-Birkenbach
Consulting & Coaching Solutions
https://www.veen.world
galaxy_tags:
- gnucash
- finance
- accounting
repository: "https://github.com/kevinveenbirkenbach/cymais"
issue_tracker_url: "https://github.com/kevinveenbirkenbach/cymais/issues"
documentation: "https://github.com/kevinveenbirkenbach/cymais/tree/main/roles/desk-gnucash"
min_ansible_version: "2.9"
platforms:
- name: Archlinux
versions:
- all
dependencies: []

View File

@@ -0,0 +1,23 @@
---
galaxy_info:
author: "Kevin Veen-Birchenbach"
description: "Installs Jrnl CLI journal application on Pacman-based systems for command-line journaling."
license: "CyMaIS NonCommercial License (CNCL)"
license_url: "https://s.veen.world/cncl"
company: |
Kevin Veen-Birkenbach
Consulting & Coaching Solutions
https://www.veen.world
galaxy_tags:
- jrnl
- journal
- cli
repository: "https://github.com/kevinveenbirkenbach/cymais"
issue_tracker_url: "https://github.com/kevinveenbirkenbach/cymais/issues"
documentation: "https://github.com/kevinveenbirkenbach/cymais/tree/main/roles/desk-jrnl"
min_ansible_version: "2.9"
platforms:
- name: Archlinux
versions:
- all
dependencies: []

View File

@@ -0,0 +1,23 @@
---
galaxy_info:
author: "Kevin Veen-Birchenbach"
description: "Installs KeePassXC password manager on Pacman-based systems."
license: "CyMaIS NonCommercial License (CNCL)"
license_url: "https://s.veen.world/cncl"
company: |
Kevin Veen-Birkenbach
Consulting & Coaching Solutions
https://www.veen.world
galaxy_tags:
- keepassxc
- security
- passwords
repository: "https://github.com/kevinveenbirkenbach/cymais"
issue_tracker_url: "https://github.com/kevinveenbirkenbach/cymais/issues"
documentation: "https://github.com/kevinveenbirkenbach/cymais/tree/main/roles/desk-keepassxc"
min_ansible_version: "2.9"
platforms:
- name: Archlinux
versions:
- all
dependencies: []

View File

@@ -0,0 +1,23 @@
---
galaxy_info:
author: "Kevin Veen-Birchenbach"
description: "Installs OBS Studio for streaming and recording on Pacman-based systems."
license: "CyMaIS NonCommercial License (CNCL)"
license_url: "https://s.veen.world/cncl"
company: |
Kevin Veen-Birkenbach
Consulting & Coaching Solutions
https://www.veen.world
galaxy_tags:
- obs
- streaming
- recording
repository: "https://github.com/kevinveenbirkenbach/cymais"
issue_tracker_url: "https://github.com/kevinveenbirkenbach/cymais/issues"
documentation: "https://github.com/kevinveenbirkenbach/cymais/tree/main/roles/desk-obs"
min_ansible_version: "2.9"
platforms:
- name: Archlinux
versions:
- all
dependencies: []

View File

@@ -1,31 +1,23 @@
# PC-TorBrowser Role # Torbrowser
## Description
This Ansible role installs and configures the Tor service and the Tor Browser Launcher, providing a privacy-focused web browsing environment on Pacman-based Linux distributions.
## Overview ## Overview
This README document is for the `desk-torbrowser` role, a crucial component of the `cymais` repository. This role is specifically designed for the installation and setup of Tor Browser on personal computers.
## Role Tasks The `desk-torbrowser` role uses the `community.general.pacman` module to:
The `main.yml` file under the `desk-torbrowser` role encompasses tasks for installing the Tor Browser:
1. **Install TorBrowser**: 1. Install **tor** (the core Tor network service)
- Utilizes the `community.general.pacman` module to install: 2. Install **torbrowser-launcher** (the launcher for Tor Browser)
- `tor`: The core Tor service which facilitates anonymous communication.
- `torbrowser-launcher`: A package for securely and easily launching the Tor Browser.
## Purpose and Usage ## Features
The `desk-torbrowser` role is tailored for users who value privacy and anonymity online. The Tor Browser is a specialized web browser that provides enhanced privacy features, making it an essential tool for secure browsing and accessing the deep web.
## Prerequisites * Idempotent installation of Tor and Tor Browser Launcher
- **Ansible**: Must be installed on your system to run this role. * Ensures the Tor service is available for anonymous network traffic
- **Arch Linux-based System**: As the role uses the `pacman` package manager, it's best suited for Arch Linux or similar distributions. * Simplifies first-time setup of Tor Browser
## Running the Role ## Further Resources
To use this role:
1. Clone the `cymais` repository.
2. Navigate to the `roles/desk-torbrowser` directory.
3. Run the role using Ansible, ensuring you have the necessary permissions for software installation.
## Customization * [Tor Project documentation](https://www.torproject.org/)
While this role primarily focuses on installing Tor and the Tor Browser Launcher, you can customize it to include additional privacy-focused tools or configurations based on your needs. * [CyMaIS GitHub repository](https://github.com/kevinveenbirkenbach/cymais)
## Support and Contributions
For support, feedback, or contributions, such as enhancing the role with more privacy tools or improving the installation process, please open an issue or submit a pull request in the `cymais` repository. Contributions that enhance the privacy and security aspects of this role are highly encouraged.
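A minimal sketch of the installation step described in the Overview above; the task name and the `update_cache` option are assumptions, not taken from this role:

```yaml
# Sketch only; the actual task layout in this role may differ.
- name: Install Tor and the Tor Browser launcher
  community.general.pacman:
    name:
      - tor
      - torbrowser-launcher
    state: present
    update_cache: true
```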

View File

@@ -0,0 +1,23 @@
galaxy_info:
author: "Kevin Veen-Birkenbach"
description: "Installs and configures the Tor service and Tor Browser Launcher for secure, anonymous web browsing on Pacman-based systems."
license: "CyMaIS NonCommercial License (CNCL)"
license_url: "https://s.veen.world/cncl"
company: |
Kevin Veen-Birkenbach
Consulting & Coaching Solutions
https://www.veen.world
galaxy_tags:
- tor
- privacy
- anonymity
- browser
repository: "https://github.com/kevinveenbirkenbach/cymais"
issue_tracker_url: "https://github.com/kevinveenbirkenbach/cymais/issues"
documentation: "https://github.com/kevinveenbirkenbach/cymais/tree/main/roles/desk-torbrowser"
min_ansible_version: "2.9"
platforms:
- name: Archlinux
versions:
- all
dependencies: []

View File

@@ -0,0 +1,23 @@
---
galaxy_info:
author: "Kevin Veen-Birchenbach"
description: "Installs and configures VirtualBox and its kernel modules on Pacman-based systems, including extension packs and user group setup."
license: "CyMaIS NonCommercial License (CNCL)"
license_url: "https://s.veen.world/cncl"
company: |
Kevin Veen-Birkenbach
Consulting & Coaching Solutions
https://www.veen.world
galaxy_tags:
- virtualbox
- virtualization
- kernel-modules
repository: "https://github.com/kevinveenbirkenbach/cymais"
issue_tracker_url: "https://github.com/kevinveenbirkenbach/cymais/issues"
documentation: "https://github.com/kevinveenbirkenbach/cymais/tree/main/roles/desk-virtual-box"
min_ansible_version: "2.9"
platforms:
- name: Archlinux
versions:
- all
dependencies: []

View File

@@ -1,13 +1,4 @@
--- ---
# It is necessary to shut the projects down, when reset is activated.
# Otherwise it can lead to this bug:
# https://github.com/ansible/ansible/issues/10244
#- name: shut down docker compose project
# command:
# cmd: docker-compose -p "{{ application_id }}" down
# listen: docker compose up
# when: mode_reset | bool
- name: rebuild docker repository - name: rebuild docker repository
command: command:
cmd: docker compose build cmd: docker compose build
@@ -16,7 +7,17 @@
COMPOSE_HTTP_TIMEOUT: 600 COMPOSE_HTTP_TIMEOUT: 600
DOCKER_CLIENT_TIMEOUT: 600 DOCKER_CLIENT_TIMEOUT: 600
# default setup for docker compose files - name: Validate Docker Compose configuration
command:
cmd: docker compose -f {{ docker_compose.files.docker_compose }} config --quiet
chdir: "{{ docker_compose.directories.instance }}"
register: dc_validate
changed_when: false
failed_when: dc_validate.rc != 0
listen:
- docker compose up
- docker compose restart
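# Defined before the up/restart handlers and listening on both topics, so the compose file is validated before containers are (re)started.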
- name: docker compose up - name: docker compose up
shell: docker-compose -p {{ application_id }} up -d --force-recreate --remove-orphans --build shell: docker-compose -p {{ application_id }} up -d --force-recreate --remove-orphans --build
args: args:

View File

@@ -25,5 +25,4 @@ galaxy_info:
issue_tracker_url: https://s.veen.world/cymaisissues issue_tracker_url: https://s.veen.world/cymaisissues
documentation: https://s.veen.world/cymais documentation: https://s.veen.world/cymais
dependencies: dependencies:
- srv-web-proxy-core
- docker-container # Necessary for template use - docker-container # Necessary for template use

View File

@@ -3,8 +3,8 @@
src: "{{ item }}" src: "{{ item }}"
dest: "{{ docker_compose.files.dockerfile }}" dest: "{{ docker_compose.files.dockerfile }}"
loop: loop:
- "{{ playbook_dir }}/roles/web-app-{{ application_id }}/templates/Dockerfile.j2" - "{{ application_id | abs_role_path_by_application_id }}/templates/Dockerfile.j2"
- "{{ playbook_dir }}/roles/web-app-{{ application_id }}/files/Dockerfile" - "{{ application_id | abs_role_path_by_application_id }}/files/Dockerfile"
notify: docker compose up notify: docker compose up
register: create_dockerfile_result register: create_dockerfile_result
failed_when: failed_when:
@@ -20,8 +20,8 @@
notify: docker compose up notify: docker compose up
register: env_template register: env_template
loop: loop:
- "{{ playbook_dir }}/roles/web-app-{{ application_id }}/templates/env.j2" - "{{ application_id | abs_role_path_by_application_id }}/templates/env.j2"
- "{{ playbook_dir }}/roles/web-app-{{ application_id }}/files/env" - "{{ application_id | abs_role_path_by_application_id }}/files/env"
failed_when: failed_when:
- env_template is failed - env_template is failed
- "'Could not find or access' not in env_template.msg" - "'Could not find or access' not in env_template.msg"
@@ -40,4 +40,5 @@
register: docker_ps register: docker_ps
changed_when: (docker_ps.stdout | trim) == "" changed_when: (docker_ps.stdout | trim) == ""
notify: docker compose up notify: docker compose up
when: not (docker_compose_template.changed or env_template.changed) when: not (docker_compose_template.changed or env_template.changed)
ignore_errors: true

View File

@@ -1,10 +1,8 @@
- name: "Load variables from {{ role_path }}/vars/docker-compose.yml for whole play" - name: "Load variables from {{ docker_compose_variable_file }} for whole play"
include_vars: "{{ role_path }}/vars/docker-compose.yml" include_vars: "{{ docker_compose_variable_file }}"
- name: "Remove {{ docker_compose.directories.instance }} and all its contents" - name: "reset (if enabled)"
file: include_tasks: reset.yml
path: "{{ docker_compose.directories.instance }}"
state: absent
when: mode_reset | bool when: mode_reset | bool
# This could lead to problems in docker-compose directories which are based on a git repository # This could lead to problems in docker-compose directories which are based on a git repository

View File

@@ -0,0 +1,11 @@
# It is necessary to shut the projects down when reset is activated.
# Otherwise it can lead to this bug:
# https://github.com/ansible/ansible/issues/10244
- name: shut down docker compose project
command:
cmd: "docker-compose -p {{ application_id }} down"
- name: "Remove {{ docker_compose.directories.instance }} and all its contents"
file:
path: "{{ docker_compose.directories.instance }}"
state: absent

View File

@@ -4,7 +4,7 @@ networks:
central_{{ database_type }}: central_{{ database_type }}:
external: true external: true
{% endif %} {% endif %}
{% if applications[application_id].get('features', {}).get('ldap', false) and applications.ldap.network.docker | bool %} {% if applications[application_id].get('features', {}).get('ldap', false) and applications.openldap.network.docker | bool %}
central_ldap: central_ldap:
external: true external: true
{% endif %} {% endif %}

View File

@@ -0,0 +1 @@
docker_compose_variable_file: "{{ role_path }}/vars/docker-compose.yml"

View File

@@ -0,0 +1,2 @@
dependencies:
- docker-core

View File

@@ -3,7 +3,7 @@
{% if applications | is_feature_enabled('central_database',application_id) | bool and database_type is defined %} {% if applications | is_feature_enabled('central_database',application_id) | bool and database_type is defined %}
central_{{ database_type }}: central_{{ database_type }}:
{% endif %} {% endif %}
{% if applications[application_id].get('features', {}).get('ldap', false) | bool and applications.ldap.network.docker|bool %} {% if applications[application_id].get('features', {}).get('ldap', false) | bool and applications.openldap.network.docker|bool %}
central_ldap: central_ldap:
{% endif %} {% endif %}
default: default:

View File

@@ -1,22 +1,22 @@
# Intel Drivers # drv-intel Role
## Description ## Description
This Ansible role installs Intel media drivers on systems that use the Pacman package manager (e.g., Arch Linux and derivatives). It ensures the `intel-media-driver` package is present and up-to-date. This Ansible role installs Intel media drivers on Pacman-based Linux distributions (e.g., Arch Linux), ensuring the `intel-media-driver` package is present and up-to-date.
## Overview ## Overview
The `drv-intel` role leverages the `community.general.pacman` module to: The `drv-intel` role uses the `community.general.pacman` module to:
1. Update the package cache. 1. Update the package cache
2. Install (or upgrade) the `intel-media-driver` package. 2. Install or upgrade the `intel-media-driver` package
3. Verify that the driver is correctly installed and ready for use in media pipelines. 3. Verify the driver installation for media pipelines
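The three steps above boil down to a single Pacman task; the sketch below is an assumption about how it could look, not the role's actual task file:

```yaml
# Sketch only; the actual task layout in this role may differ.
- name: Install or upgrade the Intel media driver
  community.general.pacman:
    name: intel-media-driver
    state: present
    update_cache: true
```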
## Features ## Features
* Idempotent installation of Intel media drivers * Idempotent installation of Intel media drivers
* Automatic package cache update before installation * Automatic Pacman cache update
* Supports installation on any Pacman-based distribution * Support for all Pacman-based distributions
## Further Resources ## Further Resources

View File

@@ -0,0 +1,25 @@
---
galaxy_info:
author: "Kevin Veen-Birkenbach"
description: "Installs Intel media drivers on Pacman-based systems, ensuring the `intel-media-driver` package is present and up-to-date."
license: "CyMaIS NonCommercial License (CNCL)"
license_url: "https://s.veen.world/cncl"
company: |
Kevin Veen-Birkenbach
Consulting & Coaching Solutions
https://www.veen.world
galaxy_tags:
- intel
- media
- driver
- linux
- pacman
repository: "https://github.com/kevinveenbirkenbach/cymais"
issue_tracker_url: "https://github.com/kevinveenbirkenbach/cymais/issues"
documentation: "https://github.com/kevinveenbirkenbach/cymais/tree/main/roles/drv-intel"
min_ansible_version: "2.9"
platforms:
- name: Archlinux
versions:
- all
dependencies: []

View File

@@ -21,7 +21,7 @@
systemd: systemd:
daemon_reload: yes daemon_reload: yes
- name: set service_name to the name of the current role - name: "set 'service_name' to '{{ role_name }}'"
set_fact: set_fact:
service_name: "{{ role_name }}" service_name: "{{ role_name }}"

View File

@@ -0,0 +1,23 @@
---
galaxy_info:
author: "Kevin Veen-Birchenbach"
description: "Installs proprietary GPU drivers (`mhwd -a pci nonfree 0300`) on Arch-based systems."
license: "CyMaIS NonCommercial License (CNCL)"
license_url: "https://s.veen.world/cncl"
company: |
Kevin Veen-Birkenbach
Consulting & Coaching Solutions
https://www.veen.world
galaxy_tags:
- gpu
- drivers
- nonfree
repository: "https://github.com/kevinveenbirkenbach/cymais"
issue_tracker_url: "https://github.com/kevinveenbirkenbach/cymais/issues"
documentation: "https://github.com/kevinveenbirkenbach/cymais/tree/main/roles/drv-non-free"
min_ansible_version: "2.9"
platforms:
- name: Archlinux
versions:
- all
dependencies: []

View File

@@ -0,0 +1,23 @@
---
galaxy_info:
author: "Kevin Veen-Birkenbach"
description: "Installs Hunspell and configured language packs on Pacman-based systems for spell checking in multiple languages."
license: "CyMaIS NonCommercial License (CNCL)"
license_url: "https://s.veen.world/cncl"
company: |
Kevin Veen-Birkenbach
Consulting & Coaching Solutions
https://www.veen.world
galaxy_tags:
- hunspell
- spellcheck
- language
repository: "https://github.com/kevinveenbirkenbach/cymais"
issue_tracker_url: "https://github.com/kevinveenbirkenbach/cymais/issues"
documentation: "https://github.com/kevinveenbirkenbach/cymais/tree/main/roles/gen-hunspell"
min_ansible_version: "2.9"
platforms:
- name: Archlinux
versions:
- all
dependencies: []

View File

@@ -0,0 +1,23 @@
---
galaxy_info:
author: "Kevin Veen-Birkenbach"
description: "Installs OpenJDK 11 (`jdk11-openjdk`) on Pacman-based systems to provide a Java runtime and development environment."
license: "CyMaIS NonCommercial License (CNCL)"
license_url: "https://s.veen.world/cncl"
company: |
Kevin Veen-Birkenbach
Consulting & Coaching Solutions
https://www.veen.world
galaxy_tags:
- java
- jdk11
- openjdk
repository: "https://github.com/kevinveenbirkenbach/cymais"
issue_tracker_url: "https://github.com/kevinveenbirkenbach/cymais/issues"
documentation: "https://github.com/kevinveenbirkenbach/cymais/tree/main/roles/gen-java"
min_ansible_version: "2.9"
platforms:
- name: Archlinux
versions:
- all
dependencies: []

View File

@@ -1,3 +1,7 @@
- name: "reset (if enabled)"
include_tasks: reset.yml
when: mode_reset | bool and run_once_gen_timer is not defined
- name: create {{service_name}}.cymais.timer - name: create {{service_name}}.cymais.timer
template: template:
src: dummy.timer.j2 src: dummy.timer.j2
@@ -10,4 +14,9 @@
name: "{{service_name}}.cymais.timer" name: "{{service_name}}.cymais.timer"
state: restarted state: restarted
enabled: yes enabled: yes
when: dummy_timer.changed or activate_all_timers | bool when: dummy_timer.changed or activate_all_timers | bool
- name: run {{ role_name }} once
set_fact:
run_once_gen_timer: true
when: run_once_gen_timer is not defined

View File

@@ -0,0 +1,26 @@
- name: Find all cymais.timer units
find:
paths: /etc/systemd/system
patterns: '*.cymais.timer'
register: cymais_timers
- name: Disable and stop each cymais timer
systemd:
name: "{{ item.path | basename }}"
enabled: no
state: stopped
loop: "{{ cymais_timers.files }}"
loop_control:
label: "{{ item.path | basename }}"
- name: Remove all cymais.timer files
file:
path: "{{ item.path }}"
state: absent
loop: "{{ cymais_timers.files }}"
loop_control:
label: "{{ item.path | basename }}"
- name: Reload systemd daemon
command: systemctl daemon-reload
become: true

View File

@@ -12,7 +12,7 @@
notify: reload maint-btrfs-auto-balancer.cymais.service notify: reload maint-btrfs-auto-balancer.cymais.service
when: run_once_system_btrfs_auto_balancer is not defined when: run_once_system_btrfs_auto_balancer is not defined
- name: set service_name to the name of the current role - name: "set 'service_name' to '{{ role_name }}'"
set_fact: set_fact:
service_name: "{{ role_name }}" service_name: "{{ role_name }}"
when: run_once_system_btrfs_auto_balancer is not defined when: run_once_system_btrfs_auto_balancer is not defined

View File

@@ -19,7 +19,7 @@
notify: restart maint-docker-heal.cymais.service notify: restart maint-docker-heal.cymais.service
when: run_once_heal_docker is not defined when: run_once_heal_docker is not defined
- name: set service_name to the name of the current role - name: "set 'service_name' to '{{ role_name }}'"
set_fact: set_fact:
service_name: "{{ role_name }}" service_name: "{{ role_name }}"
when: run_once_heal_docker is not defined when: run_once_heal_docker is not defined

View File

@@ -16,7 +16,7 @@
dest: /etc/systemd/system/maint-docker-restart.cymais.service dest: /etc/systemd/system/maint-docker-restart.cymais.service
notify: "reload maint-docker-restart.cymais.service" notify: "reload maint-docker-restart.cymais.service"
- name: set service_name to the name of the current role - name: "set 'service_name' to '{{ role_name }}'"
set_fact: set_fact:
service_name: "{{ role_name }}" service_name: "{{ role_name }}"

View File

@@ -0,0 +1,23 @@
---
galaxy_info:
author: "Kevin Veen-Birchenbach"
description: "Automates swapfile creation on target systems by cloning and executing a swapfile script."
license: "CyMaIS NonCommercial License (CNCL)"
license_url: "https://s.veen.world/cncl"
company: |
Kevin Veen-Birkenbach
Consulting & Coaching Solutions
https://www.veen.world
galaxy_tags:
- swapfile
- performance
repository: "https://github.com/kevinveenbirkenbach/cymais"
issue_tracker_url: "https://github.com/kevinveenbirkenbach/cymais/issues"
documentation: "https://github.com/kevinveenbirkenbach/cymais/tree/main/roles/maint-swapfile"
min_ansible_version: "2.9"
platforms:
- name: Any
versions:
- all
dependencies:
- pkgmgr-install

View File

@@ -15,7 +15,7 @@
dest: /etc/systemd/system/mon-bot-btrfs.cymais.service dest: /etc/systemd/system/mon-bot-btrfs.cymais.service
notify: reload mon-bot-btrfs.cymais.service notify: reload mon-bot-btrfs.cymais.service
- name: set service_name to the name of the current role - name: "set 'service_name' to '{{ role_name }}'"
set_fact: set_fact:
service_name: "{{ role_name }}" service_name: "{{ role_name }}"

View File

@@ -15,7 +15,7 @@
dest: /etc/systemd/system/mon-bot-disc-space.cymais.service dest: /etc/systemd/system/mon-bot-disc-space.cymais.service
notify: reload mon-bot-disc-space.cymais.service notify: reload mon-bot-disc-space.cymais.service
- name: set service_name to the name of the current role - name: "set 'service_name' to '{{ role_name }}'"
set_fact: set_fact:
service_name: "{{ role_name }}" service_name: "{{ role_name }}"

View File

@@ -18,7 +18,7 @@
notify: reload mon-bot-docker-container.cymais.service notify: reload mon-bot-docker-container.cymais.service
when: run_once_health_docker_container is not defined when: run_once_health_docker_container is not defined
- name: set service_name to the name of the current role - name: "set 'service_name' to '{{ role_name }}'"
set_fact: set_fact:
service_name: "{{ role_name }}" service_name: "{{ role_name }}"
when: run_once_health_docker_container is not defined when: run_once_health_docker_container is not defined

View File

@@ -18,7 +18,7 @@
notify: reload mon-bot-docker-volumes.cymais.service notify: reload mon-bot-docker-volumes.cymais.service
when: run_once_health_docker_volumes is not defined when: run_once_health_docker_volumes is not defined
- name: set service_name to the name of the current role - name: "set 'service_name' to '{{ role_name }}'"
set_fact: set_fact:
service_name: "{{ role_name }}" service_name: "{{ role_name }}"
when: run_once_health_docker_volumes is not defined when: run_once_health_docker_volumes is not defined

View File

@@ -18,7 +18,7 @@
notify: reload mon-bot-journalctl.cymais.service notify: reload mon-bot-journalctl.cymais.service
when: run_once_health_journalctl is not defined when: run_once_health_journalctl is not defined
- name: set service_name to the name of the current role - name: "set 'service_name' to '{{ role_name }}'"
set_fact: set_fact:
service_name: "{{ role_name }}" service_name: "{{ role_name }}"
when: run_once_health_journalctl is not defined when: run_once_health_journalctl is not defined

View File

@@ -16,7 +16,7 @@
dest: /etc/systemd/system/mon-bot-msmtp.cymais.service dest: /etc/systemd/system/mon-bot-msmtp.cymais.service
notify: reload mon-bot-msmtp.cymais.service notify: reload mon-bot-msmtp.cymais.service
- name: set service_name to the name of the current role - name: "set 'service_name' to '{{ role_name }}'"
set_fact: set_fact:
service_name: "{{ role_name }}" service_name: "{{ role_name }}"

View File

@@ -24,7 +24,7 @@
notify: reload mon-bot-webserver.cymais.service notify: reload mon-bot-webserver.cymais.service
when: run_once_health_nginx is not defined when: run_once_health_nginx is not defined
- name: set service_name to the name of the current role - name: "set 'service_name' to '{{ role_name }}'"
set_fact: set_fact:
service_name: "{{ role_name }}" service_name: "{{ role_name }}"
when: run_once_health_nginx is not defined when: run_once_health_nginx is not defined

View File

@@ -2,7 +2,7 @@
This Ansible role composes and orchestrates all necessary HTTPS-layer tasks and HTML-content injections for your webserver domains. It integrates two key sub-roles into a unified workflow: This Ansible role composes and orchestrates all necessary HTTPS-layer tasks and HTML-content injections for your webserver domains. It integrates two key sub-roles into a unified workflow:
1. **`srv-web-injector-core`** 1. **`srv-web-injector-compose`**
Injects global HTML snippets (CSS, Matomo tracking, iFrame notifier, custom JavaScript) into responses using Nginx `sub_filter`. Injects global HTML snippets (CSS, Matomo tracking, iFrame notifier, custom JavaScript) into responses using Nginx `sub_filter`.
2. **`srv-web-tls-core`** 2. **`srv-web-tls-core`**
Handles issuing, renewing, and managing TLS certificates via ACME/Certbot. Handles issuing, renewing, and managing TLS certificates via ACME/Certbot.

View File

@@ -29,5 +29,5 @@ galaxy_info:
issue_tracker_url: "https://github.com/kevinveenbirkenbach/cymais/issues" issue_tracker_url: "https://github.com/kevinveenbirkenbach/cymais/issues"
documentation: "https://github.com/kevinveenbirkenbach/cymais/roles/srv-web-composer" documentation: "https://github.com/kevinveenbirkenbach/cymais/roles/srv-web-composer"
dependencies: dependencies:
- srv-web-injector-core - srv-web-injector-compose
- srv-web-tls-core - srv-web-tls-core

View File

@@ -1,6 +1,6 @@
- name: "include role srv-web-injector-core for {{domain}}" - name: "include role srv-web-injector-compose for {{domain}}"
include_role: include_role:
name: srv-web-injector-core name: srv-web-injector-compose
- name: "include role srv-web-tls-core for {{domain}}" - name: "include role srv-web-tls-core for {{domain}}"
include_role: include_role:

View File

@@ -1,3 +1,14 @@
--- ---
- name: Validate Nginx configuration
command: nginx -t
register: nginx_test
changed_when: false
failed_when: nginx_test.rc != 0
listen: restart nginx
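# Both handlers listen on 'restart nginx'; handlers run in definition order, so this config test runs before the restart below.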
- name: restart nginx - name: restart nginx
service: name=nginx state=restarted enabled=yes service:
name: nginx
state: restarted
enabled: yes
listen: restart nginx

View File

@@ -6,14 +6,12 @@
- nginx-mod-stream - nginx-mod-stream
state: present state: present
notify: restart nginx notify: restart nginx
when: run_once_nginx is not defined when: run_once_srv_web_core is not defined
- name: "Delete {{nginx.directories.configuration}} directory, when mode_reset"
file:
path: "{{ nginx.directories.configuration }}"
state: absent
when: mode_reset | bool and run_once_nginx is not defined
- name: "reset (if enabled)"
include_tasks: reset.yml
when: mode_reset | bool and run_once_srv_web_core is not defined
- name: Ensure nginx configuration directories are present - name: Ensure nginx configuration directories are present
file: file:
path: "{{ item }}" path: "{{ item }}"
@@ -28,7 +26,7 @@
(nginx.directories.http.values() | list) + (nginx.directories.http.values() | list) +
[ nginx.directories.streams ] [ nginx.directories.streams ]
}} }}
when: run_once_nginx is not defined when: run_once_srv_web_core is not defined
- name: Ensure nginx data storage directories are present - name: Ensure nginx data storage directories are present
file: file:
@@ -40,7 +38,7 @@
mode: '0755' mode: '0755'
loop: > loop: >
{{ nginx.directories.data.values() | list }} {{ nginx.directories.data.values() | list }}
when: run_once_nginx is not defined when: run_once_srv_web_core is not defined
- name: "Include tasks to create cache directories" - name: "Include tasks to create cache directories"
include_tasks: cache_directories.yml include_tasks: cache_directories.yml
@@ -50,13 +48,13 @@
src: nginx.conf.j2 src: nginx.conf.j2
dest: /etc/nginx/nginx.conf dest: /etc/nginx/nginx.conf
notify: restart nginx notify: restart nginx
when: run_once_nginx is not defined when: run_once_srv_web_core is not defined
- name: flush nginx service - name: flush nginx service
meta: flush_handlers meta: flush_handlers
when: run_once_nginx is not defined when: run_once_srv_web_core is not defined
- name: run the nginx tasks once - name: run {{ role_name }} once
set_fact: set_fact:
run_once_nginx: true run_once_srv_web_core: true
when: run_once_nginx is not defined when: run_once_srv_web_core is not defined

View File

@@ -0,0 +1,4 @@
- name: "Delete {{nginx.directories.configuration}} directory, when mode_reset"
file:
path: "{{ nginx.directories.configuration }}"
state: absent

View File

@@ -0,0 +1,27 @@
---
galaxy_info:
author: "Kevin Veen-Birkenbach"
description: "Core role for Nginx HTML injection of Matomo, theming, iFrame and JS snippets based on application feature flags."
license: "CyMaIS NonCommercial License (CNCL)"
license_url: "https://s.veen.world/cncl"
company: |
Kevin Veen-Birkenbach
Consulting & Coaching Solutions
https://www.veen.world
galaxy_tags:
- nginx
- injector
- matomo
- theming
repository: "https://github.com/kevinveenbirkenbach/cymais"
issue_tracker_url: "https://github.com/kevinveenbirkenbach/cymais/issues"
documentation: "https://github.com/kevinveenbirkenbach/cymais/tree/main/roles/srv-web-injector-compose"
min_ansible_version: "2.9"
platforms:
- name: Any
versions:
- all
dependencies:
# The injections are called in the script. Keep the logic there.
- srv-web-core

View File

@@ -1,4 +1,4 @@
# Load this role via srv-web-injector-core for consistency # Load this role via srv-web-injector-compose for consistency
- name: Generate color palette with colorscheme-generator - name: Generate color palette with colorscheme-generator
set_fact: set_fact:

View File

@@ -1 +1 @@
modifier_javascript_template_file: "{{ playbook_dir }}/roles/web-app-{{ application_id }}/templates/javascript.js.j2" modifier_javascript_template_file: "{{ application_id | abs_role_path_by_application_id }}/templates/javascript.js.j2"

View File

@@ -1,4 +1,4 @@
# Load this role via srv-web-injector-core for consistency # Load this role via srv-web-injector-compose for consistency
- name: "Relevant variables for role: {{ role_path | basename }}" - name: "Relevant variables for role: {{ role_path | basename }}"
debug: debug:

View File

@@ -24,5 +24,5 @@ galaxy_info:
issue_tracker_url: https://s.veen.world/cymaisissues issue_tracker_url: https://s.veen.world/cymaisissues
documentation: https://s.veen.world/cymais documentation: https://s.veen.world/cymais
dependencies: dependencies:
- docker - srv-web-https
- srv-web-https - srv-web-core

View File

@@ -6,7 +6,7 @@ server
{% include 'roles/web-app-oauth2-proxy/templates/endpoint.conf.j2'%} {% include 'roles/web-app-oauth2-proxy/templates/endpoint.conf.j2'%}
{% endif %} {% endif %}
{% include 'roles/srv-web-injector-core/templates/global.includes.conf.j2'%} {% include 'roles/srv-web-injector-compose/templates/global.includes.conf.j2'%}
{% if nginx_docker_reverse_proxy_extra_configuration is defined %} {% if nginx_docker_reverse_proxy_extra_configuration is defined %}
{# Additional Domain Specific Configuration #} {# Additional Domain Specific Configuration #}

View File

@@ -7,7 +7,7 @@ server {
server_name {{ domain }}; server_name {{ domain }};
{% include 'roles/net-letsencrypt/templates/ssl_header.j2' %} {% include 'roles/net-letsencrypt/templates/ssl_header.j2' %}
{% include 'roles/srv-web-injector-core/templates/global.includes.conf.j2' %} {% include 'roles/srv-web-injector-compose/templates/global.includes.conf.j2' %}
client_max_body_size {{ client_max_body_size | default('100m') }}; client_max_body_size {{ client_max_body_size | default('100m') }};
keepalive_timeout 70; keepalive_timeout 70;

View File

@@ -8,7 +8,7 @@ This role bootstraps **per-domain Nginx configuration**: it requests TLS certifi
A higher-level orchestration wrapper, *srv-web-proxy-domain* ties together several lower-level roles: A higher-level orchestration wrapper, *srv-web-proxy-domain* ties together several lower-level roles:
1. **`srv-web-injector-core`** applies global tweaks and includes. 1. **`srv-web-injector-compose`** applies global tweaks and includes.
2. **`srv-web-tls-core`** obtains Lets Encrypt certificates. 2. **`srv-web-tls-core`** obtains Lets Encrypt certificates.
3. **Domain template deployment** copies a Jinja2 vHost from *srv-web-proxy-core*. 3. **Domain template deployment** copies a Jinja2 vHost from *srv-web-proxy-core*.
4. **`web-app-oauth2-proxy`** *(optional)* protects the site with OAuth2. 4. **`web-app-oauth2-proxy`** *(optional)* protects the site with OAuth2.

View File

@@ -24,4 +24,4 @@ galaxy_info:
issue_tracker_url: https://s.veen.world/cymaisissues issue_tracker_url: https://s.veen.world/cymaisissues
documentation: https://s.veen.world/cymais documentation: https://s.veen.world/cymais
dependencies: dependencies:
- srv-web-core - srv-web-proxy-core

View File

@@ -2,12 +2,30 @@
include_role: include_role:
name: srv-web-composer name: srv-web-composer
- name: "copy nginx domain configuration to {{ configuration_destination }}" - name: "Copy nginx config to {{ configuration_destination }}"
template: template:
src: "{{ vhost_template_src }}" src: "{{ vhost_template_src }}"
dest: "{{ configuration_destination }}" dest: "{{ configuration_destination }}"
register: nginx_conf
notify: restart nginx notify: restart nginx
- name: "Check if {{ domains | get_domain(application_id) }} is reachable (only if config unchanged)"
uri:
url: "{{ domains | get_url(application_id, web_protocol) }}"
register: site_check
failed_when: false
changed_when: false
when: not nginx_conf.changed
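# The 'true' command in the next task is a deliberate no-op; the task exists only to notify the restart handler when an unchanged config serves an unreachable site.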
- name: Restart nginx if site is down
command:
cmd: "true"
notify: restart nginx
when:
- not nginx_conf.changed
- site_check.status is defined
- not site_check.status in [200,301,302]
- name: "set oauth2_proxy_application_id (Needed due to lazzy loading issue)" - name: "set oauth2_proxy_application_id (Needed due to lazzy loading issue)"
set_fact: set_fact:
oauth2_proxy_application_id: "{{ application_id }}" oauth2_proxy_application_id: "{{ application_id }}"

Some files were not shown because too many files have changed in this diff