Compare commits

...

5 Commits

370 changed files with 1428 additions and 977 deletions

.gitignore

@@ -4,3 +4,4 @@ venv
 *.log
 *.bak
 *tree.json
+roles/list.json


@@ -3,10 +3,16 @@ import os
 import argparse
 import yaml
 import json
-from collections import deque
+import re
 from typing import List, Dict, Any, Set
 
+JINJA_PATTERN = re.compile(r'{{.*}}')
+ALL_DEP_TYPES = ['run_after', 'dependencies', 'include_tasks', 'import_tasks', 'include_role', 'import_role']
+ALL_DIRECTIONS = ['to', 'from']
+ALL_KEYS = [f"{dep}_{dir}" for dep in ALL_DEP_TYPES for dir in ALL_DIRECTIONS]
+
+
 def find_role_meta(roles_dir: str, role: str) -> str:
     path = os.path.join(roles_dir, role, 'meta', 'main.yml')
     if not os.path.isfile(path):
@@ -14,10 +20,14 @@ def find_role_meta(roles_dir: str, role: str) -> str:
     return path
 
+
+def find_role_tasks(roles_dir: str, role: str) -> str:
+    path = os.path.join(roles_dir, role, 'tasks', 'main.yml')
+    if not os.path.isfile(path):
+        raise FileNotFoundError(f"Tasks not found for role: {role}")
+    return path
+
+
 def load_meta(path: str) -> Dict[str, Any]:
-    """
-    Load meta/main.yml → return galaxy_info + run_after + dependencies
-    """
     with open(path, 'r') as f:
         data = yaml.safe_load(f) or {}
@@ -28,6 +38,24 @@ def load_meta(path: str) -> Dict[str, Any]:
         'dependencies': data.get('dependencies', []) or []
     }
 
+
+def load_tasks(path: str, dep_type: str) -> List[str]:
+    with open(path, 'r') as f:
+        data = yaml.safe_load(f) or []
+    included_roles = []
+
+    for task in data:
+        if dep_type in task:
+            entry = task[dep_type]
+            if isinstance(entry, dict):
+                entry = entry.get('name', '')
+            if entry and not JINJA_PATTERN.search(entry):
+                included_roles.append(entry)
+
+    return included_roles
+
+
 def build_single_graph(
     start_role: str,
     dep_type: str,
@@ -35,71 +63,73 @@ def build_single_graph(
     roles_dir: str,
     max_depth: int
 ) -> Dict[str, Any]:
-    """
-    Build one graph for one dependency type and direction:
-      - 'to':   follow edges source→target
-      - 'from': reverse edges (find roles listing this role)
-      - max_depth > 0: limit hops to max_depth
-      - max_depth ≤ 0: stop when you'd revisit a node already on the path
-    """
     nodes: Dict[str, Dict[str, Any]] = {}
     links: List[Dict[str, str]] = []
 
     def traverse(role: str, depth: int, path: Set[str]):
-        # Register node once
         if role not in nodes:
             meta = load_meta(find_role_meta(roles_dir, role))
             node = {'id': role}
             node.update(meta['galaxy_info'])
             node['doc_url'] = f"https://docs.cymais.cloud/roles/{role}/README.html"
-            node['source_url'] = (
-                f"https://github.com/kevinveenbirkenbach/cymais/tree/master/roles/{role}"
-            )
+            node['source_url'] = f"https://github.com/kevinveenbirkenbach/cymais/tree/master/roles/{role}"
             nodes[role] = node
 
-        # Depth guard
         if max_depth > 0 and depth >= max_depth:
             return
 
-        # Determine neighbors according to direction
+        neighbors = []
+        if dep_type in ['run_after', 'dependencies']:
+            meta = load_meta(find_role_meta(roles_dir, role))
+            neighbors = meta.get(dep_type, [])
+        else:
+            try:
+                neighbors = load_tasks(find_role_tasks(roles_dir, role), dep_type)
+            except FileNotFoundError:
+                neighbors = []
+
         if direction == 'to':
-            neighbors = load_meta(find_role_meta(roles_dir, role)).get(dep_type, [])
             for tgt in neighbors:
                 links.append({'source': role, 'target': tgt, 'type': dep_type})
-                # General cycle check
                 if tgt in path:
                     continue
                 traverse(tgt, depth + 1, path | {tgt})
         else:  # direction == 'from'
-            # Find all roles that list this role in their dep_type
             for other in os.listdir(roles_dir):
                 try:
-                    meta_o = load_meta(find_role_meta(roles_dir, other))
-                except FileNotFoundError:
-                    continue
-                if role in meta_o.get(dep_type, []):
-                    links.append({'source': other, 'target': role, 'type': dep_type})
-                    if other in path:
-                        continue
-                    traverse(other, depth + 1, path | {other})
+                    other_neighbors = []
+                    if dep_type in ['run_after', 'dependencies']:
+                        meta_o = load_meta(find_role_meta(roles_dir, other))
+                        other_neighbors = meta_o.get(dep_type, [])
+                    else:
+                        other_neighbors = load_tasks(find_role_tasks(roles_dir, other), dep_type)
+                    if role in other_neighbors:
+                        links.append({'source': other, 'target': role, 'type': dep_type})
+                        if other in path:
+                            continue
+                        traverse(other, depth + 1, path | {other})
+                except FileNotFoundError:
+                    continue
 
-    # Kick off recursion
     traverse(start_role, depth=0, path={start_role})
     return {'nodes': list(nodes.values()), 'links': links}
 
 
 def build_mappings(
     start_role: str,
-    mappings: List[Dict[str, str]],
     roles_dir: str,
     max_depth: int
 ) -> Dict[str, Any]:
     result: Dict[str, Any] = {}
-    for mapping in mappings:
-        for dep_type, direction in mapping.items():
-            key = f"{dep_type}_{direction}"
-            result[key] = build_single_graph(
-                start_role, dep_type, direction, roles_dir, max_depth)
+    for key in ALL_KEYS:
+        dep_type, direction = key.rsplit('_', 1)
+        try:
+            result[key] = build_single_graph(start_role, dep_type, direction, roles_dir, max_depth)
+        except Exception:
+            result[key] = {'nodes': [], 'links': []}
     return result
@@ -124,64 +154,20 @@ def main():
     script_dir = os.path.dirname(os.path.abspath(__file__))
     default_roles_dir = os.path.abspath(os.path.join(script_dir, '..', '..', 'roles'))
 
-    parser = argparse.ArgumentParser(
-        description="Generate graphs based on dependency mappings"
-    )
-    parser.add_argument(
-        '-r', '--role',
-        required=True,
-        help="Starting role name"
-    )
-    parser.add_argument(
-        '-m', '--mapping',
-        nargs='+',
-        default=[
-            'run_after:to',
-            'run_after:from',
-            'dependencies:to',
-            'dependencies:from'
-        ],
-        help="Mapping entries as type:direction (default all 4 combos)"
-    )
-    parser.add_argument(
-        '-D', '--depth',
-        type=int,
-        default=0,
-        help="Max recursion depth (>0) or <=0 to stop on cycle"
-    )
-    parser.add_argument(
-        '-o', '--output',
-        choices=['yaml', 'json', 'console'],
-        default='console',
-        help="Output format"
-    )
-    parser.add_argument(
-        '--roles-dir',
-        default=default_roles_dir,
-        help="Roles directory"
-    )
+    parser = argparse.ArgumentParser(description="Generate dependency graphs")
+    parser.add_argument('-r', '--role', required=True, help="Starting role name")
+    parser.add_argument('-D', '--depth', type=int, default=0, help="Max recursion depth")
+    parser.add_argument('-o', '--output', choices=['yaml', 'json', 'console'], default='console')
+    parser.add_argument('--roles-dir', default=default_roles_dir, help="Roles directory")
     args = parser.parse_args()
 
-    mappings: List[Dict[str, str]] = []
-    for entry in args.mapping:
-        if ':' not in entry:
-            parser.error(f"Invalid mapping '{entry}', must be type:direction")
-        dep_type, direction = entry.split(':', 1)
-        if dep_type not in ('run_after', 'dependencies'):
-            parser.error(f"Unknown dependency type '{dep_type}'")
-        if direction not in ('to', 'from'):
-            parser.error(f"Unknown direction '{direction}'")
-        mappings.append({dep_type: direction})
-
-    graphs = build_mappings(
-        start_role=args.role,
-        mappings=mappings,
-        roles_dir=args.roles_dir,
-        max_depth=args.depth
-    )
+    graphs = build_mappings(args.role, args.roles_dir, args.depth)
 
-    for key, graph_data in graphs.items():
+    for key in ALL_KEYS:
+        graph_data = graphs.get(key, {'nodes': [], 'links': []})
         output_graph(graph_data, args.output, args.role, key)
 
 
 if __name__ == '__main__':
     main()
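
To make the new behaviour easier to follow, here is a minimal, self-contained Python sketch of the key naming and the Jinja guard introduced above. The constants mirror the diff; the sample task list and role names are hypothetical and only serve as illustration:

import re

# Same constants as in the diff above.
JINJA_PATTERN = re.compile(r'{{.*}}')
ALL_DEP_TYPES = ['run_after', 'dependencies', 'include_tasks', 'import_tasks', 'include_role', 'import_role']
ALL_DIRECTIONS = ['to', 'from']
ALL_KEYS = [f"{dep}_{dir}" for dep in ALL_DEP_TYPES for dir in ALL_DIRECTIONS]

print(len(ALL_KEYS))               # 12 graphs per role: run_after_to, run_after_from, ..., import_role_from
print(ALL_KEYS[0].rsplit('_', 1))  # ['run_after', 'to'] -- how build_mappings recovers type and direction

# load_tasks() ignores include/import targets that are Jinja expressions,
# since they cannot be resolved statically. Hypothetical task data:
tasks = [
    {'include_role': {'name': 'some-static-role'}},
    {'include_role': {'name': '{{ some_variable }}'}},  # filtered out by JINJA_PATTERN
]
resolved = []
for task in tasks:
    entry = task.get('include_role')
    if isinstance(entry, dict):
        entry = entry.get('name', '')
    if entry and not JINJA_PATTERN.search(entry):
        resolved.append(entry)
print(resolved)  # ['some-static-role']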


@@ -0,0 +1,65 @@
#!/usr/bin/env python3
"""
Generate a JSON file listing all Ansible role directories.

Usage:
  python roles_list.py [--roles-dir path/to/roles] [--output path/to/roles/list.json | console]
"""

import os
import json
import argparse


def find_roles(roles_dir: str):
    """Return sorted list of role names under roles_dir."""
    return sorted([
        entry for entry in os.listdir(roles_dir)
        if os.path.isdir(os.path.join(roles_dir, entry))
    ])


def write_roles_list(roles, out_file):
    """Write the list of roles to out_file as JSON."""
    os.makedirs(os.path.dirname(out_file), exist_ok=True)
    with open(out_file, 'w', encoding='utf-8') as f:
        json.dump(roles, f, indent=2)
    print(f"Wrote roles list to {out_file}")


def main():
    # Determine default roles_dir relative to this script: ../../.. -> roles
    script_dir = os.path.dirname(os.path.abspath(__file__))
    default_roles_dir = os.path.abspath(
        os.path.join(script_dir, '..', '..', 'roles')
    )
    default_output = os.path.join(default_roles_dir, 'list.json')

    parser = argparse.ArgumentParser(description='Generate roles/list.json')
    parser.add_argument(
        '--roles-dir', '-r',
        default=default_roles_dir,
        help=f'Directory containing role subfolders (default: {default_roles_dir})'
    )
    parser.add_argument(
        '--output', '-o',
        default=default_output,
        help=(
            'Output path for roles list JSON '
            '(or "console" to print to stdout, default: %(default)s)'
        )
    )
    args = parser.parse_args()

    if not os.path.isdir(args.roles_dir):
        parser.error(f"Roles directory not found: {args.roles_dir}")

    roles = find_roles(args.roles_dir)

    if args.output.lower() == 'console':
        # Print JSON to stdout
        print(json.dumps(roles, indent=2))
    else:
        write_roles_list(roles, args.output)


if __name__ == '__main__':
    main()


@@ -2,7 +2,7 @@
 import os
 import argparse
 import json
-from typing import List, Dict, Any
+from typing import Dict, Any
 
 from cli.generate.graph import build_mappings, output_graph
@@ -21,22 +21,13 @@ def main():
     default_roles_dir = os.path.abspath(os.path.join(script_dir, '..', '..', 'roles'))
 
     parser = argparse.ArgumentParser(
-        description="Generate mappings-based graphs for each role and write tree.json"
+        description="Generate all graphs for each role and write meta/tree.json"
     )
     parser.add_argument(
         '-d', '--role_dir',
         default=default_roles_dir,
         help=f"Path to roles directory (default: {default_roles_dir})"
     )
-    parser.add_argument(
-        '-m', '--mapping',
-        nargs='+',
-        default=[
-            'run_after:to', 'run_after:from',
-            'dependencies:to', 'dependencies:from'
-        ],
-        help="Mapping entries as type:direction (default all 4 combos)"
-    )
     parser.add_argument(
         '-D', '--depth',
         type=int,
@@ -61,17 +52,8 @@ def main():
     )
     args = parser.parse_args()
 
-    # parse mappings
-    mappings: List[Dict[str, str]] = []
-    for entry in args.mapping:
-        if ':' not in entry:
-            parser.error(f"Invalid mapping '{entry}', must be type:direction")
-        dep_type, direction = entry.split(':', 1)
-        mappings.append({dep_type: direction})
-
     if args.verbose:
         print(f"Roles directory: {args.role_dir}")
-        print(f"Mappings: {mappings}")
         print(f"Max depth: {args.depth}")
         print(f"Output format: {args.output}")
         print(f"Preview mode: {args.preview}")
@@ -80,15 +62,12 @@ def main():
         if args.verbose:
             print(f"Processing role: {role_name}")
 
-        # Build graphs for each mapping
-        graphs = build_mappings(
+        graphs: Dict[str, Any] = build_mappings(
             start_role=role_name,
-            mappings=mappings,
             roles_dir=args.role_dir,
             max_depth=args.depth
         )
 
-        # Prepare output file or previews
         if args.preview:
             for key, data in graphs.items():
                 if args.verbose:
@@ -97,13 +76,10 @@ def main():
         else:
             tree_file = os.path.join(role_path, 'meta', 'tree.json')
             os.makedirs(os.path.dirname(tree_file), exist_ok=True)
-            # write combined JSON
             with open(tree_file, 'w') as f:
                 json.dump(graphs, f, indent=2)
-            if args.verbose:
-                print(f"Wrote {tree_file}")
-            else:
-                print(f"Wrote {tree_file}")
+            print(f"Wrote {tree_file}")
 
 
 if __name__ == '__main__':
     main()


@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 """
-CLI for extracting invokable role paths from a nested roles YAML file using argparse.
+CLI for extracting invokable or non-invokable role paths from a nested roles YAML file using argparse.
 Assumes a default roles file at the project root if none is provided.
 """
@@ -18,12 +18,12 @@ sys.path.insert(0, project_root)
 import argparse
 import yaml
 
-from filter_plugins.invokable_paths import get_invokable_paths
+from filter_plugins.invokable_paths import get_invokable_paths, get_non_invokable_paths
 
 
 def main():
     parser = argparse.ArgumentParser(
-        description="Extract invokable role paths from a nested roles YAML file."
+        description="Extract invokable or non-invokable role paths from a nested roles YAML file."
     )
     parser.add_argument(
         "roles_file",
@@ -33,12 +33,32 @@ def main():
     )
     parser.add_argument(
         "--suffix", "-s",
-        help="Optional suffix to append to each invokable path.",
+        help="Optional suffix to append to each path.",
         default=None
     )
+    mode_group = parser.add_mutually_exclusive_group()
+    mode_group.add_argument(
+        "--non-invokable", "-n",
+        action='store_true',
+        help="List paths where 'invokable' is False or not set."
+    )
+    mode_group.add_argument(
+        "--invokable", "-i",
+        action='store_true',
+        help="List paths where 'invokable' is True. (default behavior)"
+    )
+
     args = parser.parse_args()
 
+    # Default to invokable if neither flag is provided
+    list_non = args.non_invokable
+    list_inv = args.invokable or not (args.non_invokable or args.invokable)
+
     try:
-        paths = get_invokable_paths(args.roles_file, args.suffix)
+        if list_non:
+            paths = get_non_invokable_paths(args.roles_file, args.suffix)
+        else:
+            paths = get_invokable_paths(args.roles_file, args.suffix)
     except FileNotFoundError as e:
         print(f"Error: {e}", file=sys.stderr)

cli/meta/role_folder.py (new file)

@@ -0,0 +1,76 @@
#!/usr/bin/env python3
"""
CLI Script: get_role_folder_cli.py
This script determines the appropriate Ansible role folder based on the provided application_id
by inspecting each role's vars/main.yml within the roles directory. By default, it assumes the
roles directory is located at the project root, relative to this script's location.
Example:
./get_role_folder_cli.py --application-id my-app-id
"""
import os
import sys
import argparse
import yaml
def get_role_folder(application_id, roles_path):
"""
Find the role directory under `roles_path` whose vars/main.yml contains the specified application_id.
:param application_id: The application_id to match.
:param roles_path: Path to the roles directory.
:return: The name of the matching role directory.
:raises RuntimeError: If no match is found or if an error occurs while reading files.
"""
if not os.path.isdir(roles_path):
raise RuntimeError(f"Roles path not found: {roles_path}")
for role in sorted(os.listdir(roles_path)):
role_dir = os.path.join(roles_path, role)
vars_file = os.path.join(role_dir, 'vars', 'main.yml')
if os.path.isfile(vars_file):
try:
with open(vars_file, 'r') as f:
data = yaml.safe_load(f) or {}
except Exception as e:
raise RuntimeError(f"Failed to load {vars_file}: {e}")
if data.get('application_id') == application_id:
return role
raise RuntimeError(f"No role found with application_id '{application_id}' in {roles_path}")
def main():
parser = argparse.ArgumentParser(
description='Determine the Ansible role folder by application_id'
)
parser.add_argument(
'application_id',
help='The application_id defined in vars/main.yml to search for'
)
parser.add_argument(
'-r', '--roles-path',
default=os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.pardir, os.pardir,
'roles'
),
help='Path to the roles directory (default: roles/ at project root)'
)
args = parser.parse_args()
try:
folder = get_role_folder(args.application_id, args.roles_path)
print(folder)
sys.exit(0)
except RuntimeError as err:
print(f"Error: {err}", file=sys.stderr)
sys.exit(1)
if __name__ == '__main__':
main()
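
As a quick sanity check of the lookup logic, a minimal sketch that exercises the same matching rule against a temporary roles tree; the role name and application_id are made up for the example, and the helper is a trimmed stand-in, not the script's own function:

import os
import tempfile
import yaml

def find_by_application_id(application_id, roles_path):
    # Same matching rule as get_role_folder() above: first role whose vars/main.yml declares the id.
    for role in sorted(os.listdir(roles_path)):
        vars_file = os.path.join(roles_path, role, 'vars', 'main.yml')
        if os.path.isfile(vars_file):
            with open(vars_file) as f:
                data = yaml.safe_load(f) or {}
            if data.get('application_id') == application_id:
                return role
    return None

with tempfile.TemporaryDirectory() as roles_dir:
    os.makedirs(os.path.join(roles_dir, 'web-app-demo', 'vars'))
    with open(os.path.join(roles_dir, 'web-app-demo', 'vars', 'main.yml'), 'w') as f:
        yaml.safe_dump({'application_id': 'demo'}, f)
    print(find_by_application_id('demo', roles_dir))  # web-app-demo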


@@ -12,10 +12,10 @@ CyMaIS covers everything from essential system setups to advanced configurations
 Every business is unique, and so is CyMaIS! With a modular architecture, it adapts to specific requirements, whether for startups, growing businesses, NGOs, or large enterprises.
 
 ## Proactive Monitoring & Maintenance 🔍
-With automated updates, system health checks, and security audits, CyMaIS ensures your infrastructure is always up-to-date and running smoothly. Roles such as `mon-bot-docker-container`, `mon-bot-btrfs`, and `mon-bot-webserver` help monitor system integrity.
+With automated updates, system health checks, and security audits, CyMaIS ensures your infrastructure is always up-to-date and running smoothly. Roles such as `sys-hlth-docker-container`, `sys-hlth-btrfs`, and `sys-hlth-webserver` help monitor system integrity.
 
 ## Uncompromised Security 🔒
-Security is a top priority! CyMaIS includes robust security features like full-disk encryption recommendations, 2FA enforcement, encrypted server deployments (`web-app-keycloak`, `svc-openldap`), and secure backup solutions (`bkp-remote-to-local`, `bkp-data-to-usb`).
+Security is a top priority! CyMaIS includes robust security features like full-disk encryption recommendations, 2FA enforcement, encrypted server deployments (`web-app-keycloak`, `svc-openldap`), and secure backup solutions (`sys-bkp-remote-to-local`, `svc-sys-bkp-data-to-usb`).
 
 ## User-Friendly with Expert Support 👩‍💻
 No need to be a Linux or Docker expert! CyMaIS simplifies deployment with intuitive role-based automation. Documentation and community support make IT administration accessible to all experience levels.


@@ -10,13 +10,13 @@ Follow these guides to install and configure CyMaIS:
 ## Key Responsibilities 🔧
 - **User Management** - Configure LDAP, Keycloak, and user permissions.
-- **Security & Backups** - Set up `bkp-remote-to-local`, `bkp-data-to-usb`, and `core-security` roles.
+- **Security & Backups** - Set up `sys-bkp-remote-to-local`, `svc-sys-bkp-data-to-usb`, and `core-security` roles.
 - **Application Hosting** - Deploy services like `Nextcloud`, `Matrix`, `Gitea`, and more.
 - **Networking & VPN** - Configure `WireGuard`, `OpenVPN`, and `Nginx Reverse Proxy`.
 
 ## Managing & Updating CyMaIS 🔄
 - Regularly update services using `update-docker`, `update-pacman`, or `update-apt`.
-- Monitor system health with `mon-bot-btrfs`, `mon-bot-webserver`, and `mon-bot-docker-container`.
-- Automate system maintenance with `maint-lock`, `cln-backups-service`, and `maint-docker-restart`.
+- Monitor system health with `sys-hlth-btrfs`, `sys-hlth-webserver`, and `sys-hlth-docker-container`.
+- Automate system maintenance with `sys-lock`, `sys-cln-bkps-service`, and `sys-rpr-docker-hard`.
 
 For more details, refer to the specific guides above.


@@ -5,14 +5,12 @@ from ansible.errors import AnsibleFilterError
 
 class FilterModule(object):
     def filters(self):
-        # module_util-Verzeichnis ermitteln und zum Import-Pfad hinzufügen
         plugin_dir = os.path.dirname(__file__)
         project_root = os.path.dirname(plugin_dir)
         module_utils = os.path.join(project_root, 'module_utils')
         if module_utils not in sys.path:
            sys.path.append(module_utils)
 
-        # jetzt kannst Du domain_utils importieren
         try:
             from domain_utils import get_domain
         except ImportError as e:


@@ -0,0 +1,48 @@
'''
Ansible filter plugin: get_role_folder

This filter inspects each role under the given roles directory, loads its vars/main.yml,
and returns the role folder name whose application_id matches the provided value.
'''

from ansible.errors import AnsibleFilterError
import os
import yaml


def get_role_folder(application_id, roles_path='roles'):
    """
    Find the role directory under `roles_path` whose vars/main.yml contains the given application_id.

    :param application_id: The application_id to match.
    :param roles_path: Path to the roles directory (default: 'roles').
    :return: The name of the matching role directory.
    :raises AnsibleFilterError: If vars file is unreadable or no match is found.
    """
    if not os.path.isdir(roles_path):
        raise AnsibleFilterError(f"Roles path not found: {roles_path}")

    for role in os.listdir(roles_path):
        role_dir = os.path.join(roles_path, role)
        vars_file = os.path.join(role_dir, 'vars', 'main.yml')
        if os.path.isfile(vars_file):
            try:
                with open(vars_file, 'r') as f:
                    data = yaml.safe_load(f) or {}
            except Exception as e:
                raise AnsibleFilterError(f"Failed to load {vars_file}: {e}")
            if data.get('application_id') == application_id:
                return role

    raise AnsibleFilterError(f"No role found with application_id '{application_id}' in {roles_path}")


class FilterModule(object):
    """
    Register the get_role_folder filter
    """
    def filters(self):
        return {
            'get_role_folder': get_role_folder,
        }


@@ -2,29 +2,18 @@ import os
 import yaml
 from typing import Dict, List, Optional
 
 def get_invokable_paths(
     roles_file: Optional[str] = None,
     suffix: Optional[str] = None
 ) -> List[str]:
     """
-    Load nested roles YAML from the given file (or default at project root) and return
-    dash-joined paths where 'invokable' is True. Appends suffix if provided.
-
-    :param roles_file: Optional path to YAML file. Defaults to '<project_root>/roles/categories.yml'.
-    :param suffix: Optional suffix to append to each invokable path.
-    :return: List of invokable paths.
-    :raises FileNotFoundError: If the YAML file cannot be found.
-    :raises yaml.YAMLError: If the YAML file cannot be parsed.
-    :raises ValueError: If the root of the YAML is not a dictionary.
+    Load nested roles YAML and return dash-joined paths where 'invokable' is True. Appends suffix if provided.
     """
-    # Determine default roles_file if not provided
     if not roles_file:
         script_dir = os.path.dirname(os.path.abspath(__file__))
         project_root = os.path.dirname(script_dir)
         roles_file = os.path.join(project_root, 'roles', 'categories.yml')
 
-    # Load and validate YAML
     try:
         with open(roles_file, 'r') as f:
             data = yaml.safe_load(f) or {}
@@ -36,7 +25,6 @@ def get_invokable_paths(
     if not isinstance(data, dict):
         raise ValueError("YAML root is not a dictionary")
 
-    # Unwrap if single 'roles' key
     roles = data
     if 'roles' in roles and isinstance(roles['roles'], dict) and len(roles) == 1:
         roles = roles['roles']
@@ -54,7 +42,6 @@ def get_invokable_paths(
                 p += suffix
             found.append(p)
 
-            # Recurse into non-metadata child dicts
             children = {
                 ck: cv for ck, cv in cfg.items()
                 if ck not in METADATA and isinstance(cv, dict)
@@ -66,6 +53,61 @@ def get_invokable_paths(
     return _recurse(roles)
 
+
+def get_non_invokable_paths(
+    roles_file: Optional[str] = None,
+    suffix: Optional[str] = None
+) -> List[str]:
+    """
+    Load nested roles YAML and return dash-joined paths where 'invokable' is False or missing.
+    Appends suffix if provided.
+    """
+    if not roles_file:
+        script_dir = os.path.dirname(os.path.abspath(__file__))
+        project_root = os.path.dirname(script_dir)
+        roles_file = os.path.join(project_root, 'roles', 'categories.yml')
+
+    try:
+        with open(roles_file, 'r') as f:
+            data = yaml.safe_load(f) or {}
+    except FileNotFoundError:
+        raise FileNotFoundError(f"Roles file not found: {roles_file}")
+    except yaml.YAMLError as e:
+        raise yaml.YAMLError(f"Error parsing YAML {roles_file}: {e}")
+
+    if not isinstance(data, dict):
+        raise ValueError("YAML root is not a dictionary")
+
+    roles = data
+    if 'roles' in roles and isinstance(roles['roles'], dict) and len(roles) == 1:
+        roles = roles['roles']
+
+    def _recurse_non(subroles: Dict[str, dict], parent: List[str] = None) -> List[str]:
+        parent = parent or []
+        found: List[str] = []
+        METADATA = {'title', 'description', 'icon', 'invokable'}
+
+        for key, cfg in subroles.items():
+            path = parent + [key]
+            p = '-'.join(path)
+            inv = cfg.get('invokable', False)
+            if not inv:
+                entry = p + (suffix or "")
+                found.append(entry)
+
+            children = {
+                ck: cv for ck, cv in cfg.items()
+                if ck not in METADATA and isinstance(cv, dict)
+            }
+            if children:
+                found.extend(_recurse_non(children, path))
+
+        return found
+
+    return _recurse_non(roles)
+
+
 class FilterModule:
     def filters(self):
-        return {'invokable_paths': get_invokable_paths}
+        return {
+            'invokable_paths': get_invokable_paths,
+            'non_invokable_paths': get_non_invokable_paths
+        }
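
For the path convention shared by both filters, a compact sketch with a hypothetical nested structure (keys are dash-joined, metadata keys are skipped, and 'invokable' defaults to False when missing):

# Hypothetical categories structure, mirroring the shape the filters expect.
roles = {
    'sys': {
        'title': 'System',
        'invokable': False,
        'bkp': {'title': 'Backup & Restore', 'invokable': True},
    },
    'web': {'title': 'Web', 'invokable': True},
}

METADATA = {'title', 'description', 'icon', 'invokable'}

def walk(subroles, parent=()):
    # Yield (dash-joined path, invokable flag) for every nested key, like the two filters do.
    for key, cfg in subroles.items():
        path = parent + (key,)
        yield '-'.join(path), bool(cfg.get('invokable', False))
        children = {k: v for k, v in cfg.items() if k not in METADATA and isinstance(v, dict)}
        yield from walk(children, path)

invokable = [p for p, inv in walk(roles) if inv]
non_invokable = [p for p, inv in walk(roles) if not inv]
print(invokable)      # ['sys-bkp', 'web']
print(non_invokable)  # ['sys']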


@@ -13,23 +13,23 @@ system_maintenance_lock_timeout_restart_docker: "{{system_maintenance_lock_ti
 
 ### Defined Services for Backup Tasks
 system_maintenance_backup_services:
-  - "bkp-docker-to-local"
-  - "bkp-remote-to-local"
-  - "bkp-data-to-usb"
-  - "bkp-docker-to-local-everything"
+  - "sys-bkp-docker-to-local"
+  - "svc-sys-bkp-rmt-2-loc"
+  - "svc-sys-bkp-data-to-usb"
+  - "sys-bkp-docker-to-local-everything"
 
 ### Defined Services for System Cleanup
 system_maintenance_cleanup_services:
-  - "cln-backups"
-  - "cln-disc-space"
-  - "cln-failed-docker-backups"
+  - "sys-cln-backups"
+  - "sys-cln-disc-space"
+  - "sys-cln-faild-bkps"
 
 ### Services that Manipulate the System
 system_maintenance_manipulation_services:
-  - "maint-docker-heal"
+  - "sys-rpr-docker-soft"
   - "update-docker"
-  - "cln-docker-storage-optimizer"
-  - "maint-docker-restart"
+  - "sys-opt-ssd-hdd"
+  - "sys-rpr-docker-hard"
 
 ## Total System Maintenance Services
 system_maintenance_services: "{{ system_maintenance_backup_services + system_maintenance_cleanup_services + system_maintenance_manipulation_services }}"


@@ -3,4 +3,4 @@
 path_administrator_home: "/home/administrator/"
 path_administrator_scripts: "/opt/scripts/"
 path_docker_compose_instances: "/opt/docker/"
-path_system_lock_script: "/opt/scripts/maint-lock.py"
+path_system_lock_script: "/opt/scripts/sys-lock.py"


@@ -15,7 +15,7 @@ For a complete list of role categories and detailed definitions, see:
   Fundamental system configuration (SSH, journald, sudo, etc.)
 
 - **gen-***
-  Generic helpers and language/tool installers (e.g. `gen-git`, `gen-locales`, `gen-timer`)
+  Generic helpers and language/tool installers (e.g. `gen-git`, `gen-locales`, `sys-timer`)
 
 - **desk-***
   Desktop environment and application roles (e.g. `desk-gnome`, `desk-browser`, `desk-libreoffice`)
@@ -59,13 +59,13 @@ For a complete list of role categories and detailed definitions, see:
 
 ## Monitoring & Alerting
 
-- **mon-bot-***
+- **sys-hlth-***
   “Bot”-style health checks (Btrfs, diskspace, Docker, journalctl, CSP crawler, webserver) with alerts.
 
 - **monitor-core-***
   Low-level system monitors (journalctl, Docker containers, disk space, etc.)
 
-- **alert-***
+- **sys-alm-***
   Notification handlers for failures (core, email, Telegram).
 
 ---
@@ -78,14 +78,14 @@ For a complete list of role categories and detailed definitions, see:
 - **maint-docker-***
   Automated recovery and restarts for Docker Compose workloads.
 
-- **cln-***
+- **sys-cln-***
   Housekeeping tasks (old backups, expired certs, log rotation).
 
 ---
 
 ## Backup & Restore
 
-- **bkp-***
+- **sys-bkp-***
   Local and remote backup strategies for files, Docker volumes, databases.
 
 ---
@@ -112,8 +112,8 @@ For a complete list of role categories and detailed definitions, see:
 > **Tip:** To find a role quickly, search for its prefix:
 > `core-`, `gen-`, `desk-`, `srv-web-`, `web-svc-`, `web-app-`,
-> `net-`, `svc-`, `mon-bot-`, `monitor-core-`, `alert-`,
-> `maint-`, `maint-docker-`, `cln-`, `bkp-`, `update-`,
+> `net-`, `svc-`, `sys-hlth-`, `monitor-core-`, `sys-alm-`,
+> `maint-`, `maint-docker-`, `sys-cln-`, `sys-bkp-`, `update-`,
 > `pkgmgr-`, `user-`.
 
 ---


@@ -1,8 +0,0 @@
[Unit]
Description=Notifier for %i
[Service]
Type=oneshot
ExecStart=/usr/bin/systemctl start alert-telegram.cymais@%i.service alert-email.cymais@%i.service
User=root
Group=systemd-journal


@@ -1 +0,0 @@
application_id: compose


@@ -1,4 +0,0 @@
- name: "restart alert-email service"
systemd:
name: alert-email.cymais.service
daemon_reload: yes


@@ -1,2 +0,0 @@
systemd_notifier_email_folder: '{{path_administrator_scripts}}alert-email/'
application_id: email


@@ -1,4 +0,0 @@
- name: "restart alert-telegram service"
systemd:
name: alert-telegram.cymais.service
daemon_reload: yes


@@ -1,3 +0,0 @@
systemd_telegram_folder: /opt/ansible-roles/alert-telegram/
systemd_telegram_script: '{{systemd_telegram_folder}}alert-telegram.sh'
application_id: telegram


@@ -1,5 +0,0 @@
- name: "reload bkp-data-to-usb.cymais.service"
systemd:
name: bkp-data-to-usb.cymais.service
state: reloaded
daemon_reload: yes


@@ -1,2 +0,0 @@
backup_directory_validator_folder: '{{path_administrator_scripts}}directory-validator/'
application_id: directory-validator


@@ -1,9 +0,0 @@
- name: "reload bkp-docker-to-local-everything.cymais.service"
systemd:
name: bkp-docker-to-local-everything.cymais.service
daemon_reload: yes
- name: "reload bkp-docker-to-local.cymais.service"
systemd:
name: bkp-docker-to-local.cymais.service
daemon_reload: yes


@@ -1 +0,0 @@
application_id: provider


@@ -1,4 +0,0 @@
- name: "reload bkp-remote-to-local service"
systemd:
name: bkp-remote-to-local.cymais.service
daemon_reload: yes


@@ -1,2 +0,0 @@
docker_backup_remote_to_local_folder: '{{path_administrator_scripts}}bkp-remote-to-local/'
application_id: bkp-remote-to-local


@@ -1,14 +1,42 @@
 roles:
-  core:
-    title: "Core & System"
-    description: "Fundamental system configuration"
-    icon: "fas fa-cogs"
-    invokable: true
+  sys:
+    title: "System"
+    invokable: false
+    alm:
+      title: "Alerting"
+      description: "Notification handlers for system events"
+      icon: "fas fa-bell"
+      invokable: false
+    cln:
+      title: "Cleanup"
+      description: "Roles for cleaning up various system resources—old backups, unused certificates, temporary files, Docker volumes, disk caches, deprecated domains, and more."
+      icon: "fas fa-trash-alt"
+      invokable: false
+    hlth:
+      title: "Monitoring"
+      description: "Roles for system monitoring and health checks—encompassing bot-style automated checks and core low-level monitors for logs, containers, disk usage, and more."
+      icon: "fas fa-chart-area"
+      invokable: false
+    bkp:
+      title: "Backup & Restore"
+      description: "Backup strategies & restore procedures"
+      icon: "fas fa-hdd"
+      invokable: false
+    update:
+      title: "Updates & Package Management"
+      description: "OS & package updates"
+      icon: "fas fa-sync"
+      invokable: false
   drv:
     title: "Drivers"
     description: "Roles for installing and configuring hardware drivers—covering printers, graphics, input devices, and other peripheral support."
     icon: "fas fa-microchip"
     invokable: true
+#  core:
+#    title: "Core & System"
+#    description: "Fundamental system configuration"
+#    icon: "fas fa-cogs"
+#    invokable: true
   gen:
     title: "Generic"
     description: "Helper roles & installers (git, locales, timer, etc.)"
@@ -73,38 +101,8 @@ roles:
     description: "Docker infrastructure services (DBMS, LDAP, Redis, etc.)"
     icon: "fas fa-database"
     invokable: true
-  mon:
-    title: "Monitoring"
-    description: "Roles for system monitoring and health checks—encompassing bot-style automated checks and core low-level monitors for logs, containers, disk usage, and more."
-    icon: "fas fa-chart-area"
-    invokable: true
-  alert:
-    title: "Alerting"
-    description: "Notification handlers for system events"
-    icon: "fas fa-bell"
-    invokable: true
-  maint:
-    title: "Maintenance & Healing"
-    description: "Periodic maintenance & auto-recovery"
-    icon: "fas fa-tools"
-    invokable: true
-  bkp:
-    title: "Backup & Restore"
-    description: "Backup strategies & restore procedures"
-    icon: "fas fa-hdd"
-    invokable: true
-  update:
-    title: "Updates & Package Management"
-    description: "OS & package updates"
-    icon: "fas fa-sync"
-    invokable: true
   user:
     title: "Users & Access"
     description: "User accounts & access control"
     icon: "fas fa-users"
     invokable: false
-  cln:
-    title: "Cleanup"
-    description: "Roles for cleaning up various system resources—old backups, unused certificates, temporary files, Docker volumes, disk caches, deprecated domains, and more."
-    icon: "fas fa-trash-alt"
-    invokable: true


@@ -1,5 +0,0 @@
- name: "reload cln-backups.cymais.service"
systemd:
name: cln-backups.cymais.service
enabled: yes
daemon_reload: yes


@@ -1,2 +0,0 @@
cleanup_backups_directory: '{{path_administrator_scripts}}cln-backups/'
application_id: backups-service


@@ -1 +0,0 @@
application_id: backups-timer


@@ -1,6 +0,0 @@
- name: "Reload and restart cln-certs.cymais.service"
systemd:
name: cln-certs.cymais.service
enabled: yes
daemon_reload: yes
state: restarted


@@ -1 +0,0 @@
application_id: certs


@@ -1,5 +0,0 @@
- name: "reload cln-disc-space.cymais.service"
systemd:
name: cln-disc-space.cymais.service
enabled: yes
daemon_reload: yes


@@ -1,26 +0,0 @@
- name: "create {{cleanup_disc_space_folder}}"
file:
path: "{{cleanup_disc_space_folder}}"
state: directory
mode: 0755
- name: create cln-disc-space.sh
template:
src: cln-disc-space.sh.j2
dest: "{{cleanup_disc_space_folder}}cln-disc-space.sh"
- name: create cln-disc-space.cymais.service
template:
src: cln-disc-space.service.j2
dest: /etc/systemd/system/cln-disc-space.cymais.service
notify: reload cln-disc-space.cymais.service
- name: "set 'service_name' to '{{ role_name }}'"
set_fact:
service_name: "{{ role_name }}"
- name: "include role for gen-timer for {{service_name}}"
include_role:
name: gen-timer
vars:
on_calendar: "{{on_calendar_cleanup_disc_space}}"


@@ -1,2 +0,0 @@
cleanup_disc_space_folder: '{{path_administrator_scripts}}cln-disc-space/'
application_id: disc-space


@@ -1 +0,0 @@
application_id: docker-anonymous-volumes


@@ -1,5 +0,0 @@
- name: "reload cln-docker-storage-optimizer.cymais.service"
systemd:
name: cln-docker-storage-optimizer.cymais.service
state: reloaded
daemon_reload: yes


@@ -1,22 +0,0 @@
- name: "create {{storage_optimizer_directory}}"
file:
path: "{{storage_optimizer_directory}}"
state: directory
mode: 0755
- name: create cln-docker-storage-optimizer.cymais.service
template:
src: cln-docker-storage-optimizer.service.j2
dest: /etc/systemd/system/cln-docker-storage-optimizer.cymais.service
notify: reload cln-docker-storage-optimizer.cymais.service
- name: create cln-docker-storage-optimizer.py
copy:
src: cln-docker-storage-optimizer.py
dest: "{{storage_optimizer_script}}"
mode: 0755
- name: "optimize storage performance"
systemd:
name: cln-docker-storage-optimizer.cymais.service
state: started


@@ -1,3 +0,0 @@
storage_optimizer_directory: '{{path_administrator_scripts}}cln-docker-storage-optimizer/'
storage_optimizer_script: '{{storage_optimizer_directory}}cln-docker-storage-optimizer.py'
application_id: docker-storage-optimizer


@@ -1 +0,0 @@
application_id: domains


@@ -1,5 +0,0 @@
- name: "Reload cln-failed-docker-backups.cymais.service"
systemd:
name: cln-failed-docker-backups.cymais.service
enabled: yes
daemon_reload: yes


@@ -13,4 +13,4 @@
   when: applications | is_feature_enabled('central_database',application_id)
 
 - name: "Add database to backup"
-  include_tasks: "{{ playbook_dir }}/roles/bkp-docker-to-local/tasks/seed-database-to-backup.yml"
+  include_tasks: "{{ playbook_dir }}/roles/sys-bkp-docker-to-local/tasks/seed-database-to-backup.yml"


@@ -1 +0,0 @@
application_id: daemon


@@ -1 +0,0 @@
application_id: journalctl


@@ -1,21 +0,0 @@
# System Security
## Description
This role configures security-related settings on the target system to ensure that essential security services are properly managed. It reinforces system security by integrating with related roles and enforcing best practices.
## Overview
The role focuses on:
- Configuring key security services such as [SSH](https://en.wikipedia.org/wiki/Secure_Shell).
- Integrating with other security roles to maintain a robust system security posture.
- Ensuring that security configurations are applied consistently across the system.
## Purpose
The primary purpose of this role is to enhance overall system security by managing and configuring essential security services and settings.
## Features
- **Security Configuration:** Applies recommended security settings for core services.
- **Integration:** Works alongside related roles such as [sshd](https://en.wikipedia.org/wiki/SSH_daemon) to provide comprehensive security management.


@@ -1,24 +0,0 @@
---
galaxy_info:
author: "Kevin Veen-Birkenbach"
description: "Configures security-related settings on the target system and ensures essential security services are properly managed."
license: "CyMaIS NonCommercial License (CNCL)"
license_url: "https://s.veen.world/cncl"
company: |
Kevin Veen-Birkenbach
Consulting & Coaching Solutions
https://www.veen.world
min_ansible_version: "2.9"
platforms:
- name: Linux
versions:
- all
galaxy_tags:
- security
- ssh
- system
repository: "https://s.veen.world/cymais"
issue_tracker_url: "https://s.veen.world/cymaisissues"
documentation: "https://s.veen.world/cymais"
dependencies:
- core-sshd


@@ -1 +0,0 @@
application_id: security


@@ -1 +0,0 @@
application_id: sshd


@@ -1 +0,0 @@
application_id: sudo


@@ -10,7 +10,7 @@ Targeting user environments on Arch Linux (e.g., Manjaro), this role sets up the
 ## Purpose
 
-The purpose of this role is to automate the configuration of cloud-integrated user directories by ensuring that common folders like `Downloads`, `Music`, and `Workspaces` are transparently redirected into a centralized cloud structure. This makes it easier to maintain bkp-friendly, cloud-ready setups for homelab and professional workflows.
+The purpose of this role is to automate the configuration of cloud-integrated user directories by ensuring that common folders like `Downloads`, `Music`, and `Workspaces` are transparently redirected into a centralized cloud structure. This makes it easier to maintain sys-bkp-friendly, cloud-ready setups for homelab and professional workflows.
 
 ## Features


@@ -27,9 +27,9 @@ galaxy_info:
   documentation: "https://s.veen.world/cymais/docker"
 
 dependencies:
-  - bkp-docker-to-local
+  - sys-bkp-docker-to-local
   - user-administrator
-  - mon-bot-docker-container
-  - mon-bot-docker-volumes
-  - maint-docker-heal
-  - maint-docker-restart
+  - sys-hlth-docker-container
+  - sys-hlth-docker-volumes
+  - sys-rpr-docker-soft
+  - sys-rpr-docker-hard


@@ -16,7 +16,7 @@
     group: administrator
   when: run_once_docker is not defined
 
-- name: Set docker_enabled to true, to activate cln-docker-storage-optimizer
+- name: Set docker_enabled to true, to activate sys-opt-ssd-hdd
   set_fact:
     docker_enabled: true
   when: run_once_docker is not defined


@@ -9,4 +9,4 @@ galaxy_info:
       - all
 dependencies:
   - gen-aur-helper
-  - alert-compose
+  - sys-alm-compose


@@ -25,9 +25,9 @@
   set_fact:
     service_name: "{{ role_name }}"
 
-- name: "include role for gen-timer for {{service_name}}"
+- name: "include role for sys-timer for {{service_name}}"
   include_role:
-    name: gen-timer
+    name: sys-timer
   vars:
     on_calendar: "{{on_calendar_msi_keyboard_color}}"
     persistent: "true"


@@ -1,6 +1,6 @@
 [Unit]
 Description=Keyboard Color Service
-OnFailure=alert-compose.cymais@%n.service
+OnFailure=sys-alm-compose.cymais@%n.service
 
 [Service]
 Type=oneshot


@@ -1 +0,0 @@
application_id: msmtp


@@ -1 +0,0 @@
application_id: timer


@@ -1,4 +0,0 @@
- name: "reload maint-btrfs-auto-balancer.cymais.service"
systemd:
name: maint-btrfs-auto-balancer.cymais.service
daemon_reload: yes


@@ -1,5 +0,0 @@
- name: restart maint-docker-heal.cymais.service
systemd:
name: maint-docker-heal.cymais.service
state: restarted
daemon_reload: yes


@@ -1,8 +0,0 @@
[Unit]
Description=restart unhealthy docker containers
OnFailure=alert-compose.cymais@%n.service
[Service]
Type=oneshot
ExecStartPre=/bin/sh -c '/usr/bin/python {{ path_system_lock_script }} {{ system_maintenance_services | join(' ') }} --ignore {{system_maintenance_cleanup_services| join(' ') }} maint-docker-heal --timeout "{{system_maintenance_lock_timeout_heal_docker}}"'
ExecStart=/bin/sh -c '/bin/python {{heal_docker}}maint-docker-heal.py {{path_docker_compose_instances}}'


@@ -1,2 +0,0 @@
heal_docker: '{{path_administrator_scripts}}maint-docker-heal/'
application_id: docker-heal


@@ -1,5 +0,0 @@
- name: "reload maint-docker-restart.cymais.service"
systemd:
name: maint-docker-restart.cymais.service
enabled: yes
daemon_reload: yes


@@ -1,3 +0,0 @@
restart_docker_folder: '{{path_administrator_scripts}}maint-docker-restart/'
restart_docker_script: '{{restart_docker_folder}}maint-docker-restart.py'
application_id: docker-restart


@@ -1 +0,0 @@
application_id: lock


@@ -1 +0,0 @@
application_id: swapfile


@@ -1,5 +0,0 @@
- name: "reload mon-bot-btrfs.cymais.service"
systemd:
name: mon-bot-btrfs.cymais.service
enabled: yes
daemon_reload: yes


@@ -1,26 +0,0 @@
- name: "create {{docker_health_btrfs_folder}}"
file:
path: "{{docker_health_btrfs_folder}}"
state: directory
mode: 0755
- name: create mon-bot-btrfs.sh
copy:
src: mon-bot-btrfs.sh
dest: "{{docker_health_btrfs_folder}}mon-bot-btrfs.sh"
- name: create mon-bot-btrfs.cymais.service
template:
src: mon-bot-btrfs.service.j2
dest: /etc/systemd/system/mon-bot-btrfs.cymais.service
notify: reload mon-bot-btrfs.cymais.service
- name: "set 'service_name' to '{{ role_name }}'"
set_fact:
service_name: "{{ role_name }}"
- name: "include role for gen-timer for {{service_name}}"
include_role:
name: gen-timer
vars:
on_calendar: "{{on_calendar_health_btrfs}}"


@@ -1,7 +0,0 @@
[Unit]
Description=Check btrfs status
OnFailure=alert-compose.cymais@%n.service
[Service]
Type=oneshot
ExecStart=/bin/bash {{docker_health_btrfs_folder}}mon-bot-btrfs.sh


@@ -1,2 +0,0 @@
docker_health_btrfs_folder: '{{path_administrator_scripts}}mon-bot-btrfs/'
application_id: bot-btrfs


@@ -1,5 +0,0 @@
- name: "reload mon-bot-csp.cymais.service"
systemd:
name: mon-bot-csp.cymais.service
enabled: yes
daemon_reload: yes


@@ -1,3 +0,0 @@
health_csp_crawler_folder: '{{ path_administrator_scripts }}mon-bot-csp/'
health_csp_crawler_script: '{{ health_csp_crawler_folder }}mon-bot-csp.py'
application_id: bot-csp


@@ -1,5 +0,0 @@
- name: "reload mon-bot-disc-space.cymais.service"
systemd:
name: mon-bot-disc-space.cymais.service
enabled: yes
daemon_reload: yes


@@ -1,26 +0,0 @@
- name: "create {{health_disc_space_folder}}"
file:
path: "{{health_disc_space_folder}}"
state: directory
mode: 0755
- name: create mon-bot-disc-space.sh
copy:
src: mon-bot-disc-space.sh
dest: "{{health_disc_space_folder}}mon-bot-disc-space.sh"
- name: create mon-bot-disc-space.cymais.service
template:
src: mon-bot-disc-space.service.j2
dest: /etc/systemd/system/mon-bot-disc-space.cymais.service
notify: reload mon-bot-disc-space.cymais.service
- name: "set 'service_name' to '{{ role_name }}'"
set_fact:
service_name: "{{ role_name }}"
- name: "include role for gen-timer for {{service_name}}"
include_role:
name: gen-timer
vars:
on_calendar: "{{on_calendar_health_disc_space}}"


@@ -1,7 +0,0 @@
[Unit]
Description=checking disc space
OnFailure=alert-compose.cymais@%n.service
[Service]
Type=oneshot
ExecStart=/bin/bash {{health_disc_space_folder}}mon-bot-disc-space.sh {{size_percent_disc_space_warning}}


@@ -1,2 +0,0 @@
health_disc_space_folder: '{{path_administrator_scripts}}mon-bot-disc-space/'
application_id: bot-disc-space


@@ -1,5 +0,0 @@
- name: "reload mon-bot-docker-container.cymais.service"
systemd:
name: mon-bot-docker-container.cymais.service
enabled: yes
daemon_reload: yes


@@ -1,7 +0,0 @@
[Unit]
Description=Checking docker health
OnFailure=alert-compose.cymais@%n.service
[Service]
Type=oneshot
ExecStart=/bin/bash {{health_docker_container_folder}}mon-bot-docker-container.sh


@@ -1,2 +0,0 @@
health_docker_container_folder: '{{path_administrator_scripts}}mon-bot-docker-container/'
application_id: bot-docker-container


@@ -1,5 +0,0 @@
- name: "reload mon-bot-docker-volumes.cymais.service"
systemd:
name: mon-bot-docker-volumes.cymais.service
enabled: yes
daemon_reload: yes


@@ -1,7 +0,0 @@
[Unit]
Description=Checking docker health
OnFailure=alert-compose.cymais@%n.service
[Service]
Type=oneshot
ExecStart=/bin/bash {{ health_docker_volumes_folder }}mon-bot-docker-volumes.sh "{{ whitelisted_anonymous_docker_volumes | join(' ') }}"


@@ -1,2 +0,0 @@
health_docker_volumes_folder: '{{path_administrator_scripts}}mon-bot-docker-volumes/'
application_id: bot-docker-volumes


@@ -1,5 +0,0 @@
- name: "reload mon-bot-journalctl.cymais.service"
systemd:
name: mon-bot-journalctl.cymais.service
enabled: yes
daemon_reload: yes


@@ -1,7 +0,0 @@
[Unit]
Description=checking journalctl health
OnFailure=alert-compose.cymais@%n.service
[Service]
Type=oneshot
ExecStart=/bin/bash {{health_journalctl_folder}}mon-bot-journalctl.sh


@@ -1,2 +0,0 @@
health_journalctl_folder: '{{path_administrator_scripts}}mon-bot-journalctl/'
application_id: bot-journalctl


@@ -1,21 +0,0 @@
# mon-bot-msmtp
## Description
This Ansible role sends periodic health check emails using **msmtp** to verify that your mail transport agent is operational. It deploys a simple script and hooks it into a systemd service and timer, with failure notifications sent via Telegram.
## Overview
Optimized for Archlinux, this role creates the required directory structure, installs and configures the mon-bot-check script, and integrates with the **alert-telegram** role. It uses the **gen-timer** role to schedule regular checks based on your customizable `OnCalendar` setting.
## Purpose
The **mon-bot-msmtp** role ensures that your mail transport system stays available by sending a test email at defined intervals. If the email fails, a Telegram alert is triggered, allowing you to detect and address issues before they impact users.
## Features
- **Directory & Script Deployment:** Sets up `mon-bot-msmtp/` and deploys a templated Bash script to send test emails via msmtp.
- **Systemd Service & Timer:** Provides `.service` and `.timer` units to run the check and schedule it automatically.
- **Failure Notifications:** Leverages **alert-telegram** to push alerts when the script exits with an error.
- **Configurable Schedule:** Define your desired check frequency using the `on_calendar_health_msmtp` variable.
- **Email Destination:** Specify the recipient via the `users.administrator.email` variable.


@@ -1,5 +0,0 @@
- name: reload mon-bot-msmtp.cymais.service
systemd:
name: mon-bot-msmtp.cymais.service
enabled: yes
daemon_reload: yes


@@ -1,27 +0,0 @@
- name: "create {{ health_msmtp_folder }}"
file:
path: "{{ health_msmtp_folder }}"
state: directory
mode: 0755
- name: create mon-bot-msmtp.sh
template:
src: mon-bot-msmtp.sh.j2
dest: "{{ health_msmtp_folder }}mon-bot-msmtp.sh"
mode: '0755'
- name: create mon-bot-msmtp.cymais.service
template:
src: mon-bot-msmtp.service.j2
dest: /etc/systemd/system/mon-bot-msmtp.cymais.service
notify: reload mon-bot-msmtp.cymais.service
- name: "set 'service_name' to '{{ role_name }}'"
set_fact:
service_name: "{{ role_name }}"
- name: include role for gen-timer for {{ service_name }}
include_role:
name: gen-timer
vars:
on_calendar: "{{ on_calendar_health_msmtp }}"


@@ -1,7 +0,0 @@
[Unit]
Description=Check msmtp liveliness
OnFailure=alert-telegram.cymais@%n.service
[Service]
Type=oneshot
ExecStart=/bin/bash {{ health_msmtp_folder }}mon-bot-msmtp.sh


@@ -1,2 +0,0 @@
health_msmtp_folder: '{{ path_administrator_scripts }}mon-bot-msmtp/'
application_id: bot-msmtp


@@ -1,5 +0,0 @@
- name: "reload mon-bot-webserver.cymais.service"
systemd:
name: mon-bot-webserver.cymais.service
enabled: yes
daemon_reload: yes


@@ -1,7 +0,0 @@
[Unit]
Description=Check nginx configuration status
OnFailure=alert-compose.cymais@%n.service
[Service]
Type=oneshot
ExecStart=/usr/bin/python3 {{ health_nginx_folder }}mon-bot-webserver.py


@@ -1,2 +0,0 @@
health_nginx_folder: '{{ path_administrator_scripts }}mon-bot-webserver/'
application_id: bot-webserver


@@ -1 +0,0 @@
application_id: dns-records


@@ -13,7 +13,7 @@ This Ansible role simplifies the deployment of **Let's Encrypt certificates** in
 - Automatically **reloads or restarts Nginx services** when certificates are updated.
 - **Configures and manages a `systemd` service** for automated certificate deployment.
 - **Includes a `systemd` timer** for scheduled renewals.
-- **Handles dependent services** like `alert-compose`.
+- **Handles dependent services** like `sys-alm-compose`.
 
 ---
@@ -29,7 +29,7 @@ This Ansible role simplifies the deployment of **Let's Encrypt certificates** in
 3. **Configure `systemd` Service**
    - Deploys a `systemd` service file for the deployment process.
 
-4. **Include `gen-timer` Role**
+4. **Include `sys-timer` Role**
    - Schedules automatic certificate deployment using a `systemd` timer.
 
 ### **2⃣ Handlers**


@@ -21,5 +21,5 @@ galaxy_info:
     - name: Any
       versions: [ all ]
 dependencies:
-  - alert-compose
+  - sys-alm-compose


@@ -23,9 +23,9 @@
     dest: "/etc/systemd/system/srv-proxy-6-6-tls-deploy.{{application_id}}.cymais.service"
   notify: restart srv-proxy-6-6-tls-deploy.cymais.service
 
-- name: "include role for gen-timer for {{service_name}}"
+- name: "include role for sys-timer for {{service_name}}"
   include_role:
-    name: gen-timer
+    name: sys-timer
   vars:
     on_calendar: "{{on_calendar_deploy_certificates}}"
     service_name: "srv-proxy-6-6-tls-deploy.{{application_id}}"


@@ -1,6 +1,6 @@
 [Unit]
 Description=Let's Encrypt deploy to {{docker_compose.directories.instance}}
-OnFailure=alert-compose.cymais@%n.service
+OnFailure=sys-alm-compose.cymais@%n.service
 
 [Service]
 Type=oneshot


@@ -13,7 +13,7 @@ server
 {{ proxy_extra_configuration }}
 {% endif %}
 
-{% include 'roles/net-letsencrypt/templates/ssl_header.j2' %}
+{% include 'roles/srv-web-7-7-letsencrypt/templates/ssl_header.j2' %}
 
 {% if applications | is_feature_enabled('oauth2', application_id) %}
 {% set acl = applications[application_id].oauth2_proxy.acl | default({}) %}


@@ -6,7 +6,7 @@ map $http_upgrade $connection_upgrade {
 server {
     server_name {{ domain }};
 
-    {% include 'roles/net-letsencrypt/templates/ssl_header.j2' %}
+    {% include 'roles/srv-web-7-7-letsencrypt/templates/ssl_header.j2' %}
 
     {% include 'roles/srv-web-7-7-inj-compose/templates/global.includes.conf.j2' %}
     client_max_body_size {{ client_max_body_size | default('100m') }};


@@ -6,12 +6,12 @@ This role automates the setup of an automatic [Let's Encrypt](https://letsencryp
 
 ## 📖 Overview
 
-Optimized for Archlinux systems, this role installs the `certbot-nginx` package, configures a dedicated `systemd` service for certificate renewal, and integrates with a `gen-timer` to schedule periodic renewals. After a renewal, Nginx is reloaded to apply the updated certificates immediately.
+Optimized for Archlinux systems, this role installs the `certbot-nginx` package, configures a dedicated `systemd` service for certificate renewal, and integrates with a `sys-timer` to schedule periodic renewals. After a renewal, Nginx is reloaded to apply the updated certificates immediately.
 
 ### Key Features
-- **Automatic Renewal:** Schedules unattended certificate renewals using gen-timers.
+- **Automatic Renewal:** Schedules unattended certificate renewals using sys-timers.
 - **Seamless Nginx Reload:** Reloads the Nginx service automatically after successful renewals.
-- **Systemd Integration:** Manages renewal operations reliably with `systemd` and `alert-compose`.
+- **Systemd Integration:** Manages renewal operations reliably with `systemd` and `sys-alm-compose`.
 - **Quiet and Safe Operation:** Uses `--quiet` and `--agree-tos` flags to ensure non-interactive renewals.
 
 ## 🎯 Purpose
@@ -22,8 +22,8 @@ The Nginx Certbot Automation role ensures that Let's Encrypt SSL/TLS certificate
 
 - **Certbot-Nginx Package Installation:** Installs required certbot plugins for Nginx.
 - **Custom Systemd Service:** Configures a lightweight, dedicated renewal service.
-- **Timer Setup:** Uses gen-timer to run certbot renewals periodically.
-- **Failure Notification:** Integrated with `alert-compose` for alerting on failures.
+- **Timer Setup:** Uses sys-timer to run certbot renewals periodically.
+- **Failure Notification:** Integrated with `sys-alm-compose` for alerting on failures.
 
 ## 🔗 Learn More

Some files were not shown because too many files have changed in this diff.