Mirror of https://github.com/kevinveenbirkenbach/computer-playbook.git, synced 2025-09-08 03:07:14 +02:00
Compare commits: 60ab31c623 ... 072ad6f186 (8 commits)

Commits in this range:
072ad6f186
23bbe0520c
3c63936970
9fa39e5f25
b494b80520
691b204512
7fba13b550
f9b3fb8cfa
.gitignore (vendored, 4 changes)

@@ -2,4 +2,6 @@ site.retry
 *__pycache__
 venv
 *.log
-*.bak
+*.bak
+*tree.json
+roles/list.json
Makefile (6 changes)

@@ -3,7 +3,7 @@ APPLICATIONS_OUT := ./group_vars/all/04_applications.yml
 APPLICATIONS_SCRIPT := ./cli/generate/defaults/applications.py
 USERS_OUT := ./group_vars/all/03_users.yml
 USERS_SCRIPT := ./cli/generate/defaults/users.py
-INCLUDES_SCRIPT := ./cli/generate/conditional_role_include.py
+INCLUDES_SCRIPT := ./cli/generate/role_include.py
 
 INCLUDE_GROUPS := $(shell python3 main.py meta invokable_paths -s "-" --no-signal | tr '\n' ' ')
 
@@ -21,6 +21,10 @@ EXTRA_USERS := $(shell \
 
 .PHONY: build install test
 
+tree:
+	@echo Generating Tree
+	python3 main.py generate tree -L 1
+
 build:
 	@echo "🔧 Generating users defaults → $(USERS_OUT)…"
 	python3 $(USERS_SCRIPT) \
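The new `tree` target wires the tree generator into the build workflow. As a usage sketch (assuming the repository root as working directory, since the target shells out to `main.py` there):

```bash
# Regenerate per-role tree data via the new Makefile target
make tree

# Equivalent direct call, exactly as the target runs it
python3 main.py generate tree -L 1
```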
cli/generate/graph.py (new file, 187 lines)

@@ -0,0 +1,187 @@
#!/usr/bin/env python3
import os
import argparse
import yaml
import json
from collections import deque
from typing import List, Dict, Any, Set


def find_role_meta(roles_dir: str, role: str) -> str:
    path = os.path.join(roles_dir, role, 'meta', 'main.yml')
    if not os.path.isfile(path):
        raise FileNotFoundError(f"Metadata not found for role: {role}")
    return path


def load_meta(path: str) -> Dict[str, Any]:
    """
    Load meta/main.yml → return galaxy_info + run_after + dependencies
    """
    with open(path, 'r') as f:
        data = yaml.safe_load(f) or {}

    galaxy_info = data.get('galaxy_info', {}) or {}
    return {
        'galaxy_info': galaxy_info,
        'run_after': galaxy_info.get('run_after', []) or [],
        'dependencies': data.get('dependencies', []) or []
    }


def build_single_graph(
    start_role: str,
    dep_type: str,
    direction: str,
    roles_dir: str,
    max_depth: int
) -> Dict[str, Any]:
    """
    Build one graph for one dependency type and direction:
      - 'to': follow edges source→target
      - 'from': reverse edges (find roles listing this role)
      - max_depth > 0: limit hops to max_depth
      - max_depth ≤ 0: stop when you’d revisit a node already on the path
    """
    nodes: Dict[str, Dict[str, Any]] = {}
    links: List[Dict[str, str]] = []

    def traverse(role: str, depth: int, path: Set[str]):
        # Register node once
        if role not in nodes:
            meta = load_meta(find_role_meta(roles_dir, role))
            node = {'id': role}
            node.update(meta['galaxy_info'])
            node['doc_url'] = f"https://docs.cymais.cloud/roles/{role}/README.html"
            node['source_url'] = (
                f"https://github.com/kevinveenbirkenbach/cymais/tree/master/roles/{role}"
            )
            nodes[role] = node

        # Depth guard
        if max_depth > 0 and depth >= max_depth:
            return

        # Determine neighbors according to direction
        if direction == 'to':
            neighbors = load_meta(find_role_meta(roles_dir, role)).get(dep_type, [])
            for tgt in neighbors:
                links.append({'source': role, 'target': tgt, 'type': dep_type})
                # General cycle check
                if tgt in path:
                    continue
                traverse(tgt, depth + 1, path | {tgt})

        else:  # direction == 'from'
            # Find all roles that list this role in their dep_type
            for other in os.listdir(roles_dir):
                try:
                    meta_o = load_meta(find_role_meta(roles_dir, other))
                except FileNotFoundError:
                    continue
                if role in meta_o.get(dep_type, []):
                    links.append({'source': other, 'target': role, 'type': dep_type})
                    if other in path:
                        continue
                    traverse(other, depth + 1, path | {other})

    # Kick off recursion
    traverse(start_role, depth=0, path={start_role})
    return {'nodes': list(nodes.values()), 'links': links}


def build_mappings(
    start_role: str,
    mappings: List[Dict[str, str]],
    roles_dir: str,
    max_depth: int
) -> Dict[str, Any]:
    result: Dict[str, Any] = {}
    for mapping in mappings:
        for dep_type, direction in mapping.items():
            key = f"{dep_type}_{direction}"
            result[key] = build_single_graph(
                start_role, dep_type, direction, roles_dir, max_depth)
    return result


def output_graph(graph_data: Any, fmt: str, start: str, key: str):
    base = f"{start}_{key}"
    if fmt == 'console':
        print(f"--- {base} ---")
        print(yaml.safe_dump(graph_data, sort_keys=False))
    elif fmt in ('yaml', 'json'):
        path = f"{base}.{fmt}"
        with open(path, 'w') as f:
            if fmt == 'yaml':
                yaml.safe_dump(graph_data, f, sort_keys=False)
            else:
                json.dump(graph_data, f, indent=2)
        print(f"Wrote {path}")
    else:
        raise ValueError(f"Unknown format: {fmt}")


def main():
    script_dir = os.path.dirname(os.path.abspath(__file__))
    default_roles_dir = os.path.abspath(os.path.join(script_dir, '..', '..', 'roles'))

    parser = argparse.ArgumentParser(
        description="Generate graphs based on dependency mappings"
    )
    parser.add_argument(
        '-r', '--role',
        required=True,
        help="Starting role name"
    )
    parser.add_argument(
        '-m', '--mapping',
        nargs='+',
        default=[
            'run_after:to',
            'run_after:from',
            'dependencies:to',
            'dependencies:from'
        ],
        help="Mapping entries as type:direction (default all 4 combos)"
    )
    parser.add_argument(
        '-D', '--depth',
        type=int,
        default=0,
        help="Max recursion depth (>0) or <=0 to stop on cycle"
    )
    parser.add_argument(
        '-o', '--output',
        choices=['yaml', 'json', 'console'],
        default='console',
        help="Output format"
    )
    parser.add_argument(
        '--roles-dir',
        default=default_roles_dir,
        help="Roles directory"
    )
    args = parser.parse_args()

    mappings: List[Dict[str, str]] = []
    for entry in args.mapping:
        if ':' not in entry:
            parser.error(f"Invalid mapping '{entry}', must be type:direction")
        dep_type, direction = entry.split(':', 1)
        if dep_type not in ('run_after', 'dependencies'):
            parser.error(f"Unknown dependency type '{dep_type}'")
        if direction not in ('to', 'from'):
            parser.error(f"Unknown direction '{direction}'")
        mappings.append({dep_type: direction})

    graphs = build_mappings(
        start_role=args.role,
        mappings=mappings,
        roles_dir=args.roles_dir,
        max_depth=args.depth
    )

    for key, graph_data in graphs.items():
        output_graph(graph_data, args.output, args.role, key)


if __name__ == '__main__':
    main()
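A usage sketch for the new script, built only from the argparse options defined above; `web-app-matomo` stands in here for any role name:

```bash
# Forward run_after graph for one role, at most two hops deep,
# written to web-app-matomo_run_after_to.json in the current directory
python3 cli/generate/graph.py -r web-app-matomo -m run_after:to -D 2 -o json

# Default: all four type:direction combos, dumped to the console as YAML
python3 cli/generate/graph.py -r web-app-matomo
```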
cli/generate/roles_list.py (new file, 65 lines)

@@ -0,0 +1,65 @@
#!/usr/bin/env python3
"""
Generate a JSON file listing all Ansible role directories.

Usage:
  python roles_list.py [--roles-dir path/to/roles] [--output path/to/roles/list.json | console]
"""
import os
import json
import argparse


def find_roles(roles_dir: str):
    """Return sorted list of role names under roles_dir."""
    return sorted([
        entry for entry in os.listdir(roles_dir)
        if os.path.isdir(os.path.join(roles_dir, entry))
    ])


def write_roles_list(roles, out_file):
    """Write the list of roles to out_file as JSON."""
    os.makedirs(os.path.dirname(out_file), exist_ok=True)
    with open(out_file, 'w', encoding='utf-8') as f:
        json.dump(roles, f, indent=2)
    print(f"Wrote roles list to {out_file}")


def main():
    # Determine default roles_dir relative to this script: ../../.. -> roles
    script_dir = os.path.dirname(os.path.abspath(__file__))
    default_roles_dir = os.path.abspath(
        os.path.join(script_dir, '..', '..', 'roles')
    )
    default_output = os.path.join(default_roles_dir, 'list.json')

    parser = argparse.ArgumentParser(description='Generate roles/list.json')
    parser.add_argument(
        '--roles-dir', '-r',
        default=default_roles_dir,
        help=f'Directory containing role subfolders (default: {default_roles_dir})'
    )
    parser.add_argument(
        '--output', '-o',
        default=default_output,
        help=(
            'Output path for roles list JSON '
            '(or "console" to print to stdout, default: %(default)s)'
        )
    )
    args = parser.parse_args()

    if not os.path.isdir(args.roles_dir):
        parser.error(f"Roles directory not found: {args.roles_dir}")

    roles = find_roles(args.roles_dir)

    if args.output.lower() == 'console':
        # Print JSON to stdout
        print(json.dumps(roles, indent=2))
    else:
        write_roles_list(roles, args.output)


if __name__ == '__main__':
    main()
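Usage follows the docstring above; note that the default output path `roles/list.json` is exactly what the updated `.gitignore` now excludes:

```bash
# Write roles/list.json next to the role folders
python3 cli/generate/roles_list.py

# Or print the JSON array to stdout instead
python3 cli/generate/roles_list.py --output console
```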
cli/generate/tree.py (new file, 109 lines)

@@ -0,0 +1,109 @@
#!/usr/bin/env python3
import os
import argparse
import json
from typing import List, Dict, Any

from cli.generate.graph import build_mappings, output_graph


def find_roles(roles_dir: str):
    """Yield (role_name, role_path) for every subfolder in roles_dir."""
    for entry in os.listdir(roles_dir):
        path = os.path.join(roles_dir, entry)
        if os.path.isdir(path):
            yield entry, path


def main():
    # default roles dir is ../../roles relative to this script
    script_dir = os.path.dirname(os.path.abspath(__file__))
    default_roles_dir = os.path.abspath(os.path.join(script_dir, '..', '..', 'roles'))

    parser = argparse.ArgumentParser(
        description="Generate mappings-based graphs for each role and write tree.json"
    )
    parser.add_argument(
        '-d', '--role_dir',
        default=default_roles_dir,
        help=f"Path to roles directory (default: {default_roles_dir})"
    )
    parser.add_argument(
        '-m', '--mapping',
        nargs='+',
        default=[
            'run_after:to', 'run_after:from',
            'dependencies:to', 'dependencies:from'
        ],
        help="Mapping entries as type:direction (default all 4 combos)"
    )
    parser.add_argument(
        '-D', '--depth',
        type=int,
        default=0,
        help="Max recursion depth (>0) or <=0 to stop on cycle"
    )
    parser.add_argument(
        '-o', '--output',
        choices=['yaml', 'json', 'console'],
        default='json',
        help="Output format"
    )
    parser.add_argument(
        '-p', '--preview',
        action='store_true',
        help="Preview graphs to console instead of writing files"
    )
    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        help="Enable verbose logging"
    )
    args = parser.parse_args()

    # parse mappings
    mappings: List[Dict[str, str]] = []
    for entry in args.mapping:
        if ':' not in entry:
            parser.error(f"Invalid mapping '{entry}', must be type:direction")
        dep_type, direction = entry.split(':', 1)
        mappings.append({dep_type: direction})

    if args.verbose:
        print(f"Roles directory: {args.role_dir}")
        print(f"Mappings: {mappings}")
        print(f"Max depth: {args.depth}")
        print(f"Output format: {args.output}")
        print(f"Preview mode: {args.preview}")

    for role_name, role_path in find_roles(args.role_dir):
        if args.verbose:
            print(f"Processing role: {role_name}")

        # Build graphs for each mapping
        graphs = build_mappings(
            start_role=role_name,
            mappings=mappings,
            roles_dir=args.role_dir,
            max_depth=args.depth
        )

        # Prepare output file or previews
        if args.preview:
            for key, data in graphs.items():
                if args.verbose:
                    print(f"Previewing graph '{key}' for role '{role_name}'")
                output_graph(data, 'console', role_name, key)
        else:
            tree_file = os.path.join(role_path, 'meta', 'tree.json')
            os.makedirs(os.path.dirname(tree_file), exist_ok=True)
            # write combined JSON
            with open(tree_file, 'w') as f:
                json.dump(graphs, f, indent=2)
            if args.verbose:
                print(f"Wrote {tree_file}")
            else:
                print(f"Wrote {tree_file}")


if __name__ == '__main__':
    main()
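Because the script imports `cli.generate.graph`, it needs the project root on the import path; a minimal sketch, assuming it is launched as a module from the repository root:

```bash
# Write meta/tree.json into every role (ignored via the new *tree.json rule)
python3 -m cli.generate.tree

# Preview all graphs on the console without touching the role folders
python3 -m cli.generate.tree --preview --verbose
```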
@@ -9,12 +9,12 @@ defaults_service_provider:
   city: "Cybertown"
   postal_code: "00001"
   country: "Nexusland"
-  logo: "{{ applications['assets-server'].url ~ '/img/logo.png' }}"
+  logo: "{{ applications['asset'].url ~ '/img/logo.png' }}"
   platform:
     titel: "CyMaIS"
     subtitel: "One login. Infinite applications."
-    logo: "{{ applications['assets-server'].url ~ '/img/logo.png' }}"
-    favicon: "{{ applications['assets-server'].url ~ '/img/favicon.ico' }}"
+    logo: "{{ applications['asset'].url ~ '/img/logo.png' }}"
+    favicon: "{{ applications['asset'].url ~ '/img/favicon.ico' }}"
   contact:
     bluesky: >-
       {{ ('@' ~ users.contact.username ~ '.' ~ domains.bluesky.api)

@@ -30,4 +30,4 @@ defaults_service_provider:
   legal:
     editorial_responsible: "Johannes Gutenberg"
     source_code: "https://github.com/kevinveenbirkenbach/cymais"
-    imprint: "{{web_protocol}}://{{ domains | get_domain('html-server') }}/imprint.html"
+    imprint: "{{web_protocol}}://{{ domains | get_domain('html') }}/imprint.html"
main.py (4 changes)

@@ -185,11 +185,11 @@ if __name__ == "__main__":
         Fore.CYAN
     ))
     print(color_text(
-        " e.g. `cymais generate defaults applications` →",
+        " e.g. `cymais generate defaults users` →",
         Fore.CYAN
     ))
     print(color_text(
-        " corresponds to `cli/generate/defaults/applications.py`.",
+        " corresponds to `cli/generate/defaults/users.py`.",
         Fore.CYAN
     ))
     print()
roles/alert-compose/vars/main.yml (new file)
@@ -0,0 +1 @@
application_id: compose

@@ -1 +1,2 @@
-systemd_notifier_email_folder: "{{path_administrator_scripts}}alert-email/"
+systemd_notifier_email_folder: '{{path_administrator_scripts}}alert-email/'
+application_id: email

@@ -1,2 +1,3 @@
-systemd_telegram_folder: "/opt/ansible-roles/alert-telegram/"
-systemd_telegram_script: "{{systemd_telegram_folder}}alert-telegram.sh"
+systemd_telegram_folder: /opt/ansible-roles/alert-telegram/
+systemd_telegram_script: '{{systemd_telegram_folder}}alert-telegram.sh'
+application_id: telegram

@@ -1,2 +1,2 @@
 docker_backup_remote_to_local_folder: '{{path_administrator_scripts}}bkp-remote-to-local/'
-application_id: remote-to-local
+application_id: bkp-remote-to-local
@@ -3,7 +3,7 @@ roles:
     title: "Core & System"
     description: "Fundamental system configuration"
     icon: "fas fa-cogs"
-    invokable: false
+    invokable: true
   drv:
     title: "Drivers"
     description: "Roles for installing and configuring hardware drivers—covering printers, graphics, input devices, and other peripheral support."

@@ -13,7 +13,7 @@ roles:
     title: "Generic"
     description: "Helper roles & installers (git, locales, timer, etc.)"
     icon: "fas fa-wrench"
-    invokable: false
+    invokable: true
   desk:
     title: "Desktop"
     description: "Desktop environment roles & apps (GNOME, browser, LibreOffice, etc.)"

@@ -77,12 +77,12 @@ roles:
     title: "Monitoring"
     description: "Roles for system monitoring and health checks—encompassing bot-style automated checks and core low-level monitors for logs, containers, disk usage, and more."
     icon: "fas fa-chart-area"
-    invokable: false
+    invokable: true
   alert:
     title: "Alerting"
     description: "Notification handlers for system events"
     icon: "fas fa-bell"
-    invokable: false
+    invokable: true
   maint:
     title: "Maintenance & Healing"
     description: "Periodic maintenance & auto-recovery"
@@ -1,5 +1,5 @@
 # The following env file will just be used from the dedicated mariadb container
-# and not the {{applications.mariadb.hostname }}-database
+# and not the {{applications['rdbms-mariadb'].hostname }}-database
 - name: "Create {{database_env}}"
   template:
     src: "env/{{database_type}}.env.j2"

@@ -1,7 +1,7 @@
 # This template needs to be included in docker-compose.yml, which depend on a postgres database
 {% if not applications | is_feature_enabled('central_database',application_id) %}
   {{ database_host }}:
-    image: postgres:{{applications.postgres.version}}-alpine
+    image: postgres:{{applications['rdbms-postgres'].version}}-alpine
     container_name: {{application_id}}-database
     env_file:
       - {{database_env}}

@@ -3,7 +3,7 @@ database_host: "{{ 'central-' + database_type if applications | is_feature_
 database_name: "{{ applications[database_application_id].database.name | default( database_application_id ) }}" # The overwritte configuration is needed by bigbluebutton
 database_username: "{{ applications[database_application_id].database.username | default( database_application_id )}}" # The overwritte configuration is needed by bigbluebutton
 database_password: "{{ applications[database_application_id].credentials.database_password }}"
-database_port: "{{ 3306 if database_type == 'mariadb' else applications.postgres.port }}"
+database_port: "{{ 3306 if database_type == 'mariadb' else applications['rdbms-postgres'].port }}"
 database_env: "{{docker_compose.directories.env}}{{database_type}}.env"
 database_url_jdbc: "jdbc:{{ database_type if database_type == 'mariadb' else 'postgresql' }}://{{ database_host }}:{{ database_port }}/{{ database_name }}"
 database_url_full: "{{database_type}}://{{database_username}}:{{database_password}}@{{database_host}}:{{database_port}}/{{ database_name }}"
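To make the composition above concrete, here is a hypothetical rendering for a PostgreSQL-backed app with the central database enabled (the port value 5432 and the app name `discourse` are illustrative, not taken from this diff):

```yaml
# Hypothetical rendered values, assuming database_type: postgres,
# applications['rdbms-postgres'].port: 5432 and central_database enabled
database_host:     "central-postgres"
database_port:     "5432"
database_url_jdbc: "jdbc:postgresql://central-postgres:5432/discourse"
database_url_full: "postgres://discourse:<password>@central-postgres:5432/discourse"
```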
roles/core-daemon/vars/main.yml (new file)
@@ -0,0 +1 @@
application_id: daemon

roles/core-journalctl/vars/main.yml (new file)
@@ -0,0 +1 @@
application_id: journalctl

roles/core-security/vars/main.yml (new file)
@@ -0,0 +1 @@
application_id: security

roles/core-sshd/vars/main.yml (new file)
@@ -0,0 +1 @@
application_id: sshd

roles/core-sudo/vars/main.yml (new file)
@@ -0,0 +1 @@
application_id: sudo

@@ -1 +1 @@
-application_id: git
+application_id: desk-git
roles/gen-aur-helper/vars/main.yml (new file)
@@ -0,0 +1 @@
application_id: aur-helper

roles/gen-certbot/vars/main.yml (new file)
@@ -0,0 +1 @@
application_id: certbot

roles/gen-fakeroot/vars/main.yml (new file)
@@ -0,0 +1 @@
application_id: fakeroot

roles/gen-gcc/vars/main.yml (new file)
@@ -0,0 +1 @@
application_id: gcc

roles/gen-git/vars/main.yml (new file)
@@ -0,0 +1 @@
application_id: git

roles/gen-hostname/vars/main.yml (new file)
@@ -0,0 +1 @@
application_id: hostname

roles/gen-hunspell/vars/main.yml (new file)
@@ -0,0 +1 @@
application_id: hunspell

roles/gen-java/vars/main.yml (new file)
@@ -0,0 +1 @@
application_id: java

roles/gen-locales/vars/main.yml (new file)
@@ -0,0 +1 @@
application_id: locales

roles/gen-make/vars/main.yml (new file)
@@ -0,0 +1 @@
application_id: make

roles/gen-msmtp/vars/main.yml (new file)
@@ -0,0 +1 @@
application_id: msmtp

roles/gen-nodejs/vars/main.yml (new file)
@@ -0,0 +1 @@
application_id: nodejs

roles/gen-npm/vars/main.yml (new file)
@@ -0,0 +1 @@
application_id: npm

roles/gen-postfix/vars/main.yml (new file)
@@ -0,0 +1 @@
application_id: postfix

roles/gen-python-pip/vars/main.yml (new file)
@@ -0,0 +1 @@
application_id: python-pip

roles/gen-python-yaml/vars/main.yml (new file)
@@ -0,0 +1 @@
application_id: python-yaml

roles/gen-shell/vars/main.yml (new file)
@@ -0,0 +1 @@
application_id: shell

roles/gen-timer/vars/main.yml (new file)
@@ -0,0 +1 @@
application_id: timer
@@ -1 +1,2 @@
-docker_health_btrfs_folder: "{{path_administrator_scripts}}mon-bot-btrfs/"
+docker_health_btrfs_folder: '{{path_administrator_scripts}}mon-bot-btrfs/'
+application_id: bot-btrfs

@@ -1,2 +1,3 @@
-health_csp_crawler_folder: "{{ path_administrator_scripts }}mon-bot-csp/"
-health_csp_crawler_script: "{{ health_csp_crawler_folder }}mon-bot-csp.py"
+health_csp_crawler_folder: '{{ path_administrator_scripts }}mon-bot-csp/'
+health_csp_crawler_script: '{{ health_csp_crawler_folder }}mon-bot-csp.py'
+application_id: bot-csp

@@ -1 +1,2 @@
-health_disc_space_folder: "{{path_administrator_scripts}}mon-bot-disc-space/"
+health_disc_space_folder: '{{path_administrator_scripts}}mon-bot-disc-space/'
+application_id: bot-disc-space

@@ -1 +1,2 @@
-health_docker_container_folder: "{{path_administrator_scripts}}mon-bot-docker-container/"
+health_docker_container_folder: '{{path_administrator_scripts}}mon-bot-docker-container/'
+application_id: bot-docker-container

@@ -1 +1,2 @@
-health_docker_volumes_folder: "{{path_administrator_scripts}}mon-bot-docker-volumes/"
+health_docker_volumes_folder: '{{path_administrator_scripts}}mon-bot-docker-volumes/'
+application_id: bot-docker-volumes

@@ -1 +1,2 @@
-health_journalctl_folder: "{{path_administrator_scripts}}mon-bot-journalctl/"
+health_journalctl_folder: '{{path_administrator_scripts}}mon-bot-journalctl/'
+application_id: bot-journalctl

@@ -1 +1,2 @@
-health_msmtp_folder: "{{ path_administrator_scripts }}mon-bot-msmtp/"
+health_msmtp_folder: '{{ path_administrator_scripts }}mon-bot-msmtp/'
+application_id: bot-msmtp

@@ -1 +1,2 @@
-health_nginx_folder: "{{ path_administrator_scripts }}mon-bot-webserver/"
+health_nginx_folder: '{{ path_administrator_scripts }}mon-bot-webserver/'
+application_id: bot-webserver
@@ -2,5 +2,5 @@
 
 ## Execute SQL commands
 ```bash
-docker exec -it {{applications.mariadb.hostname }} mariadb -u root -p
+docker exec -it {{applications['rdbms-mariadb'].hostname }} mariadb -u root -p
 ```

@@ -8,11 +8,11 @@
 
 - name: install MariaDB
   docker_container:
-    name: "{{applications.mariadb.hostname }}"
-    image: "mariadb:{{applications.mariadb.version}}" #could lead to problems with nextcloud
+    name: "{{applications['rdbms-mariadb'].hostname }}"
+    image: "mariadb:{{applications['rdbms-mariadb'].version}}" #could lead to problems with nextcloud
     detach: yes
     env:
-      MARIADB_ROOT_PASSWORD: "{{applications.mariadb.credentials.root_password}}"
+      MARIADB_ROOT_PASSWORD: "{{applications['rdbms-mariadb'].credentials.root_password}}"
       MARIADB_AUTO_UPGRADE: "1"
     networks:
       - name: central_mariadb

@@ -23,7 +23,7 @@
     command: "--transaction-isolation=READ-COMMITTED --binlog-format=ROW" #for nextcloud
     restart_policy: "{{docker_restart_policy}}"
     healthcheck:
-      test: "/usr/bin/mariadb --user=root --password={{applications.mariadb.credentials.root_password}} --execute \"SHOW DATABASES;\""
+      test: "/usr/bin/mariadb --user=root --password={{applications['rdbms-mariadb'].credentials.root_password}} --execute \"SHOW DATABASES;\""
       interval: 3s
       timeout: 1s
       retries: 5

@@ -38,7 +38,7 @@
 
 - name: Wait until the MariaDB container is healthy
   community.docker.docker_container_info:
-    name: "{{ applications.mariadb.hostname }}"
+    name: "{{ applications['rdbms-mariadb'].hostname }}"
   register: db_info
   until:
     - db_info.containers is defined

@@ -56,7 +56,7 @@
     name: "{{ database_name }}"
     state: present
     login_user: root
-    login_password: "{{ applications.mariadb.credentials.root_password }}"
+    login_password: "{{ applications['rdbms-mariadb'].credentials.root_password }}"
     login_host: 127.0.0.1
     login_port: "{{ database_port }}"
    encoding: "{{ database_encoding }}"

@@ -70,7 +70,7 @@
     priv: '{{database_name}}.*:ALL'
     state: present
     login_user: root
-    login_password: "{{applications.mariadb.credentials.root_password}}"
+    login_password: "{{applications['rdbms-mariadb'].credentials.root_password}}"
     login_host: 127.0.0.1
     login_port: "{{database_port}}"
 
@@ -78,7 +78,7 @@
 # @todo Remove if this works fine in the future.
 #- name: Grant database privileges
 #  ansible.builtin.shell:
-#    cmd: "docker exec {{applications.mariadb.hostname }} mariadb -u root -p{{ applications.mariadb.credentials.root_password }} -e \"GRANT ALL PRIVILEGES ON `{{database_name}}`.* TO '{{database_username}}'@'%';\""
+#    cmd: "docker exec {{applications['rdbms-mariadb'].hostname }} mariadb -u root -p{{ applications['rdbms-mariadb'].credentials.root_password }} -e \"GRANT ALL PRIVILEGES ON `{{database_name}}`.* TO '{{database_username}}'@'%';\""
 #  args:
 #    executable: /bin/bash
 
@@ -3,5 +3,5 @@
 ## Root Access
 To access the database via the root account execute the following on the server:
 ```bash
-docker exec -it "{{ applications.postgres.hostname }}" psql -U postgres
+docker exec -it "{{ applications['rdbms-postgres'].hostname }}" psql -U postgres
 ```

@@ -133,7 +133,7 @@ def update_discourse(directory):
     update_procedure("docker stop {{applications.discourse.container}}")
     update_procedure("docker rm {{applications.discourse.container}}")
     try:
-        update_procedure("docker network connect {{applications.discourse.network}} {{ applications.postgres.hostname }}")
+        update_procedure("docker network connect {{applications.discourse.network}} {{ applications['rdbms-postgres'].hostname }}")
     except subprocess.CalledProcessError as e:
         error_message = e.output.decode()
         if "already exists" in error_message or "is already connected" in error_message:
@@ -9,11 +9,9 @@ galaxy_info:
       Consulting & Coaching Solutions
       https://www.veen.world
   galaxy_tags:
     - docker
     - akaunting
     - accounting
     - automation
     - docker-compose
     - finance
   repository: https://s.veen.world/cymais
   issue_tracker_url: https://s.veen.world/cymaisissues
   documentation: https://s.veen.world/cymais

@@ -7,7 +7,8 @@ api_suffix: "/bigbluebutton/"
 features:
   matomo: true
   css: true
-  portfolio_iframe: true
+  portfolio_iframe: false # Videos can't open in frame due to iframe restrictions
+  # @todo fix this
   ldap: false
   oidc: true
   central_database: false

@@ -20,5 +20,3 @@ galaxy_info:
     - name: Any
       versions:
         - all
-dependencies:
-  - docker-compose

@@ -21,6 +21,3 @@ galaxy_info:
     - name: Any
       versions:
         - all
-dependencies:
-  - docker-compose
-  - srv-proxy-6-6-domain
@@ -43,13 +43,13 @@
   meta: flush_handlers
   when: run_once_docker_discourse is not defined
 
-- name: "Connect {{ applications[application_id].container }} to network {{ applications.postgres.network }}"
+- name: "Connect {{ applications[application_id].container }} to network {{ applications['rdbms-postgres'].network }}"
   command: >
-    docker network connect {{ applications.postgres.network }} {{ applications[application_id].container }}
+    docker network connect {{ applications['rdbms-postgres'].network }} {{ applications[application_id].container }}
   register: network_connect
   failed_when: >
     network_connect.rc != 0 and
-    'Error response from daemon: endpoint with name {{ applications[application_id].container }} already exists in network {{ applications.postgres.network }}'
+    'Error response from daemon: endpoint with name {{ applications[application_id].container }} already exists in network {{ applications['rdbms-postgres'].network }}'
     not in network_connect.stderr
   changed_when: network_connect.rc == 0
   when:
@@ -14,7 +14,7 @@ The following environment variables need to be defined for successful operation:
 
 To completely reset Friendica, including its database and volumes, run:
 ```bash
-docker exec -i {{applications.mariadb.hostname }} mariadb -u root -p"${DB_ROOT_PASSWORD}" -e "DROP DATABASE IF EXISTS friendica; CREATE DATABASE friendica;"
+docker exec -i {{applications['rdbms-mariadb'].hostname }} mariadb -u root -p"${DB_ROOT_PASSWORD}" -e "DROP DATABASE IF EXISTS friendica; CREATE DATABASE friendica;"
 docker compose down
 rm -rv /mnt/hdd/data/docker/volumes/friendica_data
 docker volume rm friendica_data

@@ -25,7 +25,7 @@ docker volume rm friendica_data
 ## Manual Method:
 1. Connect to the MariaDB instance:
 ```bash
-docker exec -it {{applications.mariadb.hostname }} mariadb -u root -p
+docker exec -it {{applications['rdbms-mariadb'].hostname }} mariadb -u root -p
 ```
 2. Run the following commands:
 ```sql

@@ -37,7 +37,7 @@ docker volume rm friendica_data
 ## Automatic Method:
 ```bash
 DB_ROOT_PASSWORD="your_root_password"
-docker exec -i {{applications.mariadb.hostname }} mariadb -u root -p"${DB_ROOT_PASSWORD}" -e "DROP DATABASE IF EXISTS friendica; CREATE DATABASE friendica;"
+docker exec -i {{applications['rdbms-mariadb'].hostname }} mariadb -u root -p"${DB_ROOT_PASSWORD}" -e "DROP DATABASE IF EXISTS friendica; CREATE DATABASE friendica;"
 ```
 
 ## Enter the Application Container 🔍
@@ -21,6 +21,3 @@ galaxy_info:
     - name: Any
       versions:
         - all
-dependencies:
-  - docker-compose
-  - srv-proxy-6-6-domain

@@ -9,6 +9,6 @@ pgadmin_servers:
     port: "{{ database_port }}"
     username: "postgres"
     maintenance_db: "postgres"
-    password: "{{ applications.postgres.credentials.postgres_password }}"
+    password: "{{ applications['rdbms-postgres'].credentials.postgres_password }}"
 
 # Here you can add more databases

@@ -6,7 +6,8 @@ oauth2_proxy:
 features:
   matomo: true
   css: false
-  portfolio_iframe: true
+  portfolio_iframe: false # Opens itself in a new window, when it's loaded in an iframe.
+  # it's anyhow not so enduser relevant, so it can be kept like this
   central_database: true
   oauth2: true
 csp:
@@ -1,7 +1,7 @@
 # Configuration @see https://hub.docker.com/_/phpmyadmin
 
-PMA_HOST={{applications.mariadb.hostname}}
+PMA_HOST={{applications['rdbms-mariadb'].hostname}}
 {% if applications[application_id].autologin | bool %}
 PMA_USER= root
-PMA_PASSWORD= "{{applications.mariadb.credentials.root_password}}"
+PMA_PASSWORD= "{{applications['rdbms-mariadb'].credentials.root_password}}"
 {% endif %}

@@ -20,5 +20,3 @@ galaxy_info:
     - name: Any
       versions:
         - all
-dependencies:
-  - docker-compose

@@ -23,5 +23,4 @@ galaxy_info:
     class: "fa-solid fa-dice"
   run_after:
     - web-app-matomo
-dependencies:
-  - docker-compose
 

@@ -20,5 +20,3 @@ galaxy_info:
     - name: Any
       versions:
         - all
-dependencies:
-  - docker-compose
@@ -17,7 +17,7 @@ galaxy_info:
   galaxy_tags:
     - nginx
     - https
-    - file-server
+    - file
     - static-files
     - ssl
     - letsencrypt

@@ -48,7 +48,7 @@
 # The following mapping is necessary to define the exceptions for domains which are created, but which aren't used
 redirect_domain_mappings: "{{
     [] |
-    add_redirect_if_group('assets-server', domains | get_domain('assets-server'), domains | get_domain('file-server'), group_names) |
+    add_redirect_if_group('asset', domains | get_domain('asset'), domains | get_domain('file'), group_names) |
     merge_mapping(redirect_domain_mappings| default([]), 'source')
   }}"
 
@@ -2,7 +2,7 @@ import os
 import unittest
 
 # import the functions from your CLI script
-from cli.generate.conditional_role_include import build_dependency_graph, find_cycle
+from cli.generate.role_include import build_dependency_graph, find_cycle
 
 class TestCircularDependencies(unittest.TestCase):
     """
tests/integration/test_valid_applications.py (new file, 70 lines)

@@ -0,0 +1,70 @@
import os
import sys
import re
import unittest
from cli.meta.applications import find_application_ids

# ensure project root is on PYTHONPATH so we can import the CLI code
# project root is two levels up from this file (tests/integration -> project root)
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
sys.path.insert(0, ROOT)

class TestValidApplicationUsage(unittest.TestCase):
    """
    Integration test to ensure that only valid application IDs
    are used in all .yml, .yaml, .yml.j2, .yaml.j2, and .py files.
    Methods like applications.items() and calls to get_domain() can
    be whitelisted or validated against valid IDs.
    """
    # regex patterns to capture applications['name'], applications.get('name'), applications.name, and get_domain('name')
    APPLICATION_SUBSCRIPT_RE = re.compile(r"applications\[['\"](?P<name>[^'\"]+)['\"]\]")
    APPLICATION_GET_RE = re.compile(r"applications\.get\(\s*['\"](?P<name>[^'\"]+)['\"]")
    APPLICATION_ATTR_RE = re.compile(r"applications\.(?P<name>[A-Za-z_]\w*)")
    APPLICATION_DOMAIN_RE = re.compile(r"get_domain\(\s*['\"](?P<name>[^'\"]+)['\"]\s*\)")

    # methods and exceptions that should not be validated as application IDs
    WHITELIST = {'items', 'yml', 'get'}

    def test_application_references_use_valid_ids(self):
        valid_apps = find_application_ids()

        tests_dir = os.path.join(ROOT, 'tests')
        for dirpath, _, filenames in os.walk(ROOT):
            # skip the tests/ directory and all its subdirectories
            if dirpath == tests_dir or dirpath.startswith(tests_dir + os.sep):
                continue

            for filename in filenames:
                if not filename.lower().endswith(('.yml', '.yaml', '.yml.j2', '.yaml.j2', '.py')):
                    continue
                filepath = os.path.join(dirpath, filename)
                try:
                    with open(filepath, 'r', encoding='utf-8') as f:
                        content = f.read()
                except Exception:
                    # skip files that cannot be opened
                    continue

                for pattern in (
                    self.APPLICATION_SUBSCRIPT_RE,
                    self.APPLICATION_GET_RE,
                    self.APPLICATION_ATTR_RE,
                    self.APPLICATION_DOMAIN_RE,
                ):
                    for match in pattern.finditer(content):
                        name = match.group('name')
                        # skip whitelisted methods/exceptions
                        if name in self.WHITELIST:
                            continue
                        # each found reference must be in valid_apps
                        self.assertIn(
                            name,
                            valid_apps,
                            msg=(
                                f"{filepath}: reference to application '{name}' "
                                f"is invalid. Known IDs: {sorted(valid_apps)}"
                            )
                        )

if __name__ == '__main__':
    unittest.main()
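One way to run the new integration test on its own, assuming the standard unittest runner is invoked from the project root (the test itself prepends ROOT to `sys.path`):

```bash
python3 -m unittest tests.integration.test_valid_applications -v
```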
@@ -9,7 +9,7 @@ import yaml
 # Adjust path to include cli/ folder
 sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../../..", "cli")))
 
-from cli.generate.conditional_role_include import build_dependency_graph, topological_sort, gen_condi_role_incl
+from cli.generate.role_include import build_dependency_graph, topological_sort, gen_condi_role_incl
 
 class TestGeneratePlaybook(unittest.TestCase):
     def setUp(self):
@@ -15,9 +15,9 @@ class TestLoadConfigurationFilter(unittest.TestCase):
     def setUp(self):
         _cfg_cache.clear()
         self.f = FilterModule().filters()['load_configuration']
-        self.app = 'html-server'
+        self.app = 'html'
         self.nested_cfg = {
-            'html-server': {
+            'html': {
                 'features': {'matomo': True},
                 'domains': {'canonical': ['html.example.com']}
             }

@@ -76,8 +76,8 @@ class TestLoadConfigurationFilter(unittest.TestCase):
     @patch('load_configuration.os.listdir', return_value=['r1'])
     @patch('load_configuration.os.path.isdir', return_value=True)
     @patch('load_configuration.os.path.exists', return_value=True)
-    @patch('load_configuration.open', mock_open(read_data="html-server: {}"))
-    @patch('load_configuration.yaml.safe_load', return_value={'html-server': {}})
+    @patch('load_configuration.open', mock_open(read_data="html: {}"))
+    @patch('load_configuration.yaml.safe_load', return_value={'html': {}})
     def test_key_not_found_after_load(self, *_):
         with self.assertRaises(AnsibleFilterError):
             self.f(self.app, 'does.not.exist')

@@ -104,14 +104,14 @@ class TestLoadConfigurationFilter(unittest.TestCase):
         # Testing with an indexed key like domains.canonical[0]
         mock_exists.side_effect = lambda p: p.endswith('config/main.yml')
         mock_yaml.return_value = {
-            'file-server': {
+            'file': {
                 'domains': {
                     'canonical': ['files.example.com', 'extra.example.com']
                 }
             }
         }
         # should get the first element of the canonical domains list
-        self.assertEqual(self.f('file-server', 'domains.canonical[0]'),
+        self.assertEqual(self.f('file', 'domains.canonical[0]'),
                          'files.example.com')
 
 if __name__ == '__main__':