Mirror of https://github.com/kevinveenbirkenbach/computer-playbook.git (synced 2025-12-07 09:56:41 +00:00)

Compare commits: eca567fefd...features/w (81 commits)
cli/build/graph.py:

```diff
@@ -6,168 +6,347 @@ import json
 import re
 from typing import List, Dict, Any, Set
 
 from module_utils.role_dependency_resolver import RoleDependencyResolver
 
 # Regex used to ignore Jinja expressions inside include/import statements
 JINJA_PATTERN = re.compile(r'{{.*}}')
-ALL_DEP_TYPES = ['run_after', 'dependencies', 'include_tasks', 'import_tasks', 'include_role', 'import_role']
-ALL_DIRECTIONS = ['to', 'from']
-ALL_KEYS = [f"{dep}_{dir}" for dep in ALL_DEP_TYPES for dir in ALL_DIRECTIONS]
+
+# All dependency types the graph builder supports
+ALL_DEP_TYPES = [
+    "run_after",
+    "dependencies",
+    "include_tasks",
+    "import_tasks",
+    "include_role",
+    "import_role",
+]
+
+# Graph directions: outgoing edges ("to") vs incoming edges ("from")
+ALL_DIRECTIONS = ["to", "from"]
+
+# Combined keys: e.g. "include_role_to", "dependencies_from", etc.
+ALL_KEYS = [f"{dep}_{direction}" for dep in ALL_DEP_TYPES for direction in ALL_DIRECTIONS]
 
 
+# ------------------------------------------------------------
+# Helpers for locating meta and task files
+# ------------------------------------------------------------
+
 def find_role_meta(roles_dir: str, role: str) -> str:
-    path = os.path.join(roles_dir, role, 'meta', 'main.yml')
+    """Return path to meta/main.yml of a role or raise FileNotFoundError."""
+    path = os.path.join(roles_dir, role, "meta", "main.yml")
     if not os.path.isfile(path):
         raise FileNotFoundError(f"Metadata not found for role: {role}")
     return path
 
 
 def find_role_tasks(roles_dir: str, role: str) -> str:
-    path = os.path.join(roles_dir, role, 'tasks', 'main.yml')
+    """Return path to tasks/main.yml of a role or raise FileNotFoundError."""
+    path = os.path.join(roles_dir, role, "tasks", "main.yml")
     if not os.path.isfile(path):
         raise FileNotFoundError(f"Tasks not found for role: {role}")
     return path
 
 
+# ------------------------------------------------------------
+# Parsers for meta and tasks
+# ------------------------------------------------------------
+
 def load_meta(path: str) -> Dict[str, Any]:
-    with open(path, 'r') as f:
+    """
+    Load metadata from meta/main.yml.
+    Returns a dict with:
+      - galaxy_info
+      - run_after
+      - dependencies
+    """
+    with open(path, "r") as f:
         data = yaml.safe_load(f) or {}
 
-    galaxy_info = data.get('galaxy_info', {}) or {}
+    galaxy_info = data.get("galaxy_info", {}) or {}
     return {
-        'galaxy_info': galaxy_info,
-        'run_after': galaxy_info.get('run_after', []) or [],
-        'dependencies': data.get('dependencies', []) or []
+        "galaxy_info": galaxy_info,
+        "run_after": galaxy_info.get("run_after", []) or [],
+        "dependencies": data.get("dependencies", []) or [],
     }
 
 
 def load_tasks(path: str, dep_type: str) -> List[str]:
-    with open(path, 'r') as f:
+    """
+    Parse include_tasks/import_tasks from tasks/main.yml.
+    Only accepts simple, non-Jinja names.
+    """
+    with open(path, "r") as f:
         data = yaml.safe_load(f) or []
 
-    included_roles = []
+    roles: List[str] = []
 
     for task in data:
         if not isinstance(task, dict):
             continue
         if dep_type in task:
             entry = task[dep_type]
             if isinstance(entry, dict):
-                entry = entry.get('name', '')
-            if entry and not JINJA_PATTERN.search(entry):
-                included_roles.append(entry)
+                entry = entry.get("name", "")
+            if isinstance(entry, str) and entry and not JINJA_PATTERN.search(entry):
+                roles.append(entry)
 
-    return included_roles
+    return roles
 
 
+# ------------------------------------------------------------
+# Graph builder using precomputed caches (fast)
+# ------------------------------------------------------------
+
 def build_single_graph(
     start_role: str,
     dep_type: str,
     direction: str,
     roles_dir: str,
-    max_depth: int
+    max_depth: int,
+    caches: Dict[str, Any],
 ) -> Dict[str, Any]:
+    """
+    Build a graph (nodes + links) for one role, one dep_type, one direction.
+    Uses only precomputed in-memory caches, no filesystem access.
+
+    caches structure:
+      caches["meta"][role]            -> meta information
+      caches["deps"][dep_type][role]  -> outgoing targets
+      caches["rev"][dep_type][target] -> set of source roles
+    """
     nodes: Dict[str, Dict[str, Any]] = {}
     links: List[Dict[str, str]] = []
 
+    meta_cache = caches["meta"]
+    deps_cache = caches["deps"]
+    rev_cache = caches["rev"]
+
+    # Ensure a role exists as a node
+    def ensure_node(role: str):
+        if role in nodes:
+            return
+
+        # Try retrieving cached meta; fallback: lazy load
+        meta = meta_cache.get(role)
+        if meta is None:
+            try:
+                meta = load_meta(find_role_meta(roles_dir, role))
+                meta_cache[role] = meta
+            except FileNotFoundError:
+                meta = {"galaxy_info": {}}
+
+        galaxy_info = meta.get("galaxy_info", {}) or {}
+
+        node = {
+            "id": role,
+            **galaxy_info,
+            "doc_url": f"https://docs.infinito.nexus/roles/{role}/README.html",
+            "source_url": f"https://github.com/kevinveenbirkenbach/infinito-nexus/tree/master/roles/{role}",
+        }
+        nodes[role] = node
+
+    # Outgoing edges: role -> targets
+    def outgoing(role: str) -> List[str]:
+        return deps_cache.get(dep_type, {}).get(role, []) or []
+
+    # Incoming edges: sources -> role
+    def incoming(role: str) -> Set[str]:
+        return rev_cache.get(dep_type, {}).get(role, set())
+
+    # DFS traversal
     def traverse(role: str, depth: int, path: Set[str]):
-        if role not in nodes:
-            meta = load_meta(find_role_meta(roles_dir, role))
-            node = {'id': role}
-            node.update(meta['galaxy_info'])
-            node['doc_url'] = f"https://docs.infinito.nexus/roles/{role}/README.html"
-            node['source_url'] = f"https://s.infinito.nexus/code/tree/master/roles/{role}"
-            nodes[role] = node
+        ensure_node(role)
 
         if max_depth > 0 and depth >= max_depth:
             return
 
-        neighbors = []
-        if dep_type in ['run_after', 'dependencies']:
-            meta = load_meta(find_role_meta(roles_dir, role))
-            neighbors = meta.get(dep_type, [])
-        else:
-            try:
-                neighbors = load_tasks(find_role_tasks(roles_dir, role), dep_type)
-            except FileNotFoundError:
-                neighbors = []
-
-        if direction == 'to':
-            for tgt in neighbors:
-                links.append({'source': role, 'target': tgt, 'type': dep_type})
-                if tgt in path:
-                    continue
-                traverse(tgt, depth + 1, path | {tgt})
-        else:  # direction == 'from'
-            for other in os.listdir(roles_dir):
-                try:
-                    other_neighbors = []
-                    if dep_type in ['run_after', 'dependencies']:
-                        meta_o = load_meta(find_role_meta(roles_dir, other))
-                        other_neighbors = meta_o.get(dep_type, [])
-                    else:
-                        other_neighbors = load_tasks(find_role_tasks(roles_dir, other), dep_type)
-
-                    if role in other_neighbors:
-                        links.append({'source': other, 'target': role, 'type': dep_type})
-                        if other in path:
-                            continue
-                        traverse(other, depth + 1, path | {other})
-                except FileNotFoundError:
-                    continue
+        if direction == "to":
+            for tgt in outgoing(role):
+                ensure_node(tgt)
+                links.append({"source": role, "target": tgt, "type": dep_type})
+                if tgt not in path:
+                    traverse(tgt, depth + 1, path | {tgt})
+        else:  # direction == "from"
+            for src in incoming(role):
+                ensure_node(src)
+                links.append({"source": src, "target": role, "type": dep_type})
+                if src not in path:
+                    traverse(src, depth + 1, path | {src})
 
-    traverse(start_role, depth=0, path={start_role})
-    return {'nodes': list(nodes.values()), 'links': links}
+    traverse(start_role, 0, {start_role})
+    return {"nodes": list(nodes.values()), "links": links}
 
 
+# ------------------------------------------------------------
+# Build all graph variants for one role
+# ------------------------------------------------------------
+
 def build_mappings(
     start_role: str,
     roles_dir: str,
     max_depth: int
 ) -> Dict[str, Any]:
+    """
+    Build all 12 graph variants (6 dep types × 2 directions).
+    Accelerated version:
+      - One-time scan of all metadata
+      - One-time scan of all include_role/import_role
+      - One-time scan of include_tasks/import_tasks
+      - Build reverse-index tables
+      - Then generate all graphs purely from memory
+    """
     result: Dict[str, Any] = {}
-    for key in ALL_KEYS:
-        dep_type, direction = key.rsplit('_', 1)
-        try:
-            result[key] = build_single_graph(start_role, dep_type, direction, roles_dir, max_depth)
-        except Exception:
-            result[key] = {'nodes': [], 'links': []}
-    return result
+
+    roles = [
+        r for r in os.listdir(roles_dir)
+        if os.path.isdir(os.path.join(roles_dir, r))
+    ]
+
+    # Pre-caches
+    meta_cache: Dict[str, Dict[str, Any]] = {}
+    deps_cache: Dict[str, Dict[str, List[str]]] = {dep: {} for dep in ALL_DEP_TYPES}
+    rev_cache: Dict[str, Dict[str, Set[str]]] = {dep: {} for dep in ALL_DEP_TYPES}
+
+    resolver = RoleDependencyResolver(roles_dir)
+
+    # --------------------------------------------------------
+    # Step 1: Preload meta-based deps (run_after, dependencies)
+    # --------------------------------------------------------
+    for role in roles:
+        try:
+            meta = load_meta(find_role_meta(roles_dir, role))
+        except FileNotFoundError:
+            continue
+
+        meta_cache[role] = meta
+
+        for dep_key in ["run_after", "dependencies"]:
+            values = meta.get(dep_key, []) or []
+            if isinstance(values, list) and values:
+                deps_cache[dep_key][role] = values
+                for tgt in values:
+                    if isinstance(tgt, str) and tgt.strip():
+                        rev_cache[dep_key].setdefault(tgt.strip(), set()).add(role)
+
+    # --------------------------------------------------------
+    # Step 2: Preload include_role/import_role (resolver)
+    # --------------------------------------------------------
+    for role in roles:
+        role_path = os.path.join(roles_dir, role)
+        inc, imp = resolver._scan_tasks(role_path)
+
+        if inc:
+            inc_list = sorted(inc)
+            deps_cache["include_role"][role] = inc_list
+            for tgt in inc_list:
+                rev_cache["include_role"].setdefault(tgt, set()).add(role)
+
+        if imp:
+            imp_list = sorted(imp)
+            deps_cache["import_role"][role] = imp_list
+            for tgt in imp_list:
+                rev_cache["import_role"].setdefault(tgt, set()).add(role)
+
+    # --------------------------------------------------------
+    # Step 3: Preload include_tasks/import_tasks
+    # --------------------------------------------------------
+    for role in roles:
+        try:
+            tasks_path = find_role_tasks(roles_dir, role)
+        except FileNotFoundError:
+            continue
+
+        for dep_key in ["include_tasks", "import_tasks"]:
+            values = load_tasks(tasks_path, dep_key)
+            if values:
+                deps_cache[dep_key][role] = values
+                for tgt in values:
+                    rev_cache[dep_key].setdefault(tgt, set()).add(role)
+
+    caches = {
+        "meta": meta_cache,
+        "deps": deps_cache,
+        "rev": rev_cache,
+    }
+
+    # --------------------------------------------------------
+    # Step 4: Build all graphs from caches
+    # --------------------------------------------------------
+    for key in ALL_KEYS:
+        dep_type, direction = key.rsplit("_", 1)
+        try:
+            result[key] = build_single_graph(
+                start_role=start_role,
+                dep_type=dep_type,
+                direction=direction,
+                roles_dir=roles_dir,
+                max_depth=max_depth,
+                caches=caches,
+            )
+        except Exception:
+            result[key] = {"nodes": [], "links": []}
+
+    return result
 
 
+# ------------------------------------------------------------
+# Output helper
+# ------------------------------------------------------------
+
 def output_graph(graph_data: Any, fmt: str, start: str, key: str):
     base = f"{start}_{key}"
-    if fmt == 'console':
+    if fmt == "console":
         print(f"--- {base} ---")
         print(yaml.safe_dump(graph_data, sort_keys=False))
-    elif fmt in ('yaml', 'json'):
+    else:
         path = f"{base}.{fmt}"
-        with open(path, 'w') as f:
-            if fmt == 'yaml':
+        with open(path, "w") as f:
+            if fmt == "yaml":
                 yaml.safe_dump(graph_data, f, sort_keys=False)
             else:
                 json.dump(graph_data, f, indent=2)
         print(f"Wrote {path}")
-    else:
-        raise ValueError(f"Unknown format: {fmt}")
 
 
+# ------------------------------------------------------------
+# CLI entrypoint
+# ------------------------------------------------------------
+
 def main():
     script_dir = os.path.dirname(os.path.abspath(__file__))
-    default_roles_dir = os.path.abspath(os.path.join(script_dir, '..', '..', 'roles'))
+    default_roles_dir = os.path.abspath(os.path.join(script_dir, "..", "..", "roles"))
 
     parser = argparse.ArgumentParser(description="Generate dependency graphs")
-    parser.add_argument('-r', '--role', required=True, help="Starting role name")
-    parser.add_argument('-D', '--depth', type=int, default=0, help="Max recursion depth")
-    parser.add_argument('-o', '--output', choices=['yaml', 'json', 'console'], default='console')
-    parser.add_argument('--roles-dir', default=default_roles_dir, help="Roles directory")
+    parser.add_argument("-r", "--role", required=True, help="Starting role name")
+    parser.add_argument("-D", "--depth", type=int, default=0, help="Max recursion depth")
+    parser.add_argument("-o", "--output", choices=["yaml", "json", "console"], default="console")
+    parser.add_argument("--roles-dir", default=default_roles_dir, help="Roles directory")
 
     args = parser.parse_args()
 
     graphs = build_mappings(args.role, args.roles_dir, args.depth)
 
     for key in ALL_KEYS:
-        graph_data = graphs.get(key, {'nodes': [], 'links': []})
+        graph_data = graphs.get(key, {"nodes": [], "links": []})
         output_graph(graph_data, args.output, args.role, key)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
```
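The reverse-index caches are what make the `from` direction cheap: instead of re-reading every role's files to find who points at the current role, `build_single_graph` answers that with a single dictionary lookup. A minimal, self-contained sketch of the inversion step (the role names are hypothetical, not taken from the repository):

```python
from typing import Dict, List, Set

# Forward map, as build_mappings' Steps 1-3 produce it: role -> direct targets
deps: Dict[str, List[str]] = {
    "web-app-demo": ["svc-db-postgres", "sys-ctl-backup"],
    "web-app-other": ["svc-db-postgres"],
}

# Invert once: target -> set of roles that point at it
rev: Dict[str, Set[str]] = {}
for src, targets in deps.items():
    for tgt in targets:
        rev.setdefault(tgt, set()).add(src)

# The "from" traversal now answers "who depends on svc-db-postgres?" in O(1)
assert rev["svc-db-postgres"] == {"web-app-demo", "web-app-other"}
```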
```diff
@@ -2,19 +2,76 @@
 import os
 import argparse
 import json
-from typing import Dict, Any
+from typing import Dict, Any, Optional, Iterable, Tuple
+from concurrent.futures import ProcessPoolExecutor, as_completed
 
 from cli.build.graph import build_mappings, output_graph
 from module_utils.role_dependency_resolver import RoleDependencyResolver
 
 
-def find_roles(roles_dir: str):
+def find_roles(roles_dir: str) -> Iterable[Tuple[str, str]]:
+    """
+    Yield (role_name, role_path) for all roles in the given roles_dir.
+    """
     for entry in os.listdir(roles_dir):
         path = os.path.join(roles_dir, entry)
         if os.path.isdir(path):
             yield entry, path
+
+
+def process_role(
+    role_name: str,
+    roles_dir: str,
+    depth: int,
+    shadow_folder: Optional[str],
+    output: str,
+    preview: bool,
+    verbose: bool,
+    no_include_role: bool,   # currently unused, kept for CLI compatibility
+    no_import_role: bool,    # currently unused, kept for CLI compatibility
+    no_dependencies: bool,   # currently unused, kept for CLI compatibility
+    no_run_after: bool,      # currently unused, kept for CLI compatibility
+) -> None:
+    """
+    Worker function: build graphs and (optionally) write meta/tree.json for a single role.
+
+    Note:
+      This version no longer adds a custom top-level "dependencies" bucket.
+      Only the graphs returned by build_mappings() are written.
+    """
+    role_path = os.path.join(roles_dir, role_name)
+
+    if verbose:
+        print(f"[worker] Processing role: {role_name}")
+
+    # Build the full graph structure (all dep types / directions) for this role
+    graphs: Dict[str, Any] = build_mappings(
+        start_role=role_name,
+        roles_dir=roles_dir,
+        max_depth=depth,
+    )
+
+    # Preview mode: dump graphs to console instead of writing tree.json
+    if preview:
+        for key, data in graphs.items():
+            if verbose:
+                print(f"[worker] Previewing graph '{key}' for role '{role_name}'")
+            # In preview mode we always output as console
+            output_graph(data, "console", role_name, key)
+        return
+
+    # Non-preview: write meta/tree.json for this role
+    if shadow_folder:
+        tree_file = os.path.join(shadow_folder, role_name, "meta", "tree.json")
+    else:
+        tree_file = os.path.join(role_path, "meta", "tree.json")
+
+    os.makedirs(os.path.dirname(tree_file), exist_ok=True)
+    with open(tree_file, "w", encoding="utf-8") as f:
+        json.dump(graphs, f, indent=2)
+
+    print(f"Wrote {tree_file}")
 
 
 def main():
     script_dir = os.path.dirname(os.path.abspath(__file__))
     default_roles_dir = os.path.abspath(os.path.join(script_dir, "..", "..", "roles"))
@@ -22,24 +79,67 @@ def main():
     parser = argparse.ArgumentParser(
         description="Generate all graphs for each role and write meta/tree.json"
     )
-    parser.add_argument("-d", "--role_dir", default=default_roles_dir,
-                        help=f"Path to roles directory (default: {default_roles_dir})")
-    parser.add_argument("-D", "--depth", type=int, default=0,
-                        help="Max recursion depth (>0) or <=0 to stop on cycle")
-    parser.add_argument("-o", "--output", choices=["yaml", "json", "console"],
-                        default="json", help="Output format")
-    parser.add_argument("-p", "--preview", action="store_true",
-                        help="Preview graphs to console instead of writing files")
-    parser.add_argument("-s", "--shadow-folder", type=str, default=None,
-                        help="If set, writes tree.json to this shadow folder instead of the role's actual meta/ folder")
-    parser.add_argument("-v", "--verbose", action="store_true", help="Enable verbose logging")
+    parser.add_argument(
+        "-d",
+        "--role_dir",
+        default=default_roles_dir,
+        help=f"Path to roles directory (default: {default_roles_dir})",
+    )
+    parser.add_argument(
+        "-D",
+        "--depth",
+        type=int,
+        default=0,
+        help="Max recursion depth (>0) or <=0 to stop on cycle",
+    )
+    parser.add_argument(
+        "-o",
+        "--output",
+        choices=["yaml", "json", "console"],
+        default="json",
+        help="Output format for preview mode",
+    )
+    parser.add_argument(
+        "-p",
+        "--preview",
+        action="store_true",
+        help="Preview graphs to console instead of writing files",
+    )
+    parser.add_argument(
+        "-s",
+        "--shadow-folder",
+        type=str,
+        default=None,
+        help="If set, writes tree.json to this shadow folder instead of the role's actual meta/ folder",
+    )
+    parser.add_argument(
+        "-v",
+        "--verbose",
+        action="store_true",
+        help="Enable verbose logging",
+    )
 
-    # Toggles
-    parser.add_argument("--no-include-role", action="store_true", help="Do not scan include_role")
-    parser.add_argument("--no-import-role", action="store_true", help="Do not scan import_role")
-    parser.add_argument("--no-dependencies", action="store_true", help="Do not read meta/main.yml dependencies")
-    parser.add_argument("--no-run-after", action="store_true",
-                        help="Do not read galaxy_info.run_after from meta/main.yml")
+    # Toggles (kept for CLI compatibility, currently only meaningful for future extensions)
+    parser.add_argument(
+        "--no-include-role",
+        action="store_true",
+        help="Reserved: do not include include_role in custom dependency bucket",
+    )
+    parser.add_argument(
+        "--no-import-role",
+        action="store_true",
+        help="Reserved: do not include import_role in custom dependency bucket",
+    )
+    parser.add_argument(
+        "--no-dependencies",
+        action="store_true",
+        help="Reserved: do not include meta dependencies in custom dependency bucket",
+    )
+    parser.add_argument(
+        "--no-run-after",
+        action="store_true",
+        help="Reserved: do not include run_after in custom dependency bucket",
+    )
 
     args = parser.parse_args()
@@ -50,54 +150,53 @@ def main():
         print(f"Preview mode: {args.preview}")
         print(f"Shadow folder: {args.shadow_folder}")
 
-    resolver = RoleDependencyResolver(args.role_dir)
+    roles = [role_name for role_name, _ in find_roles(args.role_dir)]
 
-    for role_name, role_path in find_roles(args.role_dir):
-        if args.verbose:
-            print(f"Processing role: {role_name}")
-
-        graphs: Dict[str, Any] = build_mappings(
-            start_role=role_name,
-            roles_dir=args.role_dir,
-            max_depth=args.depth
-        )
-
-        # Direct deps (depth=1) - captured separately for the buckets
-        inc_roles, imp_roles = resolver._scan_tasks(role_path)
-        meta_deps = resolver._extract_meta_dependencies(role_path)
-        run_after = set()
-        if not args.no_run_after:
-            run_after = resolver._extract_meta_run_after(role_path)
-
-        if any([not args.no_include_role and inc_roles,
-                not args.no_import_role and imp_roles,
-                not args.no_dependencies and meta_deps,
-                not args.no_run_after and run_after]):
-            deps_root = graphs.setdefault("dependencies", {})
-            if not args.no_include_role and inc_roles:
-                deps_root["include_role"] = sorted(inc_roles)
-            if not args.no_import_role and imp_roles:
-                deps_root["import_role"] = sorted(imp_roles)
-            if not args.no_dependencies and meta_deps:
-                deps_root["dependencies"] = sorted(meta_deps)
-            if not args.no_run_after and run_after:
-                deps_root["run_after"] = sorted(run_after)
-            graphs["dependencies"] = deps_root
-
-        if args.preview:
-            for key, data in graphs.items():
-                if args.verbose:
-                    print(f"Previewing graph '{key}' for role '{role_name}'")
-                output_graph(data, "console", role_name, key)
-        else:
-            if args.shadow_folder:
-                tree_file = os.path.join(args.shadow_folder, role_name, "meta", "tree.json")
-            else:
-                tree_file = os.path.join(role_path, "meta", "tree.json")
-            os.makedirs(os.path.dirname(tree_file), exist_ok=True)
-            with open(tree_file, "w", encoding="utf-8") as f:
-                json.dump(graphs, f, indent=2)
-            print(f"Wrote {tree_file}")
+    # For preview, run sequentially to avoid completely interleaved output.
+    if args.preview:
+        for role_name in roles:
+            process_role(
+                role_name=role_name,
+                roles_dir=args.role_dir,
+                depth=args.depth,
+                shadow_folder=args.shadow_folder,
+                output=args.output,
+                preview=True,
+                verbose=args.verbose,
+                no_include_role=args.no_include_role,
+                no_import_role=args.no_import_role,
+                no_dependencies=args.no_dependencies,
+                no_run_after=args.no_run_after,
+            )
+        return
+
+    # Non-preview: roles are processed in parallel
+    with ProcessPoolExecutor() as executor:
+        futures = {
+            executor.submit(
+                process_role,
+                role_name,
+                args.role_dir,
+                args.depth,
+                args.shadow_folder,
+                args.output,
+                False,  # preview=False in parallel mode
+                args.verbose,
+                args.no_include_role,
+                args.no_import_role,
+                args.no_dependencies,
+                args.no_run_after,
+            ): role_name
+            for role_name in roles
+        }
+
+        for future in as_completed(futures):
+            role_name = futures[future]
+            try:
+                future.result()
+            except Exception as exc:
+                # Do not crash the whole run; report the failing role instead.
+                print(f"[ERROR] Role '{role_name}' failed: {exc}")
 
 
 if __name__ == "__main__":
```
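The parallelization pattern above (a futures-to-role map plus `as_completed` with a per-future `try/except`) is what isolates failures: one broken role prints an error while the rest keep going. A standalone sketch of the same pattern, with a dummy worker standing in for `process_role`:

```python
from concurrent.futures import ProcessPoolExecutor, as_completed

def work(name: str) -> str:
    # Stand-in for process_role; fails for one input to show error isolation.
    if name == "bad-role":
        raise RuntimeError("boom")
    return f"done: {name}"

if __name__ == "__main__":  # guard required for ProcessPoolExecutor on spawn platforms
    names = ["role-a", "bad-role", "role-b"]
    with ProcessPoolExecutor() as executor:
        futures = {executor.submit(work, n): n for n in names}
        for future in as_completed(futures):
            name = futures[future]
            try:
                print(future.result())
            except Exception as exc:
                print(f"[ERROR] Role '{name}' failed: {exc}")
```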
```diff
@@ -10,9 +10,23 @@ from module_utils.config_utils import get_app_conf
 from module_utils.get_url import get_url
 
 
+def _dedup_preserve(seq):
+    """Return a list with stable order and unique items."""
+    seen = set()
+    out = []
+    for x in seq:
+        if x not in seen:
+            seen.add(x)
+            out.append(x)
+    return out
+
+
 class FilterModule(object):
     """
-    Custom filters for Content Security Policy generation and CSP-related utilities.
+    Jinja filters for building a robust, CSP3-aware Content-Security-Policy header.
+    Safari/CSP2 compatibility is ensured by merging the -elem/-attr variants into the base
+    directives (style-src, script-src). We intentionally do NOT mirror back into -elem/-attr
+    to allow true CSP3 granularity on modern browsers.
     """
 
     def filters(self):
@@ -61,11 +75,14 @@ class FilterModule(object):
         """
         Returns CSP flag tokens (e.g., "'unsafe-eval'", "'unsafe-inline'") for a directive,
         merging sane defaults with app config.
-        Default: 'unsafe-inline' is enabled for style-src and style-src-elem.
+
+        Defaults:
+        - For styles we enable 'unsafe-inline' by default (style-src, style-src-elem, style-src-attr),
+          because many apps rely on inline styles / style attributes.
+        - For scripts we do NOT enable 'unsafe-inline' by default.
         """
         # Defaults that apply to all apps
         default_flags = {}
-        if directive in ('style-src', 'style-src-elem'):
+        if directive in ('style-src', 'style-src-elem', 'style-src-attr'):
             default_flags = {'unsafe-inline': True}
 
         configured = get_app_conf(
@@ -76,7 +93,6 @@ class FilterModule(object):
             {}
         )
 
-        # Merge defaults with configured flags (configured overrides defaults)
         merged = {**default_flags, **configured}
 
         tokens = []
@@ -131,82 +147,154 @@ class FilterModule(object):
     ):
         """
         Builds the Content-Security-Policy header value dynamically based on application settings.
-        - Flags (e.g., 'unsafe-eval', 'unsafe-inline') are read from server.csp.flags.<directive>,
-          with sane defaults applied in get_csp_flags (always 'unsafe-inline' for style-src and style-src-elem).
-        - Inline hashes are read from server.csp.hashes.<directive>.
-        - Whitelists are read from server.csp.whitelist.<directive>.
-        - Inline hashes are added only if the final tokens do NOT include 'unsafe-inline'.
+
+        Key points:
+        - CSP3-aware: supports base/elem/attr for styles and scripts.
+        - Safari/CSP2 fallback: base directives (style-src, script-src) always include
+          the union of their -elem/-attr variants.
+        - We do NOT mirror back into -elem/-attr; finer CSP3 rules remain effective
+          on modern browsers if you choose to use them.
+        - If the app explicitly disables a token on the *base* (e.g. style-src.unsafe-inline: false),
+          that token is removed from the merged base even if present in elem/attr.
+        - Inline hashes are added ONLY if that directive does NOT include 'unsafe-inline'.
+        - Whitelists/flags/hashes read from:
+            server.csp.whitelist.<directive>
+            server.csp.flags.<directive>
+            server.csp.hashes.<directive>
+        - "Smart defaults":
+            * internal CDN for style/script elem and connect
+            * Matomo endpoints (if feature enabled) for script-elem/connect
+            * Simpleicons (if feature enabled) for connect
+            * reCAPTCHA (if feature enabled) for script-elem/frame-src
+            * frame-ancestors extended for desktop/logout/keycloak if enabled
        """
         try:
             directives = [
-                'default-src',       # Fallback source list for content types not explicitly listed
-                'connect-src',       # Allowed URLs for XHR, WebSockets, EventSource, fetch()
-                'frame-ancestors',   # Who may embed this page
-                'frame-src',         # Sources for nested browsing contexts (e.g., <iframe>)
-                'script-src',        # Sources for script execution
-                'script-src-elem',   # Sources for <script> elements
-                'style-src',         # Sources for inline styles and <style>/<link> elements
-                'style-src-elem',    # Sources for <style> and <link rel="stylesheet">
-                'font-src',          # Sources for fonts
-                'worker-src',        # Sources for workers
-                'manifest-src',      # Sources for web app manifests
-                'media-src',         # Sources for audio and video
+                'default-src',
+                'connect-src',
+                'frame-ancestors',
+                'frame-src',
+                'script-src',
+                'script-src-elem',
+                'script-src-attr',
+                'style-src',
+                'style-src-elem',
+                'style-src-attr',
+                'font-src',
+                'worker-src',
+                'manifest-src',
+                'media-src',
             ]
 
-            parts = []
+            tokens_by_dir = {}
+            explicit_flags_by_dir = {}
 
             for directive in directives:
+                # Collect explicit flags (to later respect explicit "False" on base during merge)
+                explicit_flags = get_app_conf(
+                    applications,
+                    application_id,
+                    'server.csp.flags.' + directive,
+                    False,
+                    {}
+                )
+                explicit_flags_by_dir[directive] = explicit_flags
+
                 tokens = ["'self'"]
 
-                # Load flags (includes defaults from get_csp_flags)
+                # 1) Flags (with sane defaults)
                 flags = self.get_csp_flags(applications, application_id, directive)
                 tokens += flags
 
-                # Allow fetching from internal CDN by default for selected directives
-                if directive in ['script-src-elem', 'connect-src', 'style-src-elem']:
+                # 2) Internal CDN defaults for selected directives
+                if directive in ('script-src-elem', 'connect-src', 'style-src-elem', 'style-src'):
                     tokens.append(get_url(domains, 'web-svc-cdn', web_protocol))
 
-                # Matomo integration if feature is enabled
-                if directive in ['script-src-elem', 'connect-src']:
+                # 3) Matomo (if enabled)
+                if directive in ('script-src-elem', 'connect-src'):
                     if self.is_feature_enabled(applications, matomo_feature_name, application_id):
                         tokens.append(get_url(domains, 'web-app-matomo', web_protocol))
 
-                # Simpleicons integration if feature is enabled
-                if directive in ['connect-src']:
+                # 4) Simpleicons (if enabled) - typically used via connect-src (fetch)
+                if directive == 'connect-src':
                     if self.is_feature_enabled(applications, 'simpleicons', application_id):
                         tokens.append(get_url(domains, 'web-svc-simpleicons', web_protocol))
 
-                # ReCaptcha integration (scripts + frames) if feature is enabled
+                # 5) reCAPTCHA (if enabled) - scripts + frames
                 if self.is_feature_enabled(applications, 'recaptcha', application_id):
-                    if directive in ['script-src-elem', 'frame-src']:
+                    if directive in ('script-src-elem', 'frame-src'):
                         tokens.append('https://www.gstatic.com')
                         tokens.append('https://www.google.com')
 
-                # Frame ancestors handling (desktop + logout support)
+                # 6) Frame ancestors (desktop + logout)
                 if directive == 'frame-ancestors':
                     if self.is_feature_enabled(applications, 'desktop', application_id):
-                        # Allow being embedded by the desktop app domain (and potentially its parent)
+                        # Allow being embedded by the desktop app domain's site
                         domain = domains.get('web-app-desktop')[0]
                         sld_tld = ".".join(domain.split(".")[-2:])  # e.g., example.com
                         tokens.append(f"{sld_tld}")
                     if self.is_feature_enabled(applications, 'logout', application_id):
                         # Allow embedding via logout proxy and Keycloak app
                         tokens.append(get_url(domains, 'web-svc-logout', web_protocol))
                         tokens.append(get_url(domains, 'web-app-keycloak', web_protocol))
 
-                # Custom whitelist entries
+                # 6b) Logout support requires inline handlers (script-src-attr)
+                if directive in ('script-src-attr', 'script-src-elem'):
+                    if self.is_feature_enabled(applications, 'logout', application_id):
+                        tokens.append("'unsafe-inline'")
+
+                # 7) Custom whitelist
                 tokens += self.get_csp_whitelist(applications, application_id, directive)
 
-                # Add inline content hashes ONLY if final tokens do NOT include 'unsafe-inline'
-                # (Check tokens, not flags, to include defaults and later modifications.)
+                # 8) Inline hashes (only if this directive does NOT include 'unsafe-inline')
                 if "'unsafe-inline'" not in tokens:
                     for snippet in self.get_csp_inline_content(applications, application_id, directive):
                         tokens.append(self.get_csp_hash(snippet))
 
-                # Append directive
-                parts.append(f"{directive} {' '.join(tokens)};")
+                tokens_by_dir[directive] = _dedup_preserve(tokens)
 
-            # Static img-src directive (kept permissive for data/blob and any host)
+            # ----------------------------------------------------------
+            # CSP3 families → ensure CSP2 fallback (Safari-safe)
+            # Merge style/script families so base contains union of elem/attr.
+            # Respect explicit disables on the base (e.g. unsafe-inline=False).
+            # Do NOT mirror back into elem/attr (keep granularity).
+            # ----------------------------------------------------------
+            def _strip_if_disabled(unioned_tokens, explicit_flags, name):
+                """
+                Remove a token (e.g. 'unsafe-inline') from the unioned token list
+                if it is explicitly disabled in the base directive flags.
+                """
+                if isinstance(explicit_flags, dict) and explicit_flags.get(name) is False:
+                    tok = f"'{name}'"
+                    return [t for t in unioned_tokens if t != tok]
+                return unioned_tokens
+
+            def merge_family(base_key, elem_key, attr_key):
+                base = tokens_by_dir.get(base_key, [])
+                elem = tokens_by_dir.get(elem_key, [])
+                attr = tokens_by_dir.get(attr_key, [])
+                union = _dedup_preserve(base + elem + attr)
+
+                # Respect explicit disables on the base
+                explicit_base = explicit_flags_by_dir.get(base_key, {})
+                # The most relevant flags for script/style:
+                for flag_name in ('unsafe-inline', 'unsafe-eval'):
+                    union = _strip_if_disabled(union, explicit_base, flag_name)
+
+                tokens_by_dir[base_key] = union  # write back only to base
+
+            merge_family('style-src', 'style-src-elem', 'style-src-attr')
+            merge_family('script-src', 'script-src-elem', 'script-src-attr')
+
+            # ----------------------------------------------------------
+            # Assemble header
+            # ----------------------------------------------------------
+            parts = []
+            for directive in directives:
+                if directive in tokens_by_dir:
+                    parts.append(f"{directive} {' '.join(tokens_by_dir[directive])};")
+
+            # Keep permissive img-src for data/blob + any host (as before)
             parts.append("img-src * data: blob:;")
 
             return ' '.join(parts)
```
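The family merge is easiest to see on concrete token lists. Below is a small standalone sketch of the union step, with made-up tokens (the real filter additionally strips tokens whose base flag is explicitly `False`):

```python
def _dedup_preserve(seq):
    """Stable-order dedup, same as the filter plugin's helper."""
    seen, out = set(), []
    for x in seq:
        if x not in seen:
            seen.add(x)
            out.append(x)
    return out

tokens_by_dir = {
    "style-src":      ["'self'"],
    "style-src-elem": ["'self'", "https://cdn.example.org"],
    "style-src-attr": ["'self'", "'unsafe-inline'"],
}

# The base gets the union of elem/attr, so CSP2 browsers (which ignore the
# -elem/-attr variants) still allow everything the finer rules allow.
union = _dedup_preserve(
    tokens_by_dir["style-src"]
    + tokens_by_dir["style-src-elem"]
    + tokens_by_dir["style-src-attr"]
)
tokens_by_dir["style-src"] = union
print("style-src " + " ".join(union) + ";")
# style-src 'self' https://cdn.example.org 'unsafe-inline';
```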
jvm_filters.py (deleted file, 77 lines):

```python
from __future__ import annotations

import sys, os, re
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from ansible.errors import AnsibleFilterError
from module_utils.config_utils import get_app_conf
from module_utils.entity_name_utils import get_entity_name

_UNIT_RE = re.compile(r'^\s*(\d+(?:\.\d+)?)\s*([kKmMgGtT]?[bB]?)?\s*$')
_FACTORS = {
    '': 1, 'b': 1,
    'k': 1024, 'kb': 1024,
    'm': 1024**2, 'mb': 1024**2,
    'g': 1024**3, 'gb': 1024**3,
    't': 1024**4, 'tb': 1024**4,
}

def _to_bytes(v: str) -> int:
    if v is None:
        raise AnsibleFilterError("jvm_filters: size value is None")
    s = str(v).strip()
    m = _UNIT_RE.match(s)
    if not m:
        raise AnsibleFilterError(f"jvm_filters: invalid size '{v}'")
    num, unit = m.group(1), (m.group(2) or '').lower()
    try:
        val = float(num)
    except ValueError as e:
        raise AnsibleFilterError(f"jvm_filters: invalid numeric size '{v}'") from e
    factor = _FACTORS.get(unit)
    if factor is None:
        raise AnsibleFilterError(f"jvm_filters: unknown unit in '{v}'")
    return int(val * factor)

def _to_mb(v: str) -> int:
    return max(0, _to_bytes(v) // (1024 * 1024))

def _svc(app_id: str) -> str:
    return get_entity_name(app_id)

def _mem_limit_mb(apps: dict, app_id: str) -> int:
    svc = _svc(app_id)
    raw = get_app_conf(apps, app_id, f"docker.services.{svc}.mem_limit")
    mb = _to_mb(raw)
    if mb <= 0:
        raise AnsibleFilterError(f"jvm_filters: mem_limit for '{svc}' must be > 0 MB (got '{raw}')")
    return mb

def _mem_res_mb(apps: dict, app_id: str) -> int:
    svc = _svc(app_id)
    raw = get_app_conf(apps, app_id, f"docker.services.{svc}.mem_reservation")
    mb = _to_mb(raw)
    if mb <= 0:
        raise AnsibleFilterError(f"jvm_filters: mem_reservation for '{svc}' must be > 0 MB (got '{raw}')")
    return mb

def jvm_max_mb(apps: dict, app_id: str) -> int:
    """Xmx = min( floor(0.7*limit), limit-1024, 12288 ) with floor at 1024 MB."""
    limit_mb = _mem_limit_mb(apps, app_id)
    c1 = (limit_mb * 7) // 10
    c2 = max(0, limit_mb - 1024)
    c3 = 12288
    return max(1024, min(c1, c2, c3))

def jvm_min_mb(apps: dict, app_id: str) -> int:
    """Xms = min( floor(Xmx/2), mem_reservation, Xmx ) with floor at 512 MB."""
    xmx = jvm_max_mb(apps, app_id)
    res = _mem_res_mb(apps, app_id)
    return max(512, min(xmx // 2, res, xmx))

class FilterModule(object):
    def filters(self):
        return {
            "jvm_max_mb": jvm_max_mb,
            "jvm_min_mb": jvm_min_mb,
        }
```
filter_plugins/memory_filters.py (new file, 179 lines):

```python
from __future__ import annotations

import sys, os, re
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from ansible.errors import AnsibleFilterError
from module_utils.config_utils import get_app_conf
from module_utils.entity_name_utils import get_entity_name

# Regex and unit conversion table
_UNIT_RE = re.compile(r'^\s*(\d+(?:\.\d+)?)\s*([kKmMgGtT]?[bB]?)?\s*$')
_FACTORS = {
    '': 1, 'b': 1,
    'k': 1024, 'kb': 1024,
    'm': 1024**2, 'mb': 1024**2,
    'g': 1024**3, 'gb': 1024**3,
    't': 1024**4, 'tb': 1024**4,
}

# ------------------------------------------------------
# Helpers: unit conversion
# ------------------------------------------------------

def _to_bytes(v: str) -> int:
    """Convert a human-readable size string (e.g., '2g', '512m') to bytes."""
    if v is None:
        raise AnsibleFilterError("memory_filters: size value is None")

    s = str(v).strip()
    m = _UNIT_RE.match(s)
    if not m:
        raise AnsibleFilterError(f"memory_filters: invalid size '{v}'")

    num, unit = m.group(1), (m.group(2) or '').lower()

    try:
        val = float(num)
    except ValueError as e:
        raise AnsibleFilterError(f"memory_filters: invalid numeric size '{v}'") from e

    factor = _FACTORS.get(unit)
    if factor is None:
        raise AnsibleFilterError(f"memory_filters: unknown unit in '{v}'")

    return int(val * factor)


def _to_mb(v: str) -> int:
    """Convert human-readable size to megabytes."""
    return max(0, _to_bytes(v) // (1024 * 1024))


# ------------------------------------------------------
# JVM-specific helpers
# ------------------------------------------------------

def _svc(app_id: str) -> str:
    """Resolve the internal service name for JVM-based applications."""
    return get_entity_name(app_id)


def _mem_limit_mb(apps: dict, app_id: str) -> int:
    """Resolve mem_limit for the JVM service of the given application."""
    svc = _svc(app_id)
    raw = get_app_conf(apps, app_id, f"docker.services.{svc}.mem_limit")
    mb = _to_mb(raw)

    if mb <= 0:
        raise AnsibleFilterError(
            f"memory_filters: mem_limit for '{svc}' must be > 0 MB (got '{raw}')"
        )
    return mb


def _mem_res_mb(apps: dict, app_id: str) -> int:
    """Resolve mem_reservation for the JVM service of the given application."""
    svc = _svc(app_id)
    raw = get_app_conf(apps, app_id, f"docker.services.{svc}.mem_reservation")
    mb = _to_mb(raw)

    if mb <= 0:
        raise AnsibleFilterError(
            f"memory_filters: mem_reservation for '{svc}' must be > 0 MB (got '{raw}')"
        )
    return mb


def jvm_max_mb(apps: dict, app_id: str) -> int:
    """
    Compute recommended JVM Xmx in MB using:
        Xmx = min(
            floor(0.7 * mem_limit),
            mem_limit - 1024,
            12288
        )
    with a lower bound of 1024 MB.
    """
    limit_mb = _mem_limit_mb(apps, app_id)
    c1 = (limit_mb * 7) // 10
    c2 = max(0, limit_mb - 1024)
    c3 = 12288

    return max(1024, min(c1, c2, c3))


def jvm_min_mb(apps: dict, app_id: str) -> int:
    """
    Compute recommended JVM Xms in MB using:
        Xms = min(
            floor(Xmx / 2),
            mem_reservation,
            Xmx
        )
    with a lower bound of 512 MB.
    """
    xmx = jvm_max_mb(apps, app_id)
    res = _mem_res_mb(apps, app_id)

    return max(512, min(xmx // 2, res, xmx))


# ------------------------------------------------------
# Redis-specific helpers (always service name "redis")
# ------------------------------------------------------

def _redis_mem_limit_mb(apps: dict, app_id: str, default_mb: int = 256) -> int:
    """
    Resolve mem_limit for the Redis service of an application.
    Unlike JVM-based services, Redis always uses the service name "redis".

    If no mem_limit is defined, fall back to default_mb.
    """
    raw = get_app_conf(
        apps,
        app_id,
        "docker.services.redis.mem_limit",
        strict=False,
        default=f"{default_mb}m",
    )

    mb = _to_mb(raw)

    if mb <= 0:
        raise AnsibleFilterError(
            f"memory_filters: mem_limit for 'redis' must be > 0 MB (got '{raw}')"
        )

    return mb


def redis_maxmemory_mb(
    apps: dict,
    app_id: str,
    factor: float = 0.8,
    min_mb: int = 64
) -> int:
    """
    Compute recommended Redis `maxmemory` in MB.

    * factor: fraction of allowed memory used for Redis data (default 0.8)
    * min_mb: minimum floor value (default 64 MB)

    maxmemory = max(min_mb, floor(factor * mem_limit))
    """
    limit_mb = _redis_mem_limit_mb(apps, app_id)
    return max(min_mb, int(limit_mb * factor))


# ------------------------------------------------------
# Filter module
# ------------------------------------------------------

class FilterModule(object):
    def filters(self):
        return {
            "jvm_max_mb": jvm_max_mb,
            "jvm_min_mb": jvm_min_mb,
            "redis_maxmemory_mb": redis_maxmemory_mb,
        }
```
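Since the sizing rules are pure integer arithmetic, the effect of a given `mem_limit` is easy to check by hand. A sketch with assumed limits of 4g / 2g reservation (hypothetical values, not taken from the repository config):

```python
# Xmx = max(1024, min(floor(0.7 * limit), limit - 1024, 12288))
limit_mb = 4096                       # mem_limit: "4g"
c1 = (limit_mb * 7) // 10             # 2867
c2 = max(0, limit_mb - 1024)          # 3072
xmx = max(1024, min(c1, c2, 12288))   # -> 2867 MB

# Xms = max(512, min(floor(Xmx / 2), reservation, Xmx))
res_mb = 2048                         # mem_reservation: "2g"
xms = max(512, min(xmx // 2, res_mb, xmx))  # -> 1433 MB
print(xmx, xms)  # 2867 1433
```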
filter_plugins/node_autosize.py (new file, 141 lines):

```python
# filter_plugins/node_autosize.py
# Reuse app config to derive sensible Node.js heap sizes for containers.
#
# Usage example (Jinja):
#   {{ applications | node_max_old_space_size('web-app-nextcloud', 'whiteboard') }}
#
# Heuristics (defaults):
#   - candidate = 35% of mem_limit
#   - min       = 768 MB (required minimum)
#   - cap       = min(3072 MB, 60% of mem_limit)
#
# NEW: If mem_limit (container cgroup RAM) is smaller than min_mb, we raise an
# exception — to prevent a misconfiguration where Node's heap could exceed the cgroup
# and be OOM-killed.

from __future__ import annotations
import re
from ansible.errors import AnsibleFilterError

# Import the shared config resolver from module_utils
try:
    from module_utils.config_utils import get_app_conf, AppConfigKeyError
except Exception as e:
    raise AnsibleFilterError(
        f"Failed to import get_app_conf from module_utils.config_utils: {e}"
    )

_SIZE_RE = re.compile(r"^\s*(\d+(?:\.\d+)?)\s*([kmgtp]?i?b?)?\s*$", re.IGNORECASE)
_MULT = {
    "": 1,
    "b": 1,
    "k": 10**3, "kb": 10**3,
    "m": 10**6, "mb": 10**6,
    "g": 10**9, "gb": 10**9,
    "t": 10**12, "tb": 10**12,
    "p": 10**15, "pb": 10**15,
    "kib": 1024,
    "mib": 1024**2,
    "gib": 1024**3,
    "tib": 1024**4,
    "pib": 1024**5,
}


def _to_bytes(val):
    """Convert numeric or string memory limits (e.g. '512m', '2GiB') to bytes."""
    if val is None or val == "":
        return None
    if isinstance(val, (int, float)):
        return int(val)
    if not isinstance(val, str):
        raise AnsibleFilterError(f"Unsupported mem_limit type: {type(val).__name__}")
    m = _SIZE_RE.match(val)
    if not m:
        raise AnsibleFilterError(f"Unrecognized mem_limit string: {val!r}")
    num = float(m.group(1))
    unit = (m.group(2) or "").lower()
    if unit not in _MULT:
        raise AnsibleFilterError(f"Unknown unit in mem_limit: {unit!r}")
    return int(num * _MULT[unit])


def _mb(bytes_val: int) -> int:
    """Return decimal MB (10^6) as integer — Node expects MB units."""
    return int(round(bytes_val / 10**6))


def _compute_old_space_mb(
    total_mb: int, pct: float, min_mb: int, hardcap_mb: int, safety_cap_pct: float
) -> int:
    """
    Compute Node.js old-space heap (MB) with safe minimum and cap handling.

    NOTE: The calling function ensures total_mb >= min_mb; here we only
    apply the sizing heuristics and caps.
    """
    candidate = int(total_mb * float(pct))
    safety_cap = int(total_mb * float(safety_cap_pct))
    final_cap = min(int(hardcap_mb), safety_cap)

    # Enforce minimum first; only apply cap if it's above the minimum
    candidate = max(candidate, int(min_mb))
    if final_cap >= int(min_mb):
        candidate = min(candidate, final_cap)

    # Never below a tiny hard floor
    return max(candidate, 128)


def node_max_old_space_size(
    applications: dict,
    application_id: str,
    service_name: str,
    pct: float = 0.35,
    min_mb: int = 768,
    hardcap_mb: int = 3072,
    safety_cap_pct: float = 0.60,
) -> int:
    """
    Derive Node.js --max-old-space-size (MB) from the service's mem_limit in app config.

    Looks up: docker.services.<service_name>.mem_limit for the given application_id.

    Raises:
        AnsibleFilterError if mem_limit is missing/invalid OR if mem_limit (MB) < min_mb.
    """
    try:
        mem_limit = get_app_conf(
            applications=applications,
            application_id=application_id,
            config_path=f"docker.services.{service_name}.mem_limit",
            strict=True,
            default=None,
        )
    except AppConfigKeyError as e:
        raise AnsibleFilterError(str(e))

    if mem_limit in (None, False, ""):
        raise AnsibleFilterError(
            f"mem_limit not set for application '{application_id}', service '{service_name}'"
        )

    total_bytes = _to_bytes(mem_limit)
    total_mb = _mb(total_bytes)

    # NEW: guardrail — refuse to size a heap larger than the cgroup limit
    if total_mb < int(min_mb):
        raise AnsibleFilterError(
            f"mem_limit ({total_mb} MB) is below the required minimum heap ({int(min_mb)} MB) "
            f"for application '{application_id}', service '{service_name}'. "
            f"Increase mem_limit or lower min_mb."
        )

    return _compute_old_space_mb(total_mb, pct, min_mb, hardcap_mb, safety_cap_pct)


class FilterModule(object):
    def filters(self):
        return {
            "node_max_old_space_size": node_max_old_space_size,
        }
```
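The guardrail's boundary is easy to trace numerically. A sketch with an assumed 2g limit (hypothetical value), following `_compute_old_space_mb` step by step:

```python
# Defaults: pct=0.35, min_mb=768, hardcap_mb=3072, safety_cap_pct=0.60
total_mb = 2000                              # mem_limit: "2g" in decimal MB, as _mb() returns
candidate = int(total_mb * 0.35)             # 700
final_cap = min(3072, int(total_mb * 0.60))  # min(3072, 1200) = 1200
candidate = max(candidate, 768)              # 768 (the minimum wins over the 35% candidate)
if final_cap >= 768:
    candidate = min(candidate, final_cap)    # still 768
print(max(candidate, 128))                   # 768

# A "700m" limit would instead raise: 700 MB < min_mb (768), so
# node_max_old_space_size refuses to size a heap above the cgroup limit.
```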
```diff
@@ -9,6 +9,7 @@ SYS_SERVICE_CLEANUP_BACKUPS:
 SYS_SERVICE_CLEANUP_BACKUPS_FAILED:     "{{ 'sys-ctl-cln-faild-bkps'   | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_CLEANUP_ANONYMOUS_VOLUMES:  "{{ 'sys-ctl-cln-anon-volumes' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_CLEANUP_DISC_SPACE:         "{{ 'sys-ctl-cln-disc-space'   | get_service_name(SOFTWARE_NAME) }}"
+SYS_SERVICE_CLEANUP_DOCKER:             "{{ 'sys-ctl-cln-docker'       | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_OPTIMIZE_DRIVE:             "{{ 'svc-opt-ssd-hdd'          | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_BACKUP_RMT_2_LOC:           "{{ 'svc-bkp-rmt-2-loc'        | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_BACKUP_DOCKER_2_LOC:        "{{ 'sys-ctl-bkp-docker-2-loc' | get_service_name(SOFTWARE_NAME) }}"
```
```diff
@@ -32,7 +32,8 @@ SYS_SCHEDULE_HEALTH_MSMTP:               "*-*-* 00:00:00"
 SYS_SCHEDULE_CLEANUP_CERTS:              "*-*-* 20:00"  # Deletes and revokes unused certs once per day
 SYS_SCHEDULE_CLEANUP_FAILED_BACKUPS:     "*-*-* 21:00"  # Clean up failed docker backups once per day
 SYS_SCHEDULE_CLEANUP_BACKUPS:            "*-*-* 22:00"  # Cleanup backups once per day, MUST be called before disc space cleanup
-SYS_SCHEDULE_CLEANUP_DISC_SPACE:         "*-*-* 23:00"  # Cleanup disc space once per day
+SYS_SCHEDULE_CLEANUP_DOCKER:             "*-*-* 23:00"  # Cleanup docker anonymous volumes and prune once per day
+SYS_SCHEDULE_CLEANUP_DISC_SPACE:         "*-*-* 23:30"  # Cleanup disc space once per day
 
 ### Schedule for repair services
 SYS_SCHEDULE_REPAIR_BTRFS_AUTO_BALANCER: "Sat *-*-01..07 00:00:00"  # Execute btrfs auto balancer every first Saturday of a month
```
```diff
@@ -112,6 +112,14 @@ defaults_networks:
       subnet: 192.168.104.32/28
     web-svc-coturn:
       subnet: 192.168.104.48/28
+    web-app-mini-qr:
+      subnet: 192.168.104.64/28
+    web-app-shopware:
+      subnet: 192.168.104.80/28
+    web-svc-onlyoffice:
+      subnet: 192.168.104.96/28
+    web-app-suitecrm:
+      subnet: 192.168.104.112/28
 
     # /24 Networks / 254 Usable Clients
     web-app-bigbluebutton:
```
```diff
@@ -18,6 +18,7 @@ ports:
     web-app-fusiondirectory: 4187
     web-app-gitea: 4188
     web-app-snipe-it: 4189
+    web-app-suitecrm: 4190
   ldap:
     svc-db-openldap: 389
   http:
```
```diff
@@ -80,6 +81,10 @@ ports:
     web-app-flowise: 8056
     web-app-minio_api: 8057
     web-app-minio_console: 8058
+    web-app-mini-qr: 8059
+    web-app-shopware: 8060
+    web-svc-onlyoffice: 8061
+    web-app-suitecrm: 8062
     web-app-bigbluebutton: 48087  # This port is predefined by bbb. @todo Try to change this to a 8XXX port
   public:
     # The following ports should be changed to 22 on the subdomain via stream mapping
```
```diff
@@ -87,7 +87,7 @@ LDAP:
   ID:             "{{ _ldap_user_id }}"
   MAIL:           "mail"
   FULLNAME:       "cn"
-  FIRSTNAME:      "givenname"
+  FIRSTNAME:      "givenName"
   SURNAME:        "sn"
   SSH_PUBLIC_KEY: "sshPublicKey"
   NEXTCLOUD_QUOTA: "nextcloudQuota"
```
```diff
@@ -4,5 +4,6 @@ collections:
   - name: hetzner.hcloud
 yay:
   - python-simpleaudio
+  - python-numpy
 pacman:
   - ansible
```
```diff
@@ -14,8 +14,10 @@
 
 - name: "create {{ PATH_DOCKER_COMPOSE_INSTANCES }}"
   file:
-   path: "{{ PATH_DOCKER_COMPOSE_INSTANCES }}"
-   state: directory
-   mode: 0700
-   owner: root
-   group: root
+    path: "{{ PATH_DOCKER_COMPOSE_INSTANCES }}"
+    state: directory
+    mode: 0700
+    owner: root
+    group: root
+
+- include_tasks: utils/run_once.yml
```
```diff
@@ -1,6 +1,4 @@
-- block:
-    - include_tasks: 01_core.yml
-    - include_tasks: utils/run_once.yml
+- include_tasks: 01_core.yml
   when: run_once_docker_compose is not defined
 
 - name: "Load variables from {{ DOCKER_COMPOSE_VARIABLE_FILE }} for whole play"
```
```diff
@@ -1,2 +1,2 @@
-DOCKER_COMPOSE_VARIABLE_FILE:    "{{ role_path }}/vars/docker-compose.yml"
+DOCKER_COMPOSE_VARIABLE_FILE:    "{{ [ role_path, 'vars/docker-compose.yml' ] | path_join }}"
 DOCKER_COMPOSE_DOWN_ALL_PACKAGE: "docodol"
```
roles/docker-container/templates/healthcheck/http.yml.j2 (Normal file, 31 lines)
@@ -0,0 +1,31 @@
{# ------------------------------------------------------------------------------
   Healthcheck: HTTP Local
   ------------------------------------------------------------------------------
   This template defines a generic HTTP healthcheck for containers exposing
   a web service on a local port (e.g., Nginx, Apache, PHP-FPM, Shopware, etc.).

   It uses `wget` or `curl` (as fallback) to test if the container responds on
   http://127.0.0.1:{{ container_port }}/. If the request succeeds, Docker marks
   the container as "healthy"; otherwise, as "unhealthy".

   Parameters:
   - container_port: The internal port the service listens on.

   Timing:
   - interval: 30s → Check every 30 seconds
   - timeout: 5s → Each check must complete within 5 seconds
   - retries: 5 → Mark unhealthy after 5 consecutive failures
   - start_period: 20s → Grace period before health checks begin

   Usage:
   {% filter indent(4) %}
   {% include 'roles/docker-container/templates/healthcheck/http.yml.j2' %}
   {% endfilter %}
   ------------------------------------------------------------------------------
#}
healthcheck:
  test: ["CMD-SHELL", "wget -qO- http://127.0.0.1:{{ container_port }}/ >/dev/null || curl -fsS http://127.0.0.1:{{ container_port }}/ >/dev/null"]
  interval: 30s
  timeout: 5s
  retries: 5
  start_period: 20s

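Note: a minimal manual run of the same probe, assuming a service listening on local port 8080 (the port is an example, not part of the template):

    wget -qO- http://127.0.0.1:8080/ >/dev/null || curl -fsS http://127.0.0.1:8080/ >/dev/null
    echo $?   # 0 is what Docker counts as "healthy"
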
@@ -6,7 +6,7 @@ entity_name: "{{ application_id | get_entity_name }
docker_compose_flush_handlers: true

# Docker Compose
database_type: "{{ application_id | get_entity_name }}"
database_type: "{{ entity_name }}"

## Postgres
POSTGRES_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.data') }}"

@@ -9,6 +9,16 @@
      driver: journald
    volumes:
      - redis:/data
    # Just save in memory and prevent huge redis_volumes
    command:
      - redis-server
      - --appendonly
      - "no"
      - --save
      - ""
      - --maxmemory {{ applications | redis_maxmemory_mb(application_id, 0.8, RESOURCE_MEM_LIMIT | int ) }}mb
      - --maxmemory-policy
      - "allkeys-lru"
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 1s
@@ -16,5 +26,12 @@
      retries: 30
    networks:
      - default
{{ lookup('template', 'roles/docker-container/templates/resource.yml.j2',vars={'service_name':'redis'}) | indent(4) }}
{% macro include_resource_for(svc, indent=4) -%}
{% set service_name = svc -%}
{%- set _snippet -%}
{% include 'roles/docker-container/templates/resource.yml.j2' %}
{%- endset -%}
{{ _snippet | indent(indent, true) }}
{%- endmacro %}
{{ include_resource_for('redis') }}
{{ "\n" }}

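Note: redis_maxmemory_mb is a project-specific filter; assuming the 0.8 argument simply takes that fraction of the memory limit, the arithmetic for a 1024 MB limit would be:

    echo "$((1024 * 80 / 100))mb"   # -> 819mb handed to --maxmemory (assumed semantics of the 0.8 factor)
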
@@ -21,6 +21,7 @@
    system_service_timer_enabled: true
    system_service_force_linear_sync: true
    system_service_force_flush: "{{ MODE_BACKUP | bool }}"
    system_service_suppress_flush: "{{ not MODE_BACKUP | bool }}"
    system_service_on_calendar: "{{ SYS_SCHEDULE_BACKUP_DOCKER_TO_LOCAL }}"
    system_service_tpl_exec_start_pre: '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(" ") }} --ignore {{ SYS_SERVICE_BACKUP_DOCKER_2_LOC }} --timeout "{{ SYS_TIMEOUT_BACKUP_SERVICES }}"'
    system_service_tpl_exec_start: "/bin/sh -c '{{ BKP_DOCKER_2_LOC_EXEC }}'"

@@ -28,8 +28,8 @@ if [ "$force_freeing" = true ]; then
{% endif %}

if command -v docker >/dev/null 2>&1 ; then
  echo "cleaning up docker" &&
  docker system prune -f || exit 3
  echo "cleaning up docker (prune + anonymous volumes) via systemd service" &&
  systemctl start {{ SYS_SERVICE_CLEANUP_DOCKER }} || exit 3

  nextcloud_application_container="{{ applications | get_app_conf('web-app-nextcloud', 'docker.services.nextcloud.name') }}"
  if [ -n "$nextcloud_application_container" ] && [ "$(docker ps -a -q -f name=$nextcloud_application_container)" ] ; then

roles/sys-ctl-cln-docker/README.md (Normal file, 47 lines)
@@ -0,0 +1,47 @@
# Cleanup Docker Resources

## Description

This role performs a complete cleanup of Docker resources by invoking a systemd-managed script.
It removes unused Docker images, stopped containers, networks, build cache, and anonymous volumes.
The cleanup is fully automated and can run on a schedule or be triggered manually.

## Overview

Optimized for maintaining a clean and efficient Docker environment, this role:

* Loads and triggers the anonymous volume cleanup role.
* Installs a systemd service and timer for Docker pruning.
* Deploys a cleanup script that invokes:

  * The anonymous volume cleanup service.
  * `docker system prune -a -f` to remove unused Docker resources.
* Allows forced execution during maintenance runs (`MODE_CLEANUP`).

## Purpose

The primary purpose of this role is to prevent storage bloat caused by unused Docker images, volumes, and build artifacts.
Regular pruning ensures:

* Reduced disk usage
* Improved system performance
* Faster CI/CD and container deployments
* More predictable Docker engine behavior

## Features

* **Anonymous Volume Cleanup:** Integrates with `sys-ctl-cln-anon-volumes` to remove stale volumes.
* **Full Docker Prune:** Executes `docker system prune -a -f` to reclaim space.
* **Systemd Integration:** Registers a systemd unit and timer for automated cleanup.
* **Scheduled Execution:** Runs daily (or as configured) based on `SYS_SCHEDULE_CLEANUP_DOCKER`.
* **Force Execution Mode:** When `MODE_CLEANUP=true`, cleanup is executed immediately.
* **Safe Execution:** Includes validation for missing services and Docker availability.

## Script Behavior

The cleanup script:

1. Checks whether the anonymous volume cleanup service is defined and available.
2. Starts the service if present.
3. Runs `docker system prune -a -f` if Docker is installed.
4. Stops execution immediately on errors (`set -e` behavior).

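A manual trigger would look roughly like this; the unit name is an assumption derived from system_service_id (sys-ctl-cln-docker), not something the README itself states:

    systemctl start sys-ctl-cln-docker.service
    journalctl -u sys-ctl-cln-docker.service -n 20
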
roles/sys-ctl-cln-docker/meta/main.yml (Normal file, 27 lines)
@@ -0,0 +1,27 @@
---
galaxy_info:
  author: "Kevin Veen-Birkenbach"
  description: >
    Cleans up anonymous Docker volumes and performs a full `docker system prune -a -f`
    via a dedicated systemd service.
  license: "Infinito.Nexus NonCommercial License"
  license_url: "https://s.infinito.nexus/license"
  company: |
    Kevin Veen-Birkenbach
    Consulting & Coaching Solutions
    https://www.veen.world
  min_ansible_version: "2.9"
  platforms:
    - name: Linux
      versions:
        - all
  galaxy_tags:
    - docker
    - cleanup
    - prune
    - automation
    - maintenance
    - system
repository: "https://s.infinito.nexus/code"
issue_tracker_url: "https://s.infinito.nexus/issues"
documentation: "https://docs.infinito.nexus"

roles/sys-ctl-cln-docker/tasks/main.yml (Normal file, 23 lines)
@@ -0,0 +1,23 @@
- block:
    - name: Load role to delete anonymous volumes
      include_role:
        name: sys-ctl-cln-anon-volumes
      vars:
        system_service_force_flush: true
      when: run_once_sys_ctl_cln_anon_volumes is not defined

    - name: "Register Docker prune system service"
      include_role:
        name: sys-service
      vars:
        system_service_timer_enabled: true
        system_service_on_calendar: "{{ SYS_SCHEDULE_CLEANUP_DOCKER }}"
        system_service_copy_files: true
        system_service_tpl_exec_start: "{{ system_service_script_exec }}"
        system_service_tpl_exec_start_pre: ""
        system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
        system_service_force_linear_sync: false
        system_service_force_flush: "{{ MODE_CLEANUP }}"

    - include_tasks: utils/run_once.yml
  when: run_once_sys_ctl_cln_docker is not defined

roles/sys-ctl-cln-docker/templates/script.sh.j2 (Normal file, 10 lines)
@@ -0,0 +1,10 @@
#!/bin/sh
# Cleans up anonymous Docker volumes and performs a full Docker system prune.

set -e

echo "Cleaning up anonymous Docker volumes via systemd service..."
systemctl start {{ SYS_SERVICE_CLEANUP_ANONYMOUS_VOLUMES }} || exit 1
echo "Pruning Docker system resources (images, containers, networks, build cache)..."
docker system prune -a -f || exit 2
echo "Docker prune cleanup finished."

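The two distinct exit codes make failures easy to tell apart from a caller; a sketch, assuming the rendered script is installed as script.sh:

    sh script.sh; rc=$?
    [ "$rc" -eq 1 ] && echo "anonymous volume cleanup service failed"
    [ "$rc" -eq 2 ] && echo "docker system prune failed"
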
roles/sys-ctl-cln-docker/vars/main.yml (Normal file, 1 line)
@@ -0,0 +1 @@
system_service_id: "sys-ctl-cln-docker"

roles/sys-ctl-hlth-disc-space/files/script.py (Normal file, 58 lines)
@@ -0,0 +1,58 @@
#!/usr/bin/env python3
import argparse
import subprocess
import sys

def get_disk_usage_percentages():
    """
    Returns a list of filesystem usage percentages as integers.
    Equivalent to: df --output=pcent | sed 1d | tr -d '%'
    """
    result = subprocess.run(
        ["df", "--output=pcent"],
        capture_output=True,
        text=True,
        check=True
    )

    lines = result.stdout.strip().split("\n")[1:]  # Skip header
    percentages = []

    for line in lines:
        value = line.strip().replace("%", "")
        if value.isdigit():
            percentages.append(int(value))

    return percentages


def main():
    parser = argparse.ArgumentParser(
        description="Check disk usage and report if any filesystem exceeds the given threshold."
    )

    parser.add_argument(
        "minimum_percent_cleanup_disk_space",
        type=int,
        help="Minimum free disk space percentage threshold that triggers a warning."
    )

    args = parser.parse_args()
    threshold = args.minimum_percent_cleanup_disk_space

    print("Checking disk space usage...")
    subprocess.run(["df"])  # Show the same df output as the original script

    errors = 0
    percentages = get_disk_usage_percentages()

    for usage in percentages:
        if usage > threshold:
            print(f"WARNING: {usage}% exceeds the limit of {threshold}%.")
            errors += 1

    sys.exit(errors)


if __name__ == "__main__":
    main()

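Usage: the exit status is the number of filesystems over the threshold, so a caller can both alert and count:

    python3 script.py 90; echo "filesystems over 90%: $?"
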
@@ -1,15 +0,0 @@
#!/bin/sh
# @param $1 mimimum free disc space
errors=0
minimum_percent_cleanup_disc_space="$1"
echo "checking disc space use..."
df
for disc_use_percent in $(df --output=pcent | sed 1d)
do
  disc_use_percent_number=$(echo "$disc_use_percent" | sed "s/%//")
  if [ "$disc_use_percent_number" -gt "$minimum_percent_cleanup_disc_space" ]; then
    echo "WARNING: $disc_use_percent_number exceeds the limit of $minimum_percent_cleanup_disc_space%."
    errors+=1;
  fi
done
exit $errors;

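Worth noting why the Python rewrite above is more than a style change: `errors+=1` is not POSIX sh arithmetic, so the removed script never counted failures correctly. A quick demonstration (bash shown; in dash the line is simply an invalid command):

    errors=0; errors+=1; echo "$errors"   # prints "01" (string append), not 1
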
@@ -8,4 +8,5 @@
  vars:
    system_service_on_calendar: "{{ SYS_SCHEDULE_HEALTH_DISC_SPACE }}"
    system_service_timer_enabled: true
    system_service_tpl_exec_start: "{{ system_service_script_exec }} {{ SIZE_PERCENT_CLEANUP_DISC_SPACE }}"
    system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }} {{ SYS_SERVICE_CLEANUP_DISC_SPACE }}"

@@ -10,17 +10,6 @@

lua_need_request_body on;

header_filter_by_lua_block {
    local ct = ngx.header.content_type or ""
    if ct:lower():find("^text/html") then
        ngx.ctx.is_html = true
        -- IMPORTANT: body will be modified → drop Content-Length to avoid mismatches
        ngx.header.content_length = nil
    else
        ngx.ctx.is_html = false
    end
}

body_filter_by_lua_block {
    -- Only process HTML responses
    if not ngx.ctx.is_html then

@@ -1,10 +1,24 @@
- name: Generate color palette with colorscheme-generator
  set_fact:
    color_palette: "{{ lookup('colorscheme', CSS_BASE_COLOR, count=CSS_COUNT, shades=CSS_SHADES) }}"
    CSS_COLOR_PALETTE: "{{ lookup('colorscheme', CSS_BASE_COLOR, count=CSS_COUNT, shades=CSS_SHADES) }}"

- name: Generate inverted color palette with colorscheme-generator
  set_fact:
    inverted_color_palette: "{{ lookup('colorscheme', CSS_BASE_COLOR, count=CSS_COUNT, shades=CSS_SHADES, invert_lightness=True) }}"
    CSS_COLOR_PALETTE_INVERTED: "{{ lookup('colorscheme', CSS_BASE_COLOR, count=CSS_COUNT, shades=CSS_SHADES, invert_lightness=True) }}"

- name: "Compute deterministic gradient angle from default.css template mtime"
  set_fact:
    CSS_GRADIENT_ANGLE: >-
      {{
        (
          lookup(
            'local_mtime_qs',
            [playbook_dir, 'roles', 'sys-front-inj-css', 'templates', 'css', 'default.css.j2'] | path_join
          )
          | regex_replace('^.*=', '')
          | int
        ) % 360
      }}

- name: Deploy default CSS files
  template:

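The angle is the template's modification time taken modulo 360, so it only changes when the template itself does; a rough shell equivalent (assuming the custom local_mtime_qs lookup returns the mtime as a query-string value, which is an assumption about that plugin):

    mtime=$(stat -c %Y roles/sys-front-inj-css/templates/css/default.css.j2)
    echo $((mtime % 360))   # same value on every run until the file is touched
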
@@ -3,7 +3,7 @@
Now using a button background that is only slightly darker than the overall background */
html[native-dark-active] .btn, .btn {
    background-color: var(--color-01-87);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-70), var(--color-01-91), var(--color-01-95), var(--color-01-95));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-70), var(--color-01-91), var(--color-01-95), var(--color-01-95));
    color: var(--color-01-50);
    border-color: var(--color-01-80);
    cursor: pointer;
@@ -13,7 +13,7 @@ html[native-dark-active] .btn, .btn {
.navbar, .navbar-light, .navbar-dark, .navbar.bg-light {
    background-color: var(--color-01-90);
    /* New Gradient based on original background (90 -5, 90, 90 +1, 90 +5) */
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-85), var(--color-01-90), var(--color-01-91), var(--color-01-95));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-85), var(--color-01-90), var(--color-01-91), var(--color-01-95));
    color: var(--color-01-50);
    border-color: var(--color-01-85);
}
@@ -31,7 +31,7 @@ html[native-dark-active] .btn, .btn {
.card {
    background-color: var(--color-01-90);
    /* New Gradient based on original background (90 -5, 90, 90 +1, 90 +5) */
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-85), var(--color-01-90), var(--color-01-91), var(--color-01-95));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-85), var(--color-01-90), var(--color-01-91), var(--color-01-95));
    border-color: var(--color-01-85);
    color: var(--color-01-12);
}
@@ -45,7 +45,7 @@ html[native-dark-active] .btn, .btn {
.nav-item .dropdown-menu {
    background-color: var(--color-01-80);
    /* New Gradient based on original background (80 -5, 80, 80 +1, 80 +5) */
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-75), var(--color-01-80), var(--color-01-81), var(--color-01-85));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-75), var(--color-01-80), var(--color-01-81), var(--color-01-85));
    color: var(--color-01-40);
}

@@ -57,13 +57,13 @@ html[native-dark-active] .btn, .btn {
    color: var(--color-01-40);
    background-color: var(--color-01-80);
    /* New Gradient based on original background (80 -5, 80, 80 +1, 80 +5) */
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-75), var(--color-01-80), var(--color-01-81), var(--color-01-85));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-75), var(--color-01-80), var(--color-01-81), var(--color-01-85));
}

.dropdown-item:hover,
.dropdown-item:focus {
    background-color: var(--color-01-65);
    /* New Gradient based on original background (65 -5, 65, 65 +1, 65 +5) */
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-60), var(--color-01-65), var(--color-01-66), var(--color-01-70));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-60), var(--color-01-65), var(--color-01-66), var(--color-01-70));
    color: var(--color-01-40);
}

@@ -15,14 +15,14 @@ HINT:
/* Auto-generated by colorscheme-generator */

:root {
{% for var_name, color in color_palette.items() %}
{% for var_name, color in CSS_COLOR_PALETTE.items() %}
    {{ var_name }}: {{ color }};
{% endfor %}
}

@media (prefers-color-scheme: dark) {
    :root {
{% for var_name, color in inverted_color_palette.items() %}
{% for var_name, color in CSS_COLOR_PALETTE_INVERTED.items() %}
        {{ var_name }}: {{ color }};
{% endfor %}
    }

@@ -102,7 +102,7 @@ HINT:
/* Global Defaults (Colors Only) */
body, html[native-dark-active] {
    background-color: var(--color-01-93);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-93), var(--color-01-91), var(--color-01-95), var(--color-01-93));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-93), var(--color-01-91), var(--color-01-95), var(--color-01-93));
    background-attachment: fixed;
    color: var(--color-01-40);
    font-family: {{design.font.type}};

@@ -147,7 +147,7 @@ input:invalid,
textarea:invalid,
select:invalid {
    background-color: var(--color-01-01);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-01), var(--color-01-10));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-01), var(--color-01-10));
    /* Use Bootstrap danger color for error messages */
    color: var(--bs-danger);
    border-color: var(--color-01-20);
@@ -158,7 +158,7 @@ input:valid,
textarea:valid,
select:valid {
    background-color: var(--color-01-80);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-80), var(--color-01-90));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-80), var(--color-01-90));
    /* Use Bootstrap success color for confirmation messages */
    color: var(--bs-success);
    border-color: var(--color-01-70);
@@ -169,7 +169,7 @@ input:required,
textarea:required,
select:required {
    background-color: var(--color-01-50);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-50), var(--color-01-60));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-50), var(--color-01-60));
    /* Use Bootstrap warning color to indicate a required field */
    color: var(--bs-warning);
    border-color: var(--color-01-70);
@@ -180,7 +180,7 @@ input:optional,
textarea:optional,
select:optional {
    background-color: var(--color-01-60);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-60), var(--color-01-70));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-60), var(--color-01-70));
    /* Use Bootstrap info color to indicate optional information */
    color: var(--bs-info);
    border-color: var(--color-01-70);
@@ -191,7 +191,7 @@ input:read-only,
textarea:read-only,
select:read-only {
    background-color: var(--color-01-80);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-90), var(--color-01-70));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-90), var(--color-01-70));
    color: var(--color-01-20);
    border-color: var(--color-01-50);
}
@@ -201,7 +201,7 @@ input:read-write,
textarea:read-write,
select:read-write {
    background-color: var(--color-01-70);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-70), var(--color-01-80));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-70), var(--color-01-80));
    color: var(--color-01-40);
    border-color: var(--color-01-70);
}
@@ -211,7 +211,7 @@ input:in-range,
textarea:in-range,
select:in-range {
    background-color: var(--color-01-70);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-70), var(--color-01-85));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-70), var(--color-01-85));
    color: var(--color-01-40);
    border-color: var(--color-01-70);
}
@@ -221,7 +221,7 @@ input:out-of-range,
textarea:out-of-range,
select:out-of-range {
    background-color: var(--color-01-10);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-10), var(--color-01-30));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-10), var(--color-01-30));
    color: var(--color-01-10);
    border-color: var(--color-01-50);
}
@@ -231,7 +231,7 @@ input:placeholder-shown,
textarea:placeholder-shown,
select:placeholder-shown {
    background-color: var(--color-01-82);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-82), var(--color-01-90));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-82), var(--color-01-90));
    color: var(--color-01-40);
    border-color: var(--color-01-70);
}
@@ -241,7 +241,7 @@ input:focus,
textarea:focus,
select:focus {
    background-color: var(--color-01-75);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-75), var(--color-01-85));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-75), var(--color-01-85));
    color: var(--color-01-40);
    border-color: var(--color-01-50);
}
@@ -251,7 +251,7 @@ input:hover,
textarea:hover,
select:hover {
    background-color: var(--color-01-78);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-78), var(--color-01-88));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-78), var(--color-01-88));
    color: var(--color-01-40);
    border-color: var(--color-01-65);
}
@@ -261,7 +261,7 @@ input:active,
textarea:active,
select:active {
    background-color: var(--color-01-68);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-68), var(--color-01-78));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-68), var(--color-01-78));
    color: var(--color-01-40);
    border-color: var(--color-01-60);
}
@@ -269,11 +269,18 @@ select:active {
/* {# Checked state: specifically for radio buttons and checkboxes when selected. #} */
input:checked {
    background-color: var(--color-01-90);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-90), var(--color-01-99));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-90), var(--color-01-99));
    color: var(--color-01-40);
    border-color: var(--color-01-70);
}

input[type="checkbox"] {
    appearance: auto;
    -webkit-appearance: auto;
    -moz-appearance: auto;
    background: none;
}

option {
    background-color: var(--color-01-82);
    color: var(--color-01-07);
@@ -287,7 +294,7 @@ th, td {
thead {
    background-color: var(--color-01-80);
    /* New Gradient based on original background (80 -5, 80, 80 +1, 80 +5) */
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-75), var(--color-01-80), var(--color-01-81), var(--color-01-85));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-75), var(--color-01-80), var(--color-01-81), var(--color-01-85));
    color: var(--color-01-40);
}

@@ -1,8 +1,8 @@
# Constants
CSS_FILES: ['default.css','bootstrap.css']
CSS_BASE_COLOR: "{{ design.css.colors.base }}"
CSS_COUNT: 7
CSS_SHADES: 100
CSS_FILES: ['default.css','bootstrap.css']
CSS_BASE_COLOR: "{{ design.css.colors.base }}"
CSS_COUNT: 7
CSS_SHADES: 100

# Variables
css_app_dst: "{{ [cdn_paths_all.role.release.css, 'style.css'] | path_join }}"
css_app_dst: "{{ [cdn_paths_all.role.release.css, 'style.css'] | path_join }}"

@@ -16,14 +16,11 @@
  include_tasks: "02_reset.yml"
  when: MODE_RESET | bool

- name: "Load cleanup tasks when MODE_CLEANUP or MODE_RESET is enabled"
  include_tasks: "03_cleanup.yml"
  when: MODE_CLEANUP | bool or MODE_RESET | bool

- name: Include backup, repair and health services for docker
  include_role:
    name: "{{ item }}"
  loop:
    - sys-ctl-cln-docker
    - sys-ctl-bkp-docker-2-loc
    - sys-ctl-hlth-docker-container
    - sys-ctl-hlth-docker-volumes

@@ -1,12 +0,0 @@
- block:
    - name: Load role to delete anonymous volumes
      include_role:
        name: sys-ctl-cln-anon-volumes
      vars:
        system_service_force_flush: true
    - include_tasks: utils/run_once.yml
  when: run_once_sys_ctl_cln_anon_volumes is not defined

- name: Prune Docker resources
  become: true
  ansible.builtin.command: docker system prune -f

@@ -1,3 +1,3 @@
ssl_certificate {{ [ LETSENCRYPT_LIVE_PATH, ssl_cert_folder, 'fullchain.pem'] | path_join }};
ssl_certificate_key {{ [ LETSENCRYPT_LIVE_PATH, ssl_cert_folder, 'privkey.pem' ] | path_join }};
ssl_trusted_certificate {{ [ LETSENCRYPT_LIVE_PATH, ssl_cert_folder, 'chain.pem' ] | path_join }};
ssl_certificate {{ [ LETSENCRYPT_LIVE_PATH | mandatory, ssl_cert_folder | mandatory, 'fullchain.pem'] | path_join }};
ssl_certificate_key {{ [ LETSENCRYPT_LIVE_PATH | mandatory, ssl_cert_folder | mandatory, 'privkey.pem' ] | path_join }};
ssl_trusted_certificate {{ [ LETSENCRYPT_LIVE_PATH | mandatory, ssl_cert_folder | mandatory, 'chain.pem' ] | path_join }};

@@ -1,2 +1,33 @@
add_header Content-Security-Policy "{{ applications | build_csp_header(application_id, domains) }}" always;
proxy_hide_header Content-Security-Policy; # Todo: Make this optional
# ===== Content Security Policy: only for documents and workers (no locations needed) =====

# 1) Define your CSP once (Jinja: escape double quotes to be safe)
set $csp "{{ applications | build_csp_header(application_id, domains) | replace('\"','\\\"') }}";

# 2) Send CSP ONLY for document responses; also for workers via Sec-Fetch-Dest
header_filter_by_lua_block {
    local ct = ngx.header.content_type or ngx.header["Content-Type"] or ""
    local dest = ngx.var.http_sec_fetch_dest or ""

    local lct = ct:lower()
    local is_html = lct:find("^text/html") or lct:find("^application/xhtml+xml")
    local is_worker = (dest == "worker") or (dest == "serviceworker")

    if is_html or is_worker then
        ngx.header["Content-Security-Policy"] = ngx.var.csp
    else
        ngx.header["Content-Security-Policy"] = nil
        ngx.header["Content-Security-Policy-Report-Only"] = nil
    end

    -- If you'll modify the body later, drop Content-Length on HTML
    if is_html then
        ngx.ctx.is_html = true
        ngx.header.content_length = nil
    else
        ngx.ctx.is_html = false
    end
}

# 3) Prevent upstream/app CSP (duplicates)
proxy_hide_header Content-Security-Policy;
proxy_hide_header Content-Security-Policy-Report-Only;

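A quick way to confirm the header is scoped as intended once deployed (example.org stands in for a real host):

    curl -sI https://example.org/ | grep -i '^content-security-policy'                 # HTML document: header expected
    curl -sI https://example.org/static/app.js | grep -ci '^content-security-policy'   # asset: expect 0
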
@@ -5,28 +5,32 @@ location {{location}}
{% include 'roles/web-app-oauth2-proxy/templates/following_directives.conf.j2'%}
{% endif %}

{% include 'roles/sys-svc-proxy/templates/headers/content_security_policy.conf.j2' %}

{% include 'roles/sys-svc-proxy/templates/headers/access_control_allow.conf.j2' %}

# Client Limits for HTML
client_max_body_size {{ client_max_body_size | default('100m') }};

{% set _loc = location|trim %}
proxy_pass http://127.0.0.1:{{ http_port }}{{ (_loc|regex_replace('^(?:=|\\^~)\\s*','')) if not (_loc is match('^(@|~)')) else '' }};

# headers
# Proxyconfiguration for Upload
proxy_set_header Host $host;
proxy_set_header Authorization $http_authorization;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Port {{ WEB_PORT }};
proxy_set_header X-Forwarded-Ssl on;
proxy_pass_request_headers on;

{% include 'roles/sys-svc-proxy/templates/headers/content_security_policy.conf.j2' %}

{% include 'roles/sys-svc-proxy/templates/headers/access_control_allow.conf.j2' %}

# WebSocket specific header
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";

# timeouts
# Timeouts
proxy_connect_timeout 5s;
proxy_send_timeout 900s;
proxy_read_timeout 900s;

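A hedged reading of the proxy_pass expression above, with 8080 as an assumed http_port: exact-match and prefix modifiers are stripped, so "location = /api" proxies to http://127.0.0.1:8080/api, while named or regex locations ("@fallback", "~ \.php$") get no URI part and proxy to http://127.0.0.1:8080 so nginx keeps the original request URI.
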
@@ -1,13 +1,17 @@
location {{ location_upload }} {

    # Proxyconfiguration for Upload
    proxy_pass http://127.0.0.1:{{ http_port }};
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
    client_max_body_size {{ client_max_body_size }};
    proxy_buffering off;
    proxy_request_buffering off;
    proxy_read_timeout 120s;
    proxy_connect_timeout 120s;
    proxy_send_timeout 120s;

    # Client Limits for Upload
    client_max_body_size {{ client_max_body_size }};
}

@@ -17,7 +17,7 @@ server
{% include 'roles/sys-svc-letsencrypt/templates/ssl_header.j2' %}

{% if applications | get_app_conf(application_id, 'features.oauth2', False) %}
{% set acl = applications | get_app_conf(application_id, 'oauth2_proxy.acl', False, {}) %}
{% set acl = applications | get_app_conf(application_id, 'docker.services.oauth2_proxy.acl', False, {}) %}

{% if acl.blacklist is defined %}
{# 1. Expose everything by default, then protect blacklisted paths #}

@@ -18,10 +18,10 @@ server:
    flags:
      script-src-elem:
        unsafe-inline: true
      script-src:
      script-src-attr:
        unsafe-inline: true
        unsafe-eval: true
      style-src:
      style-src-attr:
        unsafe-inline: true
    whitelist:
      font-src:

@@ -37,5 +37,5 @@ server:
    flags:
      script-src-elem:
        unsafe-inline: true
      style-src:
      style-src-attr:
        unsafe-inline: true
@@ -13,7 +13,7 @@ server:
    flags:
      script-src-elem:
        unsafe-inline: true
      style-src:
      style-src-attr:
        unsafe-inline: true
  domains:
    canonical:

@@ -27,7 +27,7 @@ server:
    flags:
      script-src-elem:
        unsafe-inline: true
      script-src:
      script-src-attr:
        unsafe-inline: true
  domains:
    canonical:

roles/web-app-bookwyrm/files/style.css (Normal file, 21 lines)
@@ -0,0 +1,21 @@
.title, .subtitle {
    color: var(--color-01-10);
}

.is-child {
    background-color: rgba( var(--color-rgb-01-80), 0.3 );
    color: var(--color-01-10);
}

.footer{
    background-color: rgba( var(--color-rgb-01-80), 0.3 );
    color: var(--color-01-90);
}

.has-background-primary-light{
    background-color: rgba( var(--color-rgb-01-80), 0.3 ) !important;
}

.has-background-secondary{
    background-color: rgba( var(--color-rgb-01-80), 0.3 ) !important;
}

@@ -29,7 +29,7 @@ server:
    flags:
      script-src-elem:
        unsafe-inline: true
      script-src:
      script-src-attr:
        unsafe-inline: true
  domains:
    canonical:

@@ -15,6 +15,8 @@ server:
        - https://code.jquery.com/
      style-src-elem:
        - https://cdn.jsdelivr.net
        - https://kit.fontawesome.com
        - https://code.jquery.com/
      font-src:
        - https://ka-f.fontawesome.com
        - https://cdn.jsdelivr.net
@@ -25,7 +27,7 @@ server:
      frame-src:
        - "{{ WEB_PROTOCOL }}://*.{{ PRIMARY_DOMAIN }}"
    flags:
      script-src:
      script-src-attr:
        unsafe-inline: true
  domains:
    canonical:

@@ -4,11 +4,13 @@ __metaclass__ = type
from ansible.plugins.lookup import LookupBase
from ansible.errors import AnsibleError


class LookupModule(LookupBase):
    def run(self, terms, variables=None, **kwargs):
        """
        Group the given cards into categorized and uncategorized lists
        based on the tags from menu_categories.
        Categories are sorted alphabetically before returning.
        """
        if len(terms) < 2:
            raise AnsibleError("Missing required arguments")
@@ -19,6 +21,7 @@ class LookupModule(LookupBase):
        categorized = {}
        uncategorized = []

        # Categorize cards
        for card in cards:
            found = False
            for category, data in menu_categories.items():
@@ -29,10 +32,14 @@ class LookupModule(LookupBase):
            if not found:
                uncategorized.append(card)

        # Sort categories alphabetically
        sorted_categorized = {
            k: categorized[k] for k in sorted(categorized.keys(), key=str.lower)
        }

        return [
            {
                'categorized': categorized,
                'categorized': sorted_categorized,
                'uncategorized': uncategorized,
            }
        ]

@@ -11,8 +11,8 @@ contact:
    description: Send {{ 'us' if service_provider.type == 'legal' else 'me' }} an email
    icon:
      class: fa-solid fa-envelope
    url: mailto:{{service_provider.contact.email}}
    identifier: {{service_provider.contact.email}}
    url: mailto:{{ service_provider.contact.email }}
    identifier: {{ service_provider.contact.email }}

{% endif %}
{% if service_provider.contact.phone is defined %}
@@ -32,6 +32,6 @@ contact:
    description: Chat with {{ 'us' if service_provider.type == 'legal' else 'me' }} on Matrix
    icon:
      class: fa-solid fa-cubes
    identifier: "{{service_provider.contact.matrix}}"
    identifier: "{{ service_provider.contact.matrix }}"

{% endif %}

@@ -25,7 +25,6 @@ portfolio_menu_categories:
      - ollama
      - openwebui
      - flowise
      - minio
      - qdrant
      - litellm

@@ -63,6 +62,8 @@ portfolio_menu_categories:
      - games
      - chess
      - boardgame
      - game
      - roulette

  Communication:
    description: "Tools for communication"
@@ -102,14 +103,12 @@ portfolio_menu_categories:
      - fusiondirectory
      - user-management

  Customer Relationship Management:
    description: "Tools for managing customer relationships, sales pipelines, marketing, and support activities."
  Customer Relationship:
    description: "Customer Relationship Management (CRM) software for managing customer relationships, sales pipelines, marketing, and support activities."
    icon: "fa-solid fa-address-book"
    tags:
      - crm
      - customer
      - relationship
      - sales
      - marketing
      - support
      - espocrm
@@ -222,7 +221,7 @@ portfolio_menu_categories:
      - snipe-it

  Content Management:
    description: "CMS and web publishing platforms"
    description: "Content Management Systems (CMS) and web publishing platforms"
    icon: "fa-solid fa-file-alt"
    tags:
      - cms
@@ -231,4 +230,27 @@ portfolio_menu_categories:
      - website
      - joomla
      - wordpress
      - blog
      - blog

  Commerce:
    description: "Platforms for building and managing online shops, product catalogs, and digital sales channels — including payment, inventory, and customer features."
    icon: "fa-solid fa-cart-shopping"
    tags:
      - commerce
      - ecommerce
      - shopware
      - shop
      - sales
      - store
      - magento
      - pretix

  Storage:
    description: "High-performance, self-hosted storage solutions for managing, scaling, and accessing unstructured data — including object storage compatible with Amazon S3 APIs."
    icon: "fa-solid fa-database"
    tags:
      - storage
      - object-storage
      - s3
      - minio
      - datasets

@@ -10,7 +10,7 @@ features:
server:
  csp:
    flags:
      style-src:
      style-src-attr:
        unsafe-inline: true
      script-src-elem:
        unsafe-inline: true

@@ -99,4 +99,43 @@
/* Float Kit */
--float-kit-arrow-stroke-color: var(--primary-low); /* already mapped above */
--float-kit-arrow-fill-color: var(--secondary); /* already mapped above */

--d-content-background: rgba( var(--color-rgb-01-83),1 );
}


.search-input--header{
    background-color: rgba( var(--color-rgb-01-83),0.2 ) !important;
    color: var(--color-01-01)
}

div#main-outlet, #list-area{
    background-color: rgba( var(--color-rgb-02-90),1 ) !important;
}

.list-controls{
    background-color: rgba( var(--color-rgb-01-90), 0.9) !important;
    border-radius: 30px;
}

.topic-list-item{
    background-color: rgba( var(--color-rgb-01-85), 0.4) !important;
}

.topic-list-item a{
    color: var(--color-01-10) !important;
}

div#main-outlet div.regular.ember-view{
    background-color: rgba( var(--color-rgb-01-85),0.3 );
    border-radius: 06px;
}

button.btn{
    background-color: rgba( var(--color-rgb-01-85),0.9 );
}

div.timeline-scrollarea div.timeline-scroller{
    background-color: rgba( var(--color-rgb-01-85),0.9 ) !important;
    border-radius: 06px;
}

@@ -1,5 +1,6 @@
features:
  logout: false # Just deactivated to suppress warnings, elk is anyhow not running
  matomo: true
server:
  domains:
    canonical:

@@ -12,9 +12,7 @@ server:
      script-src-elem:
        unsafe-inline: true
        unsafe-eval: true
      style-src:
        unsafe-inline: true
      script-src:
      script-src-attr:
        unsafe-eval: true
    whitelist:
      connect-src:

@@ -18,18 +18,19 @@ server:
    flags:
      script-src-elem:
        unsafe-inline: true
      script-src:
      script-src-attr:
        unsafe-inline: true
        unsafe-eval: true
      style-src:
      style-src-attr:
        unsafe-inline: true
oauth2_proxy:
  application: "application"
  port: "80"
addons:
  keycloakpassword: {}
  ldapauth: {}
docker:
  services:
    database:
      enabled: true
      enabled: true
    oauth2_proxy:
      origin:
        host: "application"
        port: "80"

@@ -7,10 +7,10 @@ docker_compose_flush_handlers: false

# Friendica
friendica_container: "friendica"
friendica_no_validation: "{{ applications | get_app_conf(application_id, 'features.oidc', True) }}" # Email validation is not necessary if OIDC is active
friendica_no_validation: "{{ applications | get_app_conf(application_id, 'features.oidc') }}" # Email validation is not necessary if OIDC is active
friendica_application_base: "/var/www/html"
friendica_docker_ldap_config: "{{ friendica_application_base }}/config/ldapauth.config.php"
friendica_host_ldap_config: "{{ docker_compose.directories.volumes }}ldapauth.config.php"
friendica_config_dir: "{{ friendica_application_base }}/config"
friendica_config_file: "{{ friendica_config_dir }}/local.config.php"
friendica_docker_ldap_config: "{{ [ friendica_application_base, 'config/ldapauth.config.php' ] | path_join }}"
friendica_host_ldap_config: "{{ [ docker_compose.directories.volumes, 'ldapauth.config.php' ] | path_join }}"
friendica_config_dir: "{{ [ friendica_application_base, 'config' ] | path_join }}"
friendica_config_file: "{{ [ friendica_config_dir, 'local.config.php' ] | path_join }}"
friendica_user: "www-data"

@@ -12,6 +12,13 @@ docker:
      enabled: true
    database:
      enabled: true
    oauth2_proxy:
      origin:
        host: "front"
        port: "80"
      acl:
        blacklist:
          - "/login"
features:
  matomo: true
  css: false
@@ -27,14 +34,8 @@ server:
    aliases: []
  csp:
    flags:
      style-src:
      style-src-attr:
        unsafe-inline: true
    whitelist:
      font-src:
        - "data:"
oauth2_proxy:
  application: "front"
  port: "80"
  acl:
    blacklist:
      - "/login"

@@ -13,18 +13,13 @@ features:
  oauth2: true
  oidc: false # Deactivated because users aren't auto-created.
  logout: true
oauth2_proxy:
  application: "application"
  port: "<< defaults_applications[web-app-gitea].docker.services.gitea.port >>"
  acl:
    blacklist:
      - "/user/login"

server:
  csp:
    flags:
      script-src-elem:
        unsafe-inline: true
      style-src:
      style-src-attr:
        unsafe-inline: true
    whitelist:
      font-src:
@@ -59,5 +54,12 @@ docker:
      mem_reservation: 0.2g
      mem_limit: 0.3g
      pids_limit: 512
    oauth2_proxy:
      origin:
        host: "application"
        port: "<< defaults_applications[web-app-gitea].docker.services.gitea.port >>"
      acl:
        blacklist:
          - "/user/login"
  volumes:
    data: "gitea_data"

@@ -27,3 +27,7 @@ server:
  domains:
    canonical:
      - lab.git.{{ PRIMARY_DOMAIN }}
  csp:
    flags:
      script-src-elem:
        unsafe-inline: true

@@ -1,5 +1,6 @@
features:
  logout: true # Same like with elk, anyhow not active atm
  matomo: true
server:
  domains:
    canonical:

@@ -29,7 +29,7 @@ server:
      script-src-elem:
        unsafe-inline: true
        unsafe-eval: true
      script-src:
      script-src-attr:
        unsafe-inline: true
        unsafe-eval: true
  domains:

@@ -14,7 +14,7 @@ server:
    aliases: []
  csp:
    flags:
      style-src:
      style-src-attr:
        unsafe-inline: true
      script-src-elem:
        unsafe-inline: true

@@ -28,5 +28,8 @@ docker:
      name: joomla
      backup:
        no_stop_required: true
      upload:
        # MB Integer for upload size
        size_mb: 100
  volumes:
    data: "joomla_data"

roles/web-app-joomla/tasks/01_config.yml (Normal file, 5 lines)
@@ -0,0 +1,5 @@
- name: "Render php-upload.ini for Joomla"
  template:
    src: php-upload.ini.j2
    dest: "{{ JOOMLA_UPLOAD_CONFIG }}"
    mode: "0644"

@@ -11,7 +11,7 @@
# (Optional) specifically wait for the CLI installer script
- name: "Check for CLI installer"
  command:
    argv: [ docker, exec, "{{ JOOMLA_CONTAINER }}", test, -f, /var/www/html/installation/joomla.php ]
    argv: [ docker, exec, "{{ JOOMLA_CONTAINER }}", test, -f, "{{ JOOMLA_INSTALLER_CLI_FILE }}" ]
  register: has_installer
  changed_when: false
  failed_when: false
@@ -30,9 +30,11 @@
    argv:
      - docker
      - exec
      - --user
      - "{{ JOOMLA_WEB_USER }}"
      - "{{ JOOMLA_CONTAINER }}"
      - php
      - /var/www/html/installation/joomla.php
      - "{{ JOOMLA_INSTALLER_CLI_FILE }}"
      - install
      - "--db-type={{ JOOMLA_DB_CONNECTOR }}"
      - "--db-host={{ database_host }}"

roles/web-app-joomla/tasks/06_reset_admin_password.yml (Normal file, 18 lines)
@@ -0,0 +1,18 @@
---
# Reset Joomla admin password via CLI (inside the container)
- name: "Reset Joomla admin password (non-interactive CLI)"
  command:
    argv:
      - docker
      - exec
      - "{{ JOOMLA_CONTAINER }}"
      - php
      - "{{ JOOMLA_CLI_FILE }}"
      - user:reset-password
      - "--username"
      - "{{ JOOMLA_USER_NAME }}"
      - "--password"
      - "{{ JOOMLA_USER_PASSWORD }}"
  register: j_password_reset
  no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"
  changed_when: j_password_reset.rc == 0

@@ -12,15 +12,25 @@
  include_role:
    name: sys-stk-back-stateful
  vars:
    docker_compose_flush_handlers: true
    docker_compose_flush_handlers: false

- name: Include PHP Config tasks
  include_tasks: 01_config.yml

- name: flush docker service
  meta: flush_handlers

- name: Include install routines
  include_tasks: "{{ item }}"
  loop:
    - 01_install.yml
    - 02_debug.yml
    - 03_patch.yml
    - 02_install.yml
    - 03_debug.yml
    - 04_patch.yml

- name: Include assert routines
  include_tasks: "04_assert.yml"
  include_tasks: "05_assert.yml"
  when: MODE_ASSERT | bool

- name: Reset Admin Password
  include_tasks: 06_reset_admin_password.yml

@@ -1,14 +1,12 @@
{% include 'roles/docker-compose/templates/base.yml.j2' %}
  application:
    build:
      context: {{ docker_compose.directories.instance }}
      dockerfile: Dockerfile
{{ lookup('template', 'roles/docker-container/templates/build.yml.j2') | indent(4) }}
    image: "{{ JOOMLA_CUSTOM_IMAGE }}"
    container_name: {{ JOOMLA_CONTAINER }}
    pull_policy: never
{% include 'roles/docker-container/templates/base.yml.j2' %}
    volumes:
      - data:/var/www/html
      - {{ JOOMLA_UPLOAD_CONFIG }}:/usr/local/etc/php/conf.d/uploads.ini:ro
    ports:
      - "127.0.0.1:{{ ports.localhost.http[application_id] }}:{{ container_port }}"
{% include 'roles/docker-container/templates/healthcheck/curl.yml.j2' %}

roles/web-app-joomla/templates/php-upload.ini.j2 (Normal file, 2 lines)
@@ -0,0 +1,2 @@
upload_max_filesize = {{ JOOMLA_UPLOAD_MAX_FILESIZE }}
post_max_size = {{ JOOMLA_POST_MAX_SIZE }}

@@ -2,6 +2,7 @@
application_id: "web-app-joomla"
database_type: "mariadb"
container_port: 80
client_max_body_size: "{{ JOOMLA_POST_MAX_SIZE }}"

# Joomla
JOOMLA_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.joomla.version') }}"
@@ -13,9 +14,18 @@ JOOMLA_DOMAINS: "{{ applications | get_app_conf(application_id
JOOMLA_SITE_NAME: "{{ SOFTWARE_NAME }} Joomla - CMS"
JOOMLA_DB_CONNECTOR: "{{ 'pgsql' if database_type == 'postgres' else 'mysqli' }}"
JOOMLA_CONFIG_FILE: "/var/www/html/configuration.php"
JOOMLA_INSTALLER_CLI_FILE: "/var/www/html/installation/joomla.php"
JOOMLA_CLI_FILE: "/var/www/html/cli/joomla.php"

## Upload
JOOMLA_UPLOAD_CONFIG: "{{ [ docker_compose.directories.instance, 'php-upload.ini' ] | path_join }}"
JOOMLA_UPLOAD_SIZE: "{{ applications | get_app_conf(application_id, 'docker.services.joomla.upload.size_mb') }}"
JOOMLA_UPLOAD_MAX_FILESIZE: "{{ (JOOMLA_UPLOAD_SIZE | int) }}M"
JOOMLA_POST_MAX_SIZE: "{{ ((JOOMLA_UPLOAD_SIZE | int) * 12 // 10) }}M"

# User
JOOMLA_USER_NAME: "{{ users.administrator.username }}"
JOOMLA_USER: "{{ JOOMLA_USER_NAME | capitalize }}"
JOOMLA_USER_PASSWORD: "{{ users.administrator.password }}"
JOOMLA_USER_EMAIL: "{{ users.administrator.email }}"
JOOMLA_WEB_USER: "www-data"

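With the default size_mb of 100, the formula above gives post_max_size a 20% headroom over upload_max_filesize for multipart form overhead; a quick check of the arithmetic:

    echo "upload_max_filesize = 100M"
    echo "post_max_size = $((100 * 12 / 10))M"   # -> 120M
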
@@ -19,9 +19,9 @@ server:
    flags:
      script-src-elem:
        unsafe-inline: true
      script-src:
      script-src-attr:
        unsafe-inline: true
      style-src:
      style-src-attr:
        unsafe-inline: true
    whitelist:
      frame-src:

@@ -104,6 +104,6 @@ a.pf-v5-c-nav__link{
div#app header{
    background-color: var(--color-01-60);
    /* New Gradient based on original background (60 -5, 60, 60 +1, 60 +5) */
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-55), var(--color-01-60), var(--color-01-61), var(--color-01-65));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-55), var(--color-01-60), var(--color-01-61), var(--color-01-65));
    color: var(--color-01-98);
}

@@ -1,29 +1,30 @@
docker:
  services:
    lam:
      image: ghcr.io/ldapaccountmanager/lam
      version: latest
    oauth2_proxy:
      application: application
      port: 80
      allowed_groups:
        - "{{ [RBAC.GROUP.NAME, 'web-app-lam-administrator'] | path_join }}"
      image: ghcr.io/ldapaccountmanager/lam
      version: latest
    oauth2_proxy:
      origin:
        host: application
        port: 80
      allowed_groups:
        - "{{ [RBAC.GROUP.NAME, 'web-app-lam-administrator'] | path_join }}"
features:
  matomo: true
  css: true
  desktop: true
  desktop: true
  ldap: true
  oauth2: true
  logout: true
server:
  csp:
    flags:
      style-src:
      style-src-attr:
        unsafe-inline: true
      script-src-elem:
        unsafe-inline: true
        unsafe-eval: true
      script-src:
      script-src-attr:
        unsafe-inline: true
  domains:
    aliases: []

@@ -37,7 +37,7 @@ ul.lam-tab-navigation {
.titleBar {
    background-image: linear-gradient(var(--color-01-83), var(--color-01-92));
    /* New Gradient based on original background (83 -5, 83, 83 +1, 83 +5) */
    background-image: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-78), var(--color-01-83), var(--color-01-84), var(--color-01-88));
    background-image: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-78), var(--color-01-83), var(--color-01-84), var(--color-01-88));
    border-top-color: var(--color-01-78);
    border-left-color: var(--color-01-87);
    border-right-color: var(--color-01-87);
@@ -46,6 +46,6 @@ ul.lam-tab-navigation {
div.statusInfo {
    background-color: var(--color-01-81);
    /* New Gradient based on original background (81 -5, 81, 81 +1, 81 +5) */
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-76), var(--color-01-81), var(--color-01-82), var(--color-01-86));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-76), var(--color-01-81), var(--color-01-82), var(--color-01-86));
    color: var(--color-01-23);
}

@@ -13,6 +13,16 @@ server:
    aliases: []
    status_codes:
      default: 404
  csp:
    flags:
      script-src-elem:
        unsafe-inline: true
    whitelist:
      script-src-elem:
        - "https://www.hcaptcha.com"
        - "https://js.hcaptcha.com"
      frame-src:
        - "https://newassets.hcaptcha.com/"
docker:
  services:
    database:

@@ -92,7 +92,7 @@ LISTMONK_SETTINGS:
    {{ [
      {
        "host": SYSTEM_EMAIL.HOST,
        "port": SYSTEM_EMAIL.PORT,
        "port": 995,
        "type": "pop",
        "uuid": "471fd0e9-8c33-4e4a-9183-c4679699faca",
        "enabled": true,

@@ -16,11 +16,11 @@ server:
    aliases: []
  csp:
    flags:
      style-src:
      style-src-attr:
        unsafe-inline: true
      script-src-elem:
        unsafe-inline: true
      script-src:
      script-src-attr:
        unsafe-inline: true
        unsafe-eval: true
rbac:

@@ -1,18 +1,18 @@
[class*=sidebar-dark-], .bg-mailu-logo {
    background-color: var(--color-01-90);
    /* New Gradient based on original background (90 -5, 90, 90 +1, 90 +5) */
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-85), var(--color-01-90), var(--color-01-91), var(--color-01-95));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-85), var(--color-01-90), var(--color-01-91), var(--color-01-95));
}

div.statusError {
    background-color: var(--color-01-60);
    /* New Gradient based on original background (60 -5, 60, 60 +1, 60 +5) */
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-55), var(--color-01-60), var(--color-01-61), var(--color-01-65));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-55), var(--color-01-60), var(--color-01-61), var(--color-01-65));
}

div.wrapper footer.main-footer, div.wrapper div.content-wrapper{
    background-color: var(--color-01-85);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-89), var(--color-01-85), var(--color-01-80), var(--color-01-79));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-89), var(--color-01-85), var(--color-01-80), var(--color-01-79));
    color: var(--color-01-39);
}

roles/web-app-mastodon/tasks/01_setup.yml (Normal file, 4 lines)
@@ -0,0 +1,4 @@
- name: "Execute migration for '{{ application_id }}'"
  command:
    chdir: "{{ docker_compose.directories.instance }}"
    cmd: "docker compose run --rm {{ MASTODON_SERVICE_NAME }} bundle exec rails db:migrate"

@@ -11,9 +11,9 @@
  delay: 5
  until: healthcheck.stdout == "healthy"
  loop:
    - mastodon
    - streaming
    - sidekiq
    - "{{ MASTODON_SERVICE_NAME }}"
    - "{{ MASTODON_STREAMING_SERVICE_NAME }}"
    - "{{ MASTODON_SIDEKIQ_SERVICE_NAME }}"
  loop_control:
    label: "{{ item }}"
  changed_when: false
@@ -1,3 +0,0 @@
- name: "Execute migration for '{{ application_id }}'"
  command:
    cmd: "docker exec {{ MASTODON_CONTAINER }} bundle exec rails db:migrate"

@@ -18,15 +18,15 @@
  vars:
    docker_compose_flush_handlers: true

- name: "start setup procedures for mastodon"
  include_tasks: 01_setup.yml

- name: "Wait for Mastodon"
  include_tasks: 01_wait.yml
  include_tasks: 02_wait.yml

- name: "Cleanup Mastodon caches when MODE_CLEANUP is true"
  include_tasks: 02_cleanup.yml
  include_tasks: 03_cleanup.yml
  when: MODE_CLEANUP | bool

- name: "start setup procedures for mastodon"
  include_tasks: 03_setup.yml

- name: "Include administrator routines for '{{ application_id }}'"
  include_tasks: 04_administrator.yml

@@ -1,9 +1,9 @@
 {% include 'roles/docker-compose/templates/base.yml.j2' %}

-  mastodon:
-{% set service_name = 'mastodon' %}
+{% set service_name = MASTODON_SERVICE_NAME %}
 {% set container_port = 3000 %}
 {% set container_healthcheck = 'health' %}
+  {{ service_name }}:
     container_name: {{ MASTODON_CONTAINER }}
     image: "{{ MASTODON_IMAGE }}:{{ MASTODON_VERSION }}"
 {% include 'roles/docker-container/templates/base.yml.j2' %}
@@ -16,10 +16,10 @@
       - data:/mastodon/public/system
 {% include 'roles/docker-container/templates/networks.yml.j2' %}

-  streaming:
-{% set service_name = 'streaming' %}
+{% set service_name = MASTODON_STREAMING_SERVICE_NAME %}
 {% set container_port = 4000 %}
 {% set container_healthcheck = 'api/v1/streaming/health' %}
+  {{ service_name }}:
     container_name: {{ MASTODON_STREAMING_CONTAINER }}
     image: "{{ MASTODON_STREAMING_IMAGE }}:{{ MASTODON_STREAMING_VERSION }}"
 {% include 'roles/docker-container/templates/base.yml.j2' %}
@@ -30,8 +30,8 @@
 {% include 'roles/docker-container/templates/depends_on/dmbs_excl.yml.j2' %}
 {% include 'roles/docker-container/templates/networks.yml.j2' %}

-  sidekiq:
-{% set service_name = 'sidekiq' %}
+{% set service_name = MASTODON_SIDEKIQ_SERVICE_NAME %}
+  {{ service_name }}:
     container_name: {{ MASTODON_SIDEKIQ_CONTAINER }}
     image: "{{ MASTODON_IMAGE }}:{{ MASTODON_VERSION }}"
 {% include 'roles/docker-container/templates/base.yml.j2' %}
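Note: in all three services the literal key (`mastodon:`, `streaming:`, `sidekiq:`) is replaced by `{{ service_name }}:` driven from the vars file, so the compose key, the wait loop, and the setup task can no longer drift apart. The `{% set %}` now has to precede the key line, since Jinja evaluates top to bottom. A minimal standalone illustration (not from the repo):

    {# One variable names the service; the set must come before its first use #}
    {% set service_name = 'example' %}
      {{ service_name }}:
        image: "nginx:alpine"
    {# renders to:
      example:
        image: "nginx:alpine"
    #}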
@@ -2,13 +2,24 @@
 application_id: "web-app-mastodon"
 database_type: "postgres"

-# Mastodon Specific
+# Mastodon
+
+## Main
 MASTODON_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.mastodon.version') }}"
 MASTODON_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.mastodon.image') }}"
 MASTODON_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.mastodon.name') }}"
 MASTODON_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.data') }}"
+MASTODON_SERVICE_NAME: "mastodon"
+
+## Streaming
 MASTODON_STREAMING_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.streaming.version') }}"
 MASTODON_STREAMING_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.streaming.image') }}"
 MASTODON_STREAMING_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.streaming.name') }}"
-MASTODON_SIDEKIQ_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.mastodon.name') }}_sidekiq"
+MASTODON_STREAMING_SERVICE_NAME: "streaming"
+
+## Sidekiq
+MASTODON_SIDEKIQ_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.mastodon.name') }}-sidekiq"
+MASTODON_SIDEKIQ_SERVICE_NAME: "sidekiq"
+
+## General
 MASTODON_ALLOWED_PRIVATE_ADDRESSES: "{{ networks.local['svc-db-postgres'].subnet if 'web-app-chess' in group_names else ''}}"
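Note: one behavioral change hides in the regrouping: the sidekiq container suffix flips from `_sidekiq` to `-sidekiq`, so anything that addressed the container by its old name must be updated. For readers unfamiliar with `get_app_conf`, a sketch of the data shape it walks (values purely illustrative):

    # Illustrative shape of the merged config that get_app_conf reads
    applications:
      web-app-mastodon:
        docker:
          services:
            mastodon:
              image: "ghcr.io/mastodon/mastodon"   # hypothetical value
              version: "v4.3"                      # hypothetical value
              name: "mastodon"                     # -> MASTODON_SIDEKIQ_CONTAINER = "mastodon-sidekiq"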
@@ -17,12 +17,12 @@ server:
       style-src-elem:
         - https://fonts.googleapis.com
     flags:
-      script-src:
+      script-src-attr:
         unsafe-eval: true
       script-src-elem:
         unsafe-inline: true
         unsafe-eval: true
-      style-src:
+      style-src-attr:
         unsafe-inline: true
         unsafe-eval: true
   domains:
@@ -27,12 +27,12 @@ features:
 server:
   csp:
     flags:
-      script-src:
+      script-src-attr:
         unsafe-eval: true
       script-src-elem:
         unsafe-inline: true
         unsafe-eval: true
-      style-src:
+      style-src-attr:
         unsafe-inline: true
     whitelist:
       connect-src:
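Note: both hunks perform the same rename: the CSP flags `script-src` and `style-src` become `script-src-attr` and `style-src-attr`. In CSP Level 3 the `-elem` directives govern `<script>`/`<style>` elements, while the `-attr` directives govern inline event handlers and `style="..."` attributes, so the renamed flags scope the unsafe allowances more narrowly. Schematically (illustrative grouping, not repo config):

    flags:
      script-src-elem:   # <script> elements and external script URLs
        unsafe-inline: true
      script-src-attr:   # inline handlers such as onclick="..."
        unsafe-eval: true
      style-src-attr:    # style="..." attributes
        unsafe-inline: true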
@@ -4,6 +4,11 @@ server:
     canonical:
       - "m.wiki.{{ PRIMARY_DOMAIN }}"
     aliases: []
+  csp:
+    flags:
+      script-src-elem:
+        unsafe-inline: true
+
 docker:
   services:
     database:
@@ -22,3 +27,4 @@ features:
   css: false
   desktop: true
   oidc: true
+  matomo: true
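Note: the MediaWiki role thus gains a CSP exception for inline `<script>` elements (presumably to accommodate MediaWiki's inline bootstrap scripts) and switches the matomo feature on; the resulting features block reads:

    features:
      css: false
      desktop: true
      oidc: true
      matomo: true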
@@ -40,7 +40,7 @@
 body.skin-vector,
 .skin-vector .mw-page-container {
   background-color: var(--mw-surface);
-  background-image: linear-gradient({{ range(0, 361) | random }}deg,
+  background-image: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg,
     var(--mw-surface),
     var(--mw-surface-variant),
     var(--mw-surface-muted),
@@ -54,7 +54,7 @@ body.skin-vector,
 .skin-vector .vector-header-container,
 .skin-vector .mw-header {
   background-color: var(--color-01-80);
-  background-image: linear-gradient({{ range(0, 361) | random }}deg,
+  background-image: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg,
     var(--color-01-75), var(--color-01-80), var(--color-01-81), var(--color-01-85)
   );
   color: var(--color-01-17);
@@ -211,7 +211,7 @@ table.wikitable > * > tr > td {

 table.wikitable > * > tr > th {
   background-color: var(--color-01-80);
-  background-image: linear-gradient({{ range(0, 361) | random }}deg,
+  background-image: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg,
     var(--color-01-75), var(--color-01-80), var(--color-01-81), var(--color-01-85)
   );
   color: var(--mw-heading);
@@ -11,7 +11,7 @@ MEDIAWIKI_URL: "{{ domains | get_url(application_id, WEB_PROT
 MEDIAWIKI_HTML_DIR: "/var/www/html"
 MEDIAWIKI_CONFIG_DIR: "{{ docker_compose.directories.config }}"
 MEDIAWIKI_VOLUMES_DIR: "{{ docker_compose.directories.volumes }}"
-MEDIAWIKI_LOCAL_MOUNT_DIR: "{{ MEDIAWIKI_VOLUMES_DIR }}/mw-local"
+MEDIAWIKI_LOCAL_MOUNT_DIR: "{{ [ MEDIAWIKI_VOLUMES_DIR, 'mw-local' ] | path_join }}"
 MEDIAWIKI_LOCAL_PATH: "/opt/mw-local"

 ## Docker
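Note: `path_join` (ansible.builtin) normalizes separators, avoiding the doubled or missing slash that manual string concatenation risks. A quick illustration (illustrative values):

    - debug:
        msg: "{{ [ '/var/lib/volumes', 'mw-local' ] | path_join }}"
      # => /var/lib/volumes/mw-local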
@@ -26,10 +26,11 @@ server:
       - https://cdn.jsdelivr.net
     connect-src:
       - https://ka-f.fontawesome.com
+      - https://cdn.jsdelivr.net
     frame-ancestors:
       - "*" # No damage if it's used somewhere on other websites, it anyhow looks like art
     flags:
-      style-src:
+      style-src-attr:
         unsafe-inline: true
   domains:
     canonical:
roles/web-app-mig/files/style.css (new file, +3)
@@ -0,0 +1,3 @@
+#details h6, #details p{
+  color: var(--color-01-73)
+}
@@ -3,23 +3,17 @@
     name: sys-cli
   when: run_once_sys_cli is not defined

-- name: Load docker compose vars
-  include_vars:
-    file: roles/docker-compose/vars/docker-compose.yml
-    name: mig_docker_compose
-
-- name: Set roles volume variable
-  set_fact:
-    mig_roles_meta_volume: "{{ mig_docker_compose.docker_compose.directories.volumes }}/roles/"
-
-- name: Set roles list variable
-  set_fact:
-    mig_roles_meta_list: "{{ mig_roles_meta_volume }}list.json"
-
 - name: "load docker, proxy for '{{ application_id }}'"
   include_role:
     name: sys-stk-full-stateless
   vars:
     docker_compose_flush_handlers: true
     docker_pull_git_repository: true

-- name: Build data (single async task)
-  include_tasks: 02_build_data.yml
+- include_tasks: 02_cleanup.yml
+  when: MODE_CLEANUP | bool
+
+- include_tasks: 03_build_data.yml
+  when: MIG_BUILD_DATA | bool
+
+- include_tasks: utils/run_once.yml
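Note: the include_vars/set_fact preamble is replaced by plain role vars. MIG_ROLES_DIRECTORY and MIG_ROLES_LIST (added to vars/main.yml later in this diff) are templated lazily on first use, so three bookkeeping tasks disappear and the cleanup/build steps become guarded one-liners. The replacement vars, as defined below:

    MIG_ROLES_DIRECTORY: "{{ [ docker_compose.directories.volumes, 'roles' ] | path_join }}"
    MIG_ROLES_LIST: "{{ [ MIG_ROLES_DIRECTORY, 'list.json' ] | path_join }}"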
roles/web-app-mig/tasks/02_cleanup.yml (new file, +5)
@@ -0,0 +1,5 @@
+- name: "Cleanup MIG roles directory (remove all contents safely)"
+  file:
+    path: "{{ MIG_ROLES_DIRECTORY }}"
+    state: directory
+    mode: "0755"
@@ -1,8 +1,7 @@
 - name: Build data (single async task)
   shell: |
     set -euo pipefail
-    infinito build tree --no-signal --alarm-timeout 0 -s {{ mig_roles_meta_volume }}
-    infinito build roles_list --no-signal --alarm-timeout 0 -o {{ mig_roles_meta_list }}
+    infinito build tree --no-signal --alarm-timeout 0 -s {{ MIG_ROLES_DIRECTORY }}
+    infinito build roles_list --no-signal --alarm-timeout 0 -o {{ MIG_ROLES_LIST }}
   async: "{{ (3600 if ASYNC_ENABLED | bool else omit) }}"
   poll: "{{ ASYNC_POLL if ASYNC_ENABLED | bool else omit }}"
   register: mig_build_job
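Note: the async/poll pair makes background execution opt-in. With ASYNC_ENABLED false, `omit` drops both keywords and the shell runs synchronously; with it true, the job may run up to 3600 s while being polled every ASYNC_POLL seconds. The same idiom in isolation (stand-in command):

    - name: "Long-running step, async only when enabled"
      command: "sleep 30"   # stand-in for the real build
      async: "{{ 3600 if ASYNC_ENABLED | bool else omit }}"
      poll: "{{ ASYNC_POLL if ASYNC_ENABLED | bool else omit }}"
      register: build_job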
@@ -1,7 +1,4 @@
 ---
-- block:
-    - include_tasks: 01_core.yml
-    - include_tasks: utils/run_once.yml
-  name: "Setup Meta Infinite Graph"
+- include_tasks: 01_core.yml
   when: run_once_web_app_mig is not defined
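Note: the block wrapper is gone because 01_core.yml now ends with `include_tasks: utils/run_once.yml` (added in the 01_core hunk above), which presumably sets the `run_once_web_app_mig` fact; main.yml shrinks to the repo's usual guard idiom:

    - include_tasks: 01_core.yml
      when: run_once_web_app_mig is not defined
    # 01_core.yml finishes with utils/run_once.yml, defining run_once_web_app_mig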
@@ -12,11 +12,9 @@
       dockerfile: Dockerfile
     pull_policy: never
     volumes:
-      - "{{ mig_roles_meta_volume }}:/usr/share/nginx/html/roles:ro"
+      - "{{ MIG_ROLES_DIRECTORY }}:/usr/share/nginx/html/roles:ro"
       - "{{ docker_repository_path }}:/usr/share/nginx/html"
 {% include 'roles/docker-container/templates/networks.yml.j2' %}
 {% include 'roles/docker-container/templates/healthcheck/curl.yml.j2' %}

 {% include 'roles/docker-compose/templates/networks.yml.j2' %}
-
-
@@ -2,11 +2,11 @@
 application_id: web-app-mig

 # Docker
 docker_compose_flush_handlers: true
 docker_pull_git_repository: true
 docker_repository_address: "https://github.com/kevinveenbirkenbach/meta-infinite-graph"

 # Helper variables
 MIG_IMAGE: "mig:latest"
 MIG_CONTAINER: "mig"
-MIG_BUILD_DATA: "{{ applications | get_app_conf(application_id, 'build_data.enabled') }}"
+MIG_BUILD_DATA: "{{ applications | get_app_conf(application_id, 'build_data.enabled') }}"
+MIG_ROLES_DIRECTORY: "{{ [ docker_compose.directories.volumes, 'roles' ] | path_join }}"
+MIG_ROLES_LIST: "{{ [ MIG_ROLES_DIRECTORY, 'list.json' ] | path_join }}"
Some files were not shown because too many files have changed in this diff.