Mirror of https://github.com/kevinveenbirkenbach/computer-playbook.git (synced 2025-12-07 18:05:09 +00:00)

Compare commits: 295ae7e477...features/w (67 commits)
| SHA1 |
|---|
| 05ff3d3d61 |
| 873607246c |
| ac5fdeafd2 |
| cc817f3967 |
| 4f48cf633b |
| bbebf7964d |
| c5afae42cf |
| d7e0123af2 |
| 3fe83f26d5 |
| bee833feb4 |
| b80cfbdc9d |
| 6d6b0fdea6 |
| d97d34a822 |
| 4b2c7eef88 |
| e6cb0cbed9 |
| 254a685b05 |
| 9cbb74b7c8 |
| 62d20fbb71 |
| da8dc3b53a |
| 287cccf6cb |
| 61ee993ff1 |
| 2e490ed238 |
| c11ea9b699 |
| 2f5ead2212 |
| 13e74a86a6 |
| 962c68fdab |
| f8899e9493 |
| 9c65bd4839 |
| aca2da885d |
| d6422a7881 |
| 8cf3dbd5bf |
| dfa5e26582 |
| a312f353fb |
| e333c9d85b |
| 854e6902d3 |
| cc1ed2b125 |
| 28caa495e7 |
| 19de04c475 |
| 002f8de3ec |
| 68a8128d38 |
| 36f9573fdf |
| 493d5bbbda |
| 2fcbae8fc7 |
| 02f38d60db |
| d66ad37c5d |
| 0c16f9c43c |
| 7330aeb8ec |
| d3aad632c0 |
| d1bad3d7a6 |
| 43056a8b92 |
| 0bf286f62a |
| df8390f386 |
| 48557b06e3 |
| 1cff5778d3 |
| 60e2c972d6 |
| 637de6a190 |
| f5efbce205 |
| d6f3618d70 |
| 773655efb5 |
| 7bc9f7abd9 |
| ec7b8662dd |
| d1ccfd9cdd |
| d61c81634c |
| 265f815b48 |
| f8e5110730 |
| 37b213f96a |
| 5ef525eac9 |
@@ -6,168 +6,347 @@ import json
 import re
 from typing import List, Dict, Any, Set
 
+from module_utils.role_dependency_resolver import RoleDependencyResolver
+
+# Regex used to ignore Jinja expressions inside include/import statements
 JINJA_PATTERN = re.compile(r'{{.*}}')
-ALL_DEP_TYPES = ['run_after', 'dependencies', 'include_tasks', 'import_tasks', 'include_role', 'import_role']
-ALL_DIRECTIONS = ['to', 'from']
-ALL_KEYS = [f"{dep}_{dir}" for dep in ALL_DEP_TYPES for dir in ALL_DIRECTIONS]
+
+# All dependency types the graph builder supports
+ALL_DEP_TYPES = [
+    "run_after",
+    "dependencies",
+    "include_tasks",
+    "import_tasks",
+    "include_role",
+    "import_role",
+]
+
+# Graph directions: outgoing edges ("to") vs incoming edges ("from")
+ALL_DIRECTIONS = ["to", "from"]
+
+# Combined keys: e.g. "include_role_to", "dependencies_from", etc.
+ALL_KEYS = [f"{dep}_{direction}" for dep in ALL_DEP_TYPES for direction in ALL_DIRECTIONS]
+
+
+# ------------------------------------------------------------
+# Helpers for locating meta and task files
+# ------------------------------------------------------------
 
 def find_role_meta(roles_dir: str, role: str) -> str:
-    path = os.path.join(roles_dir, role, 'meta', 'main.yml')
+    """Return path to meta/main.yml of a role or raise FileNotFoundError."""
+    path = os.path.join(roles_dir, role, "meta", "main.yml")
     if not os.path.isfile(path):
         raise FileNotFoundError(f"Metadata not found for role: {role}")
     return path
 
 
 def find_role_tasks(roles_dir: str, role: str) -> str:
-    path = os.path.join(roles_dir, role, 'tasks', 'main.yml')
+    """Return path to tasks/main.yml of a role or raise FileNotFoundError."""
+    path = os.path.join(roles_dir, role, "tasks", "main.yml")
     if not os.path.isfile(path):
         raise FileNotFoundError(f"Tasks not found for role: {role}")
     return path
 
 
+# ------------------------------------------------------------
+# Parsers for meta and tasks
+# ------------------------------------------------------------
+
 def load_meta(path: str) -> Dict[str, Any]:
-    with open(path, 'r') as f:
+    """
+    Load metadata from meta/main.yml.
+    Returns a dict with:
+      - galaxy_info
+      - run_after
+      - dependencies
+    """
+    with open(path, "r") as f:
         data = yaml.safe_load(f) or {}
 
-    galaxy_info = data.get('galaxy_info', {}) or {}
+    galaxy_info = data.get("galaxy_info", {}) or {}
     return {
-        'galaxy_info': galaxy_info,
-        'run_after': galaxy_info.get('run_after', []) or [],
-        'dependencies': data.get('dependencies', []) or []
+        "galaxy_info": galaxy_info,
+        "run_after": galaxy_info.get("run_after", []) or [],
+        "dependencies": data.get("dependencies", []) or [],
     }
 
 
 def load_tasks(path: str, dep_type: str) -> List[str]:
-    with open(path, 'r') as f:
+    """
+    Parse include_tasks/import_tasks from tasks/main.yml.
+    Only accepts simple, non-Jinja names.
+    """
+    with open(path, "r") as f:
         data = yaml.safe_load(f) or []
 
-    included_roles = []
+    roles: List[str] = []
 
     for task in data:
         if not isinstance(task, dict):
             continue
 
         if dep_type in task:
             entry = task[dep_type]
             if isinstance(entry, dict):
-                entry = entry.get('name', '')
-            if entry and not JINJA_PATTERN.search(entry):
-                included_roles.append(entry)
+                entry = entry.get("name", "")
+            if isinstance(entry, str) and entry and not JINJA_PATTERN.search(entry):
+                roles.append(entry)
 
-    return included_roles
+    return roles
+
+
+# ------------------------------------------------------------
+# Graph builder using precomputed caches (fast)
+# ------------------------------------------------------------
 
 def build_single_graph(
     start_role: str,
     dep_type: str,
     direction: str,
     roles_dir: str,
-    max_depth: int
+    max_depth: int,
+    caches: Dict[str, Any],
 ) -> Dict[str, Any]:
+    """
+    Build a graph (nodes + links) for one role, one dep_type, one direction.
+    Uses only precomputed in-memory caches, no filesystem access.
+
+    caches structure:
+      caches["meta"][role] -> meta information
+      caches["deps"][dep_type][role] -> outgoing targets
+      caches["rev"][dep_type][target] -> set of source roles
+    """
     nodes: Dict[str, Dict[str, Any]] = {}
     links: List[Dict[str, str]] = []
 
+    meta_cache = caches["meta"]
+    deps_cache = caches["deps"]
+    rev_cache = caches["rev"]
+
+    # --------------------------------------------------------
+    # Ensure a role exists as a node
+    # --------------------------------------------------------
+    def ensure_node(role: str):
+        if role in nodes:
+            return
+
+        # Try retrieving cached meta; fallback: lazy load
+        meta = meta_cache.get(role)
+        if meta is None:
+            try:
+                meta = load_meta(find_role_meta(roles_dir, role))
+                meta_cache[role] = meta
+            except FileNotFoundError:
+                meta = {"galaxy_info": {}}
+
+        galaxy_info = meta.get("galaxy_info", {}) or {}
+
+        node = {
+            "id": role,
+            **galaxy_info,
+            "doc_url": f"https://docs.infinito.nexus/roles/{role}/README.html",
+            "source_url": f"https://github.com/kevinveenbirkenbach/infinito-nexus/tree/master/roles/{role}",
+        }
+        nodes[role] = node
+
+    # --------------------------------------------------------
+    # Outgoing edges: role -> targets
+    # --------------------------------------------------------
+    def outgoing(role: str) -> List[str]:
+        return deps_cache.get(dep_type, {}).get(role, []) or []
+
+    # --------------------------------------------------------
+    # Incoming edges: sources -> role
+    # --------------------------------------------------------
+    def incoming(role: str) -> Set[str]:
+        return rev_cache.get(dep_type, {}).get(role, set())
+
+    # --------------------------------------------------------
+    # DFS traversal
+    # --------------------------------------------------------
     def traverse(role: str, depth: int, path: Set[str]):
-        if role not in nodes:
-            meta = load_meta(find_role_meta(roles_dir, role))
-            node = {'id': role}
-            node.update(meta['galaxy_info'])
-            node['doc_url'] = f"https://docs.infinito.nexus/roles/{role}/README.html"
-            node['source_url'] = f"https://s.infinito.nexus/code/tree/master/roles/{role}"
-            nodes[role] = node
+        ensure_node(role)
 
         if max_depth > 0 and depth >= max_depth:
             return
 
-        neighbors = []
-        if dep_type in ['run_after', 'dependencies']:
-            meta = load_meta(find_role_meta(roles_dir, role))
-            neighbors = meta.get(dep_type, [])
-        else:
-            try:
-                neighbors = load_tasks(find_role_tasks(roles_dir, role), dep_type)
-            except FileNotFoundError:
-                neighbors = []
-
-        if direction == 'to':
-            for tgt in neighbors:
-                links.append({'source': role, 'target': tgt, 'type': dep_type})
-                if tgt in path:
-                    continue
-                traverse(tgt, depth + 1, path | {tgt})
-
-        else:  # direction == 'from'
-            for other in os.listdir(roles_dir):
-                try:
-                    other_neighbors = []
-                    if dep_type in ['run_after', 'dependencies']:
-                        meta_o = load_meta(find_role_meta(roles_dir, other))
-                        other_neighbors = meta_o.get(dep_type, [])
-                    else:
-                        other_neighbors = load_tasks(find_role_tasks(roles_dir, other), dep_type)
-
-                    if role in other_neighbors:
-                        links.append({'source': other, 'target': role, 'type': dep_type})
-                        if other in path:
-                            continue
-                        traverse(other, depth + 1, path | {other})
-
-                except FileNotFoundError:
-                    continue
-
-    traverse(start_role, depth=0, path={start_role})
-    return {'nodes': list(nodes.values()), 'links': links}
+        if direction == "to":
+            for tgt in outgoing(role):
+                ensure_node(tgt)
+                links.append({"source": role, "target": tgt, "type": dep_type})
+                if tgt not in path:
+                    traverse(tgt, depth + 1, path | {tgt})
+
+        else:  # direction == "from"
+            for src in incoming(role):
+                ensure_node(src)
+                links.append({"source": src, "target": role, "type": dep_type})
+                if src not in path:
+                    traverse(src, depth + 1, path | {src})
+
+    traverse(start_role, 0, {start_role})
+
+    return {"nodes": list(nodes.values()), "links": links}
+
+
+# ------------------------------------------------------------
+# Build all graph variants for one role
+# ------------------------------------------------------------
 
 def build_mappings(
     start_role: str,
     roles_dir: str,
     max_depth: int
 ) -> Dict[str, Any]:
+    """
+    Build all 12 graph variants (6 dep types × 2 directions).
+    Accelerated version:
+      - One-time scan of all metadata
+      - One-time scan of all include_role/import_role
+      - One-time scan of include_tasks/import_tasks
+      - Build reverse-index tables
+      - Then generate all graphs purely from memory
+    """
     result: Dict[str, Any] = {}
-    for key in ALL_KEYS:
-        dep_type, direction = key.rsplit('_', 1)
-        try:
-            result[key] = build_single_graph(start_role, dep_type, direction, roles_dir, max_depth)
-        except Exception:
-            result[key] = {'nodes': [], 'links': []}
+
+    roles = [
+        r for r in os.listdir(roles_dir)
+        if os.path.isdir(os.path.join(roles_dir, r))
+    ]
+
+    # Pre-caches
+    meta_cache: Dict[str, Dict[str, Any]] = {}
+    deps_cache: Dict[str, Dict[str, List[str]]] = {dep: {} for dep in ALL_DEP_TYPES}
+    rev_cache: Dict[str, Dict[str, Set[str]]] = {dep: {} for dep in ALL_DEP_TYPES}
+
+    resolver = RoleDependencyResolver(roles_dir)
+
+    # --------------------------------------------------------
+    # Step 1: Preload meta-based deps (run_after, dependencies)
+    # --------------------------------------------------------
+    for role in roles:
+        try:
+            meta = load_meta(find_role_meta(roles_dir, role))
+        except FileNotFoundError:
+            continue
+
+        meta_cache[role] = meta
+
+        for dep_key in ["run_after", "dependencies"]:
+            values = meta.get(dep_key, []) or []
+            if isinstance(values, list) and values:
+                deps_cache[dep_key][role] = values
+
+                for tgt in values:
+                    if isinstance(tgt, str) and tgt.strip():
+                        rev_cache[dep_key].setdefault(tgt.strip(), set()).add(role)
+
+    # --------------------------------------------------------
+    # Step 2: Preload include_role/import_role (resolver)
+    # --------------------------------------------------------
+    for role in roles:
+        role_path = os.path.join(roles_dir, role)
+        inc, imp = resolver._scan_tasks(role_path)
+
+        if inc:
+            inc_list = sorted(inc)
+            deps_cache["include_role"][role] = inc_list
+            for tgt in inc_list:
+                rev_cache["include_role"].setdefault(tgt, set()).add(role)
+
+        if imp:
+            imp_list = sorted(imp)
+            deps_cache["import_role"][role] = imp_list
+            for tgt in imp_list:
+                rev_cache["import_role"].setdefault(tgt, set()).add(role)
+
+    # --------------------------------------------------------
+    # Step 3: Preload include_tasks/import_tasks
+    # --------------------------------------------------------
+    for role in roles:
+        try:
+            tasks_path = find_role_tasks(roles_dir, role)
+        except FileNotFoundError:
+            continue
+
+        for dep_key in ["include_tasks", "import_tasks"]:
+            values = load_tasks(tasks_path, dep_key)
+            if values:
+                deps_cache[dep_key][role] = values
+
+                for tgt in values:
+                    rev_cache[dep_key].setdefault(tgt, set()).add(role)
+
+    caches = {
+        "meta": meta_cache,
+        "deps": deps_cache,
+        "rev": rev_cache,
+    }
+
+    # --------------------------------------------------------
+    # Step 4: Build all graphs from caches
+    # --------------------------------------------------------
+    for key in ALL_KEYS:
+        dep_type, direction = key.rsplit("_", 1)
+        try:
+            result[key] = build_single_graph(
+                start_role=start_role,
+                dep_type=dep_type,
+                direction=direction,
+                roles_dir=roles_dir,
+                max_depth=max_depth,
+                caches=caches,
+            )
+        except Exception:
+            result[key] = {"nodes": [], "links": []}
 
     return result
 
 
+# ------------------------------------------------------------
+# Output helper
+# ------------------------------------------------------------
+
 def output_graph(graph_data: Any, fmt: str, start: str, key: str):
     base = f"{start}_{key}"
-    if fmt == 'console':
+    if fmt == "console":
         print(f"--- {base} ---")
         print(yaml.safe_dump(graph_data, sort_keys=False))
-    elif fmt in ('yaml', 'json'):
+    else:
         path = f"{base}.{fmt}"
-        with open(path, 'w') as f:
-            if fmt == 'yaml':
+        with open(path, "w") as f:
+            if fmt == "yaml":
                 yaml.safe_dump(graph_data, f, sort_keys=False)
             else:
                 json.dump(graph_data, f, indent=2)
         print(f"Wrote {path}")
-    else:
-        raise ValueError(f"Unknown format: {fmt}")
 
 
 # ------------------------------------------------------------
 # CLI entrypoint
 # ------------------------------------------------------------
 
 def main():
     script_dir = os.path.dirname(os.path.abspath(__file__))
-    default_roles_dir = os.path.abspath(os.path.join(script_dir, '..', '..', 'roles'))
+    default_roles_dir = os.path.abspath(os.path.join(script_dir, "..", "..", "roles"))
 
     parser = argparse.ArgumentParser(description="Generate dependency graphs")
-    parser.add_argument('-r', '--role', required=True, help="Starting role name")
-    parser.add_argument('-D', '--depth', type=int, default=0, help="Max recursion depth")
-    parser.add_argument('-o', '--output', choices=['yaml', 'json', 'console'], default='console')
-    parser.add_argument('--roles-dir', default=default_roles_dir, help="Roles directory")
+    parser.add_argument("-r", "--role", required=True, help="Starting role name")
+    parser.add_argument("-D", "--depth", type=int, default=0, help="Max recursion depth")
+    parser.add_argument("-o", "--output", choices=["yaml", "json", "console"], default="console")
+    parser.add_argument("--roles-dir", default=default_roles_dir, help="Roles directory")
 
     args = parser.parse_args()
 
     graphs = build_mappings(args.role, args.roles_dir, args.depth)
 
     for key in ALL_KEYS:
-        graph_data = graphs.get(key, {'nodes': [], 'links': []})
+        graph_data = graphs.get(key, {"nodes": [], "links": []})
         output_graph(graph_data, args.output, args.role, key)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
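The payoff of this rewrite is the `rev` reverse index: the old `"from"` direction re-scanned every role on disk for every visited node, while the new one answers "who depends on X?" with a dictionary lookup. A minimal sketch of the pattern, with toy data standing in for the real role scans (role names here are illustrative only):

```python
# Toy illustration of the deps/rev cache pattern used in build_mappings (not project code).
from typing import Dict, List, Set

deps: Dict[str, List[str]] = {            # forward index: role -> targets
    "web-app-nextcloud": ["svc-db-postgres", "svc-db-redis"],
    "web-app-gitea": ["svc-db-postgres"],
}

rev: Dict[str, Set[str]] = {}             # reverse index: target -> source roles
for role, targets in deps.items():
    for tgt in targets:
        rev.setdefault(tgt, set()).add(role)

print(sorted(rev["svc-db-postgres"]))     # ['web-app-gitea', 'web-app-nextcloud']
```

Once `rev` is built, each incoming-edge query during traversal is a constant-time lookup instead of a full walk over the roles directory.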
@@ -2,19 +2,76 @@
 import os
 import argparse
 import json
-from typing import Dict, Any
+from typing import Dict, Any, Optional, Iterable, Tuple
+from concurrent.futures import ProcessPoolExecutor, as_completed
 
 from cli.build.graph import build_mappings, output_graph
-from module_utils.role_dependency_resolver import RoleDependencyResolver
 
 
-def find_roles(roles_dir: str):
+def find_roles(roles_dir: str) -> Iterable[Tuple[str, str]]:
+    """
+    Yield (role_name, role_path) for all roles in the given roles_dir.
+    """
     for entry in os.listdir(roles_dir):
         path = os.path.join(roles_dir, entry)
         if os.path.isdir(path):
             yield entry, path
 
 
+def process_role(
+    role_name: str,
+    roles_dir: str,
+    depth: int,
+    shadow_folder: Optional[str],
+    output: str,
+    preview: bool,
+    verbose: bool,
+    no_include_role: bool,   # currently unused, kept for CLI compatibility
+    no_import_role: bool,    # currently unused, kept for CLI compatibility
+    no_dependencies: bool,   # currently unused, kept for CLI compatibility
+    no_run_after: bool,      # currently unused, kept for CLI compatibility
+) -> None:
+    """
+    Worker function: build graphs and (optionally) write meta/tree.json for a single role.
+
+    Note:
+        This version no longer adds a custom top-level "dependencies" bucket.
+        Only the graphs returned by build_mappings() are written.
+    """
+    role_path = os.path.join(roles_dir, role_name)
+
+    if verbose:
+        print(f"[worker] Processing role: {role_name}")
+
+    # Build the full graph structure (all dep types / directions) for this role
+    graphs: Dict[str, Any] = build_mappings(
+        start_role=role_name,
+        roles_dir=roles_dir,
+        max_depth=depth,
+    )
+
+    # Preview mode: dump graphs to console instead of writing tree.json
+    if preview:
+        for key, data in graphs.items():
+            if verbose:
+                print(f"[worker] Previewing graph '{key}' for role '{role_name}'")
+            # In preview mode we always output as console
+            output_graph(data, "console", role_name, key)
+        return
+
+    # Non-preview: write meta/tree.json for this role
+    if shadow_folder:
+        tree_file = os.path.join(shadow_folder, role_name, "meta", "tree.json")
+    else:
+        tree_file = os.path.join(role_path, "meta", "tree.json")
+
+    os.makedirs(os.path.dirname(tree_file), exist_ok=True)
+    with open(tree_file, "w", encoding="utf-8") as f:
+        json.dump(graphs, f, indent=2)
+
+    print(f"Wrote {tree_file}")
+
+
 def main():
     script_dir = os.path.dirname(os.path.abspath(__file__))
     default_roles_dir = os.path.abspath(os.path.join(script_dir, "..", "..", "roles"))
@@ -22,24 +79,67 @@ def main():
     parser = argparse.ArgumentParser(
         description="Generate all graphs for each role and write meta/tree.json"
     )
-    parser.add_argument("-d", "--role_dir", default=default_roles_dir,
-                        help=f"Path to roles directory (default: {default_roles_dir})")
-    parser.add_argument("-D", "--depth", type=int, default=0,
-                        help="Max recursion depth (>0) or <=0 to stop on cycle")
-    parser.add_argument("-o", "--output", choices=["yaml", "json", "console"],
-                        default="json", help="Output format")
-    parser.add_argument("-p", "--preview", action="store_true",
-                        help="Preview graphs to console instead of writing files")
-    parser.add_argument("-s", "--shadow-folder", type=str, default=None,
-                        help="If set, writes tree.json to this shadow folder instead of the role's actual meta/ folder")
-    parser.add_argument("-v", "--verbose", action="store_true", help="Enable verbose logging")
+    parser.add_argument(
+        "-d",
+        "--role_dir",
+        default=default_roles_dir,
+        help=f"Path to roles directory (default: {default_roles_dir})",
+    )
+    parser.add_argument(
+        "-D",
+        "--depth",
+        type=int,
+        default=0,
+        help="Max recursion depth (>0) or <=0 to stop on cycle",
+    )
+    parser.add_argument(
+        "-o",
+        "--output",
+        choices=["yaml", "json", "console"],
+        default="json",
+        help="Output format for preview mode",
+    )
+    parser.add_argument(
+        "-p",
+        "--preview",
+        action="store_true",
+        help="Preview graphs to console instead of writing files",
+    )
+    parser.add_argument(
+        "-s",
+        "--shadow-folder",
+        type=str,
+        default=None,
+        help="If set, writes tree.json to this shadow folder instead of the role's actual meta/ folder",
+    )
+    parser.add_argument(
+        "-v",
+        "--verbose",
+        action="store_true",
+        help="Enable verbose logging",
+    )
 
-    # Toggles
-    parser.add_argument("--no-include-role", action="store_true", help="Do not scan include_role")
-    parser.add_argument("--no-import-role", action="store_true", help="Do not scan import_role")
-    parser.add_argument("--no-dependencies", action="store_true", help="Do not read meta/main.yml dependencies")
-    parser.add_argument("--no-run-after", action="store_true",
-                        help="Do not read galaxy_info.run_after from meta/main.yml")
+    # Toggles (kept for CLI compatibility, currently only meaningful for future extensions)
+    parser.add_argument(
+        "--no-include-role",
+        action="store_true",
+        help="Reserved: do not include include_role in custom dependency bucket",
+    )
+    parser.add_argument(
+        "--no-import-role",
+        action="store_true",
+        help="Reserved: do not include import_role in custom dependency bucket",
+    )
+    parser.add_argument(
+        "--no-dependencies",
+        action="store_true",
+        help="Reserved: do not include meta dependencies in custom dependency bucket",
+    )
+    parser.add_argument(
+        "--no-run-after",
+        action="store_true",
+        help="Reserved: do not include run_after in custom dependency bucket",
+    )
 
     args = parser.parse_args()
 
@@ -50,54 +150,53 @@ def main():
     print(f"Preview mode: {args.preview}")
     print(f"Shadow folder: {args.shadow_folder}")
 
-    resolver = RoleDependencyResolver(args.role_dir)
-
-    for role_name, role_path in find_roles(args.role_dir):
-        if args.verbose:
-            print(f"Processing role: {role_name}")
-
-        graphs: Dict[str, Any] = build_mappings(
-            start_role=role_name,
-            roles_dir=args.role_dir,
-            max_depth=args.depth
-        )
-
-        # Direct deps (depth=1) - captured separately for the buckets
-        inc_roles, imp_roles = resolver._scan_tasks(role_path)
-        meta_deps = resolver._extract_meta_dependencies(role_path)
-        run_after = set()
-        if not args.no_run_after:
-            run_after = resolver._extract_meta_run_after(role_path)
-
-        if any([not args.no_include_role and inc_roles,
-                not args.no_import_role and imp_roles,
-                not args.no_dependencies and meta_deps,
-                not args.no_run_after and run_after]):
-            deps_root = graphs.setdefault("dependencies", {})
-            if not args.no_include_role and inc_roles:
-                deps_root["include_role"] = sorted(inc_roles)
-            if not args.no_import_role and imp_roles:
-                deps_root["import_role"] = sorted(imp_roles)
-            if not args.no_dependencies and meta_deps:
-                deps_root["dependencies"] = sorted(meta_deps)
-            if not args.no_run_after and run_after:
-                deps_root["run_after"] = sorted(run_after)
-            graphs["dependencies"] = deps_root
-
-        if args.preview:
-            for key, data in graphs.items():
-                if args.verbose:
-                    print(f"Previewing graph '{key}' for role '{role_name}'")
-                output_graph(data, "console", role_name, key)
-        else:
-            if args.shadow_folder:
-                tree_file = os.path.join(args.shadow_folder, role_name, "meta", "tree.json")
-            else:
-                tree_file = os.path.join(role_path, "meta", "tree.json")
-            os.makedirs(os.path.dirname(tree_file), exist_ok=True)
-            with open(tree_file, "w", encoding="utf-8") as f:
-                json.dump(graphs, f, indent=2)
-            print(f"Wrote {tree_file}")
+    roles = [role_name for role_name, _ in find_roles(args.role_dir)]
+
+    # For preview, run sequentially to avoid completely interleaved output.
+    if args.preview:
+        for role_name in roles:
+            process_role(
+                role_name=role_name,
+                roles_dir=args.role_dir,
+                depth=args.depth,
+                shadow_folder=args.shadow_folder,
+                output=args.output,
+                preview=True,
+                verbose=args.verbose,
+                no_include_role=args.no_include_role,
+                no_import_role=args.no_import_role,
+                no_dependencies=args.no_dependencies,
+                no_run_after=args.no_run_after,
+            )
+        return
+
+    # Non-preview: roles are processed in parallel
+    with ProcessPoolExecutor() as executor:
+        futures = {
+            executor.submit(
+                process_role,
+                role_name,
+                args.role_dir,
+                args.depth,
+                args.shadow_folder,
+                args.output,
+                False,  # preview=False in parallel mode
+                args.verbose,
+                args.no_include_role,
+                args.no_import_role,
+                args.no_dependencies,
+                args.no_run_after,
+            ): role_name
+            for role_name in roles
+        }
+
+        for future in as_completed(futures):
+            role_name = futures[future]
+            try:
+                future.result()
+            except Exception as exc:
+                # Do not crash the whole run; report the failing role instead.
+                print(f"[ERROR] Role '{role_name}' failed: {exc}")
 
 
 if __name__ == "__main__":
     main()
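In the non-preview path, each role is handed to a separate process, and the `futures` dict maps every future back to its role name so a single failing role is reported without aborting the rest. The same fan-out/fan-in shape in isolation, with a stand-in worker (names here are hypothetical):

```python
# Stand-in demonstration of the ProcessPoolExecutor pattern used above (not project code).
from concurrent.futures import ProcessPoolExecutor, as_completed

def work(name: str) -> str:
    if name == "bad-role":
        raise RuntimeError("boom")
    return f"done: {name}"

if __name__ == "__main__":
    names = ["role-a", "role-b", "bad-role"]
    with ProcessPoolExecutor() as executor:
        futures = {executor.submit(work, n): n for n in names}
        for future in as_completed(futures):
            name = futures[future]
            try:
                print(future.result())
            except Exception as exc:
                print(f"[ERROR] {name} failed: {exc}")  # the run continues
```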
@@ -236,6 +236,12 @@ class FilterModule(object):
             if self.is_feature_enabled(applications, 'logout', application_id):
                 tokens.append(get_url(domains, 'web-svc-logout', web_protocol))
                 tokens.append(get_url(domains, 'web-app-keycloak', web_protocol))
+
+        # 6b) Logout support requires inline handlers (script-src-attr)
+        if directive in ('script-src-attr','script-src-elem'):
+            if self.is_feature_enabled(applications, 'logout', application_id):
+                tokens.append("'unsafe-inline'")
+
 
         # 7) Custom whitelist
         tokens += self.get_csp_whitelist(applications, application_id, directive)
(deleted file, 77 lines)
@@ -1,77 +0,0 @@
-from __future__ import annotations
-
-import sys, os, re
-sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
-
-from ansible.errors import AnsibleFilterError
-from module_utils.config_utils import get_app_conf
-from module_utils.entity_name_utils import get_entity_name
-
-_UNIT_RE = re.compile(r'^\s*(\d+(?:\.\d+)?)\s*([kKmMgGtT]?[bB]?)?\s*$')
-_FACTORS = {
-    '': 1, 'b': 1,
-    'k': 1024, 'kb': 1024,
-    'm': 1024**2, 'mb': 1024**2,
-    'g': 1024**3, 'gb': 1024**3,
-    't': 1024**4, 'tb': 1024**4,
-}
-
-def _to_bytes(v: str) -> int:
-    if v is None:
-        raise AnsibleFilterError("jvm_filters: size value is None")
-    s = str(v).strip()
-    m = _UNIT_RE.match(s)
-    if not m:
-        raise AnsibleFilterError(f"jvm_filters: invalid size '{v}'")
-    num, unit = m.group(1), (m.group(2) or '').lower()
-    try:
-        val = float(num)
-    except ValueError as e:
-        raise AnsibleFilterError(f"jvm_filters: invalid numeric size '{v}'") from e
-    factor = _FACTORS.get(unit)
-    if factor is None:
-        raise AnsibleFilterError(f"jvm_filters: unknown unit in '{v}'")
-    return int(val * factor)
-
-def _to_mb(v: str) -> int:
-    return max(0, _to_bytes(v) // (1024 * 1024))
-
-def _svc(app_id: str) -> str:
-    return get_entity_name(app_id)
-
-def _mem_limit_mb(apps: dict, app_id: str) -> int:
-    svc = _svc(app_id)
-    raw = get_app_conf(apps, app_id, f"docker.services.{svc}.mem_limit")
-    mb = _to_mb(raw)
-    if mb <= 0:
-        raise AnsibleFilterError(f"jvm_filters: mem_limit for '{svc}' must be > 0 MB (got '{raw}')")
-    return mb
-
-def _mem_res_mb(apps: dict, app_id: str) -> int:
-    svc = _svc(app_id)
-    raw = get_app_conf(apps, app_id, f"docker.services.{svc}.mem_reservation")
-    mb = _to_mb(raw)
-    if mb <= 0:
-        raise AnsibleFilterError(f"jvm_filters: mem_reservation for '{svc}' must be > 0 MB (got '{raw}')")
-    return mb
-
-def jvm_max_mb(apps: dict, app_id: str) -> int:
-    """Xmx = min( floor(0.7*limit), limit-1024, 12288 ) with floor at 1024 MB."""
-    limit_mb = _mem_limit_mb(apps, app_id)
-    c1 = (limit_mb * 7) // 10
-    c2 = max(0, limit_mb - 1024)
-    c3 = 12288
-    return max(1024, min(c1, c2, c3))
-
-def jvm_min_mb(apps: dict, app_id: str) -> int:
-    """Xms = min( floor(Xmx/2), mem_reservation, Xmx ) with floor at 512 MB."""
-    xmx = jvm_max_mb(apps, app_id)
-    res = _mem_res_mb(apps, app_id)
-    return max(512, min(xmx // 2, res, xmx))
-
-class FilterModule(object):
-    def filters(self):
-        return {
-            "jvm_max_mb": jvm_max_mb,
-            "jvm_min_mb": jvm_min_mb,
-        }
filter_plugins/memory_filters.py (new file, 179 lines)
@@ -0,0 +1,179 @@
+from __future__ import annotations
+
+import sys, os, re
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+
+from ansible.errors import AnsibleFilterError
+from module_utils.config_utils import get_app_conf
+from module_utils.entity_name_utils import get_entity_name
+
+# Regex and unit conversion table
+_UNIT_RE = re.compile(r'^\s*(\d+(?:\.\d+)?)\s*([kKmMgGtT]?[bB]?)?\s*$')
+_FACTORS = {
+    '': 1, 'b': 1,
+    'k': 1024, 'kb': 1024,
+    'm': 1024**2, 'mb': 1024**2,
+    'g': 1024**3, 'gb': 1024**3,
+    't': 1024**4, 'tb': 1024**4,
+}
+
+# ------------------------------------------------------
+# Helpers: unit conversion
+# ------------------------------------------------------
+
+def _to_bytes(v: str) -> int:
+    """Convert a human-readable size string (e.g., '2g', '512m') to bytes."""
+    if v is None:
+        raise AnsibleFilterError("memory_filters: size value is None")
+
+    s = str(v).strip()
+    m = _UNIT_RE.match(s)
+    if not m:
+        raise AnsibleFilterError(f"memory_filters: invalid size '{v}'")
+
+    num, unit = m.group(1), (m.group(2) or '').lower()
+
+    try:
+        val = float(num)
+    except ValueError as e:
+        raise AnsibleFilterError(f"memory_filters: invalid numeric size '{v}'") from e
+
+    factor = _FACTORS.get(unit)
+    if factor is None:
+        raise AnsibleFilterError(f"memory_filters: unknown unit in '{v}'")
+
+    return int(val * factor)
+
+
+def _to_mb(v: str) -> int:
+    """Convert human-readable size to megabytes."""
+    return max(0, _to_bytes(v) // (1024 * 1024))
+
+
+# ------------------------------------------------------
+# JVM-specific helpers
+# ------------------------------------------------------
+
+def _svc(app_id: str) -> str:
+    """Resolve the internal service name for JVM-based applications."""
+    return get_entity_name(app_id)
+
+
+def _mem_limit_mb(apps: dict, app_id: str) -> int:
+    """Resolve mem_limit for the JVM service of the given application."""
+    svc = _svc(app_id)
+    raw = get_app_conf(apps, app_id, f"docker.services.{svc}.mem_limit")
+    mb = _to_mb(raw)
+
+    if mb <= 0:
+        raise AnsibleFilterError(
+            f"memory_filters: mem_limit for '{svc}' must be > 0 MB (got '{raw}')"
+        )
+    return mb
+
+
+def _mem_res_mb(apps: dict, app_id: str) -> int:
+    """Resolve mem_reservation for the JVM service of the given application."""
+    svc = _svc(app_id)
+    raw = get_app_conf(apps, app_id, f"docker.services.{svc}.mem_reservation")
+    mb = _to_mb(raw)
+
+    if mb <= 0:
+        raise AnsibleFilterError(
+            f"memory_filters: mem_reservation for '{svc}' must be > 0 MB (got '{raw}')"
+        )
+    return mb
+
+
+def jvm_max_mb(apps: dict, app_id: str) -> int:
+    """
+    Compute recommended JVM Xmx in MB using:
+        Xmx = min(
+            floor(0.7 * mem_limit),
+            mem_limit - 1024,
+            12288
+        )
+    with a lower bound of 1024 MB.
+    """
+    limit_mb = _mem_limit_mb(apps, app_id)
+    c1 = (limit_mb * 7) // 10
+    c2 = max(0, limit_mb - 1024)
+    c3 = 12288
+
+    return max(1024, min(c1, c2, c3))
+
+
+def jvm_min_mb(apps: dict, app_id: str) -> int:
+    """
+    Compute recommended JVM Xms in MB using:
+        Xms = min(
+            floor(Xmx / 2),
+            mem_reservation,
+            Xmx
+        )
+    with a lower bound of 512 MB.
+    """
+    xmx = jvm_max_mb(apps, app_id)
+    res = _mem_res_mb(apps, app_id)
+
+    return max(512, min(xmx // 2, res, xmx))
+
+
+# ------------------------------------------------------
+# Redis-specific helpers (always service name "redis")
+# ------------------------------------------------------
+
+def _redis_mem_limit_mb(apps: dict, app_id: str, default_mb: int = 256) -> int:
+    """
+    Resolve mem_limit for the Redis service of an application.
+    Unlike JVM-based services, Redis always uses the service name "redis".
+
+    If no mem_limit is defined, fall back to default_mb.
+    """
+    raw = get_app_conf(
+        apps,
+        app_id,
+        "docker.services.redis.mem_limit",
+        strict=False,
+        default=f"{default_mb}m",
+    )
+
+    mb = _to_mb(raw)
+
+    if mb <= 0:
+        raise AnsibleFilterError(
+            f"memory_filters: mem_limit for 'redis' must be > 0 MB (got '{raw}')"
+        )
+
+    return mb
+
+
+def redis_maxmemory_mb(
+    apps: dict,
+    app_id: str,
+    factor: float = 0.8,
+    min_mb: int = 64
+) -> int:
+    """
+    Compute recommended Redis `maxmemory` in MB.
+
+    * factor: fraction of allowed memory used for Redis data (default 0.8)
+    * min_mb: minimum floor value (default 64 MB)
+
+    maxmemory = max(min_mb, floor(factor * mem_limit))
+    """
+    limit_mb = _redis_mem_limit_mb(apps, app_id)
+    return max(min_mb, int(limit_mb * factor))
+
+
+# ------------------------------------------------------
+# Filter module
+# ------------------------------------------------------
+
+class FilterModule(object):
+    def filters(self):
+        return {
+            "jvm_max_mb": jvm_max_mb,
+            "jvm_min_mb": jvm_min_mb,
+            "redis_maxmemory_mb": redis_maxmemory_mb,
+        }
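To make the sizing rules concrete, here is the `jvm_max_mb`/`jvm_min_mb` arithmetic replayed for a hypothetical service with `mem_limit: 4g` and `mem_reservation: 1g` (values chosen purely for illustration):

```python
# Worked example of the JVM heuristics above, outside of Ansible.
limit_mb = 4096                                # mem_limit: 4g (binary factors: 4 * 1024)
res_mb = 1024                                  # mem_reservation: 1g

xmx = max(1024, min((limit_mb * 7) // 10,      # 70% of limit -> 2867
                    limit_mb - 1024,           # leave 1 GiB headroom -> 3072
                    12288))                    # absolute cap
xms = max(512, min(xmx // 2, res_mb, xmx))     # min(1433, 1024, 2867) -> 1024

print(xmx, xms)                                # 2867 1024

# Redis: with no explicit mem_limit, the 256m default applies.
print(max(64, int(256 * 0.8)))                 # 204 -> "--maxmemory 204mb"
```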
filter_plugins/node_autosize.py (new file, 141 lines)
@@ -0,0 +1,141 @@
+# filter_plugins/node_autosize.py
+# Reuse app config to derive sensible Node.js heap sizes for containers.
+#
+# Usage example (Jinja):
+#   {{ applications | node_max_old_space_size('web-app-nextcloud', 'whiteboard') }}
+#
+# Heuristics (defaults):
+#   - candidate = 35% of mem_limit
+#   - min       = 768 MB (required minimum)
+#   - cap       = min(3072 MB, 60% of mem_limit)
+#
+# NEW: If mem_limit (container cgroup RAM) is smaller than min_mb, we raise an
+# exception - to prevent a misconfiguration where Node's heap could exceed the cgroup
+# and be OOM-killed.
+
+from __future__ import annotations
+import re
+from ansible.errors import AnsibleFilterError
+
+# Import the shared config resolver from module_utils
+try:
+    from module_utils.config_utils import get_app_conf, AppConfigKeyError
+except Exception as e:
+    raise AnsibleFilterError(
+        f"Failed to import get_app_conf from module_utils.config_utils: {e}"
+    )
+
+_SIZE_RE = re.compile(r"^\s*(\d+(?:\.\d+)?)\s*([kmgtp]?i?b?)?\s*$", re.IGNORECASE)
+_MULT = {
+    "": 1,
+    "b": 1,
+    "k": 10**3, "kb": 10**3,
+    "m": 10**6, "mb": 10**6,
+    "g": 10**9, "gb": 10**9,
+    "t": 10**12, "tb": 10**12,
+    "p": 10**15, "pb": 10**15,
+    "kib": 1024,
+    "mib": 1024**2,
+    "gib": 1024**3,
+    "tib": 1024**4,
+    "pib": 1024**5,
+}
+
+
+def _to_bytes(val):
+    """Convert numeric or string memory limits (e.g. '512m', '2GiB') to bytes."""
+    if val is None or val == "":
+        return None
+    if isinstance(val, (int, float)):
+        return int(val)
+    if not isinstance(val, str):
+        raise AnsibleFilterError(f"Unsupported mem_limit type: {type(val).__name__}")
+    m = _SIZE_RE.match(val)
+    if not m:
+        raise AnsibleFilterError(f"Unrecognized mem_limit string: {val!r}")
+    num = float(m.group(1))
+    unit = (m.group(2) or "").lower()
+    if unit not in _MULT:
+        raise AnsibleFilterError(f"Unknown unit in mem_limit: {unit!r}")
+    return int(num * _MULT[unit])
+
+
+def _mb(bytes_val: int) -> int:
+    """Return decimal MB (10^6) as integer - Node expects MB units."""
+    return int(round(bytes_val / 10**6))
+
+
+def _compute_old_space_mb(
+    total_mb: int, pct: float, min_mb: int, hardcap_mb: int, safety_cap_pct: float
+) -> int:
+    """
+    Compute Node.js old-space heap (MB) with safe minimum and cap handling.
+
+    NOTE: The calling function ensures total_mb >= min_mb; here we only
+    apply the sizing heuristics and caps.
+    """
+    candidate = int(total_mb * float(pct))
+    safety_cap = int(total_mb * float(safety_cap_pct))
+    final_cap = min(int(hardcap_mb), safety_cap)
+
+    # Enforce minimum first; only apply cap if it's above the minimum
+    candidate = max(candidate, int(min_mb))
+    if final_cap >= int(min_mb):
+        candidate = min(candidate, final_cap)
+
+    # Never below a tiny hard floor
+    return max(candidate, 128)
+
+
+def node_max_old_space_size(
+    applications: dict,
+    application_id: str,
+    service_name: str,
+    pct: float = 0.35,
+    min_mb: int = 768,
+    hardcap_mb: int = 3072,
+    safety_cap_pct: float = 0.60,
+) -> int:
+    """
+    Derive Node.js --max-old-space-size (MB) from the service's mem_limit in app config.
+
+    Looks up: docker.services.<service_name>.mem_limit for the given application_id.
+
+    Raises:
+        AnsibleFilterError if mem_limit is missing/invalid OR if mem_limit (MB) < min_mb.
+    """
+    try:
+        mem_limit = get_app_conf(
+            applications=applications,
+            application_id=application_id,
+            config_path=f"docker.services.{service_name}.mem_limit",
+            strict=True,
+            default=None,
+        )
+    except AppConfigKeyError as e:
+        raise AnsibleFilterError(str(e))
+
+    if mem_limit in (None, False, ""):
+        raise AnsibleFilterError(
+            f"mem_limit not set for application '{application_id}', service '{service_name}'"
+        )
+
+    total_bytes = _to_bytes(mem_limit)
+    total_mb = _mb(total_bytes)
+
+    # NEW: guardrail - refuse to size a heap larger than the cgroup limit
+    if total_mb < int(min_mb):
+        raise AnsibleFilterError(
+            f"mem_limit ({total_mb} MB) is below the required minimum heap ({int(min_mb)} MB) "
+            f"for application '{application_id}', service '{service_name}'. "
+            f"Increase mem_limit or lower min_mb."
+        )
+
+    return _compute_old_space_mb(total_mb, pct, min_mb, hardcap_mb, safety_cap_pct)
+
+
+class FilterModule(object):
+    def filters(self):
+        return {
+            "node_max_old_space_size": node_max_old_space_size,
+        }
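Under the default knobs (`pct=0.35`, `min_mb=768`, `hardcap_mb=3072`, `safety_cap_pct=0.60`), a hypothetical `mem_limit: 2g` resolves like this; note that `_mb` uses decimal megabytes, so '2g' becomes 2000 MB, not 2048:

```python
# Worked example of _compute_old_space_mb with the defaults (illustrative only).
total_mb = 2000                               # '2g' -> 2 * 10**9 bytes -> 2000 MB
candidate = int(total_mb * 0.35)              # 700
final_cap = min(3072, int(total_mb * 0.60))   # min(3072, 1200) -> 1200
candidate = max(candidate, 768)               # enforce the 768 MB minimum
candidate = min(candidate, final_cap)         # 768 <= 1200, unchanged
print(max(candidate, 128))                    # 768
```

Anything below `mem_limit: 768m` raises instead of silently sizing a heap larger than the cgroup allows.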
@@ -9,6 +9,7 @@ SYS_SERVICE_CLEANUP_BACKUPS:            "{{ 'sys-ctl-cln-bkps' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_CLEANUP_BACKUPS_FAILED:     "{{ 'sys-ctl-cln-faild-bkps' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_CLEANUP_ANONYMOUS_VOLUMES:  "{{ 'sys-ctl-cln-anon-volumes' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_CLEANUP_DISC_SPACE:         "{{ 'sys-ctl-cln-disc-space' | get_service_name(SOFTWARE_NAME) }}"
+SYS_SERVICE_CLEANUP_DOCKER:             "{{ 'sys-ctl-cln-docker' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_OPTIMIZE_DRIVE:             "{{ 'svc-opt-ssd-hdd' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_BACKUP_RMT_2_LOC:           "{{ 'svc-bkp-rmt-2-loc' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_BACKUP_DOCKER_2_LOC:        "{{ 'sys-ctl-bkp-docker-2-loc' | get_service_name(SOFTWARE_NAME) }}"
@@ -32,7 +32,8 @@ SYS_SCHEDULE_HEALTH_MSMTP: "*-*-* 00:00:00"
 SYS_SCHEDULE_CLEANUP_CERTS:           "*-*-* 20:00"  # Deletes and revokes unused certs once per day
 SYS_SCHEDULE_CLEANUP_FAILED_BACKUPS:  "*-*-* 21:00"  # Clean up failed docker backups once per day
 SYS_SCHEDULE_CLEANUP_BACKUPS:         "*-*-* 22:00"  # Cleanup backups once per day, MUST be called before disc space cleanup
-SYS_SCHEDULE_CLEANUP_DISC_SPACE:      "*-*-* 23:00"  # Cleanup disc space once per day
+SYS_SCHEDULE_CLEANUP_DOCKER:          "*-*-* 23:00"  # Cleanup docker anonymous volumes and prune once per day
+SYS_SCHEDULE_CLEANUP_DISC_SPACE:      "*-*-* 23:30"  # Cleanup disc space once per day
 
 ### Schedule for repair services
 SYS_SCHEDULE_REPAIR_BTRFS_AUTO_BALANCER: "Sat *-*-01..07 00:00:00"  # Execute btrfs auto balancer every first Saturday of a month
@@ -114,6 +114,12 @@ defaults_networks:
       subnet: 192.168.104.48/28
     web-app-mini-qr:
       subnet: 192.168.104.64/28
+    web-app-shopware:
+      subnet: 192.168.104.80/28
+    web-svc-onlyoffice:
+      subnet: 192.168.104.96/28
+    web-app-suitecrm:
+      subnet: 192.168.104.112/28
 
     # /24 Networks / 254 Usable Clients
     web-app-bigbluebutton:
@@ -18,6 +18,7 @@ ports:
     web-app-fusiondirectory: 4187
     web-app-gitea:           4188
     web-app-snipe-it:        4189
+    web-app-suitecrm:        4190
   ldap:
     svc-db-openldap: 389
   http:
@@ -81,6 +82,9 @@ ports:
     web-app-minio_api:     8057
     web-app-minio_console: 8058
     web-app-mini-qr:       8059
+    web-app-shopware:      8060
+    web-svc-onlyoffice:    8061
+    web-app-suitecrm:      8062
     web-app-bigbluebutton: 48087  # This port is predefined by bbb. @todo Try to change this to a 8XXX port
   public:
     # The following ports should be changed to 22 on the subdomain via stream mapping
@@ -87,7 +87,7 @@ LDAP:
   ID:              "{{ _ldap_user_id }}"
   MAIL:            "mail"
   FULLNAME:        "cn"
-  FIRSTNAME:       "givenname"
+  FIRSTNAME:       "givenName"
   SURNAME:         "sn"
   SSH_PUBLIC_KEY:  "sshPublicKey"
   NEXTCLOUD_QUOTA: "nextcloudQuota"
@@ -14,8 +14,10 @@
 
 - name: "create {{ PATH_DOCKER_COMPOSE_INSTANCES }}"
   file:
-    path: "{{ PATH_DOCKER_COMPOSE_INSTANCES }}"
-    state: directory
-    mode: 0700
-    owner: root
-    group: root
+    path:  "{{ PATH_DOCKER_COMPOSE_INSTANCES }}"
+    state: directory
+    mode:  0700
+    owner: root
+    group: root
+
+- include_tasks: utils/run_once.yml
@@ -1,6 +1,4 @@
-- block:
-    - include_tasks: 01_core.yml
-    - include_tasks: utils/run_once.yml
+- include_tasks: 01_core.yml
   when: run_once_docker_compose is not defined
 
 - name: "Load variables from {{ DOCKER_COMPOSE_VARIABLE_FILE }} for whole play"
@@ -1,2 +1,2 @@
-DOCKER_COMPOSE_VARIABLE_FILE:    "{{ role_path }}/vars/docker-compose.yml"
+DOCKER_COMPOSE_VARIABLE_FILE:    "{{ [ role_path, 'vars/docker-compose.yml' ] | path_join }}"
 DOCKER_COMPOSE_DOWN_ALL_PACKAGE: "docodol"
roles/docker-container/templates/healthcheck/http.yml.j2 (new file, 31 lines)
@@ -0,0 +1,31 @@
+{# ------------------------------------------------------------------------------
+   Healthcheck: HTTP Local
+   ------------------------------------------------------------------------------
+   This template defines a generic HTTP healthcheck for containers exposing
+   a web service on a local port (e.g., Nginx, Apache, PHP-FPM, Shopware, etc.).
+
+   It uses `wget` or `curl` (as fallback) to test if the container responds on
+   http://127.0.0.1:{{ container_port }}/. If the request succeeds, Docker marks
+   the container as "healthy"; otherwise, as "unhealthy".
+
+   Parameters:
+     - container_port: The internal port the service listens on.
+
+   Timing:
+     - interval:     30s → Check every 30 seconds
+     - timeout:      5s  → Each check must complete within 5 seconds
+     - retries:      5   → Mark unhealthy after 5 consecutive failures
+     - start_period: 20s → Grace period before health checks begin
+
+   Usage:
+     {% filter indent(4) %}
+     {% include 'roles/docker-container/templates/healthcheck/http.yml.j2' %}
+     {% endfilter %}
+   ------------------------------------------------------------------------------
+#}
+healthcheck:
+  test: ["CMD-SHELL", "wget -qO- http://127.0.0.1:{{ container_port }}/ >/dev/null || curl -fsS http://127.0.0.1:{{ container_port }}/ >/dev/null"]
+  interval: 30s
+  timeout: 5s
+  retries: 5
+  start_period: 20s
@@ -6,7 +6,7 @@ entity_name: "{{ application_id | get_entity_name }}"
 docker_compose_flush_handlers: true
 
 # Docker Compose
-database_type: "{{ application_id | get_entity_name }}"
+database_type: "{{ entity_name }}"
 
 ## Postgres
 POSTGRES_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.data') }}"
@@ -9,6 +9,16 @@
       driver: journald
     volumes:
       - redis:/data
+    # Just save in memory and prevent huge redis volumes
+    command:
+      - redis-server
+      - --appendonly
+      - "no"
+      - --save
+      - ""
+      - --maxmemory {{ applications | redis_maxmemory_mb(application_id, 0.8, RESOURCE_MEM_LIMIT | int ) }}mb
+      - --maxmemory-policy
+      - "allkeys-lru"
     healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 1s
@@ -21,6 +21,7 @@
     system_service_timer_enabled:      true
     system_service_force_linear_sync:  true
     system_service_force_flush:        "{{ MODE_BACKUP | bool }}"
+    system_service_suppress_flush:     "{{ not MODE_BACKUP | bool }}"
     system_service_on_calendar:        "{{ SYS_SCHEDULE_BACKUP_DOCKER_TO_LOCAL }}"
     system_service_tpl_exec_start_pre: '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(" ") }} --ignore {{ SYS_SERVICE_BACKUP_DOCKER_2_LOC }} --timeout "{{ SYS_TIMEOUT_BACKUP_SERVICES }}"'
     system_service_tpl_exec_start:     "/bin/sh -c '{{ BKP_DOCKER_2_LOC_EXEC }}'"
@@ -28,8 +28,8 @@ if [ "$force_freeing" = true ]; then
 {% endif %}
 
 if command -v docker >/dev/null 2>&1 ; then
-  echo "cleaning up docker" &&
-  docker system prune -f || exit 3
+  echo "cleaning up docker (prune + anonymous volumes) via systemd service" &&
+  systemctl start {{ SYS_SERVICE_CLEANUP_DOCKER }} || exit 3
 
 nextcloud_application_container="{{ applications | get_app_conf('web-app-nextcloud', 'docker.services.nextcloud.name') }}"
 if [ -n "$nextcloud_application_container" ] && [ "$(docker ps -a -q -f name=$nextcloud_application_container)" ] ; then
roles/sys-ctl-cln-docker/README.md (new file, 47 lines)
@@ -0,0 +1,47 @@
+# Cleanup Docker Resources
+
+## Description
+
+This role performs a complete cleanup of Docker resources by invoking a systemd-managed script.
+It removes unused Docker images, stopped containers, networks, build cache, and anonymous volumes.
+The cleanup is fully automated and can run on a schedule or be triggered manually.
+
+## Overview
+
+Optimized for maintaining a clean and efficient Docker environment, this role:
+
+* Loads and triggers the anonymous volume cleanup role.
+* Installs a systemd service and timer for Docker pruning.
+* Deploys a cleanup script that invokes:
+
+  * The anonymous volume cleanup service.
+  * `docker system prune -a -f` to remove unused Docker resources.
+* Allows forced execution during maintenance runs (`MODE_CLEANUP`).
+
+## Purpose
+
+The primary purpose of this role is to prevent storage bloat caused by unused Docker images, volumes, and build artifacts.
+Regular pruning ensures:
+
+* Reduced disk usage
+* Improved system performance
+* Faster CI/CD and container deployments
+* More predictable Docker engine behavior
+
+## Features
+
+* **Anonymous Volume Cleanup:** Integrates with `sys-ctl-cln-anon-volumes` to remove stale volumes.
+* **Full Docker Prune:** Executes `docker system prune -a -f` to reclaim space.
+* **Systemd Integration:** Registers a systemd unit and timer for automated cleanup.
+* **Scheduled Execution:** Runs daily (or as configured) based on `SYS_SCHEDULE_CLEANUP_DOCKER`.
+* **Force Execution Mode:** When `MODE_CLEANUP=true`, cleanup is executed immediately.
+* **Safe Execution:** Includes validation for missing services and Docker availability.
+
+## Script Behavior
+
+The cleanup script:
+
+1. Checks whether the anonymous volume cleanup service is defined and available.
+2. Starts the service if present.
+3. Runs `docker system prune -a -f` if Docker is installed.
+4. Stops execution immediately on errors (`set -e` behavior).
roles/sys-ctl-cln-docker/meta/main.yml (new file, 27 lines)
@@ -0,0 +1,27 @@
+---
+galaxy_info:
+  author: "Kevin Veen-Birkenbach"
+  description: >
+    Cleans up anonymous Docker volumes and performs a full `docker system prune -a -f`
+    via a dedicated systemd service.
+  license: "Infinito.Nexus NonCommercial License"
+  license_url: "https://s.infinito.nexus/license"
+  company: |
+    Kevin Veen-Birkenbach
+    Consulting & Coaching Solutions
+    https://www.veen.world
+  min_ansible_version: "2.9"
+  platforms:
+    - name: Linux
+      versions:
+        - all
+  galaxy_tags:
+    - docker
+    - cleanup
+    - prune
+    - automation
+    - maintenance
+    - system
+  repository: "https://s.infinito.nexus/code"
+  issue_tracker_url: "https://s.infinito.nexus/issues"
+  documentation: "https://docs.infinito.nexus"
roles/sys-ctl-cln-docker/tasks/main.yml (new file, 23 lines)
@@ -0,0 +1,23 @@
+- block:
+    - name: Load role to delete anonymous volumes
+      include_role:
+        name: sys-ctl-cln-anon-volumes
+      vars:
+        system_service_force_flush: true
+      when: run_once_sys_ctl_cln_anon_volumes is not defined
+
+    - name: "Register Docker prune system service"
+      include_role:
+        name: sys-service
+      vars:
+        system_service_timer_enabled:      true
+        system_service_on_calendar:        "{{ SYS_SCHEDULE_CLEANUP_DOCKER }}"
+        system_service_copy_files:         true
+        system_service_tpl_exec_start:     "{{ system_service_script_exec }}"
+        system_service_tpl_exec_start_pre: ""
+        system_service_tpl_on_failure:     "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
+        system_service_force_linear_sync:  false
+        system_service_force_flush:        "{{ MODE_CLEANUP }}"
+
+    - include_tasks: utils/run_once.yml
+  when: run_once_sys_ctl_cln_docker is not defined
roles/sys-ctl-cln-docker/templates/script.sh.j2 (new file, 10 lines)
@@ -0,0 +1,10 @@
+#!/bin/sh
+# Cleans up anonymous Docker volumes and performs a full Docker system prune.
+
+set -e
+
+echo "Cleaning up anonymous Docker volumes via systemd service..."
+systemctl start {{ SYS_SERVICE_CLEANUP_ANONYMOUS_VOLUMES }} || exit 1
+echo "Pruning Docker system resources (images, containers, networks, build cache)..."
+docker system prune -a -f || exit 2
+echo "Docker prune cleanup finished."
roles/sys-ctl-cln-docker/vars/main.yml (new file, 1 line)
@@ -0,0 +1 @@
+system_service_id: "sys-ctl-cln-docker"
roles/sys-ctl-hlth-disc-space/files/script.py (new file, 58 lines)
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3
+import argparse
+import subprocess
+import sys
+
+
+def get_disk_usage_percentages():
+    """
+    Returns a list of filesystem usage percentages as integers.
+    Equivalent to: df --output=pcent | sed 1d | tr -d '%'
+    """
+    result = subprocess.run(
+        ["df", "--output=pcent"],
+        capture_output=True,
+        text=True,
+        check=True
+    )
+
+    lines = result.stdout.strip().split("\n")[1:]  # Skip header
+    percentages = []
+
+    for line in lines:
+        value = line.strip().replace("%", "")
+        if value.isdigit():
+            percentages.append(int(value))
+
+    return percentages
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description="Check disk usage and report if any filesystem exceeds the given threshold."
+    )
+
+    parser.add_argument(
+        "minimum_percent_cleanup_disk_space",
+        type=int,
+        help="Minimum free disk space percentage threshold that triggers a warning."
+    )
+
+    args = parser.parse_args()
+    threshold = args.minimum_percent_cleanup_disk_space
+
+    print("Checking disk space usage...")
+    subprocess.run(["df"])  # Show the same df output as the original script
+
+    errors = 0
+    percentages = get_disk_usage_percentages()
+
+    for usage in percentages:
+        if usage > threshold:
+            print(f"WARNING: {usage}% exceeds the limit of {threshold}%.")
+            errors += 1
+
+    sys.exit(errors)
+
+
+if __name__ == "__main__":
+    main()
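Because the script exits with the number of filesystems over the threshold, a systemd unit sees success only when every mount is below the limit. The logic in isolation:

```python
# Illustration of the exit-code semantics of script.py above.
percentages = [45, 92, 97]    # sample values from df --output=pcent
threshold = 90

errors = sum(1 for usage in percentages if usage > threshold)
print(errors)                 # 2 -> the script would exit with status 2
```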
(deleted file, 15 lines)
@@ -1,15 +0,0 @@
-#!/bin/sh
-# @param $1 mimimum free disc space
-errors=0
-minimum_percent_cleanup_disc_space="$1"
-echo "checking disc space use..."
-df
-for disc_use_percent in $(df --output=pcent | sed 1d)
-do
-    disc_use_percent_number=$(echo "$disc_use_percent" | sed "s/%//")
-    if [ "$disc_use_percent_number" -gt "$minimum_percent_cleanup_disc_space" ]; then
-        echo "WARNING: $disc_use_percent_number exceeds the limit of $minimum_percent_cleanup_disc_space%."
-        errors+=1;
-    fi
-done
-exit $errors;
@@ -8,4 +8,5 @@
   vars:
     system_service_on_calendar:    "{{ SYS_SCHEDULE_HEALTH_DISC_SPACE }}"
     system_service_timer_enabled:  true
     system_service_tpl_exec_start: "{{ system_service_script_exec }} {{ SIZE_PERCENT_CLEANUP_DISC_SPACE }}"
+    system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }} {{ SYS_SERVICE_CLEANUP_DISC_SPACE }}"
@@ -1,10 +1,24 @@
- name: Generate color palette with colorscheme-generator
  set_fact:
    color_palette: "{{ lookup('colorscheme', CSS_BASE_COLOR, count=CSS_COUNT, shades=CSS_SHADES) }}"
    CSS_COLOR_PALETTE: "{{ lookup('colorscheme', CSS_BASE_COLOR, count=CSS_COUNT, shades=CSS_SHADES) }}"

- name: Generate inverted color palette with colorscheme-generator
  set_fact:
    inverted_color_palette: "{{ lookup('colorscheme', CSS_BASE_COLOR, count=CSS_COUNT, shades=CSS_SHADES, invert_lightness=True) }}"
    CSS_COLOR_PALETTE_INVERTED: "{{ lookup('colorscheme', CSS_BASE_COLOR, count=CSS_COUNT, shades=CSS_SHADES, invert_lightness=True) }}"

- name: "Compute deterministic gradient angle from default.css template mtime"
  set_fact:
    CSS_GRADIENT_ANGLE: >-
      {{
        (
          lookup(
            'local_mtime_qs',
            [playbook_dir, 'roles', 'sys-front-inj-css', 'templates', 'css', 'default.css.j2'] | path_join
          )
          | regex_replace('^.*=', '')
          | int
        ) % 360
      }}

- name: Deploy default CSS files
  template:
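Editorial note: the angle is now derived once per deploy from the template's modification time instead of `range(0, 361) | random`, so every gradient on a page shares the same angle and re-rendering without touching the template stays idempotent. A rough Python equivalent, assuming the `local_mtime_qs` lookup yields a `?v=<mtime>` style query string whose numeric tail survives the `regex_replace('^.*=', '')`:

    import os

    def gradient_angle(template_path: str) -> int:
        # Deterministic 0-359 angle from the file's mtime, mirroring
        # (mtime | int) % 360 in the set_fact task above.
        return int(os.path.getmtime(template_path)) % 360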
@@ -3,7 +3,7 @@
   Now using a button background that is only slightly darker than the overall background */
html[native-dark-active] .btn, .btn {
    background-color: var(--color-01-87);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-70), var(--color-01-91), var(--color-01-95), var(--color-01-95));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-70), var(--color-01-91), var(--color-01-95), var(--color-01-95));
    color: var(--color-01-50);
    border-color: var(--color-01-80);
    cursor: pointer;
@@ -13,7 +13,7 @@ html[native-dark-active] .btn, .btn {
.navbar, .navbar-light, .navbar-dark, .navbar.bg-light {
    background-color: var(--color-01-90);
    /* New Gradient based on original background (90 -5, 90, 90 +1, 90 +5) */
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-85), var(--color-01-90), var(--color-01-91), var(--color-01-95));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-85), var(--color-01-90), var(--color-01-91), var(--color-01-95));
    color: var(--color-01-50);
    border-color: var(--color-01-85);
}
@@ -31,7 +31,7 @@ html[native-dark-active] .btn, .btn {
.card {
    background-color: var(--color-01-90);
    /* New Gradient based on original background (90 -5, 90, 90 +1, 90 +5) */
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-85), var(--color-01-90), var(--color-01-91), var(--color-01-95));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-85), var(--color-01-90), var(--color-01-91), var(--color-01-95));
    border-color: var(--color-01-85);
    color: var(--color-01-12);
}
@@ -45,7 +45,7 @@ html[native-dark-active] .btn, .btn {
.nav-item .dropdown-menu {
    background-color: var(--color-01-80);
    /* New Gradient based on original background (80 -5, 80, 80 +1, 80 +5) */
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-75), var(--color-01-80), var(--color-01-81), var(--color-01-85));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-75), var(--color-01-80), var(--color-01-81), var(--color-01-85));
    color: var(--color-01-40);
}

@@ -57,13 +57,13 @@ html[native-dark-active] .btn, .btn {
    color: var(--color-01-40);
    background-color: var(--color-01-80);
    /* New Gradient based on original background (80 -5, 80, 80 +1, 80 +5) */
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-75), var(--color-01-80), var(--color-01-81), var(--color-01-85));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-75), var(--color-01-80), var(--color-01-81), var(--color-01-85));
}

.dropdown-item:hover,
.dropdown-item:focus {
    background-color: var(--color-01-65);
    /* New Gradient based on original background (65 -5, 65, 65 +1, 65 +5) */
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-60), var(--color-01-65), var(--color-01-66), var(--color-01-70));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-60), var(--color-01-65), var(--color-01-66), var(--color-01-70));
    color: var(--color-01-40);
}

@@ -15,14 +15,14 @@ HINT:
/* Auto-generated by colorscheme-generator */

:root {
{% for var_name, color in color_palette.items() %}
{% for var_name, color in CSS_COLOR_PALETTE.items() %}
    {{ var_name }}: {{ color }};
{% endfor %}
}

@media (prefers-color-scheme: dark) {
    :root {
{% for var_name, color in inverted_color_palette.items() %}
{% for var_name, color in CSS_COLOR_PALETTE_INVERTED.items() %}
        {{ var_name }}: {{ color }};
{% endfor %}
    }
@@ -102,7 +102,7 @@ HINT:
/* Global Defaults (Colors Only) */
body, html[native-dark-active] {
    background-color: var(--color-01-93);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-93), var(--color-01-91), var(--color-01-95), var(--color-01-93));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-93), var(--color-01-91), var(--color-01-95), var(--color-01-93));
    background-attachment: fixed;
    color: var(--color-01-40);
    font-family: {{design.font.type}};
@@ -147,7 +147,7 @@ input:invalid,
textarea:invalid,
select:invalid {
    background-color: var(--color-01-01);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-01), var(--color-01-10));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-01), var(--color-01-10));
    /* Use Bootstrap danger color for error messages */
    color: var(--bs-danger);
    border-color: var(--color-01-20);
@@ -158,7 +158,7 @@ input:valid,
textarea:valid,
select:valid {
    background-color: var(--color-01-80);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-80), var(--color-01-90));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-80), var(--color-01-90));
    /* Use Bootstrap success color for confirmation messages */
    color: var(--bs-success);
    border-color: var(--color-01-70);
@@ -169,7 +169,7 @@ input:required,
textarea:required,
select:required {
    background-color: var(--color-01-50);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-50), var(--color-01-60));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-50), var(--color-01-60));
    /* Use Bootstrap warning color to indicate a required field */
    color: var(--bs-warning);
    border-color: var(--color-01-70);
@@ -180,7 +180,7 @@ input:optional,
textarea:optional,
select:optional {
    background-color: var(--color-01-60);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-60), var(--color-01-70));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-60), var(--color-01-70));
    /* Use Bootstrap info color to indicate optional information */
    color: var(--bs-info);
    border-color: var(--color-01-70);
@@ -191,7 +191,7 @@ input:read-only,
textarea:read-only,
select:read-only {
    background-color: var(--color-01-80);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-90), var(--color-01-70));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-90), var(--color-01-70));
    color: var(--color-01-20);
    border-color: var(--color-01-50);
}
@@ -201,7 +201,7 @@ input:read-write,
textarea:read-write,
select:read-write {
    background-color: var(--color-01-70);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-70), var(--color-01-80));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-70), var(--color-01-80));
    color: var(--color-01-40);
    border-color: var(--color-01-70);
}
@@ -211,7 +211,7 @@ input:in-range,
textarea:in-range,
select:in-range {
    background-color: var(--color-01-70);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-70), var(--color-01-85));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-70), var(--color-01-85));
    color: var(--color-01-40);
    border-color: var(--color-01-70);
}
@@ -221,7 +221,7 @@ input:out-of-range,
textarea:out-of-range,
select:out-of-range {
    background-color: var(--color-01-10);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-10), var(--color-01-30));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-10), var(--color-01-30));
    color: var(--color-01-10);
    border-color: var(--color-01-50);
}
@@ -231,7 +231,7 @@ input:placeholder-shown,
textarea:placeholder-shown,
select:placeholder-shown {
    background-color: var(--color-01-82);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-82), var(--color-01-90));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-82), var(--color-01-90));
    color: var(--color-01-40);
    border-color: var(--color-01-70);
}
@@ -241,7 +241,7 @@ input:focus,
textarea:focus,
select:focus {
    background-color: var(--color-01-75);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-75), var(--color-01-85));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-75), var(--color-01-85));
    color: var(--color-01-40);
    border-color: var(--color-01-50);
}
@@ -251,7 +251,7 @@ input:hover,
textarea:hover,
select:hover {
    background-color: var(--color-01-78);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-78), var(--color-01-88));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-78), var(--color-01-88));
    color: var(--color-01-40);
    border-color: var(--color-01-65);
}
@@ -261,7 +261,7 @@ input:active,
textarea:active,
select:active {
    background-color: var(--color-01-68);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-68), var(--color-01-78));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-68), var(--color-01-78));
    color: var(--color-01-40);
    border-color: var(--color-01-60);
}
@@ -269,11 +269,18 @@ select:active {
/* {# Checked state: specifically for radio buttons and checkboxes when selected. #} */
input:checked {
    background-color: var(--color-01-90);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-90), var(--color-01-99));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-90), var(--color-01-99));
    color: var(--color-01-40);
    border-color: var(--color-01-70);
}

input[type="checkbox"] {
    appearance: auto;
    -webkit-appearance: auto;
    -moz-appearance: auto;
    background: none;
}

option {
    background-color: var(--color-01-82);
    color: var(--color-01-07);
@@ -287,7 +294,7 @@ th, td {
thead {
    background-color: var(--color-01-80);
    /* New Gradient based on original background (80 -5, 80, 80 +1, 80 +5) */
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-75), var(--color-01-80), var(--color-01-81), var(--color-01-85));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-75), var(--color-01-80), var(--color-01-81), var(--color-01-85));
    color: var(--color-01-40);
}

@@ -1,8 +1,8 @@
# Constants
CSS_FILES: ['default.css','bootstrap.css']
CSS_BASE_COLOR: "{{ design.css.colors.base }}"
CSS_COUNT: 7
CSS_SHADES: 100
CSS_FILES: ['default.css','bootstrap.css']
CSS_BASE_COLOR: "{{ design.css.colors.base }}"
CSS_COUNT: 7
CSS_SHADES: 100

# Variables
css_app_dst: "{{ [cdn_paths_all.role.release.css, 'style.css'] | path_join }}"
css_app_dst: "{{ [cdn_paths_all.role.release.css, 'style.css'] | path_join }}"
@@ -16,14 +16,11 @@
  include_tasks: "02_reset.yml"
  when: MODE_RESET | bool

- name: "Load cleanup tasks when MODE_CLEANUP or MODE_RESET is enabled"
  include_tasks: "03_cleanup.yml"
  when: MODE_CLEANUP | bool or MODE_RESET | bool

- name: Include backup, repair and health services for docker
  include_role:
    name: "{{ item }}"
  loop:
    - sys-ctl-cln-docker
    - sys-ctl-bkp-docker-2-loc
    - sys-ctl-hlth-docker-container
    - sys-ctl-hlth-docker-volumes

@@ -1,12 +0,0 @@
- block:
    - name: Load role to delete anonymous volumes
      include_role:
        name: sys-ctl-cln-anon-volumes
      vars:
        system_service_force_flush: true
    - include_tasks: utils/run_once.yml
  when: run_once_sys_ctl_cln_anon_volumes is not defined

- name: Prune Docker resources
  become: true
  ansible.builtin.command: docker system prune -f
@@ -5,28 +5,32 @@ location {{location}}
{% include 'roles/web-app-oauth2-proxy/templates/following_directives.conf.j2'%}
{% endif %}

{% include 'roles/sys-svc-proxy/templates/headers/content_security_policy.conf.j2' %}

{% include 'roles/sys-svc-proxy/templates/headers/access_control_allow.conf.j2' %}

    # Client Limits for HTML
    client_max_body_size {{ client_max_body_size | default('100m') }};

    {% set _loc = location|trim %}
    proxy_pass http://127.0.0.1:{{ http_port }}{{ (_loc|regex_replace('^(?:=|\\^~)\\s*','')) if not (_loc is match('^(@|~)')) else '' }};

    # headers
    # Proxyconfiguration for Upload
    proxy_set_header Host $host;
    proxy_set_header Authorization $http_authorization;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
    proxy_set_header X-Forwarded-Port {{ WEB_PORT }};
    proxy_set_header X-Forwarded-Ssl on;
    proxy_pass_request_headers on;

{% include 'roles/sys-svc-proxy/templates/headers/content_security_policy.conf.j2' %}

{% include 'roles/sys-svc-proxy/templates/headers/access_control_allow.conf.j2' %}

    # WebSocket specific header
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";

    # timeouts
    # Timeouts
    proxy_connect_timeout 5s;
    proxy_send_timeout 900s;
    proxy_read_timeout 900s;

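Editorial note: the new `proxy_pass` expression strips nginx location modifiers before appending the path. Exact (`=`) and prefix (`^~`) modifiers are removed, while named (`@`) and regex (`~`) locations get no URI suffix at all, since nginx forbids a URI in `proxy_pass` for those. A small sketch of the same logic in Python (the function name is illustrative):

    import re

    def proxy_pass_suffix(location: str) -> str:
        loc = location.strip()
        if re.match(r"^(@|~)", loc):
            return ""                              # named/regex location: no URI suffix
        return re.sub(r"^(?:=|\^~)\s*", "", loc)   # drop '=' or '^~' modifier

    assert proxy_pass_suffix("= /api") == "/api"
    assert proxy_pass_suffix("^~ /static/") == "/static/"
    assert proxy_pass_suffix("@fallback") == ""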
@@ -1,13 +1,17 @@
location {{ location_upload }} {

    # Proxyconfiguration for Upload
    proxy_pass http://127.0.0.1:{{ http_port }};
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
    client_max_body_size {{ client_max_body_size }};
    proxy_buffering off;
    proxy_request_buffering off;
    proxy_read_timeout 120s;
    proxy_connect_timeout 120s;
    proxy_send_timeout 120s;

    # Client Limits for Upload
    client_max_body_size {{ client_max_body_size }};
}
@@ -17,7 +17,7 @@ server
{% include 'roles/sys-svc-letsencrypt/templates/ssl_header.j2' %}

{% if applications | get_app_conf(application_id, 'features.oauth2', False) %}
    {% set acl = applications | get_app_conf(application_id, 'oauth2_proxy.acl', False, {}) %}
    {% set acl = applications | get_app_conf(application_id, 'docker.services.oauth2_proxy.acl', False, {}) %}

    {% if acl.blacklist is defined %}
        {# 1. Expose everything by default, then protect blacklisted paths #}

roles/web-app-bookwyrm/files/style.css (new file, +21)
@@ -0,0 +1,21 @@
.title, .subtitle {
    color: var(--color-01-10);
}

.is-child {
    background-color: rgba( var(--color-rgb-01-80), 0.3 );
    color: var(--color-01-10);
}

.footer{
    background-color: rgba( var(--color-rgb-01-80), 0.3 );
    color: var(--color-01-90);
}

.has-background-primary-light{
    background-color: rgba( var(--color-rgb-01-80), 0.3 ) !important;
}

.has-background-secondary{
    background-color: rgba( var(--color-rgb-01-80), 0.3 ) !important;
}
@@ -4,11 +4,13 @@ __metaclass__ = type
from ansible.plugins.lookup import LookupBase
from ansible.errors import AnsibleError


class LookupModule(LookupBase):
    def run(self, terms, variables=None, **kwargs):
        """
        Group the given cards into categorized and uncategorized lists
        based on the tags from menu_categories.
        Categories are sorted alphabetically before returning.
        """
        if len(terms) < 2:
            raise AnsibleError("Missing required arguments")
@@ -19,6 +21,7 @@ class LookupModule(LookupBase):
        categorized = {}
        uncategorized = []

        # Categorize cards
        for card in cards:
            found = False
            for category, data in menu_categories.items():
@@ -29,10 +32,14 @@ class LookupModule(LookupBase):
            if not found:
                uncategorized.append(card)

        # Sort categories alphabetically
        sorted_categorized = {
            k: categorized[k] for k in sorted(categorized.keys(), key=str.lower)
        }

        return [
            {
                'categorized': categorized,
                'categorized': sorted_categorized,
                'uncategorized': uncategorized,
            }
        ]

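Editorial note: the fix sorts category keys case-insensitively before building the returned dict (previously the unsorted `categorized` leaked through to the caller). Since Python 3.7 dicts preserve insertion order, rebuilding the dict from sorted keys is enough:

    # Standalone demo of the sorting step (sample data is illustrative)
    categorized = {"Storage": ["minio"], "commerce": ["shopware"], "Games": ["chess"]}

    sorted_categorized = {
        k: categorized[k] for k in sorted(categorized, key=str.lower)
    }
    print(list(sorted_categorized))  # ['commerce', 'Games', 'Storage']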
@@ -11,8 +11,8 @@ contact:
    description: Send {{ 'us' if service_provider.type == 'legal' else 'me' }} an email
    icon:
      class: fa-solid fa-envelope
    url: mailto:{{service_provider.contact.email}}
    identifier: {{service_provider.contact.email}}
    url: mailto:{{ service_provider.contact.email }}
    identifier: {{ service_provider.contact.email }}

{% endif %}
{% if service_provider.contact.phone is defined %}
@@ -32,6 +32,6 @@ contact:
    description: Chat with {{ 'us' if service_provider.type == 'legal' else 'me' }} on Matrix
    icon:
      class: fa-solid fa-cubes
    identifier: "{{service_provider.contact.matrix}}"
    identifier: "{{ service_provider.contact.matrix }}"

{% endif %}

@@ -25,7 +25,6 @@ portfolio_menu_categories:
      - ollama
      - openwebui
      - flowise
      - minio
      - qdrant
      - litellm

@@ -63,6 +62,8 @@ portfolio_menu_categories:
      - games
      - chess
      - boardgame
      - game
      - roulette

  Communication:
    description: "Tools for communication"
@@ -102,14 +103,12 @@ portfolio_menu_categories:
      - fusiondirectory
      - user-management

  Customer Relationship Management:
    description: "Tools for managing customer relationships, sales pipelines, marketing, and support activities."
  Customer Relationship:
    description: "Customer Relationship Management (CRM) software for managing customer relationships, sales pipelines, marketing, and support activities."
    icon: "fa-solid fa-address-book"
    tags:
      - crm
      - customer
      - relationship
      - sales
      - marketing
      - support
      - espocrm
@@ -222,7 +221,7 @@ portfolio_menu_categories:
      - snipe-it

  Content Management:
    description: "CMS and web publishing platforms"
    description: "Content Management Systems (CMS) and web publishing platforms"
    icon: "fa-solid fa-file-alt"
    tags:
      - cms
@@ -231,4 +230,27 @@ portfolio_menu_categories:
      - website
      - joomla
      - wordpress
      - blog
      - blog

  Commerce:
    description: "Platforms for building and managing online shops, product catalogs, and digital sales channels — including payment, inventory, and customer features."
    icon: "fa-solid fa-cart-shopping"
    tags:
      - commerce
      - ecommerce
      - shopware
      - shop
      - sales
      - store
      - magento
      - pretix

  Storage:
    description: "High-performance, self-hosted storage solutions for managing, scaling, and accessing unstructured data — including object storage compatible with Amazon S3 APIs."
    icon: "fa-solid fa-database"
    tags:
      - storage
      - object-storage
      - s3
      - minio
      - datasets

@@ -99,4 +99,43 @@
    /* Float Kit */
    --float-kit-arrow-stroke-color: var(--primary-low); /* already mapped above */
    --float-kit-arrow-fill-color: var(--secondary); /* already mapped above */

    --d-content-background: rgba( var(--color-rgb-01-83),1 );
}


.search-input--header{
    background-color: rgba( var(--color-rgb-01-83),0.2 ) !important;
    color: var(--color-01-01)
}

div#main-outlet, #list-area{
    background-color: rgba( var(--color-rgb-02-90),1 ) !important;
}

.list-controls{
    background-color: rgba( var(--color-rgb-01-90), 0.9) !important;
    border-radius: 30px;
}

.topic-list-item{
    background-color: rgba( var(--color-rgb-01-85), 0.4) !important;
}

.topic-list-item a{
    color: var(--color-01-10) !important;
}

div#main-outlet div.regular.ember-view{
    background-color: rgba( var(--color-rgb-01-85),0.3 );
    border-radius: 06px;
}

button.btn{
    background-color: rgba( var(--color-rgb-01-85),0.9 );
}

div.timeline-scrollarea div.timeline-scroller{
    background-color: rgba( var(--color-rgb-01-85),0.9 ) !important;
    border-radius: 06px;
}
@@ -1,5 +1,6 @@
features:
  logout: false # Just deactivated to oppress warnings, elk is anyhow not running
  matomo: true
server:
  domains:
    canonical:

@@ -23,13 +23,14 @@ server:
        unsafe-eval: true
      style-src-attr:
        unsafe-inline: true
oauth2_proxy:
  application: "application"
  port: "80"
addons:
  keycloakpassword: {}
  ldapauth: {}
docker:
  services:
    database:
      enabled: true
      enabled: true
    oauth2_proxy:
      origin:
        host: "application"
        port: "80"
@@ -12,6 +12,13 @@ docker:
      enabled: true
    database:
      enabled: true
    oauth2_proxy:
      origin:
        host: "front"
        port: "80"
      acl:
        blacklist:
          - "/login"
features:
  matomo: true
  css: false
@@ -32,9 +39,3 @@ server:
      whitelist:
        font-src:
          - "data:"
oauth2_proxy:
  application: "front"
  port: "80"
  acl:
    blacklist:
      - "/login"

@@ -13,12 +13,7 @@ features:
  oauth2: true
  oidc: false # Deactivated because users aren't auto-created.
  logout: true
oauth2_proxy:
  application: "application"
  port: "<< defaults_applications[web-app-gitea].docker.services.gitea.port >>"
  acl:
    blacklist:
      - "/user/login"

server:
  csp:
    flags:
@@ -59,5 +54,12 @@ docker:
      mem_reservation: 0.2g
      mem_limit: 0.3g
      pids_limit: 512
    oauth2_proxy:
      origin:
        host: "application"
        port: "<< defaults_applications[web-app-gitea].docker.services.gitea.port >>"
      acl:
        blacklist:
          - "/user/login"
  volumes:
    data: "gitea_data"

@@ -27,3 +27,7 @@ server:
  domains:
    canonical:
      - lab.git.{{ PRIMARY_DOMAIN }}
  csp:
    flags:
      script-src-elem:
        unsafe-inline: true

@@ -1,5 +1,6 @@
features:
  logout: true # Same like with elk, anyhow not active atm
  matomo: true
server:
  domains:
    canonical:

@@ -28,5 +28,8 @@ docker:
      name: joomla
      backup:
        no_stop_required: true
      upload:
        # MB Integer for upload size
        size_mb: 100
  volumes:
    data: "joomla_data"

roles/web-app-joomla/tasks/01_config.yml (new file, +5)
@@ -0,0 +1,5 @@
- name: "Render php-upload.ini for Joomla"
  template:
    src: php-upload.ini.j2
    dest: "{{ JOOMLA_UPLOAD_CONFIG }}"
    mode: "0644"
@@ -11,7 +11,7 @@
# (Optional) specifically wait for the CLI installer script
- name: "Check for CLI installer"
  command:
    argv: [ docker, exec, "{{ JOOMLA_CONTAINER }}", test, -f, /var/www/html/installation/joomla.php ]
    argv: [ docker, exec, "{{ JOOMLA_CONTAINER }}", test, -f, "{{ JOOMLA_INSTALLER_CLI_FILE }}" ]
  register: has_installer
  changed_when: false
  failed_when: false
@@ -30,9 +30,11 @@
    argv:
      - docker
      - exec
      - --user
      - "{{ JOOMLA_WEB_USER }}"
      - "{{ JOOMLA_CONTAINER }}"
      - php
      - /var/www/html/installation/joomla.php
      - "{{ JOOMLA_INSTALLER_CLI_FILE }}"
      - install
      - "--db-type={{ JOOMLA_DB_CONNECTOR }}"
      - "--db-host={{ database_host }}"
roles/web-app-joomla/tasks/06_reset_admin_password.yml (new file, +18)
@@ -0,0 +1,18 @@
---
# Reset Joomla admin password via CLI (inside the container)
- name: "Reset Joomla admin password (non-interactive CLI)"
  command:
    argv:
      - docker
      - exec
      - "{{ JOOMLA_CONTAINER }}"
      - php
      - "{{ JOOMLA_CLI_FILE }}"
      - user:reset-password
      - "--username"
      - "{{ JOOMLA_USER_NAME }}"
      - "--password"
      - "{{ JOOMLA_USER_PASSWORD }}"
  register: j_password_reset
  no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"
  changed_when: j_password_reset.rc == 0
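Editorial note: using the `argv` list form (rather than a single shell string) means the generated password travels as one argument with no shell interpolation, which matters for credentials containing spaces or metacharacters. The equivalent pattern in Python; the container name and credentials below are placeholders:

    import subprocess

    argv = [
        "docker", "exec", "joomla", "php", "/var/www/html/cli/joomla.php",
        "user:reset-password",
        "--username", "administrator",
        "--password", "p@ss with spaces & $ymbols",  # passed verbatim, no shell parsing
    ]
    subprocess.run(argv, check=True)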
@@ -12,15 +12,25 @@
  include_role:
    name: sys-stk-back-stateful
  vars:
    docker_compose_flush_handlers: true
    docker_compose_flush_handlers: false

- name: Include PHP Config tasks
  include_tasks: 01_config.yml

- name: flush docker service
  meta: flush_handlers

- name: Include install routines
  include_tasks: "{{ item }}"
  loop:
    - 01_install.yml
    - 02_debug.yml
    - 03_patch.yml
    - 02_install.yml
    - 03_debug.yml
    - 04_patch.yml

- name: Include assert routines
  include_tasks: "04_assert.yml"
  include_tasks: "05_assert.yml"
  when: MODE_ASSERT | bool

- name: Reset Admin Password
  include_tasks: 06_reset_admin_password.yml

@@ -1,14 +1,12 @@
{% include 'roles/docker-compose/templates/base.yml.j2' %}
  application:
    build:
      context: {{ docker_compose.directories.instance }}
      dockerfile: Dockerfile
{{ lookup('template', 'roles/docker-container/templates/build.yml.j2') | indent(4) }}
    image: "{{ JOOMLA_CUSTOM_IMAGE }}"
    container_name: {{ JOOMLA_CONTAINER }}
    pull_policy: never
{% include 'roles/docker-container/templates/base.yml.j2' %}
    volumes:
      - data:/var/www/html
      - {{ JOOMLA_UPLOAD_CONFIG }}:/usr/local/etc/php/conf.d/uploads.ini:ro
    ports:
      - "127.0.0.1:{{ ports.localhost.http[application_id] }}:{{ container_port }}"
{% include 'roles/docker-container/templates/healthcheck/curl.yml.j2' %}

roles/web-app-joomla/templates/php-upload.ini.j2 (new file, +2)
@@ -0,0 +1,2 @@
upload_max_filesize = {{ JOOMLA_UPLOAD_MAX_FILESIZE }}
post_max_size = {{ JOOMLA_POST_MAX_SIZE }}
@@ -2,6 +2,7 @@
application_id: "web-app-joomla"
database_type: "mariadb"
container_port: 80
client_max_body_size: "{{ JOOMLA_POST_MAX_SIZE }}"

# Joomla
JOOMLA_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.joomla.version') }}"
@@ -13,9 +14,18 @@ JOOMLA_DOMAINS: "{{ applications | get_app_conf(application_id
JOOMLA_SITE_NAME: "{{ SOFTWARE_NAME }} Joomla - CMS"
JOOMLA_DB_CONNECTOR: "{{ 'pgsql' if database_type == 'postgres' else 'mysqli' }}"
JOOMLA_CONFIG_FILE: "/var/www/html/configuration.php"
JOOMLA_INSTALLER_CLI_FILE: "/var/www/html/installation/joomla.php"
JOOMLA_CLI_FILE: "/var/www/html/cli/joomla.php"

## Upload
JOOMLA_UPLOAD_CONFIG: "{{ [ docker_compose.directories.instance, 'php-upload.ini' ] | path_join }}"
JOOMLA_UPLOAD_SIZE: "{{ applications | get_app_conf(application_id, 'docker.services.joomla.upload.size_mb') }}"
JOOMLA_UPLOAD_MAX_FILESIZE: "{{ (JOOMLA_UPLOAD_SIZE | int) }}M"
JOOMLA_POST_MAX_SIZE: "{{ ((JOOMLA_UPLOAD_SIZE | int) * 12 // 10) }}M"

# User
JOOMLA_USER_NAME: "{{ users.administrator.username }}"
JOOMLA_USER: "{{ JOOMLA_USER_NAME | capitalize }}"
JOOMLA_USER_PASSWORD: "{{ users.administrator.password }}"
JOOMLA_USER_EMAIL: "{{ users.administrator.email }}"
JOOMLA_WEB_USER: "www-data"
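Editorial note: `JOOMLA_POST_MAX_SIZE` is deliberately about 20% larger than `upload_max_filesize`, because a POST body carries multipart encoding overhead on top of the file itself; integer arithmetic (`* 12 // 10`) keeps the rendered value a whole number of megabytes. The same computation in Python:

    def php_upload_limits(size_mb: int) -> tuple[str, str]:
        upload_max_filesize = f"{size_mb}M"
        post_max_size = f"{size_mb * 12 // 10}M"  # ~20% headroom for multipart overhead
        return upload_max_filesize, post_max_size

    assert php_upload_limits(100) == ("100M", "120M")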
@@ -104,6 +104,6 @@ a.pf-v5-c-nav__link{
div#app header{
    background-color: var(--color-01-60);
    /* New Gradient based on original background (60 -5, 60, 60 +1, 60 +5) */
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-55), var(--color-01-60), var(--color-01-61), var(--color-01-65));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-55), var(--color-01-60), var(--color-01-61), var(--color-01-65));
    color: var(--color-01-98);
}

@@ -1,17 +1,18 @@
docker:
  services:
    lam:
      image: ghcr.io/ldapaccountmanager/lam
      version: latest
      oauth2_proxy:
        application: application
        port: 80
        allowed_groups:
          - "{{ [RBAC.GROUP.NAME, 'web-app-lam-administrator'] | path_join }}"
      image: ghcr.io/ldapaccountmanager/lam
      version: latest
      oauth2_proxy:
        origin:
          host: application
          port: 80
        allowed_groups:
          - "{{ [RBAC.GROUP.NAME, 'web-app-lam-administrator'] | path_join }}"
features:
  matomo: true
  css: true
  desktop: true
  desktop: true
  ldap: true
  oauth2: true
  logout: true

@@ -37,7 +37,7 @@ ul.lam-tab-navigation {
.titleBar {
    background-image: linear-gradient(var(--color-01-83), var(--color-01-92));
    /* New Gradient based on original background (83 -5, 83, 83 +1, 83 +5) */
    background-image: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-78), var(--color-01-83), var(--color-01-84), var(--color-01-88));
    background-image: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-78), var(--color-01-83), var(--color-01-84), var(--color-01-88));
    border-top-color: var(--color-01-78);
    border-left-color: var(--color-01-87);
    border-right-color: var(--color-01-87);
@@ -46,6 +46,6 @@ ul.lam-tab-navigation {
div.statusInfo {
    background-color: var(--color-01-81);
    /* New Gradient based on original background (81 -5, 81, 81 +1, 81 +5) */
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-76), var(--color-01-81), var(--color-01-82), var(--color-01-86));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-76), var(--color-01-81), var(--color-01-82), var(--color-01-86));
    color: var(--color-01-23);
}

@@ -13,6 +13,16 @@ server:
    aliases: []
  status_codes:
    default: 404
  csp:
    flags:
      script-src-elem:
        unsafe-inline: true
    whitelist:
      script-src-elem:
        - "https://www.hcaptcha.com"
        - "https://js.hcaptcha.com"
      frame-src:
        - "https://newassets.hcaptcha.com/"
docker:
  services:
    database:

@@ -92,7 +92,7 @@ LISTMONK_SETTINGS:
  {{ [
    {
      "host": SYSTEM_EMAIL.HOST,
      "port": SYSTEM_EMAIL.PORT,
      "port": 995,
      "type": "pop",
      "uuid": "471fd0e9-8c33-4e4a-9183-c4679699faca",
      "enabled": true,

@@ -1,18 +1,18 @@
[class*=sidebar-dark-], .bg-mailu-logo {
    background-color: var(--color-01-90);
    /* New Gradient based on original background (90 -5, 90, 90 +1, 90 +5) */
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-85), var(--color-01-90), var(--color-01-91), var(--color-01-95));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-85), var(--color-01-90), var(--color-01-91), var(--color-01-95));
}

div.statusError {
    background-color: var(--color-01-60);
    /* New Gradient based on original background (60 -5, 60, 60 +1, 60 +5) */
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-55), var(--color-01-60), var(--color-01-61), var(--color-01-65));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-55), var(--color-01-60), var(--color-01-61), var(--color-01-65));
}

div.wrapper footer.main-footer, div.wrapper div.content-wrapper{
    background-color: var(--color-01-85);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-89), var(--color-01-85), var(--color-01-80), var(--color-01-79));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-89), var(--color-01-85), var(--color-01-80), var(--color-01-79));
    color: var(--color-01-39);
}


roles/web-app-mastodon/tasks/01_setup.yml (new file, +4)
@@ -0,0 +1,4 @@
- name: "Execute migration for '{{ application_id }}'"
  command:
    chdir: "{{ docker_compose.directories.instance }}"
    cmd: "docker compose run --rm {{ MASTODON_SERVICE_NAME }} bundle exec rails db:migrate"
@@ -11,9 +11,9 @@
  delay: 5
  until: healthcheck.stdout == "healthy"
  loop:
    - mastodon
    - streaming
    - sidekiq
    - "{{ MASTODON_SERVICE_NAME }}"
    - "{{ MASTODON_STREAMING_SERVICE_NAME }}"
    - "{{ MASTODON_SIDEKIQ_SERVICE_NAME }}"
  loop_control:
    label: "{{ item }}"
  changed_when: false
@@ -1,3 +0,0 @@
- name: "Execute migration for '{{ application_id }}'"
  command:
    cmd: "docker exec {{ MASTODON_CONTAINER }} bundle exec rails db:migrate"
@@ -18,15 +18,15 @@
  vars:
    docker_compose_flush_handlers: true

- name: "start setup procedures for mastodon"
  include_tasks: 01_setup.yml

- name: "Wait for Mastodon"
  include_tasks: 01_wait.yml
  include_tasks: 02_wait.yml

- name: "Cleanup Mastodon caches when MODE_CLEANUP is true"
  include_tasks: 02_cleanup.yml
  include_tasks: 03_cleanup.yml
  when: MODE_CLEANUP | bool

- name: "start setup procedures for mastodon"
  include_tasks: 03_setup.yml

- name: "Include administrator routines for '{{ application_id }}'"
  include_tasks: 04_administrator.yml

@@ -1,9 +1,9 @@
{% include 'roles/docker-compose/templates/base.yml.j2' %}

  mastodon:
{% set service_name = 'mastodon' %}
{% set service_name = MASTODON_SERVICE_NAME %}
{% set container_port = 3000 %}
{% set container_healthcheck = 'health' %}
  {{ service_name }}:
    container_name: {{ MASTODON_CONTAINER }}
    image: "{{ MASTODON_IMAGE }}:{{ MASTODON_VERSION }}"
{% include 'roles/docker-container/templates/base.yml.j2' %}
@@ -16,10 +16,10 @@
      - data:/mastodon/public/system
{% include 'roles/docker-container/templates/networks.yml.j2' %}

  streaming:
{% set service_name = 'streaming' %}
{% set service_name = MASTODON_STREAMING_SERVICE_NAME %}
{% set container_port = 4000 %}
{% set container_healthcheck = 'api/v1/streaming/health' %}
  {{ service_name }}:
    container_name: {{ MASTODON_STREAMING_CONTAINER }}
    image: "{{ MASTODON_STREAMING_IMAGE }}:{{ MASTODON_STREAMING_VERSION }}"
{% include 'roles/docker-container/templates/base.yml.j2' %}
@@ -30,8 +30,8 @@
{% include 'roles/docker-container/templates/depends_on/dmbs_excl.yml.j2' %}
{% include 'roles/docker-container/templates/networks.yml.j2' %}

  sidekiq:
{% set service_name = 'sidekiq' %}
{% set service_name = MASTODON_SIDEKIQ_SERVICE_NAME %}
  {{ service_name }}:
    container_name: {{ MASTODON_SIDEKIQ_CONTAINER }}
    image: "{{ MASTODON_IMAGE }}:{{ MASTODON_VERSION }}"
{% include 'roles/docker-container/templates/base.yml.j2' %}

@@ -2,13 +2,24 @@
application_id: "web-app-mastodon"
database_type: "postgres"

# Mastodon Specific
# Mastodon

## Main
MASTODON_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.mastodon.version') }}"
MASTODON_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.mastodon.image') }}"
MASTODON_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.mastodon.name') }}"
MASTODON_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.data') }}"
MASTODON_SERVICE_NAME: "mastodon"

## Streaming
MASTODON_STREAMING_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.streaming.version') }}"
MASTODON_STREAMING_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.streaming.image') }}"
MASTODON_STREAMING_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.streaming.name') }}"
MASTODON_SIDEKIQ_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.mastodon.name') }}_sidekiq"
MASTODON_STREAMING_SERVICE_NAME: "streaming"

## Sidekiq
MASTODON_SIDEKIQ_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.mastodon.name') }}-sidekiq"
MASTODON_SIDEKIQ_SERVICE_NAME: "sidekiq"

## General
MASTODON_ALLOWED_PRIVATE_ADDRESSES: "{{ networks.local['svc-db-postgres'].subnet if 'web-app-chess' in group_names else ''}}"
@@ -27,3 +27,4 @@ features:
  css: false
  desktop: true
  oidc: true
  matomo: true

@@ -40,7 +40,7 @@
body.skin-vector,
.skin-vector .mw-page-container {
    background-color: var(--mw-surface);
    background-image: linear-gradient({{ range(0, 361) | random }}deg,
    background-image: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg,
        var(--mw-surface),
        var(--mw-surface-variant),
        var(--mw-surface-muted),
@@ -54,7 +54,7 @@ body.skin-vector,
.skin-vector .vector-header-container,
.skin-vector .mw-header {
    background-color: var(--color-01-80);
    background-image: linear-gradient({{ range(0, 361) | random }}deg,
    background-image: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg,
        var(--color-01-75), var(--color-01-80), var(--color-01-81), var(--color-01-85)
    );
    color: var(--color-01-17);
@@ -211,7 +211,7 @@ table.wikitable > * > tr > td {

table.wikitable > * > tr > th {
    background-color: var(--color-01-80);
    background-image: linear-gradient({{ range(0, 361) | random }}deg,
    background-image: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg,
        var(--color-01-75), var(--color-01-80), var(--color-01-81), var(--color-01-85)
    );
    color: var(--mw-heading);

@@ -26,6 +26,7 @@ server:
        - https://cdn.jsdelivr.net
      connect-src:
        - https://ka-f.fontawesome.com
        - https://cdn.jsdelivr.net
      frame-ancestors:
        - "*" # No damage if it's used somewhere on other websites, it anyhow looks like art
    flags:

roles/web-app-mig/files/style.css (new file, +3)
@@ -0,0 +1,3 @@
#details h6, #details p{
    color: var(--color-01-73)
}
@@ -3,25 +3,17 @@
    name: sys-cli
  when: run_once_sys_cli is not defined

- name: Load docker compose vars
  include_vars:
    file: roles/docker-compose/vars/docker-compose.yml
    name: mig_docker_compose

- name: Set roles volume variable
  set_fact:
    mig_roles_meta_volume: "{{ mig_docker_compose.docker_compose.directories.volumes }}/roles/"

- name: Set roles list variable
  set_fact:
    mig_roles_meta_list: "{{ mig_roles_meta_volume }}list.json"

- name: "load docker, proxy for '{{ application_id }}'"
  include_role:
    name: sys-stk-full-stateless
  vars:
    docker_compose_flush_handlers: true
    docker_pull_git_repository: true

- name: Build data (single async task)
  include_tasks: 02_build_data.yml
- include_tasks: 02_cleanup.yml
  when: MODE_CLEANUP | bool

- include_tasks: 03_build_data.yml
  when: MIG_BUILD_DATA | bool

- include_tasks: utils/run_once.yml

roles/web-app-mig/tasks/02_cleanup.yml (new file, +5)
@@ -0,0 +1,5 @@
- name: "Cleanup MIG roles directory (remove all contents safely)"
  file:
    path: "{{ MIG_ROLES_DIRECTORY }}"
    state: directory
    mode: "0755"
@@ -1,8 +1,7 @@
- name: Build data (single async task)
  shell: |
    set -euo pipefail
    infinito build tree --no-signal --alarm-timeout 0 -s {{ mig_roles_meta_volume }}
    infinito build roles_list --no-signal --alarm-timeout 0 -o {{ mig_roles_meta_list }}
    infinito build tree --no-signal --alarm-timeout 0 -s {{ MIG_ROLES_DIRECTORY }}
    infinito build roles_list --no-signal --alarm-timeout 0 -o {{ MIG_ROLES_LIST }}
  async: "{{ (3600 if ASYNC_ENABLED | bool else omit) }}"
  poll: "{{ ASYNC_POLL if ASYNC_ENABLED | bool else omit }}"
  register: mig_build_job
@@ -12,11 +12,9 @@
    dockerfile: Dockerfile
    pull_policy: never
    volumes:
      - "{{ mig_roles_meta_volume }}:/usr/share/nginx/html/roles:ro"
      - "{{ MIG_ROLES_DIRECTORY }}:/usr/share/nginx/html/roles:ro"
      - "{{ docker_repository_path }}:/usr/share/nginx/html"
{% include 'roles/docker-container/templates/networks.yml.j2' %}
{% include 'roles/docker-container/templates/healthcheck/curl.yml.j2' %}

{% include 'roles/docker-compose/templates/networks.yml.j2' %}


@@ -2,11 +2,11 @@
application_id: web-app-mig

# Docker
docker_compose_flush_handlers: true
docker_pull_git_repository: true
docker_repository_address: "https://github.com/kevinveenbirkenbach/meta-infinite-graph"

# Helper variables
MIG_IMAGE: "mig:latest"
MIG_CONTAINER: "mig"
MIG_BUILD_DATA: "{{ applications | get_app_conf(application_id, 'build_data.enabled') }}"
MIG_BUILD_DATA: "{{ applications | get_app_conf(application_id, 'build_data.enabled') }}"
MIG_ROLES_DIRECTORY: "{{ [ docker_compose.directories.volumes, 'roles' ] | path_join }}"
MIG_ROLES_LIST: "{{ [ MIG_ROLES_DIRECTORY, 'list.json' ] | path_join }}"
@@ -21,9 +21,10 @@ server:
      connect-src:
        - https://q.clarity.ms
        - https://n.clarity.ms
        - https://z.clarity.ms
        - "data:"
      style-src-elem: []
      font-src: []
      font-src: []
      frame-ancestors: []
    flags:
      style-src-attr:

@@ -2,6 +2,6 @@
  include_role:
    name: sys-stk-full-stateless
  vars:
    docker_compose_flush_handlers: false
    docker_compose_flush_handlers: true

- include_tasks: utils/run_once.yml

@@ -5,6 +5,7 @@ features:
  matomo: true
  desktop: true
  logout: true
  css: true
server:
  csp:
    flags:
@@ -22,6 +23,6 @@ docker:
    database:
      enabled: true
    mobilizon:
      image: "docker.io/framasoft/mobilizon"
      image: "docker.io/kaihuri/mobilizon"
      name: "mobilizon"
      version: ""
      version: "5.2.0"

@@ -8,19 +8,31 @@ server:
        unsafe-inline: true
      script-src-attr:
        unsafe-eval: true
        unsafe-inline: true # Required for ONLYOFFICE
    whitelist:
      script-src-elem:
        - "https://www.hcaptcha.com"
        - "https://js.hcaptcha.com"
        - "{{ WEB_PROTOCOL }}://onlyoffice.{{ PRIMARY_DOMAIN }}"
      font-src:
        - "data:"
      connect-src:
        - "{{ WEBSOCKET_PROTOCOL }}://collabora.{{ PRIMARY_DOMAIN }}"
        - "{{ WEBSOCKET_PROTOCOL }}://cloud.{{ PRIMARY_DOMAIN }}"
        - "{{ WEB_PROTOCOL }}://collabora.{{ PRIMARY_DOMAIN }}"
        - "{{ WEB_PROTOCOL }}://onlyoffice.{{ PRIMARY_DOMAIN }}"
        - "{{ WEB_PROTOCOL }}://cloud.{{ PRIMARY_DOMAIN }}"
        - "*" # Required to load all external websites in Whiteboard
      frame-src:
        - "{{ WEBSOCKET_PROTOCOL }}://collabora.{{ PRIMARY_DOMAIN }}"
        - "{{ WEB_PROTOCOL }}://onlyoffice.{{ PRIMARY_DOMAIN }}"
        - "{{ WEB_PROTOCOL }}://collabora.{{ PRIMARY_DOMAIN }}"
        - "https://newassets.hcaptcha.com/"
        - "*" # Required to load all external websites in Whiteboard
      worker-src:
        - "blob:"
      media-src:
        - "*" # Required to load all external websites in Whiteboard
  domains:
    canonical:
      - "next.cloud.{{ PRIMARY_DOMAIN }}"
@@ -30,6 +42,7 @@ docker:
    data: nextcloud_data
    whiteboard_tmp: nextcloud_whiteboard_tmp
    whiteboard_fontcache: nextcloud_whiteboard_fontcache
    talk_recording_tmp: nextcloud_talk_recording_tmp
  services:
    redis:
      enabled: true
@@ -89,10 +102,20 @@ docker:
      version: "latest"
      backup:
        no_stop_required: true
      cpus: "0.25"
      cpus: "1"
      mem_reservation: "128m"
      mem_limit: "512m"
      mem_limit: "1g"
      pids_limit: 1024
    talk_recording:
      name: "nextcloud-talk-recording"
      image: "nextcloud/aio-talk-recording"
      version: "latest"
      backup:
        no_stop_required: true
      cpus: "2.0"
      mem_reservation: "2g"
      mem_limit: "4g"
      pids_limit: 1024
enabled: "{{ applications | get_app_conf('web-app-nextcloud', 'features.oidc', False, True, True) }}" # Activate OIDC for Nextcloud
# floavor decides which OICD plugin should be used.
# Available options: oidc_login, sociallogin
@@ -196,7 +219,7 @@ plugins:
      enabled: false
    fileslibreofficeedit:
      # Nextcloud LibreOffice integration: allows online editing of documents with LibreOffice (https://apps.nextcloud.com/apps/fileslibreofficeedit)
      enabled: "{{ not (applications | get_app_conf('web-app-nextcloud', 'plugins.richdocuments.enabled', False, True, True)) }}"
      enabled: false
    forms:
      # Nextcloud forms: facilitates creation of forms and surveys (https://apps.nextcloud.com/apps/forms)
      enabled: true
@@ -273,7 +296,13 @@ plugins:
      enabled: false # Deactivated because it let to bugs
    richdocuments:
      # Nextcloud Rich Documents: provides collaborative document editing capabilities (https://apps.nextcloud.com/apps/richdocuments)
      enabled: true # @todo To set it default to true activate https://hub.docker.com/r/collabora/code before
      enabled: false
    onlyoffice:
      # ONLYOFFICE Document Server integration (https://apps.nextcloud.com/apps/onlyoffice)
      enabled: true
      incompatible_plugins:
        - richdocuments
        - fileslibreofficeedit
    sociallogin:
      # Nextcloud social login: allows authentication using social networks (https://apps.nextcloud.com/apps/sociallogin)
      enabled: "{{ _applications_nextcloud_oidc_flavor=='sociallogin' | lower }}"

@@ -1,4 +1,8 @@
credentials:
  talk_recording_secret:
    description: "Shared secret between Nextcloud Talk and the recording backend"
    algorithm: "random_hex_32"
    validation: "^[a-f0-9]{64}$"
  whiteboard_jwt_secret:
    description: "Secret used for JWT signing"
    algorithm: "base64_prefixed_32"

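Editorial note: `random_hex_32` presumably denotes 32 random bytes rendered as hex, which is exactly what the validation pattern `^[a-f0-9]{64}$` accepts (two lowercase hex digits per byte). A sketch of generating and validating such a secret:

    import re
    import secrets

    secret = secrets.token_hex(32)                # 32 random bytes -> 64 hex chars
    assert re.fullmatch(r"[a-f0-9]{64}", secret)  # satisfies ^[a-f0-9]{64}$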
@@ -7,6 +7,9 @@
  command: "{{ NEXTCLOUD_DOCKER_EXEC_OCC }} maintenance:repair --include-expensive"
  register: occ_repair
  changed_when: "'No repairs needed' not in occ_repair.stdout"
  retries: 3
  delay: 10
  until: occ_repair.rc == 0

- name: Nextcloud | App update (retry once)
  command: "{{ NEXTCLOUD_DOCKER_EXEC_OCC }} app:update --all"

@@ -16,6 +16,13 @@
- name: Flush all handlers immediately so that occ can be used
  meta: flush_handlers

- name: Wait until Redis is ready (PONG)
  command: "docker exec {{ NEXTCLOUD_REDIS_CONTAINER }} redis-cli ping"
  register: redis_ping
  retries: 60
  delay: 2
  until: (redis_ping.stdout | default('')) is search('PONG')

- name: Update\Upgrade Nextcloud
  include_tasks: 03_upgrade.yml
  when: MODE_UPDATE | bool

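Editorial note: the Redis gate retries `redis-cli ping` up to 60 times with a 2-second delay, so `occ` commands only run once the cache backend answers `PONG`. The retries/delay/until pattern translated into a plain polling loop (the container name is a placeholder):

    import subprocess
    import time

    def wait_for_redis(container: str, retries: int = 60, delay: float = 2.0) -> None:
        for _ in range(retries):
            ping = subprocess.run(
                ["docker", "exec", container, "redis-cli", "ping"],
                capture_output=True, text=True,
            )
            if "PONG" in ping.stdout:
                return
            time.sleep(delay)
        raise TimeoutError(f"Redis in '{container}' never answered PONG")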
roles/web-app-nextcloud/tasks/plugins/onlyoffice.yml (new file, +7)
@@ -0,0 +1,7 @@
- name: "Install OnlyOffice dependency (Document Server)"
  include_role:
    name: web-svc-onlyoffice
  vars:
    flush_handlers: true
  when:
    - run_once_web_svc_onlyoffice is not defined
roles/web-app-nextcloud/templates/Dockerfile.j2 (new file, +5)
@@ -0,0 +1,5 @@
FROM {{ NEXTCLOUD_IMAGE }}:{{ NEXTCLOUD_VERSION }}
{% if NEXTCLOUD_ONLYOFFICE_ENABLED | bool %}
# Required for ONLYOFFICE
RUN apk add --no-cache ca-certificates && update-ca-certificates
{% endif %}
@@ -24,8 +24,10 @@

{% set service_name = NEXTCLOUD_SERVICE %}
  {{ service_name }}:
    image: "{{ NEXTCLOUD_IMAGE }}:{{ NEXTCLOUD_VERSION }}"
    image: "{{ NEXTCLOUD_CUSTOM_IMAGE }}"
    container_name: {{ NEXTCLOUD_CONTAINER }}
{{ lookup('template', 'roles/docker-container/templates/build.yml.j2') | indent(4) }}

    volumes:
      - data:{{ NEXTCLOUD_DOCKER_WORK_DIRECTORY }}
      - {{ NEXTCLOUD_HOST_CONF_ADD_PATH }}:{{ NEXTCLOUD_DOCKER_CONF_ADD_PATH }}:ro
@@ -77,7 +79,8 @@
    volumes:
      - whiteboard_tmp:/tmp
      - whiteboard_fontcache:/var/cache/fontconfig

    environment:
      - NODE_OPTIONS=--max-old-space-size={{ NEXTCLOUD_WHITEBOARD_MAX_OLD_SPACE_SIZE }}
    expose:
      - "{{ container_port }}"
    shm_size: 1g
@@ -109,6 +112,27 @@
{% include 'roles/docker-container/templates/networks.yml.j2' %}
      ipv4_address: 192.168.102.70

{% if NEXTCLOUD_RECORDING_ENABLED | bool %}
{% set service_name = 'talk_recording' %}
  {{ service_name }}:
{% include 'roles/docker-container/templates/base.yml.j2' %}
    image: "{{ NEXTCLOUD_RECORDING_IMAGE }}:{{ NEXTCLOUD_RECORDING_VERSION }}"
    container_name: "{{ NEXTCLOUD_RECORDING_CONTAINER }}"
    environment:
      - HPB_DOMAIN={{ NEXTCLOUD_HPB_DOMAIN }}
      - NC_DOMAIN={{ NEXTCLOUD_DOMAIN }}
      - RECORDING_SECRET={{ NEXTCLOUD_RECORDING_SECRET }}
      - INTERNAL_SECRET={{ NEXTCLOUD_HPB_INTERNAL_SECRET }}
      - TZ={{ HOST_TIMEZONE }}
    expose:
      - "{{ NEXTCLOUD_RECORDING_PORT }}"
    networks:
      default:
        ipv4_address: 192.168.102.72
    volumes:
      - talk_recording_tmp:/tmp
{% endif %}

{% include 'roles/docker-compose/templates/volumes.yml.j2' %}
  data:
    name: {{ NEXTCLOUD_VOLUME }}
@@ -118,5 +142,9 @@
  whiteboard_fontcache:
    name: {{ NEXTCLOUD_WHITEBOARD_FRONTCACHE_VOLUME }}
{% endif %}
{% if NEXTCLOUD_RECORDING_ENABLED | bool %}
  talk_recording_tmp:
    name: {{ NEXTCLOUD_RECORDING_TMP_VOLUME }}
{% endif %}

{% include 'roles/docker-compose/templates/networks.yml.j2' %}

@@ -62,7 +62,6 @@ STORAGE_STRATEGY=redis
REDIS_URL=redis://redis:6379/0
# Chromium (headless) hardening for Whiteboard
CHROMIUM_FLAGS=--headless=new --no-sandbox --disable-gpu --disable-dev-shm-usage --use-gl=swiftshader --disable-software-rasterizer
# If the image ships Chromium, the path is usually /usr/bin/chromium or /usr/bin/chromium-browser:
PUPPETEER_EXECUTABLE_PATH=/usr/bin/chromium
PUPPETEER_SKIP_DOWNLOAD=true
{% endif %}
@@ -14,7 +14,7 @@
html.ng-csp header#header{
    background-color: var(--color-01-80);
    /* New Gradient based on original background (80 -5, 80, 80 +1, 80 +5) */
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-75), var(--color-01-80), var(--color-01-81), var(--color-01-85));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-75), var(--color-01-80), var(--color-01-81), var(--color-01-85));
    color: var(--color-01-17);
}

@@ -40,7 +40,7 @@ div#mastodon .column-back-button {
div#mastodon textarea, div#mastodon input, div#mastodon .compose-form__highlightable {
    background-color: var(--color-01-89);
    /* New Gradient based on original background (89 -5, 89, 89 +1, 89 +5) */
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-84), var(--color-01-89), var(--color-01-90), var(--color-01-94));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-84), var(--color-01-89), var(--color-01-90), var(--color-01-94));
    color: var(--color-01-19);
}

@@ -57,7 +57,7 @@ div#mastodon .dropdown-button{

div#mastodon .button, div#mastodon .button:active, div#mastodon .button:focus, div#mastodon .button:hover{
    background-color: var(--color-01-71);
    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-66), var(--color-01-71), var(--color-01-72), var(--color-01-76));
    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-66), var(--color-01-71), var(--color-01-72), var(--color-01-76));
}

.compose-form__actions .icon-button {

@@ -50,6 +50,7 @@ NEXTCLOUD_SERVICE: "{{ entity_name }}"
 NEXTCLOUD_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.'~ NEXTCLOUD_SERVICE ~'.version') }}"
 NEXTCLOUD_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.'~ NEXTCLOUD_SERVICE ~'.image') }}"
 NEXTCLOUD_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.'~ NEXTCLOUD_SERVICE ~'.name') }}"
+NEXTCLOUD_CUSTOM_IMAGE: "nextcloud_custom"

 ### Proxy
 NEXTCLOUD_PROXY_SERVICE: "proxy"

@@ -130,10 +131,25 @@ NEXTCLOUD_WHITEBOARD_TMP_VOLUME: "{{ applications | get_app_conf(applic
 NEXTCLOUD_WHITEBOARD_FRONTCACHE_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.whiteboard_fontcache') }}"
 NEXTCLOUD_WHITEBOARD_SERVICE_DIRECTORY: "{{ [ docker_compose.directories.services, 'whiteboard' ] | path_join }}"
 NEXTCLOUD_WHITEBOARD_SERVICE_DOCKERFILE: "{{ [ NEXTCLOUD_WHITEBOARD_SERVICE_DIRECTORY, 'Dockerfile' ] | path_join }}"
 NEXTCLOUD_WHITEBOARD_MAX_OLD_SPACE_SIZE: "{{ applications | node_max_old_space_size(application_id, NEXTCLOUD_WHITEBOARD_SERVICE) }}"

+### Talk Recording backend
+NEXTCLOUD_RECORDING_SERVICE: "talk_recording"
+NEXTCLOUD_RECORDING_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.' ~ NEXTCLOUD_RECORDING_SERVICE ~ '.name') }}"
+NEXTCLOUD_RECORDING_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.' ~ NEXTCLOUD_RECORDING_SERVICE ~ '.image') }}"
+NEXTCLOUD_RECORDING_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.' ~ NEXTCLOUD_RECORDING_SERVICE ~ '.version') }}"
+NEXTCLOUD_RECORDING_ENABLED: "{{ NEXTCLOUD_HPB_PLUGIN_ENABLED }}"
+NEXTCLOUD_RECORDING_PORT: 1234
+NEXTCLOUD_RECORDING_SECRET: "{{ applications | get_app_conf(application_id, 'credentials.talk_recording_secret') }}"
+NEXTCLOUD_RECORDING_TMP_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.talk_recording_tmp') }}"

 ### Collabora
 NEXTCLOUD_COLLABORA_URL: "{{ domains | get_url('web-svc-collabora', WEB_PROTOCOL) }}"

 ### OnlyOffice
 NEXTCLOUD_ONLYOFFICE_URL: "{{ domains | get_url('web-svc-onlyoffice', WEB_PROTOCOL) }}"
+NEXTCLOUD_ONLYOFFICE_ENABLED: "{{ applications | get_app_conf(application_id, 'plugins.onlyoffice.enabled') }}"

 ## User Configuration
 NEXTCLOUD_DOCKER_USER_ID: 82 # UID of the www-data user
 NEXTCLOUD_DOCKER_USER: "www-data" # Name of the www-data user (set here to make it easy to change in the future)

@@ -141,4 +157,7 @@ NEXTCLOUD_DOCKER_USER: "www-data" # Name of the www-data user
 ## Execution
 NEXTCLOUD_INTERNAL_OCC_COMMAND: "{{ [ NEXTCLOUD_DOCKER_WORK_DIRECTORY, 'occ'] | path_join }}"
 NEXTCLOUD_DOCKER_EXEC: "docker exec -u {{ NEXTCLOUD_DOCKER_USER }} {{ NEXTCLOUD_CONTAINER }}" # General execute composition
 NEXTCLOUD_DOCKER_EXEC_OCC: "{{ NEXTCLOUD_DOCKER_EXEC }} {{ NEXTCLOUD_INTERNAL_OCC_COMMAND }}" # Execute docker occ command

+## Redis
+NEXTCLOUD_REDIS_CONTAINER: "{{ entity_name }}-redis"
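Since NEXTCLOUD_DOCKER_EXEC_OCC composes the full `docker exec … occ` prefix, a task only needs to append the occ subcommand. A minimal usage sketch; the task name and subcommand are illustrative, not taken from the role:

    # Illustrative task, not from the role itself
    - name: Check Nextcloud status via occ
      command: "{{ NEXTCLOUD_DOCKER_EXEC_OCC }} status"
      register: occ_status
      changed_when: false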
roles/web-app-nextcloud/vars/plugins/onlyoffice.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
+plugin_configuration:
+  - appid: "onlyoffice"
+    configkey: "DocumentServerUrl"
+    configvalue: "{{ NEXTCLOUD_ONLYOFFICE_URL }}/"
+
+  - appid: "onlyoffice"
+    configkey: "DocumentServerInternalUrl"
+    configvalue: "{{ NEXTCLOUD_ONLYOFFICE_URL }}/"
+
+  - appid: "onlyoffice"
+    configkey: "StorageUrl"
+    configvalue: "{{ NEXTCLOUD_URL }}/"
+
+  - appid: "onlyoffice"
+    configkey: "jwt_secret"
+    configvalue: "{{ applications | get_app_conf('web-svc-onlyoffice', 'credentials.onlyoffice_jwt_secret', False, '') }}"
+
+  - appid: "onlyoffice"
+    configkey: "jwt_header"
+    configvalue: "Authorization"
+
+  - appid: "onlyoffice"
+    configkey: "verify_peer_off"
+    configvalue: "false"
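Each entry maps onto one `occ config:app:set <appid> <configkey> --value=<configvalue>` call. A sketch of how the role might loop over the list; the apply task itself is assumed, not shown in this diff:

    # Assumed task shape; the role's real apply step may differ
    - name: Apply OnlyOffice app configuration
      command: >-
        {{ NEXTCLOUD_DOCKER_EXEC_OCC }} config:app:set
        {{ item.appid }} {{ item.configkey }} --value={{ item.configvalue | quote }}
      loop: "{{ plugin_configuration }}"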
@@ -39,3 +39,19 @@ plugin_configuration:
   - appid: "spreed"
     configkey: "internal_secret"
     configvalue: "{{ NEXTCLOUD_HPB_INTERNAL_SECRET }}"

+  - appid: "spreed"
+    configkey: "recording_servers"
+    configvalue: >-
+      {{
+        {
+          'servers': [
+            {
+              'server': 'http://' ~ NEXTCLOUD_RECORDING_SERVICE ~ ':' ~ NEXTCLOUD_RECORDING_PORT ~ '/',
+              'verify': false
+            }
+          ],
+          'secret': NEXTCLOUD_RECORDING_SECRET
+        }
+        | to_json
+      }}
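With the defaults above (service `talk_recording`, port 1234), the Jinja expression renders to a compact JSON string roughly like the following; the secret shown is a placeholder:

    {"servers": [{"server": "http://talk_recording:1234/", "verify": false}], "secret": "<NEXTCLOUD_RECORDING_SECRET>"}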
@@ -2,10 +2,10 @@ configuration_file: "oauth2-proxy-keycloak.cfg" # Needs to be set true in the ro
 version: "latest" # Docker image version
 allowed_roles: "admin" # Restricted by default to the admin role. Use vars/main.yml to open the specific role to other groups
 features:
   matomo: true
   css: true
   desktop: false
   logout: true
 server:
   domains:
     canonical:
@@ -1,7 +1,7 @@
 http_address = "0.0.0.0:4180"
 cookie_secret = "{{ applications | get_app_conf(oauth2_proxy_application_id, 'credentials.oauth2_proxy_cookie_secret', True) }}"
 cookie_secure = "true" # True is necessary to force the cookie to be set via https
-upstreams = "http://{{ applications | get_app_conf(oauth2_proxy_application_id, 'oauth2_proxy.application', True) }}:{{ applications | get_app_conf(oauth2_proxy_application_id, 'oauth2_proxy.port', True) }}"
+upstreams = "http://{{ applications | get_app_conf(oauth2_proxy_application_id, 'docker.services.oauth2_proxy.origin.host') }}:{{ applications | get_app_conf(oauth2_proxy_application_id, 'docker.services.oauth2_proxy.origin.port') }}"
 cookie_domains = ["{{ domains | get_domain(oauth2_proxy_application_id) }}", "{{ domains | get_domain('web-app-keycloak') }}"] # Required so the cookie can be read on all subdomains.
 whitelist_domains = [".{{ PRIMARY_DOMAIN }}"] # Required to allow redirection back to the originally requested target.
@@ -13,11 +13,11 @@ oidc_issuer_url = "{{ OIDC.CLIENT.ISSUER_URL }}"
 provider = "oidc"
 provider_display_name = "{{ OIDC.BUTTON_TEXT }}"

-{% if applications | get_app_conf(oauth2_proxy_application_id, 'oauth2_proxy.allowed_groups', False) %}
+{% if applications | get_app_conf(oauth2_proxy_application_id, 'docker.services.oauth2_proxy.allowed_groups', False) %}
 {# role based restrictions #}
 scope = "openid email profile {{ RBAC.GROUP.CLAIM }}"
 oidc_groups_claim = "{{ RBAC.GROUP.CLAIM }}"
-allowed_groups = {{ applications | get_app_conf(oauth2_proxy_application_id, 'oauth2_proxy.allowed_groups', True) | to_json }}
+allowed_groups = {{ applications | get_app_conf(oauth2_proxy_application_id, 'docker.services.oauth2_proxy.allowed_groups') | to_json }}
 email_domains = ["*"]
 {% else %}
 email_domains = "{{ PRIMARY_DOMAIN }}"
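For reference, when `docker.services.oauth2_proxy.allowed_groups` is set to e.g. `["admin"]` and, as an assumption, RBAC.GROUP.CLAIM resolves to `groups`, the restricted branch renders roughly to:

    scope = "openid email profile groups"
    oidc_groups_claim = "groups"
    allowed_groups = ["admin"]
    email_domains = ["*"]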
@@ -1,11 +1,3 @@
-oauth2_proxy:
-  application: "proxy"
-  port: "80"
-  acl:
-    whitelist:
-      - "/users/me"     # Necessary for Nextcloud Plugin to work
-      - "/api/"         # Necessary for Nextcloud Plugin to work
-      - "/oauth/token"  # Necessary for Nextcloud Plugin to work
 ldap:
   filters:
     administrators: False # Set true to filter administrators
@@ -78,6 +70,14 @@ docker:
     mem_reservation: "512m"
     mem_limit: "512m"
     pids_limit: 256

+  oauth2_proxy:
+    origin:
+      host: "proxy"
+      port: "80"
+    acl:
+      whitelist:
+        - "/users/me"     # Necessary for Nextcloud Plugin to work
+        - "/api/"         # Necessary for Nextcloud Plugin to work
+        - "/oauth/token"  # Necessary for Nextcloud Plugin to work
 volumes:
   data: "openproject_data"
@@ -2,6 +2,8 @@
 - name: "load docker, db and proxy for {{ application_id }}"
   include_role:
     name: sys-stk-full-stateful
+  vars:
+    client_max_body_size: "30m"

 - name: "Create {{ OPENPROJECT_PLUGINS_FOLDER }}"
   file:
@@ -1,6 +1,6 @@
 header.op-app-header{
     background-color: var(--color-01-40);
-    background: linear-gradient({{ range(0, 361) | random }}deg, var(--color-01-35), var(--color-01-40), var(--color-01-41), var(--color-01-45));
+    background: linear-gradient({{ CSS_GRADIENT_ANGLE }}deg, var(--color-01-35), var(--color-01-40), var(--color-01-41), var(--color-01-45));
     color: var(--color-01-40);
 }