Mirror of https://github.com/kevinveenbirkenbach/computer-playbook.git
Synced 2025-09-09 19:57:16 +02:00

Compare commits: 82cc24a7f5 ... 78ee3e3c64 (14 commits)

78ee3e3c64
d7ece2a8c3
3794aa87b0
4cf996b1bb
79517b2fe9
a84ee1240a
7019b307c5
838a8fc7a1
95aba805c0
0856c340c7
b90a2f6c87
98e045196b
a10dd402b8
6e538eabc8
@@ -102,8 +102,10 @@ def find_cycle(roles):
 def topological_sort(graph, in_degree, roles=None):
     """
     Perform topological sort on the dependency graph.
-    If `roles` is provided, on error it will include detailed debug info.
+    If a cycle is detected, raise an Exception with detailed debug info.
     """
+    from collections import deque
+
     queue = deque([r for r, d in in_degree.items() if d == 0])
     sorted_roles = []
     local_in = dict(in_degree)
@@ -117,28 +119,26 @@ def topological_sort(graph, in_degree, roles=None):
                 queue.append(nbr)

     if len(sorted_roles) != len(in_degree):
+        # Something went wrong: likely a cycle
         cycle = find_cycle(roles or {})
-        if roles is not None:
-            if cycle:
-                header = f"Circular dependency detected: {' -> '.join(cycle)}"
-            else:
-                header = "Circular dependency detected among the roles!"
+        unsorted = [r for r in in_degree if r not in sorted_roles]
+        header = "❌ Dependency resolution failed"

-            unsorted = [r for r in in_degree if r not in sorted_roles]
-            detail_lines = ["Unsorted roles and their dependencies:"]
+        if cycle:
+            reason = f"Circular dependency detected: {' -> '.join(cycle)}"
+        else:
+            reason = "Unresolved dependencies among roles (possible cycle or missing role)."

+        details = []
+        if unsorted:
+            details.append("Unsorted roles and their declared run_after dependencies:")
             for r in unsorted:
                 deps = roles.get(r, {}).get('run_after', [])
-                detail_lines.append(f"  - {r} depends on {deps!r}")
+                details.append(f"  - {r} depends on {deps!r}")

-            detail_lines.append("Full dependency graph:")
-            detail_lines.append(f"  {dict(graph)!r}")
+        graph_repr = f"Full dependency graph: {dict(graph)!r}"

-            raise Exception("\n".join([header] + detail_lines))
-        else:
-            if cycle:
-                raise Exception(f"Circular dependency detected: {' -> '.join(cycle)}")
-            else:
-                raise Exception("Circular dependency detected among the roles!")
+        raise Exception("\n".join([header, reason] + details + [graph_repr]))

     return sorted_roles
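For illustration (not part of the diff): a minimal standalone sketch of the message the new error branch assembles for a two-role cycle. The role names, the graph, and the value returned by find_cycle are made up.

    roles = {
        "web-app-a": {"run_after": ["web-app-b"]},
        "web-app-b": {"run_after": ["web-app-a"]},
    }
    graph = {"web-app-a": ["web-app-b"], "web-app-b": ["web-app-a"]}
    in_degree = {"web-app-a": 1, "web-app-b": 1}
    sorted_roles = []                                  # nothing sortable: every node has in-degree > 0
    cycle = ["web-app-a", "web-app-b", "web-app-a"]    # assumed find_cycle() result

    unsorted = [r for r in in_degree if r not in sorted_roles]
    header = "❌ Dependency resolution failed"
    reason = f"Circular dependency detected: {' -> '.join(cycle)}"
    details = ["Unsorted roles and their declared run_after dependencies:"]
    for r in unsorted:
        details.append(f"  - {r} depends on {roles[r]['run_after']!r}")
    graph_repr = f"Full dependency graph: {graph!r}"
    print("\n".join([header, reason] + details + [graph_repr]))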
@@ -2,8 +2,12 @@
 import os
 import argparse
 import json
+import fnmatch
+import re
 from typing import Dict, Any

+import yaml
+
 from cli.build.graph import build_mappings, output_graph

@@ -15,45 +19,155 @@ def find_roles(roles_dir: str):
         yield entry, path


+def _is_pure_jinja_var(s: str) -> bool:
+    """Check if string is exactly a single {{ var }} expression."""
+    return bool(re.fullmatch(r"\s*\{\{\s*[^}]+\s*\}\}\s*", s))
+
+
+def _jinja_to_glob(s: str) -> str:
+    """Convert Jinja placeholders {{ ... }} into * for fnmatch."""
+    pattern = re.sub(r"\{\{[^}]+\}\}", "*", s)
+    pattern = re.sub(r"\*{2,}", "*", pattern)
+    return pattern.strip()
+
+
+def _list_role_dirs(roles_dir: str) -> list[str]:
+    """Return a list of role directory names inside roles_dir."""
+    return [
+        d for d in os.listdir(roles_dir)
+        if os.path.isdir(os.path.join(roles_dir, d))
+    ]
+
+
+def find_include_role_dependencies(role_path: str, roles_dir: str) -> set[str]:
+    """
+    Scan all tasks/*.yml(.yaml) files of a role and collect include_role dependencies.
+
+    Rules:
+    - loop/with_items with literal strings -> add those as roles
+    - name contains jinja AND surrounding literals -> convert to glob and match existing roles
+    - name is a pure jinja variable only -> ignore
+    - name is a pure literal -> add as-is
+    """
+    deps: set[str] = set()
+    tasks_dir = os.path.join(role_path, "tasks")
+    if not os.path.isdir(tasks_dir):
+        return deps
+
+    candidates = []
+    for root, _, files in os.walk(tasks_dir):
+        for f in files:
+            if f.endswith(".yml") or f.endswith(".yaml"):
+                candidates.append(os.path.join(root, f))
+
+    all_roles = _list_role_dirs(roles_dir)
+
+    def add_literal_loop_items(loop_val):
+        if isinstance(loop_val, list):
+            for item in loop_val:
+                if isinstance(item, str) and item.strip():
+                    deps.add(item.strip())
+
+    for file_path in candidates:
+        try:
+            with open(file_path, "r", encoding="utf-8") as f:
+                docs = list(yaml.safe_load_all(f))
+        except Exception:
+            # Be tolerant to any parsing issues; skip unreadable files
+            continue
+
+        for doc in docs:
+            if not isinstance(doc, list):
+                continue
+            for task in doc:
+                if not isinstance(task, dict):
+                    continue
+                if "include_role" not in task:
+                    continue
+                inc = task.get("include_role")
+                if not isinstance(inc, dict):
+                    continue
+                name = inc.get("name")
+                if not isinstance(name, str) or not name.strip():
+                    continue
+
+                # 1) Handle loop/with_items
+                loop_val = task.get("loop", task.get("with_items"))
+                if loop_val is not None:
+                    add_literal_loop_items(loop_val)
+                    # still check name for surrounding literals
+                    if not _is_pure_jinja_var(name):
+                        pattern = (
+                            _jinja_to_glob(name)
+                            if ("{{" in name and "}}" in name)
+                            else name
+                        )
+                        if "*" in pattern:
+                            for r in all_roles:
+                                if fnmatch.fnmatch(r, pattern):
+                                    deps.add(r)
+                    continue
+
+                # 2) No loop: evaluate name
+                if "{{" in name and "}}" in name:
+                    if _is_pure_jinja_var(name):
+                        continue  # ignore pure variable
+                    pattern = _jinja_to_glob(name)
+                    if "*" in pattern:
+                        for r in all_roles:
+                            if fnmatch.fnmatch(r, pattern):
+                                deps.add(r)
+                        continue
+                    else:
+                        deps.add(pattern)
+                else:
+                    # pure literal
+                    deps.add(name.strip())
+
+    return deps
+
+
 def main():
     # default roles dir is ../../roles relative to this script
     script_dir = os.path.dirname(os.path.abspath(__file__))
-    default_roles_dir = os.path.abspath(os.path.join(script_dir, '..', '..', 'roles'))
+    default_roles_dir = os.path.abspath(
+        os.path.join(script_dir, "..", "..", "roles")
+    )

     parser = argparse.ArgumentParser(
         description="Generate all graphs for each role and write meta/tree.json"
     )
     parser.add_argument(
-        '-d', '--role_dir',
+        "-d", "--role_dir",
         default=default_roles_dir,
         help=f"Path to roles directory (default: {default_roles_dir})"
     )
     parser.add_argument(
-        '-D', '--depth',
+        "-D", "--depth",
         type=int,
         default=0,
         help="Max recursion depth (>0) or <=0 to stop on cycle"
     )
     parser.add_argument(
-        '-o', '--output',
-        choices=['yaml', 'json', 'console'],
-        default='json',
+        "-o", "--output",
+        choices=["yaml", "json", "console"],
+        default="json",
         help="Output format"
     )
     parser.add_argument(
-        '-p', '--preview',
-        action='store_true',
+        "-p", "--preview",
+        action="store_true",
         help="Preview graphs to console instead of writing files"
     )
     parser.add_argument(
-        '-s', '--shadow-folder',
+        "-s", "--shadow-folder",
         type=str,
         default=None,
         help="If set, writes tree.json to this shadow folder instead of the role's actual meta/ folder"
     )
     parser.add_argument(
-        '-v', '--verbose',
-        action='store_true',
+        "-v", "--verbose",
+        action="store_true",
         help="Enable verbose logging"
     )
     args = parser.parse_args()
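For illustration (not part of the diff): how the new helpers classify a few include_role names. The snippet copies the two small functions from the hunk above so it runs standalone; the role list and the "{{ kind }}" placeholder are made up.

    import fnmatch
    import re

    def _is_pure_jinja_var(s: str) -> bool:
        return bool(re.fullmatch(r"\s*\{\{\s*[^}]+\s*\}\}\s*", s))

    def _jinja_to_glob(s: str) -> str:
        pattern = re.sub(r"\{\{[^}]+\}\}", "*", s)
        return re.sub(r"\*{2,}", "*", pattern).strip()

    print(_is_pure_jinja_var("{{ item }}"))              # True  -> pure variable, ignored
    print(_is_pure_jinja_var("sys-ctl-{{ item }}"))      # False -> has surrounding literal
    print(_jinja_to_glob("sys-ctl-{{ item }}-compose"))  # sys-ctl-*-compose
    roles = ["sys-ctl-alm-compose", "sys-ctl-cln-bkps", "web-app-syncope"]  # made-up role list
    print(fnmatch.filter(roles, _jinja_to_glob("sys-ctl-{{ kind }}-compose")))
    # ['sys-ctl-alm-compose']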
@@ -75,24 +189,32 @@ def main():
             max_depth=args.depth
         )

+        # add include_role dependencies from tasks
+        include_deps = find_include_role_dependencies(role_path, args.role_dir)
+        if include_deps:
+            deps_root = graphs.setdefault("dependencies", {})
+            inc_list = set(deps_root.get("include_role", []))
+            inc_list.update(include_deps)
+            deps_root["include_role"] = sorted(inc_list)
+            graphs["dependencies"] = deps_root
+
         if args.preview:
             for key, data in graphs.items():
                 if args.verbose:
                     print(f"Previewing graph '{key}' for role '{role_name}'")
-                output_graph(data, 'console', role_name, key)
+                output_graph(data, "console", role_name, key)
         else:
-            # Decide on output folder
             if args.shadow_folder:
                 tree_file = os.path.join(
-                    args.shadow_folder, role_name, 'meta', 'tree.json'
+                    args.shadow_folder, role_name, "meta", "tree.json"
                 )
             else:
-                tree_file = os.path.join(role_path, 'meta', 'tree.json')
+                tree_file = os.path.join(role_path, "meta", "tree.json")
             os.makedirs(os.path.dirname(tree_file), exist_ok=True)
-            with open(tree_file, 'w') as f:
+            with open(tree_file, "w") as f:
                 json.dump(graphs, f, indent=2)
             print(f"Wrote {tree_file}")


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
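For illustration (not part of the diff): the merge step above in isolation. The pre-existing graph content and the collected role names are made up placeholders.

    graphs = {"dependencies": {"run_after": ["docker-compose"]}}   # assumed prior content
    include_deps = {"sys-ctl-alm-email", "sys-ctl-alm-compose"}    # as collected from tasks/*.yml

    deps_root = graphs.setdefault("dependencies", {})
    inc_list = set(deps_root.get("include_role", []))
    inc_list.update(include_deps)
    deps_root["include_role"] = sorted(inc_list)

    print(graphs["dependencies"]["include_role"])
    # ['sys-ctl-alm-compose', 'sys-ctl-alm-email']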
@@ -15,8 +15,8 @@ Suffix handling:
 """

 def get_service_name(systemctl_id, software_name, suffix=""):
     sid = str(systemctl_id).strip().lower()
-    sw = str(software_name).strip().lower()
+    software_name = str(software_name).strip().lower()

     # Determine suffix
     if suffix is False:
@@ -24,14 +24,13 @@ def get_service_name(systemctl_id, software_name, suffix=""):
     elif suffix == "" or suffix is None:
         sfx = ".service"
     else:
-        sfx = "." + str(suffix).strip().lower()
+        sfx = str(suffix).strip().lower()

     if sid.endswith("@"):
         base = sid[:-1]  # drop the trailing '@'
-        return f"{base}.{sw}@{sfx}"
+        return f"{base}.{software_name}@{sfx}"
     else:
-        return f"{sid}.{sw}{sfx}"
+        return f"{sid}.{software_name}{sfx}"


 class FilterModule(object):
     def filters(self):
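For illustration (not part of the diff): a sketch of what the revised filter returns. SOFTWARE_NAME "infinito.nexus" is taken from a comment elsewhere in this diff; the `suffix is False` branch is elided above, so sfx = "" there is an assumption.

    def get_service_name(systemctl_id, software_name, suffix=""):
        # Demonstration copy only; see the assumption on the False branch above.
        sid = str(systemctl_id).strip().lower()
        software_name = str(software_name).strip().lower()
        if suffix is False:
            sfx = ""                                   # assumed
        elif suffix == "" or suffix is None:
            sfx = ".service"
        else:
            sfx = str(suffix).strip().lower()
        if sid.endswith("@"):
            base = sid[:-1]
            return f"{base}.{software_name}@{sfx}"
        return f"{sid}.{software_name}{sfx}"

    print(get_service_name("sys-ctl-cln-faild-bkps", "infinito.nexus"))
    # sys-ctl-cln-faild-bkps.infinito.nexus.service
    print(get_service_name("sys-ctl-alm-compose@", "infinito.nexus", False) + "%n.service")
    # sys-ctl-alm-compose.infinito.nexus@%n.service   (shape of the new SYS_SERVICE_ON_FAILURE_COMPOSE)
    print(get_service_name("svc-opt-ssd-hdd", "infinito.nexus", ".timer"))
    # svc-opt-ssd-hdd.infinito.nexus.timer            (shape of the new sys_timer_file)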
@@ -7,3 +7,4 @@ MODE_BACKUP: true # Activates the backup before the update procedure
 MODE_CLEANUP: true # Cleanup unused files and configurations
 MODE_DEBUG: false # This enables debugging in ansible and in the apps, You SHOULD NOT enable this on production servers
 MODE_RESET: false # Cleans up all Infinito.Nexus files. It's necessary to run to whole playbook and not particial roles when using this function.
+MODE_ASSERT: false # Executes validation tasks during the run.
@@ -5,7 +5,6 @@
 SYS_SERVICE_SUFFIX: ".{{ SOFTWARE_NAME | lower }}.service"

 ## Names
-SYS_SERVICE_CLEANUP_BACKUPS_OLD: "{{ 'sys-ctl-cln-bkps' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_CLEANUP_BACKUPS_FAILED: "{{ 'sys-ctl-cln-faild-bkps' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_OPTIMIZE_DRIVE: "{{ 'svc-opt-ssd-hdd' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_BACKUP_RMT_2_LOC: "{{ 'svc-bkp-rmt-2-loc' | get_service_name(SOFTWARE_NAME) }}"
@@ -14,7 +13,7 @@ SYS_SERVICE_REPAIR_DOCKER_HARD: "{{ 'sys-ctl-rpr-docker-hard' | get_servic
 SYS_SERVICE_UPDATE_DOCKER: "{{ 'update-docker' | get_service_name(SOFTWARE_NAME) }}"

 ## On Failure
-SYS_SERVICE_ON_FAILURE_COMPOSE: "{{ 'sys-ctl-alm-compose' | get_service_name(SOFTWARE_NAME,'%i.service') }}"
+SYS_SERVICE_ON_FAILURE_COMPOSE: "{{ ('sys-ctl-alm-compose@') | get_service_name(SOFTWARE_NAME, False) }}%n.service"

 ## Groups
 SYS_SERVICE_GROUP_BACKUPS: >
@@ -2,7 +2,6 @@
 # Service Timers

 ## Meta
-SYS_TIMER_SUFFIX: ".{{ SOFTWARE_NAME | lower }}.timer"
 SYS_TIMER_ALL_ENABLED: "{{ not MODE_DEBUG }}" # Runtime Variables for Process Control - Activates all timers, independend if the handlers had been triggered

 ## Server Tact Variables
@@ -84,7 +84,7 @@ defaults_networks:
       subnet: 192.168.103.64/28
     web-app-syncope:
       subnet: 192.168.103.80/28
-    web-app-collabora:
+    web-svc-collabora:
       subnet: 192.168.103.96/28
     web-svc-simpleicons:
       subnet: 192.168.103.112/28
@@ -63,7 +63,7 @@ ports:
     web-app-navigator: 8039
     web-app-espocrm: 8040
     web-app-syncope: 8041
-    web-app-collabora: 8042
+    web-svc-collabora: 8042
     web-app-mobilizon: 8043
     web-svc-simpleicons: 8044
     web-app-libretranslate: 8045
@@ -12,6 +12,7 @@
     name: github.com
     key: "{{ lookup('pipe', 'ssh-keyscan -t ed25519 github.com | grep -v \"^#\"') }}"
   become: true
+  no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"

 - name: Create installation directory for Kevin's Package Manager
   file:
@@ -37,7 +38,7 @@
 - name: create config.yaml
   template:
     src: config.yaml.j2
-    dest: "{{pkgmgr_config_path}}"
+    dest: "{{ pkgmgr_config_path }}"
   become: true

 - name: Run the Package Manager install command to create an alias for Kevins package manager
@@ -36,8 +36,6 @@ http
                      '"X-Forwarded-For: $http_x_forwarded_for" '
                      '"Scheme: $scheme" "Protocol: $server_protocol" "ServerName: $server_name"';
     access_log /dev/stdout debug;
-    {% else %}
-    access_log /dev/stdout debug;
     {% endif %}
     error_log /dev/stderr info;

@@ -21,6 +21,7 @@
       - target
      - source
      to non‑empty values in your configuration file.
+  when: MODE_ASSERT | bool

 - include_role:
     name: sys-service
@@ -6,7 +6,6 @@ OnFailure={{ SYS_SERVICE_ON_FAILURE_COMPOSE }}
 [Service]
 Type=oneshot
 ExecStart={{ system_service_script_exec }} {{ BACKUP_TO_USB_SOURCE }} {{ BACKUP_TO_USB_DESTINATION }}
-ExecStartPost=/bin/systemctl start {{ SYS_SERVICE_CLEANUP_BACKUPS_OLD }}

 [Install]
 WantedBy=multi-user.target
@@ -4,6 +4,6 @@ OnFailure={{ SYS_SERVICE_ON_FAILURE_COMPOSE }} {{ SYS_SERVICE_CLEANUP_BACKUPS_FAILED }}

 [Service]
 Type=oneshot
-ExecStartPre=/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(' ') }} --ignore {{ SYS_SERVICE_GROUP_BACKUPS| join(' ') }} --timeout "{{ SYS_TIMEOUT_BACKUP_SERVICES }}"
+ExecStartPre=/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(' ') }} --ignore {{ SYS_SERVICE_GROUP_BACKUPS | join(' ') }} --timeout "{{ SYS_TIMEOUT_BACKUP_SERVICES }}"
 ExecStart={{ system_service_script_exec }}
-ExecStartPost=/bin/systemctl start {{ SYS_SERVICE_CLEANUP_BACKUPS_OLD }}
@@ -4,8 +4,8 @@ credentials:
     algorithm: "bcrypt"
     validation: "^\\$2[aby]\\$.{56}$"

-  OPT_DRIVE_RAPID_STORAGE_PATH:
+  rapid_storage:
     description: "Mount path of the servers SSD"

-  OPT_DRIVE_MASS_STORAGE_PATH:
+  mass_storage:
     description: "Mount path of the servers HDD"
@@ -1,2 +1,5 @@
 - include_role:
     name: sys-service
+  vars:
+    system_service_tpl_exec_start_pre: '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(" ") }} --ignore {{ SYS_SERVICE_OPTIMIZE_DRIVE }} {{ SYS_SERVICE_BACKUP_RMT_2_LOC }} --timeout "{{ SYS_TIMEOUT_STORAGE_OPTIMIZER }}"'
+    system_service_tpl_exec_start: '{{ system_service_script_exec }} --mass-storage-path {{ OPT_DRIVE_MASS_STORAGE_PATH }} --rapid-storage-path {{ OPT_DRIVE_RAPID_STORAGE_PATH }}'
@@ -1,8 +0,0 @@
-[Unit]
-Description=Optimize storage paths
-OnFailure={{ SYS_SERVICE_ON_FAILURE_COMPOSE }}
-
-[Service]
-Type=oneshot
-ExecStartPre=/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(' ') }} --ignore {{ SYS_SERVICE_OPTIMIZE_DRIVE }} {{ SYS_SERVICE_BACKUP_RMT_2_LOC }} --timeout "{{ SYS_TIMEOUT_STORAGE_OPTIMIZER }}"
-ExecStart={{ system_service_script_exec }} --mass-storage-path {{ OPT_DRIVE_MASS_STORAGE_PATH }}
@@ -1,4 +1,4 @@
 application_id: svc-opt-ssd-hdd
 system_service_id: "{{ application_id }}"
 OPT_DRIVE_RAPID_STORAGE_PATH: "{{ applications | get_app_conf(application_id, 'volumes.rapid_storage') }}"
 OPT_DRIVE_MASS_STORAGE_PATH: "{{ applications | get_app_conf(application_id, 'volumes.mass_storage') }}"
@@ -1,13 +1,4 @@
 ---
-- name: Wait until OpenResty container is running
-  command: docker inspect -f '{{.State.Running}}' {{ OPENRESTY_CONTAINER }}
-  register: openresty_status
-  retries: 10
-  delay: 3
-  until: openresty_status.stdout.strip() == "true"
-  changed_when: false
-  listen: restart openresty
-
 - name: Validate OpenResty configuration
   command: >
     docker exec {{ OPENRESTY_CONTAINER }} openresty -t -q
@@ -6,20 +6,32 @@
     - sys-ctl-alm-email
   vars:
     flush_handlers: true
-    systemctl_timer_enabled: false
-    systemctl_copy_files: true
+    system_service_timer_enabled: false
+    system_service_copy_files: true
+    system_service_tpl_exec_start: "{{ system_service_script_exec }} %I"
+    system_service_tpl_on_failure: ""

 - name: "Include core service for '{{ system_service_id }}'"
   include_role:
     name: sys-service
   vars:
     flush_handlers: true
-    systemctl_timer_enabled: false
-    systemctl_copy_files: true
-    systemctl_tpl_exec_start: "{{ system_service_script_exec }} %i"
-    systemctl_tpl_on_failure: "" # No on failure needed, because it's anyhow the default on failure procedure
+    system_service_timer_enabled: false
+    system_service_copy_files: true
+    system_service_tpl_exec_start: "{{ system_service_script_exec }} %I"
+    system_service_tpl_on_failure: "" # No on failure needed, because it's anyhow the default on failure procedure

-- name: "Send message to test service."
-  systemd:
-    name: "sys-ctl-alm-compose@{{ SYSTEMCTL_ALARM_COMPOSER_DUMMY_MESSAGE }}.service"
-    state: started
+- block:
+    - name: Escape instance name for systemctl call
+      ansible.builtin.command:
+        argv:
+          - systemd-escape
+          - "{{ SYSTEMCTL_ALARM_COMPOSER_DUMMY_MESSAGE }}"
+      register: escaped_name
+      changed_when: false
+
+    - name: Start sys-ctl-alm-compose instance
+      ansible.builtin.systemd:
+        name: "{{ ('sys-ctl-alm-compose@') | get_service_name(SOFTWARE_NAME, False) ~ escaped_name.stdout ~ '.service' }}"
+        state: started
+  when: MODE_ASSERT | bool
@@ -1,10 +1,11 @@
 #!/bin/bash
 err=0
 set -u
-{% for alarm_service in SYSTEMCTL_ALARM_COMPOSER_SUBSERVICES %}
-{% set alarm_service_full_name = alarm_service | get_service_name(SOFTWARE_NAME, '"$1".service') %}
-if ! /usr/bin/systemctl start {{ alarm_service_full_name }}; then
-  echo "ERROR: Failed to start {{ alarm_service_full_name }}" >&2
+{% for alarm in SYSTEMCTL_ALARM_COMPOSER_SUBSERVICES %}
+# sys-ctl-alm-email.infinito.nexus@<escaped>.service (no extra dot!)
+unit="{{ (alarm ~ '@') | get_service_name(SOFTWARE_NAME, False) }}$(systemd-escape "$1").service"
+if ! /usr/bin/systemctl start -- "$unit"; then
+  echo "ERROR: Failed to start $unit" >&2
   err=1
 fi
 {% endfor %}
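For illustration (not part of the diff): how the escaped instance text feeds into the unit name built above. This sketch shells out to systemd-escape, which must be installed; the failure message is made up.

    import subprocess

    message = "web-app-syncope backup failed"   # made-up text passed to the script as "$1"
    escaped = subprocess.run(
        ["systemd-escape", message],
        capture_output=True, text=True, check=True,
    ).stdout.strip()
    print(escaped)  # spaces become \x20, hyphens \x2d, etc.

    # Same shape the template builds: <alarm>.<SOFTWARE_NAME>@<escaped>.service
    unit = f"sys-ctl-alm-email.infinito.nexus@{escaped}.service"
    print(unit)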
@@ -6,7 +6,5 @@

 - include_role:
     name: sys-service
-  vars:
-    system_service_copy_files: true


@@ -1,15 +1,24 @@
 #!/bin/bash
+set -u
+# Reverse systemd escaping for human-readable mail subject/body
+friendly="$(systemd-escape --unescape "$1")"
+
+# Try status with given arg; if empty, also try the escaped version
+STATUS_OUT="$(systemctl status --full "$1" 2>/dev/null | head -n 30)"
+if [ -z "$STATUS_OUT" ]; then
+  esc="$(systemd-escape "$1")"
+  STATUS_OUT="$(systemctl status --full "$esc" 2>/dev/null | head -n 30)"
+fi
+
 /usr/bin/sendmail -t <<ERRMAIL
 To: {{ users.administrator.email }}
 From: systemd <{{ users['no-reply'].email }}>
-Subject: $1
+Subject: ${friendly}
 Content-Transfer-Encoding: 8bit
 Content-Type: text/plain; charset=UTF-8

-A problem with the service $1 occured:
+A problem with the service ${friendly} occurred:

-$(systemctl status --full "$1" | head -n 30)
+$STATUS_OUT

 ERRMAIL
@@ -1,8 +0,0 @@
-[Unit]
-Description=status email for %i to user
-
-[Service]
-Type=oneshot
-ExecStart={{ system_service_script_exec }} %i
-User=root
-Group=systemd-journal
@@ -8,11 +8,10 @@
       Please provide non‑empty values for:
         - telegram_bot_token   # Your Telegram bot’s API token
        - telegram_chat_id     # The Telegram chat ID to send messages to
+  when: MODE_ASSERT | bool

 - include_role:
     name: sys-service
-  vars:
-    system_service_copy_files: true

 - name: install curl
   community.general.pacman:
@@ -1,4 +1,8 @@
 #!/bin/bash
+set -u
+
+# Make the instance text human readable again (reverse systemd escaping)
+friendly="$(systemd-escape --unescape "$1")"
+
 # determine host name: try hostname command, otherwise use $HOSTNAME
 if command -v hostname &>/dev/null; then
@@ -11,4 +15,4 @@ fi
 /usr/bin/curl -s -X POST \
   "https://api.telegram.org/bot{{ telegram_bot_token }}/sendMessage" \
   -d chat_id="{{ telegram_chat_id }}" \
-  -d text="service $1 on ${host} failed"
+  --data-urlencode text="service ${friendly} on ${host} failed"
@@ -1,8 +0,0 @@
-[Unit]
-Description=status Telegram message for %i to user
-
-[Service]
-Type=oneshot
-ExecStart={{ system_service_script_exec }} %i
-User=root
-Group=systemd-journal
@@ -6,4 +6,3 @@ OnFailure={{ SYS_SERVICE_ON_FAILURE_COMPOSE }} {{ SYS_SERVICE_CLEANUP_BACKUPS_FAILED }}
 Type=oneshot
 ExecStartPre=/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(' ') }} --ignore {{ SYS_SERVICE_GROUP_BACKUPS | reject('equalto', role_name ~ '-everything') | join(' ') }} --timeout "{{ SYS_TIMEOUT_BACKUP_SERVICES }}"
 ExecStart=/bin/sh -c '{{ BKP_DOCKER_2_LOC_EXEC }}'
-ExecStartPost=/bin/systemctl start {{ SYS_SERVICE_CLEANUP_BACKUPS_OLD }}
@@ -7,7 +7,9 @@
     that:
       - SYSTEMD_MANAGER_CONF_DIR | regex_search('^/etc/systemd/system\.conf\.d/?$')
     fail_msg: "SYSTEMD_MANAGER_CONF_DIR must be /etc/systemd/system.conf.d"
-  when: SYSTEMD_MANAGER_RESET_PURGE | bool
+  when:
+    - SYSTEMD_MANAGER_RESET_PURGE | bool
+    - MODE_ASSERT | bool

 - name: "Purge manager drop-in directory (remove)"
   file:
@@ -5,6 +5,7 @@
   ansible.builtin.assert:
     that: [ "CLOUDFLARE_API_TOKEN | length > 0" ]
   no_log: "{{ cloudflare_no_log | bool }}"
+  when: MODE_ASSERT | bool

 - name: Apply A/AAAA
   community.general.cloudflare_dns:
@@ -14,6 +14,7 @@
     that: [ "_hz_token | length > 0" ]
     fail_msg: "HETZNER_API_TOKEN is required for the Cloud flavor."
   no_log: "{{ hetzner_no_log | bool }}"
+  when: MODE_ASSERT | bool

 - name: Collect hcloud servers if needed (server records without identifier)
   hetzner.hcloud.server_info:
@@ -61,6 +62,7 @@
       )
     fail_msg: "Could not resolve hcloud server by IPv4 for one or more records."
   no_log: "{{ hetzner_no_log | bool }}"
+  when: MODE_ASSERT | bool

 - name: Validate records (cloud)
   ansible.builtin.assert:
@@ -74,6 +76,7 @@
         + ((_rdns_records | default(rdns_records)) | rejectattr('resource','equalto','server') | list | length)
       ) == ((_rdns_records | default(rdns_records)) | length)
   no_log: "{{ hetzner_no_log | bool }}"
+  when: MODE_ASSERT | bool

 - name: Apply rDNS via hcloud
   hetzner.hcloud.hcloud_rdns:
@@ -7,6 +7,7 @@
      - (HETZNER_ROBOT_PASSWORD | default('') | length) > 0
     fail_msg: "Robot credentials required: HETZNER_ROBOT_USER / HETZNER_ROBOT_PASSWORD."
   no_log: "{{ hetzner_no_log | bool }}"
+  when: MODE_ASSERT | bool

 - name: Validate records (robot)
   ansible.builtin.assert:
@@ -16,6 +17,7 @@
      - (rdns_records | selectattr('dns_ptr','defined') | list | length) == (rdns_records | length)
     fail_msg: "Each record must have ip_address and dns_ptr for Robot rDNS."
   no_log: "{{ hetzner_no_log | bool }}"
+  when: MODE_ASSERT | bool

 - name: Apply rDNS via Hetzner Robot API
   vars:
@@ -1,4 +1,3 @@
-# 1) Find the template (prefer target role, then fall back to this role)
 - name: Resolve systemctl template source
   set_fact:
     system_service_template_src: >-
@@ -17,31 +16,29 @@
       errors='strict'
     ) }}

-# Optional: sanity check with a clear error if truly nothing found
 - name: Ensure a systemctl template was found
   assert:
     that: system_service_template_src | length > 0
     fail_msg: >-
       Could not resolve any systemctl template. Looked in:
       {{ system_service_role_dir }}/templates/ and {{ role_path }}/templates/.
+  when: MODE_ASSERT | bool

-# 2) Now we may safely derive whether it’s the “@” variant
 - name: Flag whether @-template is used
   set_fact:
-    system_service_uses_at: "{{ (system_service_template_src | basename) is search('@\\.service\\.j2$') }}"
+    system_service_uses_at: "{{ system_service_id.endswith('@') }}"

-# 3) Use it
 - name: "setup systemctl '{{ system_service_id }}'"
   template:
     src: "{{ system_service_template_src }}"
     dest: "{{ [ PATH_SYSTEM_SERVICE_DIR, system_service_id | get_service_name(SOFTWARE_NAME) ] | path_join }}"
   notify: "{{ 'reload system daemon' if system_service_uses_at else 'refresh systemctl service' }}"

-- name: refresh systemctl service when SYS_SERVICE_ALL_ENABLED
-  command: /bin/true
-  notify:
-    - reload system daemon
-    - refresh systemctl service
-  when:
-    - SYS_SERVICE_ALL_ENABLED | bool
-    - not system_service_uses_at
+- name: refresh systemctl service when SYS_SERVICE_ALL_ENABLE
+  block:
+    - command: /bin/true
+      notify: reload system daemon
+    - command: /bin/true
+      notify: refresh systemctl service
+      when: not system_service_uses_at
+  when: SYS_SERVICE_ALL_ENABLED | bool
@@ -3,6 +3,7 @@
     that:
      - "'@' not in system_service_id"
     fail_msg: "Invalid system_service_id '{{ system_service_id }}' → must not contain '@'."
+  when: MODE_ASSERT | bool

 - name: "Make '{{ system_service_id }}' available for sys-timer"
   set_fact:
@@ -1,9 +1,14 @@
 [Unit]
 Description={{ SOFTWARE_NAME }} - Service for role '{{ system_service_id }}'
+{% if system_service_tpl_on_failure |length > 0 %}
 OnFailure={{ system_service_tpl_on_failure }}
+{% endif %}

 [Service]
 Type={{ system_service_tpl_type }}
+{% if system_service_tpl_exec_start_pre |length > 0 %}
+ExecStartPre={{ system_service_tpl_exec_start_pre }}
+{% endif %}
 ExecStart={{ system_service_tpl_exec_start }}
 {% if system_service_tpl_runtime |length > 0 %}
 RuntimeMaxSec={{ system_service_tpl_runtime }}
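For illustration (not part of the diff): rendering a trimmed copy of the template above with Jinja2 shows how the new conditionals drop empty OnFailure and ExecStartPre lines. All variable values here are placeholders.

    from jinja2 import Template

    tpl = Template(
        "[Unit]\n"
        "Description={{ SOFTWARE_NAME }} - Service for role '{{ system_service_id }}'\n"
        "{% if system_service_tpl_on_failure | length > 0 %}OnFailure={{ system_service_tpl_on_failure }}\n{% endif %}"
        "\n[Service]\n"
        "Type={{ system_service_tpl_type }}\n"
        "{% if system_service_tpl_exec_start_pre | length > 0 %}ExecStartPre={{ system_service_tpl_exec_start_pre }}\n{% endif %}"
        "ExecStart={{ system_service_tpl_exec_start }}\n"
    )

    print(tpl.render(
        SOFTWARE_NAME="infinito.nexus",
        system_service_id="svc-opt-ssd-hdd",
        system_service_tpl_on_failure="",      # empty -> OnFailure= line omitted
        system_service_tpl_type="oneshot",
        system_service_tpl_exec_start_pre="",  # empty -> ExecStartPre= line omitted
        system_service_tpl_exec_start="/bin/bash /opt/scripts/svc-opt-ssd-hdd.sh",  # placeholder path
    ))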
@@ -17,7 +17,8 @@ system_service_script_inter: "/bin/{{ 'bash' if system_service_script_type == ...
 system_service_script_exec: "{{ system_service_script_inter }} {{ system_service_id | get_service_script_path( system_service_script_type ) }}"

 # Service template
 system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
 system_service_tpl_type: "oneshot"
 system_service_tpl_exec_start: "{{ system_service_script_exec }}"
 system_service_tpl_runtime: "{{ SYS_SERVICE_DEFAULT_RUNTIME }}"
+system_service_tpl_exec_start_pre: ""
@@ -10,11 +10,11 @@ tls_trust_file /etc/ssl/certs/ca-certificates.crt
 tls off
 {% endif %}

-account {{ DEFAULT_EMAIL_ACCOUNT }}
+account system_email_no_reply
 host {{ SYSTEM_EMAIL.HOST }}
 port {{ SYSTEM_EMAIL.PORT }}
 from {{ users['no-reply'].email }}
 user {{ users['no-reply'].email }}
 password {{ users['no-reply'].mailu_token }}

-account default : {{ DEFAULT_EMAIL_ACCOUNT }}
+account default : system_email_no_reply
@@ -1 +0,0 @@
-DEFAULT_EMAIL_ACCOUNT: "{{ SOFTWARE_NAME | replace('.', '_') | lower }}__default_email_account"
@@ -1 +1 @@
-sys_timer_file: "{{ system_service_timer_service }}{{ SYS_TIMER_SUFFIX }}"
+sys_timer_file: "{{ system_service_timer_service | get_service_name(SOFTWARE_NAME,'.timer') }}"
@@ -12,6 +12,7 @@
     generate_ssh_key: yes
     ssh_key_type: rsa
     ssh_key_bits: 8192
+  no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"

 - name: "set correct rights for {{ PATH_ADMINISTRATOR_HOME }}"
   file:
@@ -3,7 +3,7 @@
   web:
     image: "{{ applications | get_app_conf(application_id, 'images.web', True) }}"
     ports:
-      - "{{ports.localhost.http[application_id]}}:80"
+      - "{{ ports.localhost.http[application_id] }}:80"
     volumes:
      - .:/usr/share/nginx/html
      - .:/var/www
@@ -7,7 +7,7 @@
     volumes:
      - data:/baserow/data
     ports:
-      - "{{ports.localhost.http[application_id]}}:80"
+      - "{{ ports.localhost.http[application_id] }}:80"
 {% include 'roles/docker-container/templates/networks.yml.j2' %}
 {% include 'roles/docker-container/templates/depends_on/dmbs_excl.yml.j2' %}

@@ -1,17 +0,0 @@
-- name: create collabora proxy configuration file
-  template:
-    src: "nginx.conf.j2"
-    dest: "{{ NGINX.DIRECTORIES.HTTP.SERVERS }}{{ domains | get_domain(application_id) }}.conf"
-  notify: restart openresty
-
-- name: "Include docker-compose role"
-  include_role:
-    name: docker-compose
-
-- name: Create Docker network for Collabora
-  community.docker.docker_network:
-    name: svc-db-mariadb
-    state: present
-    ipam_config:
-      - subnet: "{{ networks.local[application_id].subnet }}"
-
@@ -1,13 +0,0 @@
-{% include 'roles/docker-compose/templates/base.yml.j2' %}
-
-  collabora:
-    image: collabora/code
-    container_name: collabora
-    ports:
-      - "127.0.0.1:{{ports.localhost.http[application_id]}}:80"
-
-{% include 'roles/docker-container/templates/base.yml.j2' %}
-{% include 'roles/docker-container/templates/depends_on/dmbs_excl.yml.j2' %}
-{% include 'roles/docker-container/templates/networks.yml.j2' %}
-
-{% include 'roles/docker-compose/templates/networks.yml.j2' %}
@@ -1,4 +0,0 @@
-domain=nxsrv
-username=admin
-password=${COLLABRA_PASSWORD}
-extra_params=--o:ssl.enable=false --o:ssl.termination=true
@@ -1,2 +0,0 @@
----
-application_id: web-app-collabora
@@ -16,6 +16,7 @@
      - docker_compose is defined
      - ports is defined
     fail_msg: "Load roles/docker-compose/vars/docker-compose.yml and set `database_type` first."
+  when: MODE_ASSERT | bool

 - name: "Disconnect DB container from Discourse networks"
   ansible.builtin.command:
@@ -15,7 +15,7 @@ templates:
 ## If you want Discourse to share a port with another webserver like Apache or nginx,
 ## see https://meta.discourse.org/t/17247 for details
 expose:
-  - "127.0.0.1:{{ports.localhost.http[application_id]}}:80"   # http
+  - "127.0.0.1:{{ ports.localhost.http[application_id] }}:80"   # http

 params:
   db_default_text_search_config: "pg_catalog.english"
@@ -57,7 +57,7 @@
         target: /usr/share/kibana/config/kibana.yml
         read_only: true
     ports:
-      - "127.0.0.1:{{ports.localhost.http[application_id]}}:5601"
+      - "127.0.0.1:{{ ports.localhost.http[application_id] }}:5601"
     depends_on:
      - elasticsearch

@@ -8,7 +8,7 @@
      - data:/var/www/data # I assume that this one is unnessecarry
      - {{ friendica_host_ldap_config }}:{{ friendica_docker_ldap_config }}:ro
     ports:
-      - "127.0.0.1:{{ports.localhost.http[application_id]}}:80"
+      - "127.0.0.1:{{ ports.localhost.http[application_id] }}:80"
 {% include 'roles/docker-container/templates/healthcheck/msmtp_curl.yml.j2' %}
 {% include 'roles/docker-container/templates/networks.yml.j2' %}
@@ -49,7 +49,7 @@
      - "data:{{funkwhale_media_root}}:ro"
      #- "{{funkwhale_static_root}}:{{funkwhale_static_root}}:ro"
     ports:
-      - "127.0.0.1:{{ports.localhost.http[application_id]}}:80"
+      - "127.0.0.1:{{ ports.localhost.http[application_id] }}:80"

   typesense:
 {% include 'roles/docker-container/templates/base.yml.j2' %}
@@ -5,7 +5,7 @@
     image: "{{ gitea_image }}:{{ gitea_version }}"
     container_name: "{{ gitea_container }}"
     ports:
-      - "127.0.0.1:{{ports.localhost.http[application_id]}}:{{ container_port }}"
+      - "127.0.0.1:{{ ports.localhost.http[application_id] }}:{{ container_port }}"
      - "{{ports.public.ssh[application_id]}}:22"
     volumes:
      - data:/data
@@ -5,7 +5,7 @@
     hostname: '{{ domains | get_domain(application_id) }}'
 {% include 'roles/docker-container/templates/base.yml.j2' %}
     ports:
-      - "127.0.0.1:{{ports.localhost.http[application_id]}}:80"
+      - "127.0.0.1:{{ ports.localhost.http[application_id] }}:80"
      - "{{ports.public.ssh[application_id]}}:22"
     volumes:
      - 'config:/etc/gitlab'
@@ -3,7 +3,7 @@
     image: jenkins/jenkins:lts
     restart: "{{ DOCKER_RESTART_POLICY }}"
     ports:
-      - "127.0.0.1:{{ports.localhost.http[application_id]}}:8080"
+      - "127.0.0.1:{{ ports.localhost.http[application_id] }}:8080"
     volumes:
      - jenkins_data:/var/jenkins_home
     log_driver: journald
@@ -5,7 +5,7 @@
     volumes:
      - data:/var/www/html
     ports:
-      - "127.0.0.1:{{ports.localhost.http[application_id]}}:80"
+      - "127.0.0.1:{{ ports.localhost.http[application_id] }}:80"
 {% include 'roles/docker-container/templates/networks.yml.j2' %}
 {% include 'roles/docker-container/templates/depends_on/dmbs_excl.yml.j2' %}

@@ -46,6 +46,7 @@
      - scope_id_rbac | length > 0
      - (app_client_id_cmd.stdout | trim) is match('^[0-9a-f-]+$')
     fail_msg: "Could not determine client or scope ID."
+  when: MODE_ASSERT | bool

 - name: Get current optional client scopes
   shell: >
@@ -26,6 +26,7 @@
   assert:
     that: [ "(ldap_cmp_id.stdout | trim) not in ['', 'null']" ]
     fail_msg: "LDAP component '{{ KEYCLOAK_LDAP_CMP_NAME }}' not found in Keycloak."
+  when: MODE_ASSERT | bool

 - name: Pull LDAP component from dictionary (by name)
   set_fact:
@@ -42,6 +43,7 @@
      - ldap_component_tpl | length > 0
      - (ldap_component_tpl.subComponents | default({})) | length > 0
     fail_msg: "LDAP component '{{ KEYCLOAK_LDAP_CMP_NAME }}' not found in KEYCLOAK_DICTIONARY_REALM."
+  when: MODE_ASSERT | bool

 - name: Extract mapper 'ldap-roles' from template (raw)
   set_fact:
@@ -59,6 +61,7 @@
   assert:
     that: [ "desired_group_mapper_raw | length > 0" ]
     fail_msg: "'ldap-roles' mapper not found in dictionary under LDAP component."
+  when: MODE_ASSERT | bool

 - name: Build clean mapper payload
   set_fact:
@@ -18,6 +18,7 @@
      - kc_lookup_value is defined
      - kc_desired is defined
     fail_msg: "kc_object_kind, kc_lookup_value, kc_desired are required."
+  when: MODE_ASSERT | bool

 - name: Derive API endpoint and lookup field
   set_fact:
@@ -67,6 +68,7 @@
      - (kc_obj_id | trim) != ''
      - (kc_obj_id | trim) != 'null'
     fail_msg: "{{ kc_object_kind | capitalize }} '{{ kc_lookup_value }}' not found."
+  when: MODE_ASSERT | bool

 - name: Read current object
   shell: >
@@ -85,6 +87,7 @@
   when:
     - kc_object_kind == 'component'
    - (kc_desired.providerId is defined)
+    - MODE_ASSERT | bool
   assert:
     that:
      - cur_obj.providerId == kc_desired.providerId
@@ -4,7 +4,7 @@
     container_name: "{{ lam_container }}"
     image: "{{ lam_image }}"
     ports:
-      - 127.0.0.1:{{ports.localhost.http[application_id]}}:80
+      - 127.0.0.1:{{ ports.localhost.http[application_id] }}:80
 {% include 'roles/docker-container/templates/base.yml.j2' %}
 {% include 'roles/docker-container/templates/networks.yml.j2' %}

@@ -5,7 +5,7 @@
     image: "{{ LISTMONK_IMAGE }}:{{ LISTMONK_VERSION }}"
     container_name: "{{ LISTMONK_NAME }}"
     ports:
-      - "127.0.0.1:{{ports.localhost.http[application_id]}}:{{ container_port }}"
+      - "127.0.0.1:{{ ports.localhost.http[application_id] }}:{{ container_port }}"
     volumes:
      - {{docker_compose.directories.config}}config.toml:/listmonk/config.toml
 {% include 'roles/docker-container/templates/networks.yml.j2' %}
@@ -6,6 +6,7 @@
      - MAILU_HOSTNAMES | length <= 1
     fail_msg: "MAILU_HOSTNAMES must be a list with at most one entry (only one host is supported). You can set the other ones as alias."
     success_msg: "MAILU_HOSTNAMES is valid."
+  when: MODE_ASSERT | bool

 - name: "Mailu Docker and Webserver Setup"
   block:
@@ -13,7 +13,7 @@
     image: {{ MAILU_DOCKER_FLAVOR }}/nginx:{{ MAILU_VERSION }}
 {% include 'roles/docker-container/templates/base.yml.j2' %}
     ports:
-      - "127.0.0.1:{{ports.localhost.http[application_id]}}:80"
+      - "127.0.0.1:{{ ports.localhost.http[application_id] }}:80"
      - "{{ MAILU_IP4_PUBLIC }}:25:25"
      - "{{ MAILU_IP4_PUBLIC }}:465:465"
      - "{{ MAILU_IP4_PUBLIC }}:587:587"
@@ -9,7 +9,7 @@
|
|||||||
command: bash -c "rm -f /mastodon/tmp/pids/server.pid; bundle exec rails s -p {{ container_port }}"
|
command: bash -c "rm -f /mastodon/tmp/pids/server.pid; bundle exec rails s -p {{ container_port }}"
|
||||||
{% include 'roles/docker-container/templates/healthcheck/wget.yml.j2' %}
|
{% include 'roles/docker-container/templates/healthcheck/wget.yml.j2' %}
|
||||||
ports:
|
ports:
|
||||||
- "127.0.0.1:{{ports.localhost.http[application_id]}}:{{ container_port }}"
|
- "127.0.0.1:{{ ports.localhost.http[application_id] }}:{{ container_port }}"
|
||||||
{% include 'roles/docker-container/templates/depends_on/dmbs_excl.yml.j2' %}
|
{% include 'roles/docker-container/templates/depends_on/dmbs_excl.yml.j2' %}
|
||||||
volumes:
|
volumes:
|
||||||
- data:/mastodon/public/system
|
- data:/mastodon/public/system
|
||||||
@@ -5,7 +5,7 @@
  {% include 'roles/docker-container/templates/base.yml.j2' %}
  image: "{{ matomo_image }}:{{ matomo_version }}"
  ports:
- - "127.0.0.1:{{ports.localhost.http[application_id]}}:{{ container_port }}"
+ - "127.0.0.1:{{ ports.localhost.http[application_id] }}:{{ container_port }}"
  volumes:
  - data:/var/www/html
  {% include 'roles/docker-container/templates/depends_on/dmbs_excl.yml.j2' %}
@@ -6,7 +6,7 @@
  volumes:
  - "data:/var/www/html/"
  ports:
- - "127.0.0.1:{{ports.localhost.http[application_id]}}:80"
+ - "127.0.0.1:{{ ports.localhost.http[application_id] }}:80"
  {% include 'roles/docker-container/templates/networks.yml.j2' %}
  {% include 'roles/docker-container/templates/depends_on/dmbs_excl.yml.j2' %}
@@ -1,14 +1,14 @@
  docker:
  services:
  redis:
  enabled: false # No redis needed
  database:
  enabled: false # No database needed
  features:
  matomo: true # activate tracking
  css: true # use custom infinito stile
  desktop: true # Enable in port-ui
  logout: false
  server:
  csp:
  whitelist:
@@ -6,7 +6,7 @@
  image: "{{ MIG_IMAGE }}"
  container_name: "{{ MIG_CONTAINER }}"
  ports:
- - 127.0.0.1:{{ports.localhost.http[application_id]}}:{{ container_port }}
+ - 127.0.0.1:{{ ports.localhost.http[application_id] }}:{{ container_port }}
  build:
  context: "{{docker_repository_path}}"
  dockerfile: Dockerfile
@@ -8,7 +8,7 @@
  dockerfile: Dockerfile
  image: moodle_custom
  ports:
- - 127.0.0.1:{{ports.localhost.http[application_id]}}:{{ container_port }}
+ - 127.0.0.1:{{ ports.localhost.http[application_id] }}:{{ container_port }}
  {% include 'roles/docker-container/templates/base.yml.j2' %}
  volumes:
  - 'code:{{ bitnami_code_link }}'
@@ -20,7 +20,7 @@
  image: nginx:mainline
  restart: {{ DOCKER_RESTART_POLICY }}
  ports:
- - "127.0.0.1:{{ports.localhost.http[application_id]}}:80"
+ - "127.0.0.1:{{ ports.localhost.http[application_id] }}:80"
  volumes:
  - "{{docker_compose_instance_confd_directory}}:{{target_mount_conf_d_directory}}:ro"
  - "data:/var/www/html:ro"
@@ -4,7 +4,7 @@
  context: {{ path_infinito_presentation_output.stdout }}
  dockerfile: {{ path_infinito_presentation_output.stdout }}/Dockerfile
  ports:
- - "127.0.0.1:{{ports.localhost.http[application_id]}}:5000"
+ - "127.0.0.1:{{ ports.localhost.http[application_id] }}:5000"
  volumes:
  - {{ path_infinito_presentation_output.stdout }}:/app
  - {{ path_infinito_output.stdout }}:/source
@@ -9,11 +9,14 @@ server:
  whitelist:
  font-src:
  - "data:"
+ #frame-src:
+ # - ""
  domains:
  canonical:
  - "cloud.{{ PRIMARY_DOMAIN }}"
- # nextcloud: "cloud.{{ PRIMARY_DOMAIN }}"
  # talk: "talk.{{ PRIMARY_DOMAIN }}" @todo needs to be activated
+ helpers:
+ collabora: "{{ WEB_PROTOCOL ~ '://' ~ applications | get_app_conf('web-svc-collabora','server.domains.canonical[0]',False,'<< defaults_applications[web-svc-collabora].server.domains.canonical[0]>>') }}"
  docker:
  volumes:
  data: nextcloud_data
@@ -41,13 +44,8 @@ docker:
  image: "nextcloud/aio-talk"
  version: "latest"
  enabled: false # Not enabled yet, because just implemented during refactoring and not tested yet. if tested activate
- # Its in a own role. @todo remove it if it gets implemented via the other role
- #collabora:
- # name: "nextcloud-collabora"
- # image: "nextcloud-collabora"
- # version: "latest"
  oidc:
- enabled: " {{ applications | get_app_conf('web-app-nextcloud', 'features.oidc', False, True) }}" # Activate OIDC for Nextcloud
+ enabled: "{{ applications | get_app_conf('web-app-nextcloud', 'features.oidc', False, True) }}" # Activate OIDC for Nextcloud
  # floavor decides which OICD plugin should be used.
  # Available options: oidc_login, sociallogin
  # @see https://apps.nextcloud.com/apps/oidc_login
@@ -149,7 +147,7 @@ plugins:
  enabled: false
  fileslibreofficeedit:
  # Nextcloud LibreOffice integration: allows online editing of documents with LibreOffice (https://apps.nextcloud.com/apps/fileslibreofficeedit)
- enabled: true
+ enabled: "{{ not (applications | get_app_conf('web-app-nextcloud', 'plugins.richdocuments.enabled', False, True)) }}"
  forms:
  # Nextcloud forms: facilitates creation of forms and surveys (https://apps.nextcloud.com/apps/forms)
  enabled: true
@@ -225,7 +223,7 @@ plugins:
  enabled: false # Deactivated because it let to bugs
  richdocuments:
  # Nextcloud Rich Documents: provides collaborative document editing capabilities (https://apps.nextcloud.com/apps/richdocuments)
- enabled: false # @todo To set it default to true activate https://hub.docker.com/r/collabora/code before
+ enabled: true # @todo To set it default to true activate https://hub.docker.com/r/collabora/code before
  sociallogin:
  # Nextcloud social login: allows authentication using social networks (https://apps.nextcloud.com/apps/sociallogin)
  enabled: "{{ _applications_nextcloud_oidc_flavor=='sociallogin' | lower }}"
@@ -29,6 +29,5 @@ galaxy_info:
  logo:
  class: "fa-solid fa-cloud"
  run_after:
- - web-app-collabora
  - web-app-keycloak
  - web-app-mastodon
@@ -9,10 +9,10 @@
  - name: Flush handlers so Nextcloud container is restarted and ready
  meta: flush_handlers

- - name: "Wait until Nextcloud is reachable on port {{ports.localhost.http[application_id]}}"
+ - name: "Wait until Nextcloud is reachable on port {{ ports.localhost.http[application_id] }}"
  wait_for:
  host: 127.0.0.1
- port: "{{ports.localhost.http[application_id]}}"
+ port: "{{ ports.localhost.http[application_id] }}"
  timeout: 120
  delay: 2
  state: started
@@ -1,4 +1,11 @@
  ---
+ #- name: "Install Collabora Dependency"
+ # include_role:
+ # name: web-svc-collabora
+ # vars:
+ # flush_handlers: true
+ # when: NEXTCLOUD_COLLABORA_ENABLED
+
  - name: "include role for {{ application_id }} to receive certs & do modification routines"
  include_role:
  name: srv-web-7-6-composer
@@ -4,10 +4,10 @@
  image: "{{ nextcloud_image }}:{{ nextcloud_version }}"
  container_name: {{ nextcloud_container }}
  volumes:
- - data:{{nextcloud_docker_work_directory}}
+ - data:{{ NEXTCLOUD_DOCKER_WORK_DIRECTORY }}
  - {{nextcloud_host_config_additives_directory}}:{{nextcloud_docker_config_additives_directory}}:ro
  healthcheck:
- test: ["CMD", "su", "www-data", "-s", "/bin/sh", "-c", "php {{nextcloud_docker_work_directory}}occ status"]
+ test: ["CMD", "su", "www-data", "-s", "/bin/sh", "-c", "php {{ NEXTCLOUD_DOCKER_WORK_DIRECTORY }}occ status"]
  interval: 1m
  timeout: 10s
  retries: 3
@@ -39,7 +39,7 @@
  driver: journald
  restart: {{ DOCKER_RESTART_POLICY }}
  ports:
- - "127.0.0.1:{{ports.localhost.http[application_id]}}:{{ container_port }}"
+ - "127.0.0.1:{{ ports.localhost.http[application_id] }}:{{ container_port }}"
  volumes:
  - "{{ docker_compose.directories.volumes }}nginx.conf:/etc/nginx/nginx.conf:ro"
  volumes_from:
@@ -57,10 +57,10 @@
  logging:
  driver: journald
  volumes:
- - data:{{nextcloud_docker_work_directory}}
+ - data:{{ NEXTCLOUD_DOCKER_WORK_DIRECTORY }}
  entrypoint: /cron.sh
  healthcheck:
- test: ["CMD", "su", "www-data", "-s", "/bin/sh", "-c", "php {{nextcloud_docker_work_directory}}occ status"]
+ test: ["CMD", "su", "www-data", "-s", "/bin/sh", "-c", "php {{ NEXTCLOUD_DOCKER_WORK_DIRECTORY }}occ status"]
  interval: 1m
  timeout: 10s
  retries: 3
@@ -11,7 +11,7 @@ http_port: "{{ ports.localhost.http[applica
  database_password: "{{ applications | get_app_conf(application_id, 'credentials.database_password', True)}}"
  database_type: "mariadb" # Database flavor

- nextcloud_plugins_enabled: "{{ applications | get_app_conf(application_id, 'plugins_enabled', True) }}"
+ nextcloud_plugins_enabled: "{{ applications | get_app_conf(application_id, 'plugins_enabled') }}"
  nextcloud_administrator_username: "{{ applications | get_app_conf(application_id, 'users.administrator.username') }}"

  # Control Node
@@ -27,38 +27,45 @@ nextcloud_host_nginx_path: "{{ NGINX.DIRECTORIES.HTTP.SERVE

  # Docker

- nextcloud_volume: "{{ applications | get_app_conf(application_id, 'docker.volumes.data', True) }}"
+ nextcloud_volume: "{{ applications | get_app_conf(application_id, 'docker.volumes.data') }}"

- nextcloud_version: "{{ applications | get_app_conf(application_id, 'docker.services.nextcloud.version', True) }}"
+ nextcloud_version: "{{ applications | get_app_conf(application_id, 'docker.services.nextcloud.version') }}"
- nextcloud_image: "{{ applications | get_app_conf(application_id, 'docker.services.nextcloud.image', True) }}"
+ nextcloud_image: "{{ applications | get_app_conf(application_id, 'docker.services.nextcloud.image') }}"
- nextcloud_container: "{{ applications | get_app_conf(application_id, 'docker.services.nextcloud.name', True) }}"
+ nextcloud_container: "{{ applications | get_app_conf(application_id, 'docker.services.nextcloud.name') }}"

- nextcloud_proxy_name: "{{ applications | get_app_conf(application_id, 'docker.services.proxy.name', True) }}"
+ nextcloud_proxy_name: "{{ applications | get_app_conf(application_id, 'docker.services.proxy.name') }}"
- nextcloud_proxy_image: "{{ applications | get_app_conf(application_id, 'docker.services.proxy.image', True) }}"
+ nextcloud_proxy_image: "{{ applications | get_app_conf(application_id, 'docker.services.proxy.image') }}"
- nextcloud_proxy_version: "{{ applications | get_app_conf(application_id, 'docker.services.proxy.version', True) }}"
+ nextcloud_proxy_version: "{{ applications | get_app_conf(application_id, 'docker.services.proxy.version') }}"

- nextcloud_cron_name: "{{ applications | get_app_conf(application_id, 'docker.services.cron.name', True) }}"
+ nextcloud_cron_name: "{{ applications | get_app_conf(application_id, 'docker.services.cron.name') }}"

- nextcloud_talk_name: "{{ applications | get_app_conf(application_id, 'docker.services.talk.name', True) }}"
- nextcloud_talk_image: "{{ applications | get_app_conf(application_id, 'docker.services.talk.image', True) }}"
- nextcloud_talk_version: "{{ applications | get_app_conf(application_id, 'docker.services.talk.version', True) }}"
+ # Plugins
+
+ ## Talk
+ nextcloud_talk_name: "{{ applications | get_app_conf(application_id, 'docker.services.talk.name') }}"
+ nextcloud_talk_image: "{{ applications | get_app_conf(application_id, 'docker.services.talk.image') }}"
+ nextcloud_talk_version: "{{ applications | get_app_conf(application_id, 'docker.services.talk.version') }}"
  nextcloud_talk_enabled: "{{ applications | is_docker_service_enabled(application_id, 'talk') }}"
  nextcloud_talk_stun_port: "{{ ports.public.stun[application_id] }}"
  # nextcloud_talk_domain: "{{ domains[application_id].talk }}"

- #nextcloud_collabora_name: "{{ applications | get_app_conf(application_id, 'docker.services.collabora.name', True) }}"
+ # Collabora
+ #nextcloud_collabora_name: "{{ applications | get_app_conf(application_id, 'docker.services.collabora.name') }}"
+ NEXTCLOUD_COLLABORA_URL: "{{ domains | get_url('web-svc-collabora', WEB_PROTOCOL) }}"
+ #NEXTCLOUD_COLLABORA_DOMAIN: "{{ domains | get_domain('web-svc-collabora') }}"
+ NEXTCLOUD_COLLABORA_ENABLED: "{{ applications | get_app_conf(application_id, 'plugins.richdocuments.enabled') }}"

  ## User Configuration
  nextcloud_docker_user_id: 82 # UID of the www-data user
  nextcloud_docker_user: "www-data" # Name of the www-data user (Set here to easy change it in the future)

  ## Internal Paths
- nextcloud_docker_work_directory: "/var/www/html/" # Name of the workdir in which the application is stored
+ NEXTCLOUD_DOCKER_WORK_DIRECTORY: "/var/www/html/" # Name of the workdir in which the application is stored
- nextcloud_docker_config_directory: "{{nextcloud_docker_work_directory}}config/" # Folder in which the Nextcloud configurations are stored
+ NEXTCLOUD_DOCKER_CONFIG_DIRECTORY: "{{ NEXTCLOUD_DOCKER_WORK_DIRECTORY }}config/" # Folder in which the Nextcloud configurations are stored
- nextcloud_docker_config_file: "{{nextcloud_docker_config_directory}}config.php" # Path to the Nextcloud configuration file
+ nextcloud_docker_config_file: "{{ NEXTCLOUD_DOCKER_CONFIG_DIRECTORY }}config.php" # Path to the Nextcloud configuration file
- nextcloud_docker_config_additives_directory: "{{nextcloud_docker_config_directory}}infinito/" # Path to the folder which contains additional configurations
+ nextcloud_docker_config_additives_directory: "{{ NEXTCLOUD_DOCKER_CONFIG_DIRECTORY }}infinito/" # Path to the folder which contains additional configurations
  nextcloud_docker_include_instructions_file: "/tmp/includes.php" # Path to the temporary file which will be included to the config.php to load the additional configurations

  ## Execution
  nextcloud_docker_exec: "docker exec -u {{ nextcloud_docker_user }} {{ nextcloud_container }}" # General execute composition
- nextcloud_docker_exec_occ: "{{nextcloud_docker_exec}} {{ nextcloud_docker_work_directory }}occ" # Execute docker occ command
+ nextcloud_docker_exec_occ: "{{nextcloud_docker_exec}} {{ NEXTCLOUD_DOCKER_WORK_DIRECTORY }}occ" # Execute docker occ command

roles/web-app-nextcloud/vars/plugins/richdocuments.yml (new file, 15 lines)
@@ -0,0 +1,15 @@
+ plugin_configuration:
+ - appid: "richdocuments"
+ configkey: "wopi_url"
+ configvalue: "{{ NEXTCLOUD_COLLABORA_URL }}"
+
+ # Optional, but helpful if you ever front Collabora by a CDN:
+ - appid: "richdocuments"
+ configkey: "public_wopi_url"
+ configvalue: "{{ NEXTCLOUD_COLLABORA_URL }}"
+
+ # Only use this if you terminate TLS in front and run CODE plain HTTP behind it.
+ # 0 = verify certs (recommended), 1 = skip verification
+ - appid: "richdocuments"
+ configkey: "disable_certificate_verification"
+ configvalue: 0
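For orientation only: the three richdocuments keys above are ordinary Nextcloud app-config entries, so setting them by hand would look roughly like the commands below. The container name and Collabora URL are placeholders, not values from this diff; the role itself presumably applies the list through its own occ wrapper (nextcloud_docker_exec_occ) rather than manually.

# hypothetical manual equivalent of the plugin_configuration entries above
docker exec -u www-data nextcloud occ config:app:set richdocuments wopi_url --value="https://collabora.example.org"
docker exec -u www-data nextcloud occ config:app:set richdocuments public_wopi_url --value="https://collabora.example.org"
docker exec -u www-data nextcloud occ config:app:set richdocuments disable_certificate_verification --value="0"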
@@ -20,7 +20,7 @@ x-op-app: &app
  container_name: {{ openproject_proxy_name }}
  command: "./docker/prod/proxy"
  ports:
- - "127.0.0.1:{{ports.localhost.http[application_id]}}:80"
+ - "127.0.0.1:{{ ports.localhost.http[application_id] }}:80"
  environment:
  APP_HOST: web
  depends_on:
@@ -7,7 +7,7 @@
|
|||||||
{% include 'roles/docker-container/templates/base.yml.j2' %}
|
{% include 'roles/docker-container/templates/base.yml.j2' %}
|
||||||
ports:
|
ports:
|
||||||
- "1935:1935" # @todo Add to ports
|
- "1935:1935" # @todo Add to ports
|
||||||
- "127.0.0.1:{{ports.localhost.http[application_id]}}:{{ container_port }}"
|
- "127.0.0.1:{{ ports.localhost.http[application_id] }}:{{ container_port }}"
|
||||||
volumes:
|
volumes:
|
||||||
- assets:/app/client/dist
|
- assets:/app/client/dist
|
||||||
- data:/data
|
- data:/data
|
||||||
@@ -6,7 +6,7 @@
  container_name: pgadmin
  {% include 'roles/docker-container/templates/base.yml.j2' %}
  ports:
- - "127.0.0.1:{{ports.localhost.http[application_id]}}:80"
+ - "127.0.0.1:{{ ports.localhost.http[application_id] }}:80"
  {% include 'roles/docker-container/templates/depends_on/dmbs_excl.yml.j2' %}
  {% include 'roles/docker-container/templates/networks.yml.j2' %}
  {% include 'roles/docker-container/templates/healthcheck/wget.yml.j2' %}
@@ -4,7 +4,7 @@
  container_name: {{ application_id }}
  image: leenooks/phpldapadmin:{{applications | get_app_conf(application_id, 'version', True)}}
  ports:
- - 127.0.0.1:{{ports.localhost.http[application_id]}}:8080
+ - 127.0.0.1:{{ ports.localhost.http[application_id] }}:8080
  {% include 'roles/docker-container/templates/base.yml.j2' %}

  {% include 'roles/docker-container/templates/networks.yml.j2' %}
@@ -6,7 +6,7 @@
|
|||||||
container_name: "{{ phpmyadmin_container }}"
|
container_name: "{{ phpmyadmin_container }}"
|
||||||
{% include 'roles/docker-container/templates/base.yml.j2' %}
|
{% include 'roles/docker-container/templates/base.yml.j2' %}
|
||||||
ports:
|
ports:
|
||||||
- "127.0.0.1:{{ports.localhost.http[application_id]}}:{{ container_port }}"
|
- "127.0.0.1:{{ ports.localhost.http[application_id] }}:{{ container_port }}"
|
||||||
{% include 'roles/docker-container/templates/depends_on/dmbs_excl.yml.j2' %}
|
{% include 'roles/docker-container/templates/depends_on/dmbs_excl.yml.j2' %}
|
||||||
{% include 'roles/docker-container/templates/networks.yml.j2' %}
|
{% include 'roles/docker-container/templates/networks.yml.j2' %}
|
||||||
{% include 'roles/docker-container/templates/healthcheck/tcp.yml.j2' %}
|
{% include 'roles/docker-container/templates/healthcheck/tcp.yml.j2' %}
|
||||||
|
@@ -8,7 +8,7 @@
|
|||||||
- "data:/var/www/storage"
|
- "data:/var/www/storage"
|
||||||
- "./env:/var/www/.env"
|
- "./env:/var/www/.env"
|
||||||
ports:
|
ports:
|
||||||
- "{{ports.localhost.http[application_id]}}:80"
|
- "{{ ports.localhost.http[application_id] }}:80"
|
||||||
{% include 'roles/docker-container/templates/depends_on/dmbs_excl.yml.j2' %}
|
{% include 'roles/docker-container/templates/depends_on/dmbs_excl.yml.j2' %}
|
||||||
{% include 'roles/docker-container/templates/networks.yml.j2' %}
|
{% include 'roles/docker-container/templates/networks.yml.j2' %}
|
||||||
worker:
|
worker:
|
||||||
|
@@ -1,5 +1,6 @@
|
|||||||
- name: "Validate configuration"
|
- name: "Validate configuration"
|
||||||
include_tasks: "02_validate.yml"
|
include_tasks: "02_validate.yml"
|
||||||
|
when: MODE_ASSERT | bool
|
||||||
|
|
||||||
- name: "load docker, proxy for '{{ application_id }}'"
|
- name: "load docker, proxy for '{{ application_id }}'"
|
||||||
include_role:
|
include_role:
|
||||||
|
@@ -7,7 +7,7 @@
|
|||||||
image: application-portfolio
|
image: application-portfolio
|
||||||
container_name: portfolio
|
container_name: portfolio
|
||||||
ports:
|
ports:
|
||||||
- 127.0.0.1:{{ports.localhost.http[application_id]}}:{{ container_port }}
|
- 127.0.0.1:{{ ports.localhost.http[application_id] }}:{{ container_port }}
|
||||||
volumes:
|
volumes:
|
||||||
- {{docker_repository_path}}app:/app
|
- {{docker_repository_path}}app:/app
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
@@ -4,7 +4,7 @@
  build:
  context: .
  ports:
- - 127.0.0.1:{{ports.localhost.http[application_id]}}:8080
+ - 127.0.0.1:{{ ports.localhost.http[application_id] }}:8080
  restart: {{ DOCKER_RESTART_POLICY }}

  {% include 'roles/docker-compose/templates/networks.yml.j2' %}
@@ -10,7 +10,7 @@
  volumes:
  - data:/var/lib/snipeit
  ports:
- - "127.0.0.1:{{ports.localhost.http[application_id]}}:{{ container_port }}"
+ - "127.0.0.1:{{ ports.localhost.http[application_id] }}:{{ container_port }}"
  {% include 'roles/docker-container/templates/depends_on/dmbs_excl.yml.j2' %}
  {% include 'roles/docker-container/templates/networks.yml.j2' %}
  {% include 'roles/docker-container/templates/healthcheck/tcp.yml.j2' %}
@@ -5,7 +5,7 @@
  context: {{ path_infinito_sphinx_output.stdout }}
  dockerfile: {{ path_infinito_sphinx_output.stdout }}/Dockerfile
  ports:
- - "127.0.0.1:{{ports.localhost.http[application_id]}}:{{ container_port }}"
+ - "127.0.0.1:{{ ports.localhost.http[application_id] }}:{{ container_port }}"
  {% include 'roles/docker-container/templates/healthcheck/curl.yml.j2' %}
  {% include 'roles/docker-container/templates/base.yml.j2' %}
  {% include 'roles/docker-container/templates/networks.yml.j2' %}
@@ -112,7 +112,7 @@
  taiga-gateway:
  image: nginx:alpine
  ports:
- - "127.0.0.1:{{ports.localhost.http[application_id]}}:80"
+ - "127.0.0.1:{{ ports.localhost.http[application_id] }}:80"
  volumes:
  - {{docker_repository_path}}taiga-gateway/taiga.conf:/etc/nginx/conf.d/default.conf
  - static-data:/taiga/static
@@ -6,7 +6,7 @@
  build:
  context: .
  ports:
- - "127.0.0.1:{{ports.localhost.http[application_id]}}:80"
+ - "127.0.0.1:{{ ports.localhost.http[application_id] }}:80"
  volumes:
  - data:{{ wordpress_docker_html_path }}
@@ -7,7 +7,7 @@
  container_name: "{{ yourls_container }}"
  {% include 'roles/docker-container/templates/base.yml.j2' %}
  ports:
- - "127.0.0.1:{{ports.localhost.http[application_id]}}:{{ container_port }}"
+ - "127.0.0.1:{{ ports.localhost.http[application_id] }}:{{ container_port }}"
  {% include 'roles/docker-container/templates/healthcheck/curl.yml.j2' %}

  {% include 'roles/docker-container/templates/depends_on/dmbs_excl.yml.j2' %}
@@ -21,7 +21,7 @@ This Ansible role deploys Collabora Online (CODE) in Docker to enable real-time,

  ## Documentation

- See the role’s `README.md`, task files, and Jinja2 templates in the `roles/web-app-collabora` directory for usage examples and variable definitions.
+ See the role’s `README.md`, task files, and Jinja2 templates in the `roles/web-svc-collabora` directory for usage examples and variable definitions.

  ## Further Resources
@@ -8,5 +8,9 @@ docker:
  enabled: true
  database:
  enabled: false # May this is wrong. Just set during refactoring
+ collabora:
+ image: collabora/code
+ version: latest
+ name: collabora
  features:
  logout: false # I think collabora is more a service then a app. So no login neccessary Propably it makes sense to rename it ;)
@@ -1,7 +1,7 @@
  ---
  galaxy_info:
  author: "Kevin Veen-Birkenbach"
- description: "Deploy Collabora Online CODE in Docker with automated proxy, networking, and environment configuration."
+ description: "Collabora Online CODE with automated proxy, networking, and environment configuration."
  license: "Infinito.Nexus NonCommercial License"
  license_url: "https://s.infinito.nexus/license"
  company: |
@@ -22,7 +22,6 @@ galaxy_info:
  - code
  repository: "https://s.infinito.nexus/code"
  issue_tracker_url: "https://s.infinito.nexus/issues"
- documentation: "https://s.infinito.nexus/code/web-app-collabora"
+ documentation: "https://s.infinito.nexus/code/web-svc-collabora"
  logo:
  class: "fa-solid fa-file-code"
- run_after: []

roles/web-svc-collabora/tasks/main.yml (new file, 6 lines)
@@ -0,0 +1,6 @@
+ - block:
+ - name: "load docker, proxy for '{{ application_id }}'"
+ include_role:
+ name: cmp-docker-proxy
+ - include_tasks: utils/run_once.yml
+ when: run_once_web_svc_collabora is not defined

roles/web-svc-collabora/templates/docker-compose.yml.j2 (new file, 18 lines)
@@ -0,0 +1,18 @@
+ {% include 'roles/docker-compose/templates/base.yml.j2' %}
+
+ collabora:
+ {% include 'roles/docker-container/templates/base.yml.j2' %}
+ image: {{ COLLABORA_IMAGE }}
+ version: {{ COLLABORA_VERSION }}
+ container_name: {{ COLLABORA_CONTAINER }}
+ ports:
+ - "127.0.0.1:{{ ports.localhost.http[application_id] }}:{{ container_port }}"
+ {% include 'roles/docker-container/templates/healthcheck/curl.yml.j2' %}
+ healthcheck:
+ test: ["CMD", "curl", "-f", "http://127.0.0.1:9980/hosting/discovery"]
+ interval: 30s
+ timeout: 10s
+ retries: 5
+ {% include 'roles/docker-container/templates/networks.yml.j2' %}
+
+ {% include 'roles/docker-compose/templates/networks.yml.j2' %}

roles/web-svc-collabora/templates/env.j2 (new file, 4 lines)
@@ -0,0 +1,4 @@
+ domain={{ (domains | get_domain('web-app-nextcloud')) | regex_replace('\\.', '\\\\.') }}
+ {# username=admin #}
+ {# password={{ applications | get_app_conf('web-svc-collabora', 'credentials.admin_password', False, 'ChangeMe!') }}" #}
+ extra_params=--o:ssl.enable=false --o:ssl.termination=true
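A note on the domain line above: the collabora/code image has historically treated the domain variable as a regular expression of allowed WOPI hosts, which is presumably why the template escapes the dots of the Nextcloud domain. Assuming a hypothetical PRIMARY_DOMAIN of example.org (so the Nextcloud canonical domain is cloud.example.org, per the config hunk earlier), the rendered env file would look roughly like this:

domain=cloud\.example\.org
extra_params=--o:ssl.enable=false --o:ssl.termination=true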
@@ -2,14 +2,21 @@ server {
  server_name {{ domain }};

  {% include 'roles/srv-web-7-7-letsencrypt/templates/ssl_header.j2' %}

  {% include 'roles/sys-srv-web-inj-compose/templates/server.conf.j2'%}

  {% include 'roles/srv-proxy-7-4-core/templates/headers/content_security_policy.conf.j2' %}

+ {# Normal HTTP routes (discovery, browser, assets) – no Lua injection #}
+ {% set proxy_lua_enabled = false %}
+ {% set location = "/" %}
  {% include 'roles/srv-proxy-7-4-core/templates/location/html.conf.j2' %}

- {% set location = '^~ /cool/' %}
+ {# Optional explicit fast path for discovery #}
+ {% set location = "= /hosting/discovery" %}
  {% include 'roles/srv-proxy-7-4-core/templates/location/html.conf.j2' %}

+ {# WebSocket handling for Collabora #}
+ {% set location_ws = '^~ /cool/' %}
+ {% set ws_port = http_port %}
+ {% include 'roles/srv-proxy-7-4-core/templates/location/ws.conf.j2' %}
  }

roles/web-svc-collabora/vars/main.yml (new file, 11 lines)
@@ -0,0 +1,11 @@
+ ---
+ # General
+ application_id: web-svc-collabora
+ # Container
+ container_port: 9980
+ container_healthcheck: "/hosting/discovery"
+
+ # Collabora
+ COLLABORA_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.collabora.name') }}"
+ COLLABORA_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.collabora.image') }}"
+ COLLABORA_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.collabora.version') }}"
@@ -7,7 +7,7 @@
  image: logout
  container_name: logout
  ports:
- - 127.0.0.1:{{ports.localhost.http[application_id]}}:{{ container_port }}
+ - 127.0.0.1:{{ ports.localhost.http[application_id] }}:{{ container_port }}
  {% include 'roles/docker-container/templates/networks.yml.j2' %}
  {% include 'roles/docker-container/templates/healthcheck/tcp.yml.j2' %}
@@ -6,7 +6,7 @@
  image: simpleicons-server:latest
  container_name: simpleicons-server
  ports:
- - "{{ports.localhost.http[application_id]}}:{{ container_port }}"
+ - "{{ ports.localhost.http[application_id] }}:{{ container_port }}"
  {% include 'roles/docker-container/templates/base.yml.j2' %}
  {% include 'roles/docker-container/templates/networks.yml.j2' %}
  {% include 'roles/docker-container/templates/healthcheck/curl.yml.j2' %}

Some files were not shown because too many files have changed in this diff.