mirror of
https://github.com/kevinveenbirkenbach/computer-playbook.git
synced 2025-09-08 11:17:17 +02:00
Compare commits
4 Commits
eb781dbf8b
...
b916173422
Author | SHA1 | Date | |
---|---|---|---|
b916173422 | |||
9756a0f75f | |||
e417bc19bd | |||
7ad14673e1 |
@@ -155,7 +155,7 @@ class FilterModule(object):
|
||||
if directive == 'frame-ancestors':
|
||||
# Enable loading via ancestors
|
||||
if self.is_feature_enabled(applications, 'desktop', application_id):
|
||||
domain = domains.get('web-app-port-ui')[0]
|
||||
domain = domains.get('web-app-desktop')[0]
|
||||
sld_tld = ".".join(domain.split(".")[-2:]) # yields "example.com"
|
||||
tokens.append(f"{sld_tld}") # yields "*.example.com"
|
||||
|
||||
|
@@ -50,7 +50,7 @@ ports:
|
||||
web-app-moodle: 8026
|
||||
web-app-taiga: 8027
|
||||
web-app-friendica: 8028
|
||||
web-app-port-ui: 8029
|
||||
web-app-desktop: 8029
|
||||
web-app-bluesky_api: 8030
|
||||
web-app-bluesky_web: 8031
|
||||
web-app-keycloak: 8032
|
||||
|
@@ -6,6 +6,7 @@
|
||||
- include_role:
|
||||
name: sys-service
|
||||
vars:
|
||||
system_service_on_calendar: "{{SYS_SCHEDULE_HEALTH_DOCKER_VOLUMES}}"
|
||||
system_service_timer_enabled: true
|
||||
system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
|
||||
system_service_on_calendar: "{{ SYS_SCHEDULE_HEALTH_DOCKER_VOLUMES }}"
|
||||
system_service_timer_enabled: true
|
||||
system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }} {{ SYS_SERVICE_CLEANUP_ANONYMOUS_VOLUMES }}"
|
||||
system_service_tpl_exec_start: '{{ system_service_script_exec }} "{{ DOCKER_WHITELISTET_ANON_VOLUMES | join(" ") }}"'
|
||||
|
@@ -1,7 +0,0 @@
|
||||
[Unit]
|
||||
Description=Checking docker health
|
||||
OnFailure={{ SYS_SERVICE_ON_FAILURE_COMPOSE }}
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart={{ system_service_script_exec }} "{{ DOCKER_WHITELISTET_ANON_VOLUMES | join(' ') }}"
|
@@ -3,15 +3,41 @@ import sys
|
||||
import subprocess
|
||||
import argparse
|
||||
|
||||
|
||||
def detect_env_file(dir_path: str) -> str | None:
|
||||
"""
|
||||
Return the path to a Compose env file if present (.env preferred, fallback to env).
|
||||
"""
|
||||
candidates = [os.path.join(dir_path, ".env"), os.path.join(dir_path, ".env", "env")]
|
||||
for candidate in candidates:
|
||||
if os.path.isfile(candidate):
|
||||
return candidate
|
||||
return None
|
||||
|
||||
|
||||
def hard_restart_docker_services(dir_path):
|
||||
"""
|
||||
Perform a hard restart of docker-compose services in the given directory
|
||||
using docker-compose down and docker-compose up -d.
|
||||
using docker-compose down and docker-compose up -d, adding --env-file if present.
|
||||
"""
|
||||
try:
|
||||
print(f"Performing hard restart for docker-compose services in: {dir_path}")
|
||||
subprocess.run(["docker-compose", "down"], cwd=dir_path, check=True)
|
||||
subprocess.run(["docker-compose", "up", "-d"], cwd=dir_path, check=True)
|
||||
|
||||
env_file = detect_env_file(dir_path)
|
||||
base = ["docker-compose"]
|
||||
down_cmd = base.copy()
|
||||
up_cmd = base.copy()
|
||||
|
||||
if env_file:
|
||||
down_cmd += ["--env-file", env_file]
|
||||
up_cmd += ["--env-file", env_file]
|
||||
|
||||
down_cmd += ["down"]
|
||||
up_cmd += ["up", "-d"]
|
||||
|
||||
subprocess.run(down_cmd, cwd=dir_path, check=True)
|
||||
subprocess.run(up_cmd, cwd=dir_path, check=True)
|
||||
|
||||
print(f"Hard restart completed successfully in: {dir_path}")
|
||||
except subprocess.CalledProcessError as e:
|
||||
print(f"Error during hard restart in {dir_path}: {e}")
|
||||
|
@@ -6,6 +6,10 @@
|
||||
- include_role:
|
||||
name: sys-service
|
||||
vars:
|
||||
system_service_on_calendar: "{{ SYS_SCHEDULE_REPAIR_DOCKER_HARD }}"
|
||||
system_service_timer_enabled: true
|
||||
system_service_tpl_on_failure: "{{ SYS_SERVICE_CLEANUP_ANONYMOUS_VOLUMES }}"
|
||||
system_service_on_calendar: "{{ SYS_SCHEDULE_REPAIR_DOCKER_HARD }}"
|
||||
system_service_timer_enabled: true
|
||||
system_service_tpl_exec_start_pre: '/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(" ") }} --ignore {{ SYS_SERVICE_REPAIR_DOCKER_HARD }} --timeout "{{ SYS_TIMEOUT_RESTART_DOCKER }}"'
|
||||
system_service_tpl_exec_start: '{{ system_service_script_exec }} {{ PATH_DOCKER_COMPOSE_INSTANCES }}'
|
||||
system_service_tpl_exec_start_post: "/usr/bin/systemctl start {{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
|
||||
system_service_tpl_on_failure: "{{ SYS_SERVICE_CLEANUP_ANONYMOUS_VOLUMES }}"
|
||||
|
||||
|
@@ -1,8 +0,0 @@
|
||||
[Unit]
|
||||
Description=Restart Docker Instances
|
||||
OnFailure={{ SYS_SERVICE_ON_FAILURE_COMPOSE }}
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStartPre=/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(' ') }} --ignore {{ SYS_SERVICE_REPAIR_DOCKER_HARD }} --timeout "{{ SYS_TIMEOUT_RESTART_DOCKER }}"
|
||||
ExecStart={{ system_service_script_exec }} {{ PATH_DOCKER_COMPOSE_INSTANCES }}
|
196
roles/sys-ctl-rpr-docker-soft/files/script.py
Normal file
196
roles/sys-ctl-rpr-docker-soft/files/script.py
Normal file
@@ -0,0 +1,196 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Restart Docker-Compose configurations with exited or unhealthy containers.
|
||||
This version receives the *manipulation services* via argparse (no Jinja).
|
||||
"""
|
||||
import subprocess
|
||||
import time
|
||||
import os
|
||||
import argparse
|
||||
from typing import List
|
||||
|
||||
|
||||
def bash(command: str) -> List[str]:
|
||||
print(command)
|
||||
process = subprocess.Popen(
|
||||
[command], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
|
||||
)
|
||||
out, err = process.communicate()
|
||||
stdout = out.splitlines()
|
||||
stderr = err.decode("utf-8", errors="replace").strip()
|
||||
output = [line.decode("utf-8", errors="replace") for line in stdout]
|
||||
if process.returncode > 0:
|
||||
print(command, out, err)
|
||||
raise Exception(stderr or f"Command failed with code {process.returncode}")
|
||||
return output
|
||||
|
||||
|
||||
def list_to_string(lst: List[str]) -> str:
|
||||
return " ".join(lst)
|
||||
|
||||
|
||||
def print_bash(command: str) -> List[str]:
|
||||
output = bash(command)
|
||||
if output:
|
||||
print(list_to_string(output))
|
||||
return output
|
||||
|
||||
|
||||
def find_docker_compose_file(directory: str) -> str | None:
|
||||
for root, _, files in os.walk(directory):
|
||||
if "docker-compose.yml" in files:
|
||||
return os.path.join(root, "docker-compose.yml")
|
||||
return None
|
||||
|
||||
|
||||
def detect_env_file(project_path: str) -> str | None:
|
||||
"""
|
||||
Return the path to a Compose env file if present (.env preferred, fallback to env).
|
||||
"""
|
||||
candidates = [os.path.join(project_path, ".env"), os.path.join(project_path, ".env", "env")]
|
||||
for candidate in candidates:
|
||||
if os.path.isfile(candidate):
|
||||
return candidate
|
||||
return None
|
||||
|
||||
|
||||
def compose_cmd(subcmd: str, project_path: str, project_name: str | None = None) -> str:
|
||||
"""
|
||||
Build a docker-compose command string with optional -p and --env-file if present.
|
||||
Example: compose_cmd("restart", "/opt/docker/foo", "foo")
|
||||
"""
|
||||
parts: List[str] = [f'cd "{project_path}" && docker-compose']
|
||||
if project_name:
|
||||
parts += ['-p', f'"{project_name}"']
|
||||
env_file = detect_env_file(project_path)
|
||||
if env_file:
|
||||
parts += ['--env-file', f'"{env_file}"']
|
||||
parts += subcmd.split()
|
||||
return " ".join(parts)
|
||||
|
||||
|
||||
def normalize_services_arg(raw: List[str] | None, raw_str: str | None) -> List[str]:
|
||||
"""
|
||||
Accept either:
|
||||
- multiple --manipulation SERVICE flags (nargs='*')
|
||||
- a single --manipulation-string "svc1 svc2 ..." (space or comma separated)
|
||||
"""
|
||||
if raw:
|
||||
return [s for s in raw if s.strip()]
|
||||
if raw_str:
|
||||
# split on comma or whitespace
|
||||
parts = [p.strip() for chunk in raw_str.split(",") for p in chunk.split()]
|
||||
return [p for p in parts if p]
|
||||
return []
|
||||
|
||||
|
||||
def wait_while_manipulation_running(
|
||||
services: List[str],
|
||||
waiting_time: int = 600,
|
||||
timeout: int | None = None,
|
||||
) -> None:
|
||||
"""
|
||||
Wait until none of the given services are active anymore.
|
||||
Stops waiting if timeout (in seconds) is reached.
|
||||
"""
|
||||
if not services:
|
||||
print("No manipulation services provided. Continuing without wait.")
|
||||
return
|
||||
|
||||
start = time.time()
|
||||
while True:
|
||||
any_active = False
|
||||
for svc in services:
|
||||
res = subprocess.run(f"systemctl is-active --quiet {svc}", shell=True)
|
||||
if res.returncode == 0:
|
||||
any_active = True
|
||||
break
|
||||
|
||||
if any_active:
|
||||
# Check timeout
|
||||
elapsed = time.time() - start
|
||||
if timeout and elapsed >= timeout:
|
||||
print(f"Timeout ({timeout}s) reached while waiting for services. Continuing anyway.")
|
||||
break
|
||||
print(f"Manipulation service is running. Trying again in {waiting_time} seconds.")
|
||||
time.sleep(waiting_time)
|
||||
else:
|
||||
print("No blocking service is running.")
|
||||
break
|
||||
|
||||
|
||||
def main(base_directory: str, manipulation_services: List[str], timeout: int | None) -> int:
|
||||
errors = 0
|
||||
wait_while_manipulation_running(manipulation_services, waiting_time=600, timeout=timeout)
|
||||
|
||||
unhealthy_container_names = print_bash(
|
||||
"docker ps --filter health=unhealthy --format '{{{{.Names}}}}'"
|
||||
)
|
||||
exited_container_names = print_bash(
|
||||
"docker ps --filter status=exited --format '{{{{.Names}}}}'"
|
||||
)
|
||||
failed_containers = unhealthy_container_names + exited_container_names
|
||||
|
||||
unfiltered_failed_docker_compose_repositories = [
|
||||
container.split("-")[0] for container in failed_containers
|
||||
]
|
||||
filtered_failed_docker_compose_repositories = list(
|
||||
dict.fromkeys(unfiltered_failed_docker_compose_repositories)
|
||||
)
|
||||
|
||||
for repo in filtered_failed_docker_compose_repositories:
|
||||
compose_file_path = find_docker_compose_file(os.path.join(base_directory, repo))
|
||||
|
||||
if compose_file_path:
|
||||
print("Restarting unhealthy container in:", compose_file_path)
|
||||
project_path = os.path.dirname(compose_file_path)
|
||||
try:
|
||||
# restart with optional --env-file and -p
|
||||
print_bash(compose_cmd("restart", project_path, repo))
|
||||
except Exception as e:
|
||||
if "port is already allocated" in str(e):
|
||||
print("Detected port allocation problem. Executing recovery steps...")
|
||||
# down (no -p needed), then engine restart, then up -d with -p
|
||||
print_bash(compose_cmd("down", project_path))
|
||||
print_bash("systemctl restart docker")
|
||||
print_bash(compose_cmd("up -d", project_path, repo))
|
||||
else:
|
||||
print("Unhandled exception during restart:", e)
|
||||
errors += 1
|
||||
else:
|
||||
print("Error: Docker Compose file not found for:", repo)
|
||||
errors += 1
|
||||
|
||||
print("Finished restart procedure.")
|
||||
return errors
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Restart Docker-Compose configurations with exited or unhealthy containers."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--manipulation",
|
||||
metavar="SERVICE",
|
||||
nargs="*",
|
||||
help="Blocking systemd services to wait for (can be specified multiple times).",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--manipulation-string",
|
||||
type=str,
|
||||
help='Blocking services as a single string (space- or comma-separated), e.g. "svc1 svc2" or "svc1,svc2".',
|
||||
)
|
||||
parser.add_argument(
|
||||
"--timeout",
|
||||
type=int,
|
||||
default=60,
|
||||
help="Maximum time in seconds to wait for manipulation services before continuing.(Default 1min)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"base_directory",
|
||||
type=str,
|
||||
help="Base directory where Docker Compose configurations are located.",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
services = normalize_services_arg(args.manipulation, args.manipulation_string)
|
||||
exit(main(args.base_directory, services, args.timeout))
|
@@ -6,6 +6,10 @@
|
||||
- include_role:
|
||||
name: sys-service
|
||||
vars:
|
||||
system_service_on_calendar: "{{SYS_SCHEDULE_REPAIR_DOCKER_SOFT}}"
|
||||
system_service_timer_enabled: true
|
||||
system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
|
||||
system_service_on_calendar: "{{ SYS_SCHEDULE_REPAIR_DOCKER_SOFT }}"
|
||||
system_service_timer_enabled: true
|
||||
system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
|
||||
system_service_tpl_exec_start_pre: "/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(' ') }} --ignore {{ SYS_SERVICE_GROUP_CLEANUP| join(' ') }} {{ SYS_SERVICE_REPAIR_DOCKER_SOFT }} --timeout '{{ SYS_TIMEOUT_HEAL_DOCKER }}'"
|
||||
system_service_tpl_exec_start: >
|
||||
/bin/sh -c '{{ system_service_script_exec }} --manipulation-string "{{ SYS_SERVICE_GROUP_MANIPULATION | join(" ") }}" {{ PATH_DOCKER_COMPOSE_INSTANCES }}'
|
||||
|
||||
|
@@ -1,90 +0,0 @@
|
||||
#!/bin/python
|
||||
#
|
||||
# Restart Docker-Compose configurations with exited or unhealthy containers
|
||||
#
|
||||
import subprocess
|
||||
import time
|
||||
import os
|
||||
import argparse
|
||||
|
||||
def bash(command):
|
||||
print(command)
|
||||
process = subprocess.Popen([command], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
|
||||
out, err = process.communicate()
|
||||
stdout = out.splitlines()
|
||||
stderr = err.decode("utf-8").strip() # decode stderr
|
||||
output = [line.decode("utf-8") for line in stdout]
|
||||
if process.returncode > 0:
|
||||
print(command, out, err)
|
||||
raise Exception(stderr) # pass the actual error text
|
||||
return output
|
||||
|
||||
def list_to_string(lst):
|
||||
return ' '.join(lst)
|
||||
|
||||
def print_bash(command):
|
||||
output = bash(command)
|
||||
print(list_to_string(output))
|
||||
return output
|
||||
|
||||
def find_docker_compose_file(directory):
|
||||
for root, _, files in os.walk(directory):
|
||||
if 'docker-compose.yml' in files:
|
||||
return os.path.join(root, 'docker-compose.yml')
|
||||
return None
|
||||
|
||||
def main(base_directory):
|
||||
errors = 0
|
||||
waiting_time = 600
|
||||
blocker_running = True
|
||||
|
||||
while blocker_running:
|
||||
try:
|
||||
{% for manipulation_service in SYS_SERVICE_GROUP_MANIPULATION %}
|
||||
bash("systemctl is-active --quiet {{ manipulation_service }}")
|
||||
{% endfor %}
|
||||
print("Manipulation service is running.")
|
||||
print(f"Trying again in {waiting_time} seconds.")
|
||||
time.sleep(waiting_time)
|
||||
except:
|
||||
blocker_running = False
|
||||
print("No blocking service is running.")
|
||||
|
||||
unhealthy_container_names = print_bash("docker ps --filter health=unhealthy --format '{% raw %}{{.Names}}{% endraw %}'")
|
||||
exited_container_names = print_bash("docker ps --filter status=exited --format '{% raw %}{{.Names}}{% endraw %}'")
|
||||
failed_containers = unhealthy_container_names + exited_container_names
|
||||
|
||||
unfiltered_failed_docker_compose_repositories = [container.split('-')[0] for container in failed_containers]
|
||||
filtered_failed_docker_compose_repositories = list(dict.fromkeys(unfiltered_failed_docker_compose_repositories))
|
||||
|
||||
for repo in filtered_failed_docker_compose_repositories:
|
||||
compose_file_path = find_docker_compose_file(os.path.join(base_directory, repo))
|
||||
|
||||
if compose_file_path:
|
||||
print("Restarting unhealthy container in:", compose_file_path)
|
||||
project_path = os.path.dirname(compose_file_path)
|
||||
try:
|
||||
print_bash(f'cd {project_path} && docker-compose -p "{repo}" restart')
|
||||
except Exception as e:
|
||||
if "port is already allocated" in str(e):
|
||||
print("Detected port allocation problem. Executing recovery steps...")
|
||||
print_bash(f'cd {project_path} && docker-compose down')
|
||||
print_bash('systemctl restart docker')
|
||||
print_bash(f'cd {project_path} && docker-compose -p "{repo}" up -d')
|
||||
else:
|
||||
print("Unhandled exception during restart:", e)
|
||||
errors += 1
|
||||
else:
|
||||
print("Error: Docker Compose file not found for:", repo)
|
||||
errors += 1
|
||||
|
||||
|
||||
print("Finished restart procedure.")
|
||||
exit(errors)
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Restart Docker-Compose configurations with exited or unhealthy containers.")
|
||||
parser.add_argument("base_directory", type=str, help="Base directory where Docker Compose configurations are located.")
|
||||
args = parser.parse_args()
|
||||
|
||||
main(args.base_directory)
|
@@ -1,8 +0,0 @@
|
||||
[Unit]
|
||||
Description=restart unhealthy docker containers
|
||||
OnFailure={{ SYS_SERVICE_ON_FAILURE_COMPOSE }}
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStartPre=/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(' ') }} --ignore {{ SYS_SERVICE_GROUP_CLEANUP| join(' ') }} {{ SYS_SERVICE_REPAIR_DOCKER_SOFT }} --timeout "{{ SYS_TIMEOUT_HEAL_DOCKER }}"
|
||||
ExecStart=/bin/sh -c '{{ system_service_script_exec }} {{ PATH_DOCKER_COMPOSE_INSTANCES }}'
|
@@ -11,7 +11,8 @@ Type={{ system_service_tpl_type }}
|
||||
('TimeoutStartSec', system_service_tpl_timeout_start_sec),
|
||||
('ExecStartPre', system_service_tpl_exec_start_pre),
|
||||
('ExecStart', system_service_tpl_exec_start),
|
||||
('RuntimeMaxSec', system_service_tpl_runtime)
|
||||
('ExecStartPost', system_service_tpl_exec_start_post),
|
||||
('RuntimeMaxSec', system_service_tpl_runtime),
|
||||
] %}
|
||||
{{ val | systemd_directive(key) }}
|
||||
{% endfor %}
|
@@ -20,7 +20,8 @@ system_service_script_exec: "{{ system_service_script_inter }} {{ system_servi
|
||||
# Service template
|
||||
system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
|
||||
system_service_tpl_type: "oneshot"
|
||||
system_service_tpl_exec_start: "{{ system_service_script_exec }}"
|
||||
system_service_tpl_runtime: "{{ '' if system_service_tpl_type == 'oneshot' else SYS_SERVICE_DEFAULT_RUNTIME }}"
|
||||
system_service_tpl_exec_start_pre: ""
|
||||
system_service_tpl_exec_start: "{{ system_service_script_exec }}"
|
||||
system_service_tpl_exec_start_post: ""
|
||||
system_service_tpl_timeout_start_sec: "60s"
|
@@ -2,7 +2,7 @@ document.addEventListener('DOMContentLoaded', function () {
|
||||
initIframeHandler(
|
||||
'{{ PRIMARY_DOMAIN }}',
|
||||
'{{ domain }}',
|
||||
'{{ domains | get_url("web-app-port-ui", WEB_PROTOCOL) }}'
|
||||
'{{ domains | get_url("web-app-desktop", WEB_PROTOCOL) }}'
|
||||
);
|
||||
});
|
||||
{% if MODE_DEBUG | bool %}
|
||||
|
@@ -26,4 +26,4 @@ galaxy_info:
|
||||
dependencies:
|
||||
- web-svc-legal
|
||||
- web-svc-asset
|
||||
- web-app-port-ui
|
||||
- web-app-desktop
|
@@ -1,4 +1,4 @@
|
||||
# PortUI
|
||||
# Desktop
|
||||
|
||||
## Description
|
||||
|
@@ -5,11 +5,11 @@
|
||||
(
|
||||
not (item.value.features['desktop'] | default(false) | bool)
|
||||
)
|
||||
or (domains | get_domain(item.key)).endswith(domains | get_domain('web-app-port-ui'))
|
||||
or (domains | get_domain(item.key)).endswith(domains | get_domain('web-app-desktop'))
|
||||
fail_msg: >
|
||||
Application {{ item.key }}
|
||||
has domain {{ domains | get_domain(item.key) }}
|
||||
but it does not end with {{ domains | get_domain('web-app-port-ui') }}!
|
||||
but it does not end with {{ domains | get_domain('web-app-desktop') }}!
|
||||
loop: "{{ applications
|
||||
| dict2items
|
||||
| selectattr('key', 'match', '^(web-app-|web-svc-)')
|
@@ -2,4 +2,4 @@
|
||||
- block:
|
||||
- include_tasks: 01_core.yml
|
||||
- include_tasks: utils/run_once.yml
|
||||
when: run_once_web_app_port_ui is not defined
|
||||
when: run_once_web_app_desktop is not defined
|
@@ -17,4 +17,4 @@
|
||||
description: Reload the application
|
||||
icon:
|
||||
class: fa-solid fa-rotate-right
|
||||
url: "{{ WEB_PROTOCOL }}://{{ domains | get_domain('web-app-port-ui') }}"
|
||||
url: "{{ WEB_PROTOCOL }}://{{ domains | get_domain('web-app-desktop') }}"
|
@@ -1,4 +1,4 @@
|
||||
application_id: "web-app-port-ui"
|
||||
application_id: "web-app-desktop"
|
||||
docker_repository_address: "https://github.com/kevinveenbirkenbach/port-ui"
|
||||
config_inventory_path: "{{ inventory_dir }}/files/{{ inventory_hostname }}/docker/web-app-port-ui/config.yaml.j2"
|
||||
config_inventory_path: "{{ inventory_dir }}/files/{{ inventory_hostname }}/docker/web-app-desktop/config.yaml.j2"
|
||||
docker_pull_git_repository: true
|
@@ -7,7 +7,7 @@ docker:
|
||||
features:
|
||||
matomo: true # activate tracking
|
||||
css: true # use custom infinito stile
|
||||
desktop: true # Enable in port-ui
|
||||
desktop: true # Enable in desktop
|
||||
logout: false
|
||||
server:
|
||||
csp:
|
||||
|
@@ -1,6 +1,6 @@
|
||||
# Unit Tests
|
||||
|
||||
This directory contains unit tests for various custom components in the project, such as the custom lookup plugin `docker_cards` used in the `web-app-port-ui` role.
|
||||
This directory contains unit tests for various custom components in the project, such as the custom lookup plugin `docker_cards` used in the `web-app-desktop` role.
|
||||
|
||||
## Overview
|
||||
|
||||
@@ -31,7 +31,7 @@ You can run the tests using one of the following methods:
|
||||
## How It Works
|
||||
|
||||
- **Setup:**
|
||||
The test script creates a temporary directory to simulate your roles folder. It then creates a sample role (`web-app-port-ui`) with a `README.md` file (containing a header for the title) and a `meta/main.yml` file (with the required metadata).
|
||||
The test script creates a temporary directory to simulate your roles folder. It then creates a sample role (`web-app-desktop`) with a `README.md` file (containing a header for the title) and a `meta/main.yml` file (with the required metadata).
|
||||
|
||||
- **Execution:**
|
||||
Dummy variable values for `domains` and `applications` are provided (these are the variables the lookup plugin expects). The lookup plugin is then run, which processes the sample role and returns the card information.
|
||||
|
@@ -22,7 +22,7 @@ class TestApplicationsIfGroupAndDeps(unittest.TestCase):
|
||||
'web-svc-legal': {},
|
||||
'web-svc-file': {},
|
||||
'web-svc-asset': {},
|
||||
'web-app-port-ui': {},
|
||||
'web-app-desktop': {},
|
||||
'util-srv-corporate-identity': {},
|
||||
}
|
||||
|
||||
|
@@ -182,7 +182,7 @@ class TestCspFilters(unittest.TestCase):
|
||||
# Ensure feature enabled and domain set
|
||||
self.apps['app1']['features']['desktop'] = True
|
||||
# simulate a subdomain for the application
|
||||
self.domains['web-app-port-ui'] = ['domain-example.com']
|
||||
self.domains['web-app-desktop'] = ['domain-example.com']
|
||||
|
||||
header = self.filter.build_csp_header(self.apps, 'app1', self.domains, web_protocol='https')
|
||||
# Expect '*.domain-example.com' in the frame-ancestors directive
|
||||
|
@@ -22,27 +22,27 @@ class TestDomainMappings(unittest.TestCase):
|
||||
self.assertEqual(result, [])
|
||||
|
||||
def test_app_without_domains(self):
|
||||
apps = {'web-app-port-ui': {}}
|
||||
apps = {'web-app-desktop': {}}
|
||||
# no domains key → no mappings
|
||||
result = self.filter.domain_mappings(apps, self.primary)
|
||||
self.assertEqual(result, [])
|
||||
|
||||
def test_empty_domains_cfg(self):
|
||||
apps = {'web-app-port-ui': {'domains': {}}}
|
||||
default = 'port-ui.example.com'
|
||||
apps = {'web-app-desktop': {'domains': {}}}
|
||||
default = 'desktop.example.com'
|
||||
expected = []
|
||||
result = self.filter.domain_mappings(apps, self.primary)
|
||||
self.assertEqual(result, expected)
|
||||
|
||||
def test_explicit_aliases(self):
|
||||
apps = {
|
||||
'web-app-port-ui': {
|
||||
'web-app-desktop': {
|
||||
'server':{
|
||||
'domains': {'aliases': ['alias.com']}
|
||||
}
|
||||
}
|
||||
}
|
||||
default = 'port-ui.example.com'
|
||||
default = 'desktop.example.com'
|
||||
expected = [
|
||||
{'source': 'alias.com', 'target': default},
|
||||
]
|
||||
@@ -52,21 +52,21 @@ class TestDomainMappings(unittest.TestCase):
|
||||
|
||||
def test_canonical_not_default(self):
|
||||
apps = {
|
||||
'web-app-port-ui': {
|
||||
'web-app-desktop': {
|
||||
'server':{
|
||||
'domains': {'canonical': ['foo.com']}
|
||||
}
|
||||
}
|
||||
}
|
||||
expected = [
|
||||
{'source': 'port-ui.example.com', 'target': 'foo.com'}
|
||||
{'source': 'desktop.example.com', 'target': 'foo.com'}
|
||||
]
|
||||
result = self.filter.domain_mappings(apps, self.primary)
|
||||
self.assertEqual(result, expected)
|
||||
|
||||
def test_canonical_dict(self):
|
||||
apps = {
|
||||
'web-app-port-ui': {
|
||||
'web-app-desktop': {
|
||||
'server':{
|
||||
'domains': {
|
||||
'canonical': {'one': 'one.com', 'two': 'two.com'}
|
||||
@@ -76,14 +76,14 @@ class TestDomainMappings(unittest.TestCase):
|
||||
}
|
||||
# first canonical key 'one' → one.com
|
||||
expected = [
|
||||
{'source': 'port-ui.example.com', 'target': 'one.com'}
|
||||
{'source': 'desktop.example.com', 'target': 'one.com'}
|
||||
]
|
||||
result = self.filter.domain_mappings(apps, self.primary)
|
||||
self.assertEqual(result, expected)
|
||||
|
||||
def test_multiple_apps(self):
|
||||
apps = {
|
||||
'web-app-port-ui': {
|
||||
'web-app-desktop': {
|
||||
'server':{'domains': {'aliases': ['a1.com']}}
|
||||
},
|
||||
'web-app-mastodon': {
|
||||
@@ -91,7 +91,7 @@ class TestDomainMappings(unittest.TestCase):
|
||||
},
|
||||
}
|
||||
expected = [
|
||||
{'source': 'a1.com', 'target': 'port-ui.example.com'},
|
||||
{'source': 'a1.com', 'target': 'desktop.example.com'},
|
||||
{'source': 'mastodon.example.com', 'target': 'c2.com'},
|
||||
]
|
||||
result = self.filter.domain_mappings(apps, self.primary)
|
||||
@@ -99,21 +99,21 @@ class TestDomainMappings(unittest.TestCase):
|
||||
|
||||
def test_multiple_aliases(self):
|
||||
apps = {
|
||||
'web-app-port-ui': {
|
||||
'web-app-desktop': {
|
||||
'server':{'domains': {'aliases': ['a1.com','a2.com']}
|
||||
}
|
||||
}
|
||||
}
|
||||
expected = [
|
||||
{'source': 'a1.com', 'target': 'port-ui.example.com'},
|
||||
{'source': 'a2.com', 'target': 'port-ui.example.com'}
|
||||
{'source': 'a1.com', 'target': 'desktop.example.com'},
|
||||
{'source': 'a2.com', 'target': 'desktop.example.com'}
|
||||
]
|
||||
result = self.filter.domain_mappings(apps, self.primary)
|
||||
self.assertCountEqual(result, expected)
|
||||
|
||||
def test_invalid_aliases_type(self):
|
||||
apps = {
|
||||
'web-app-port-ui': {'server':{'domains': {'aliases': 123}}}
|
||||
'web-app-desktop': {'server':{'domains': {'aliases': 123}}}
|
||||
}
|
||||
with self.assertRaises(AnsibleFilterError):
|
||||
self.filter.domain_mappings(apps, self.primary)
|
||||
|
@@ -6,8 +6,8 @@ import tempfile
|
||||
import shutil
|
||||
import unittest
|
||||
|
||||
# Adjust the PYTHONPATH to include the lookup_plugins folder from the web-app-port-ui role.
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../../../roles/web-app-port-ui/lookup_plugins'))
|
||||
# Adjust the PYTHONPATH to include the lookup_plugins folder from the web-app-desktop role.
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../../../roles/web-app-desktop/lookup_plugins'))
|
||||
|
||||
from docker_cards import LookupModule
|
||||
|
||||
@@ -17,8 +17,8 @@ class TestDockerCardsLookup(unittest.TestCase):
|
||||
# Create a temporary directory to simulate the roles directory.
|
||||
self.test_roles_dir = tempfile.mkdtemp(prefix="test_roles_")
|
||||
|
||||
# Create a sample role "web-app-port-ui" under that directory.
|
||||
self.role_name = "web-app-port-ui"
|
||||
# Create a sample role "web-app-desktop" under that directory.
|
||||
self.role_name = "web-app-desktop"
|
||||
self.role_dir = os.path.join(self.test_roles_dir, self.role_name)
|
||||
os.makedirs(os.path.join(self.role_dir, "meta"))
|
||||
os.makedirs(os.path.join(self.role_dir, "vars"))
|
||||
|
140
tests/unit/roles/sys-ctl-rpr-docker-hard/files/test_script.py
Normal file
140
tests/unit/roles/sys-ctl-rpr-docker-hard/files/test_script.py
Normal file
@@ -0,0 +1,140 @@
|
||||
import unittest
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from importlib.util import spec_from_file_location, module_from_spec
|
||||
|
||||
|
||||
def load_script_module():
|
||||
"""
|
||||
Import the script under test from roles/sys-ctl-rpr-docker-hard/files/script.py
|
||||
"""
|
||||
test_file = Path(__file__).resolve()
|
||||
repo_root = test_file.parents[5] # .../tests/unit/roles/sys-ctl-rpr-docker-hard/files -> repo root
|
||||
script_path = repo_root / "roles" / "sys-ctl-rpr-docker-hard" / "files" / "script.py"
|
||||
if not script_path.exists():
|
||||
raise FileNotFoundError(f"script.py not found at {script_path}")
|
||||
spec = spec_from_file_location("rpr_hard_script", str(script_path))
|
||||
mod = module_from_spec(spec)
|
||||
assert spec.loader is not None
|
||||
spec.loader.exec_module(mod) # type: ignore[attr-defined]
|
||||
return mod
|
||||
|
||||
|
||||
class TestRepairDockerHard(unittest.TestCase):
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
cls.script = load_script_module()
|
||||
|
||||
def test_detect_env_file_priority(self):
|
||||
s = self.script
|
||||
base = "/proj"
|
||||
old_isfile = s.os.path.isfile
|
||||
try:
|
||||
# only .env
|
||||
s.os.path.isfile = lambda p: p == f"{base}/.env"
|
||||
self.assertEqual(s.detect_env_file(base), f"{base}/.env")
|
||||
|
||||
# only .env/env
|
||||
s.os.path.isfile = lambda p: p == f"{base}/.env/env"
|
||||
self.assertEqual(s.detect_env_file(base), f"{base}/.env/env")
|
||||
|
||||
# both -> prefer .env
|
||||
s.os.path.isfile = lambda p: p in (f"{base}/.env", f"{base}/.env/env")
|
||||
self.assertEqual(s.detect_env_file(base), f"{base}/.env")
|
||||
|
||||
# none
|
||||
s.os.path.isfile = lambda p: False
|
||||
self.assertIsNone(s.detect_env_file(base))
|
||||
finally:
|
||||
s.os.path.isfile = old_isfile
|
||||
|
||||
def test_hard_restart_uses_envfile_and_cwd(self):
|
||||
s = self.script
|
||||
calls = []
|
||||
|
||||
def fake_run(cmd, cwd=None, check=None):
|
||||
calls.append({"cmd": cmd, "cwd": cwd, "check": check})
|
||||
class R: pass
|
||||
return R()
|
||||
|
||||
old_run = s.subprocess.run
|
||||
old_detect = s.detect_env_file
|
||||
try:
|
||||
s.subprocess.run = fake_run
|
||||
s.detect_env_file = lambda d: f"{d}/.env/env" # erzwinge .env/env
|
||||
|
||||
s.hard_restart_docker_services("/X/APP")
|
||||
|
||||
# Wir erwarten zwei Aufrufe: docker-compose --env-file ... down / up -d
|
||||
self.assertEqual(len(calls), 2)
|
||||
self.assertEqual(calls[0]["cwd"], "/X/APP")
|
||||
self.assertEqual(calls[1]["cwd"], "/X/APP")
|
||||
# down
|
||||
self.assertIn("docker-compose", calls[0]["cmd"])
|
||||
self.assertIn("--env-file", calls[0]["cmd"])
|
||||
self.assertIn("/X/APP/.env/env", calls[0]["cmd"])
|
||||
self.assertIn("down", calls[0]["cmd"])
|
||||
# up -d
|
||||
self.assertIn("docker-compose", calls[1]["cmd"])
|
||||
self.assertIn("--env-file", calls[1]["cmd"])
|
||||
self.assertIn("/X/APP/.env/env", calls[1]["cmd"])
|
||||
self.assertIn("up", calls[1]["cmd"])
|
||||
self.assertIn("-d", calls[1]["cmd"])
|
||||
finally:
|
||||
s.subprocess.run = old_run
|
||||
s.detect_env_file = old_detect
|
||||
|
||||
def test_main_scans_parent_and_filters_only(self):
    """main() must scan the parent directory and, with --only, restart just the matching compose project."""
    mod = self.script
    observed = {"scandir": [], "called": []}

    class StubDirEntry:
        # Minimal stand-in for os.DirEntry: just .path and .is_dir().
        def __init__(self, path, is_dir=True):
            self.path = path
            self._dir_flag = is_dir

        def is_dir(self):
            return self._dir_flag

    def stub_scandir(parent):
        observed["scandir"].append(parent)
        return [
            StubDirEntry(f"{parent}/app1"),
            StubDirEntry(f"{parent}/app2"),
            StubDirEntry(f"{parent}/notdir", is_dir=False),
        ]

    def stub_isdir(p):
        return p == "/PARENT"

    def stub_isfile(p):
        # Only app2 ships a docker-compose.yml
        return p in ("/PARENT/app2/docker-compose.yml",)

    def stub_restart(dir_path):
        observed["called"].append(dir_path)

    saved_scandir = mod.os.scandir
    saved_isdir = mod.os.path.isdir
    saved_isfile = mod.os.path.isfile
    saved_restart = mod.hard_restart_docker_services
    try:
        mod.os.scandir = stub_scandir
        mod.os.path.isdir = stub_isdir
        mod.os.path.isfile = stub_isfile
        mod.hard_restart_docker_services = stub_restart

        # With --only app2 -> only app2 gets restarted
        saved_argv = sys.argv
        sys.argv = ["x", "/PARENT", "--only", "app2"]
        mod.main()
        self.assertEqual(observed["called"], ["/PARENT/app2"])
    finally:
        mod.os.scandir = saved_scandir
        mod.os.path.isdir = saved_isdir
        mod.os.path.isfile = saved_isfile
        mod.hard_restart_docker_services = saved_restart
        sys.argv = saved_argv
||||
|
||||
# Allow invoking this test file directly with `python`, outside a test runner.
if __name__ == "__main__":
    unittest.main()
178
tests/unit/roles/sys-ctl-rpr-docker-soft/files/test_script.py
Normal file
178
tests/unit/roles/sys-ctl-rpr-docker-soft/files/test_script.py
Normal file
@@ -0,0 +1,178 @@
|
||||
import unittest
|
||||
import types
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from importlib.util import spec_from_file_location, module_from_spec
|
||||
|
||||
|
||||
def load_script_module():
    """
    Import the script under test from roles/sys-ctl-rpr-docker-soft/files/script.py
    """
    here = Path(__file__).resolve()
    # .../tests/unit/roles/sys-ctl-rpr-docker-soft/files -> repo root
    root = here.parents[5]
    script_path = root / "roles" / "sys-ctl-rpr-docker-soft" / "files" / "script.py"
    if not script_path.exists():
        raise FileNotFoundError(f"script.py not found at {script_path}")
    spec = spec_from_file_location("rpr_soft_script", str(script_path))
    module = module_from_spec(spec)
    assert spec.loader is not None
    spec.loader.exec_module(module)  # type: ignore[attr-defined]
    return module
|
||||
|
||||
class TestRepairDockerSoft(unittest.TestCase):
    """Unit tests for the soft docker-repair script (restart of unhealthy/exited compose projects)."""

    @classmethod
    def setUpClass(cls):
        # Load the script under test once for the whole test class.
        cls.script = load_script_module()

    def test_normalize_services_arg(self):
        mod = self.script
        # List input: blank entries are dropped.
        self.assertEqual(
            mod.normalize_services_arg(["svc-a.service", " ", "svc-b.service"], None),
            ["svc-a.service", "svc-b.service"],
        )
        # Whitespace-separated string input.
        self.assertEqual(
            mod.normalize_services_arg(None, "svc-a.service svc-b.service"),
            ["svc-a.service", "svc-b.service"],
        )
        # Comma-separated string input.
        self.assertEqual(
            mod.normalize_services_arg(None, "svc-a.service, svc-b.service, svc-c.service"),
            ["svc-a.service", "svc-b.service", "svc-c.service"],
        )
        # Empty inputs yield an empty list.
        self.assertEqual(mod.normalize_services_arg([], ""), [])

    def test_detect_env_file_priority(self):
        mod = self.script
        base = "/proj"
        # Monkeypatch os.path.isfile inside the script module.
        saved_isfile = mod.os.path.isfile
        try:
            # Only .env exists
            mod.os.path.isfile = lambda path: path == f"{base}/.env"
            self.assertEqual(mod.detect_env_file(base), f"{base}/.env")

            # Only .env/env exists
            mod.os.path.isfile = lambda path: path == f"{base}/.env/env"
            self.assertEqual(mod.detect_env_file(base), f"{base}/.env/env")

            # Both exist -> prefer .env
            mod.os.path.isfile = lambda path: path in (f"{base}/.env", f"{base}/.env/env")
            self.assertEqual(mod.detect_env_file(base), f"{base}/.env")

            # Neither exists -> None
            mod.os.path.isfile = lambda path: False
            self.assertIsNone(mod.detect_env_file(base))
        finally:
            mod.os.path.isfile = saved_isfile

    def test_wait_while_manipulation_running_respects_timeout(self):
        mod = self.script
        counters = {"checks": 0, "sleeps": 0}
        clock = {"now": 0}

        def stub_run(cmd, shell):
            self.assertIn("systemctl is-active --quiet", cmd)
            counters["checks"] += 1
            # Pretend the manipulation service is always active.
            return types.SimpleNamespace(returncode=0)

        def stub_sleep(_secs):
            counters["sleeps"] += 1

        def stub_time():
            # each call advances time by 610s, so the 1200s timeout trips quickly
            clock["now"] += 610
            return clock["now"]

        saved_run = mod.subprocess.run
        saved_sleep = mod.time.sleep
        saved_time = mod.time.time
        try:
            mod.subprocess.run = stub_run
            mod.time.sleep = stub_sleep
            mod.time.time = stub_time

            mod.wait_while_manipulation_running(["svc-a", "svc-b"], waiting_time=600, timeout=1200)

            # The loop must have slept and polled at least once before timing out.
            self.assertGreaterEqual(counters["sleeps"], 1)
            self.assertGreaterEqual(counters["checks"], 1)
        finally:
            mod.subprocess.run = saved_run
            mod.time.sleep = saved_sleep
            mod.time.time = saved_time

    def test_main_restarts_and_counts_errors_and_envfile_usage(self):
        mod = self.script
        issued = []

        def stub_print_bash(cmd):
            issued.append(cmd)
            if cmd.startswith("docker ps --filter health=unhealthy"):
                return ["app1-web-1", "db-1"]
            if cmd.startswith("docker ps --filter status=exited"):
                return ["app1-worker-1", "other-2"]
            if "docker-compose" in cmd:
                return []
            return []

        def stub_find_compose(path):
            # Compose projects app1 and db exist; "other" does not
            if path.endswith("/app1") or path.endswith("/db"):
                return str(Path(path) / "docker-compose.yml")
            return None

        # Control the detect_env_file answer:
        # - for app1 only .env/env exists
        # - for db .env exists
        def stub_detect_env_file(project_path: str):
            if project_path.endswith("/app1"):
                return f"{project_path}/.env/env"
            if project_path.endswith("/db"):
                return f"{project_path}/.env"
            return None

        saved_print_bash = mod.print_bash
        saved_find = mod.find_docker_compose_file
        saved_detect = mod.detect_env_file
        try:
            mod.print_bash = stub_print_bash
            mod.find_docker_compose_file = stub_find_compose
            mod.detect_env_file = stub_detect_env_file

            errors = mod.main("/BASE", manipulation_services=[], timeout=None)
            # one error expected for "other" (no compose file)
            self.assertEqual(errors, 1)

            restarts = [c for c in issued if ' docker-compose' in c and " restart" in c]
            # app1: --env-file "/BASE/app1/.env/env" + -p "app1"
            self.assertTrue(any(
                'cd "/BASE/app1"' in c
                and '--env-file "/BASE/app1/.env/env"' in c
                and '-p "app1"' in c
                and ' restart' in c
                for c in restarts
            ))
            # db: --env-file "/BASE/db/.env" + -p "db"
            self.assertTrue(any(
                'cd "/BASE/db"' in c
                and '--env-file "/BASE/db/.env"' in c
                and '-p "db"' in c
                and ' restart' in c
                for c in restarts
            ))
        finally:
            mod.print_bash = saved_print_bash
            mod.find_docker_compose_file = saved_find
            mod.detect_env_file = saved_detect
||||
|
||||
|
||||
# Allow invoking this test file directly with `python`, outside a test runner.
if __name__ == "__main__":
    unittest.main()
|
Reference in New Issue
Block a user