10 Commits

Author SHA1 Message Date
68a8128d38 Increase WordPress container resources (cpus=1, mem_reservation=0.5g, mem_limit=1.5g, pids_limit=512)
Ref: Updated based on performance guidance from ChatGPT conversation:
https://chatgpt.com/share/691c8f1f-306c-800f-92b0-1bbe8e2ba5c4
2025-11-18 16:22:33 +01:00
36f9573fdf feat(filters): enforce safe Node.js heap sizing via reusable filter
- Add node_autosize filter (node_max_old_space_size) using get_app_conf
- Raise error when mem_limit < min_mb to prevent OOM-kill misconfigurations
- Wire Whiteboard NODE_OPTIONS and increase mem_limit to 1g; set cpus=1
- Refactor PeerTube to use the same filter; simplify vars
- Add unit tests; keep integration filters usage green

Context: https://chatgpt.com/share/690e0499-6a94-800f-b8ed-2c5124690103
2025-11-07 15:39:54 +01:00
493d5bbbda refactor(web-app-shopware): externalize trusted proxy and host configuration with mounted framework.yaml
- added new file roles/web-app-shopware/files/framework.yaml defining trusted_proxies and trusted_headers for Symfony
 - mounted framework.yaml into /var/www/html/config/packages/ in docker-compose
 - exposed new role vars SHOPWARE_FRAMEWORK_HOST/DOCKER for mounting path
 - rendered framework.yaml via Ansible copy task with proper permissions
 - adjusted env.j2 to set TRUSTED_PROXIES and TRUSTED_HOSTS dynamically from domains and networks
 - added SHOPWARE_DOMAIN var to vars/main.yml
 - removed inline framework.yaml creation from Dockerfile (now managed via mount)
 - updated proxy template (html.conf.j2) to include X-Forwarded-Ssl header
 - improved init.sh permission handling for shared volumes

See ChatGPT conversation for implementation details and rationale:
https://chatgpt.com/share/690d4fe7-2830-800f-8b6d-b868e7fe0e97
2025-11-07 02:48:49 +01:00
2fcbae8fc7 Added z.clarity.ms to mini-qr 2025-11-07 00:18:01 +01:00
02f38d60db Added z.clarity.ms to mini-qr 2025-11-07 00:02:36 +01:00
d66ad37c5d enh(shopware): improve healthchecks and proxy configuration
Removed obsolete EXPOSE/healthcheck from Dockerfile and added robust service-specific healthchecks:

- web: HTTP robots.txt check

- worker/scheduler: php -v runtime check

- opensearch: cluster health API check

Added TRUSTED_PROXIES=* for proxy-aware headers and centralized OPENSEARCH_PORT in vars.

Context: discussed implementation details in ChatGPT conversation on 2025-11-06 — https://chatgpt.com/share/690c9fb3-79f4-800f-bbdf-ea370c8f142c
2025-11-06 14:17:00 +01:00
0c16f9c43c Optimized code 2025-11-05 20:46:33 +01:00
7330aeb8ec feat(web-app-peertube): add dynamic performance tuning for heap and transcoding concurrency
- Dynamically calculate PEERTUBE_MAX_OLD_SPACE_SIZE (~35% of container RAM, clamped between 768–3072 MB)
- Dynamically calculate PEERTUBE_TRANSCODING_CONCURRENCY (~½ vCPUs, min 1, max 8)
- Added default resource limits for Redis and Peertube containers
- Updated test suite to include human_to_bytes filter in built-in filter list

https://chatgpt.com/share/690914d2-6100-800f-a850-94e6d226e7c9
2025-11-03 21:47:38 +01:00
d3aad632c0 Merge branch 'master' of github.com:kevinveenbirkenbach/infinito-nexus 2025-11-03 16:41:13 +01:00
d1bad3d7a6 Added joomla user for install 2025-11-03 11:24:56 +01:00
22 changed files with 342 additions and 46 deletions

View File

@@ -0,0 +1,141 @@
# filter_plugins/node_autosize.py
# Reuse app config to derive sensible Node.js heap sizes for containers.
#
# Usage example (Jinja):
# {{ applications | node_max_old_space_size('web-app-nextcloud', 'whiteboard') }}
#
# Heuristics (defaults):
# - candidate = 35% of mem_limit
# - min = 768 MB (required minimum)
# - cap = min(3072 MB, 60% of mem_limit)
#
# NEW: If mem_limit (container cgroup RAM) is smaller than min_mb, we raise an
# exception — to prevent a misconfiguration where Node's heap could exceed the cgroup
# and be OOM-killed.
from __future__ import annotations
import re
from ansible.errors import AnsibleFilterError
# Import the shared config resolver from module_utils
try:
from module_utils.config_utils import get_app_conf, AppConfigKeyError
except Exception as e:
raise AnsibleFilterError(
f"Failed to import get_app_conf from module_utils.config_utils: {e}"
)
_SIZE_RE = re.compile(r"^\s*(\d+(?:\.\d+)?)\s*([kmgtp]?i?b?)?\s*$", re.IGNORECASE)
_MULT = {
"": 1,
"b": 1,
"k": 10**3, "kb": 10**3,
"m": 10**6, "mb": 10**6,
"g": 10**9, "gb": 10**9,
"t": 10**12, "tb": 10**12,
"p": 10**15, "pb": 10**15,
"kib": 1024,
"mib": 1024**2,
"gib": 1024**3,
"tib": 1024**4,
"pib": 1024**5,
}
def _to_bytes(val):
"""Convert numeric or string memory limits (e.g. '512m', '2GiB') to bytes."""
if val is None or val == "":
return None
if isinstance(val, (int, float)):
return int(val)
if not isinstance(val, str):
raise AnsibleFilterError(f"Unsupported mem_limit type: {type(val).__name__}")
m = _SIZE_RE.match(val)
if not m:
raise AnsibleFilterError(f"Unrecognized mem_limit string: {val!r}")
num = float(m.group(1))
unit = (m.group(2) or "").lower()
if unit not in _MULT:
raise AnsibleFilterError(f"Unknown unit in mem_limit: {unit!r}")
return int(num * _MULT[unit])
def _mb(bytes_val: int) -> int:
"""Return decimal MB (10^6) as integer — Node expects MB units."""
return int(round(bytes_val / 10**6))
def _compute_old_space_mb(
total_mb: int, pct: float, min_mb: int, hardcap_mb: int, safety_cap_pct: float
) -> int:
"""
Compute Node.js old-space heap (MB) with safe minimum and cap handling.
NOTE: The calling function ensures total_mb >= min_mb; here we only
apply the sizing heuristics and caps.
"""
candidate = int(total_mb * float(pct))
safety_cap = int(total_mb * float(safety_cap_pct))
final_cap = min(int(hardcap_mb), safety_cap)
# Enforce minimum first; only apply cap if it's above the minimum
candidate = max(candidate, int(min_mb))
if final_cap >= int(min_mb):
candidate = min(candidate, final_cap)
# Never below a tiny hard floor
return max(candidate, 128)
def node_max_old_space_size(
    applications: dict,
    application_id: str,
    service_name: str,
    pct: float = 0.35,
    min_mb: int = 768,
    hardcap_mb: int = 3072,
    safety_cap_pct: float = 0.60,
) -> int:
    """
    Derive Node.js --max-old-space-size (MB) from the service's mem_limit in app config.

    Looks up docker.services.<service_name>.mem_limit for the given
    application_id, converts it to decimal MB, and applies the heuristics in
    _compute_old_space_mb.

    Raises:
        AnsibleFilterError if mem_limit is missing/invalid OR if mem_limit (MB) < min_mb.
    """
    config_path = f"docker.services.{service_name}.mem_limit"
    try:
        mem_limit = get_app_conf(
            applications=applications,
            application_id=application_id,
            config_path=config_path,
            strict=True,
            default=None,
        )
    except AppConfigKeyError as err:
        raise AnsibleFilterError(str(err))

    # Treat unset/empty values as a hard configuration error.
    if mem_limit in (None, False, ""):
        raise AnsibleFilterError(
            f"mem_limit not set for application '{application_id}', service '{service_name}'"
        )

    total_mb = _mb(_to_bytes(mem_limit))

    # Guardrail: never size a heap that could exceed the cgroup limit and get OOM-killed.
    if total_mb < int(min_mb):
        raise AnsibleFilterError(
            f"mem_limit ({total_mb} MB) is below the required minimum heap ({int(min_mb)} MB) "
            f"for application '{application_id}', service '{service_name}'. "
            f"Increase mem_limit or lower min_mb."
        )

    return _compute_old_space_mb(total_mb, pct, min_mb, hardcap_mb, safety_cap_pct)
class FilterModule(object):
    """Ansible filter plugin entry point exposing the Node heap-sizing helper."""

    def filters(self):
        # Maps the Jinja filter name to its implementation for Ansible's loader.
        return {"node_max_old_space_size": node_max_old_space_size}

View File

@@ -15,6 +15,7 @@ location {{location}}
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Port {{ WEB_PORT }};
proxy_set_header X-Forwarded-Ssl on;
proxy_pass_request_headers on;
{% include 'roles/sys-svc-proxy/templates/headers/content_security_policy.conf.j2' %}

View File

@@ -11,8 +11,8 @@ contact:
description: Send {{ 'us' if service_provider.type == 'legal' else 'me' }} an email
icon:
class: fa-solid fa-envelope
url: mailto:{{service_provider.contact.email}}
identifier: {{service_provider.contact.email}}
url: mailto:{{ service_provider.contact.email }}
identifier: {{ service_provider.contact.email }}
{% endif %}
{% if service_provider.contact.phone is defined %}
@@ -32,6 +32,6 @@ contact:
description: Chat with {{ 'us' if service_provider.type == 'legal' else 'me' }} on Matrix
icon:
class: fa-solid fa-cubes
identifier: "{{service_provider.contact.matrix}}"
identifier: "{{ service_provider.contact.matrix }}"
{% endif %}

View File

@@ -30,6 +30,8 @@
argv:
- docker
- exec
- --user
- "{{ JOOMLA_WEB_USER }}"
- "{{ JOOMLA_CONTAINER }}"
- php
- "{{ JOOMLA_INSTALLER_CLI_FILE }}"

View File

@@ -21,3 +21,4 @@ JOOMLA_USER_NAME: "{{ users.administrator.username }}"
JOOMLA_USER: "{{ JOOMLA_USER_NAME | capitalize }}"
JOOMLA_USER_PASSWORD: "{{ users.administrator.password }}"
JOOMLA_USER_EMAIL: "{{ users.administrator.email }}"
JOOMLA_WEB_USER: "www-data"

View File

@@ -21,9 +21,10 @@ server:
connect-src:
- https://q.clarity.ms
- https://n.clarity.ms
- https://z.clarity.ms
- "data:"
style-src-elem: []
font-src: []
font-src: []
frame-ancestors: []
flags:
style-src-attr:

View File

@@ -93,9 +93,9 @@ docker:
version: "latest"
backup:
no_stop_required: true
cpus: "0.25"
cpus: "1"
mem_reservation: "128m"
mem_limit: "512m"
mem_limit: "1g"
pids_limit: 1024
enabled: "{{ applications | get_app_conf('web-app-nextcloud', 'features.oidc', False, True, True) }}" # Activate OIDC for Nextcloud
# flavor decides which OIDC plugin should be used.

View File

@@ -77,7 +77,8 @@
volumes:
- whiteboard_tmp:/tmp
- whiteboard_fontcache:/var/cache/fontconfig
environment:
- NODE_OPTIONS=--max-old-space-size={{ NEXTCLOUD_WHITEBOARD_MAX_OLD_SPACE_SIZE }}
expose:
- "{{ container_port }}"
shm_size: 1g

View File

@@ -130,6 +130,7 @@ NEXTCLOUD_WHITEBOARD_TMP_VOLUME: "{{ applications | get_app_conf(applic
NEXTCLOUD_WHITEBOARD_FRONTCACHE_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.whiteboard_fontcache') }}"
NEXTCLOUD_WHITEBOARD_SERVICE_DIRECTORY: "{{ [ docker_compose.directories.services, 'whiteboard' ] | path_join }}"
NEXTCLOUD_WHITEBOARD_SERVICE_DOCKERFILE: "{{ [ NEXTCLOUD_WHITEBOARD_SERVICE_DIRECTORY, 'Dockerfile' ] | path_join }}"
NEXTCLOUD_WHITEBOARD_MAX_OLD_SPACE_SIZE: "{{ applications | node_max_old_space_size(application_id, NEXTCLOUD_WHITEBOARD_SERVICE) }}"
### Collabora
NEXTCLOUD_COLLABORA_URL: "{{ domains | get_url('web-svc-collabora', WEB_PROTOCOL) }}"

View File

@@ -30,6 +30,10 @@ docker:
services:
redis:
enabled: true
cpus: "0.5"
mem_reservation: "256m"
mem_limit: "512m"
pids_limit: 512
database:
enabled: true
peertube:
@@ -38,6 +42,10 @@ docker:
image: "chocobozzz/peertube"
backup:
no_stop_required: true
cpus: 4
mem_reservation: "4g"
mem_limit: "8g"
pids_limit: 2048 # ffmpeg spawns threads/processes
volumes:
data: peertube_data
config: peertube_config

View File

@@ -12,6 +12,17 @@
- assets:/app/client/dist
- data:/data
- config:/config
environment:
- NODE_OPTIONS=--max-old-space-size={{ PEERTUBE_MAX_OLD_SPACE_SIZE }}
- PEERTUBE_TRANSCODING_CONCURRENCY={{ PEERTUBE_TRANSCODING_CONCURRENCY }}
shm_size: "512m"
tmpfs:
- /tmp:size=1g,exec
ulimits:
nofile:
soft: 131072
hard: 131072
nproc: 8192
{% include 'roles/docker-container/templates/depends_on/dmbs_excl.yml.j2' %}
{% include 'roles/docker-container/templates/networks.yml.j2' %}
{% include 'roles/docker-container/templates/healthcheck/tcp.yml.j2' %}

View File

@@ -1,17 +1,24 @@
# General
application_id: "web-app-peertube"
database_type: "postgres"
application_id: "web-app-peertube"
database_type: "postgres"
entity_name: "{{ application_id | get_entity_name }}"
# Docker
docker_compose_flush_handlers: true
docker_compose_flush_handlers: true
# Role variables
PEERTUBE_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.peertube.version') }}"
PEERTUBE_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.peertube.image') }}"
PEERTUBE_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.peertube.name') }}"
PEERTUBE_DATA_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.data') }}"
PEERTUBE_CONFIG_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.config') }}"
PEERTUBE_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.peertube.version') }}"
PEERTUBE_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.peertube.image') }}"
PEERTUBE_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.peertube.name') }}"
PEERTUBE_DATA_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.data') }}"
PEERTUBE_CONFIG_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.config') }}"
# OIDC
PEERTUBE_OIDC_PLUGIN: "peertube-plugin-auth-openid-connect"
PEERTUBE_OIDC_ENABLED: "{{ applications | get_app_conf(application_id, 'features.oidc', False) }}"
PEERTUBE_OIDC_PLUGIN: "peertube-plugin-auth-openid-connect"
PEERTUBE_OIDC_ENABLED: "{{ applications | get_app_conf(application_id, 'features.oidc') }}"
# Performance
PEERTUBE_CPUS: "{{ applications | get_app_conf(application_id, 'docker.services.peertube.cpus') | float }}"
PEERTUBE_MAX_OLD_SPACE_SIZE: "{{ applications | node_max_old_space_size(application_id, entity_name) }}"
_peertube_concurrency_candidate: "{{ ((PEERTUBE_CPUS | float) * 0.5) | round(0, 'floor') | int }}"
PEERTUBE_TRANSCODING_CONCURRENCY: "{{ [ ( [ (_peertube_concurrency_candidate | int), 1 ] | max ), 8 ] | min }}"

View File

@@ -0,0 +1,7 @@
framework:
trusted_proxies: '%env(TRUSTED_PROXIES)%'
trusted_headers:
- x-forwarded-for
- x-forwarded-proto
- x-forwarded-host
- x-forwarded-port

View File

@@ -29,8 +29,14 @@ if [ "$(id -u)" -eq 0 ]; then
"$APP_ROOT/var"
log "Fixing permissions on shared volumes..."
chown -R www-data:www-data "$APP_ROOT/public" "$APP_ROOT/var" || true
chmod -R 775 "$APP_ROOT/public" "$APP_ROOT/var" || true
chown -R www-data:www-data \
"$APP_ROOT/public" \
"$APP_ROOT/var" \
"$APP_ROOT/.infinito" || true
chmod -R 775 \
"$APP_ROOT/public" \
"$APP_ROOT/var" \
"$APP_ROOT/.infinito" || true
# Switch to www-data for all subsequent operations
exec su -s /bin/sh www-data "$0" "$@"

View File

@@ -14,6 +14,14 @@
- docker compose up
- docker compose build
- name: "Render framework.yaml (trusted proxies/headers/hosts)"
copy:
src: "framework.yaml"
dest: "{{ SHOPWARE_FRAMEWORK_HOST }}"
mode: "0644"
notify:
- docker compose up
- name: "Flush docker compose handlers"
meta: flush_handlers

View File

@@ -76,17 +76,5 @@ RUN set -eux; \
/var/www/html/public/theme; \
chown -R www-data:www-data /var/www/html
# Add trusted proxies wiring (Symfony reads env TRUSTED_PROXIES)
RUN set -eux; \
mkdir -p /var/www/html/config/packages; \
if [ ! -f /var/www/html/config/packages/framework.yaml ]; then \
printf "framework:\n trusted_proxies: '%%env(TRUSTED_PROXIES)%%'\n" > /var/www/html/config/packages/framework.yaml; \
fi
# Drop back to the app user
USER www-data
# Expose internal port & add a lightweight healthcheck
EXPOSE 8000
HEALTHCHECK --interval=30s --timeout=5s --retries=5 --start-period=20s \
CMD php -r '$s=@fsockopen("127.0.0.1", 8000, $e, $t, 3); if(!$s) exit(1); fclose($s);'

View File

@@ -8,6 +8,7 @@ x-environment: &shopware
- sitemap:/var/www/html/public/sitemap
- "{{ SHOPWARE_INIT_HOST }}:{{ SHOPWARE_INIT_DOCKER }}:ro"
- bundles:/var/www/html/public/bundles
- "{{ SHOPWARE_FRAMEWORK_HOST }}:{{ SHOPWARE_FRAMEWORK_DOCKER }}:ro"
working_dir: {{ SHOPWARE_ROOT }}
{% include 'roles/docker-compose/templates/base.yml.j2' %}
@@ -42,9 +43,13 @@ x-environment: &shopware
depends_on:
init:
condition: service_completed_successfully
{% filter indent(4) %}
{% include 'roles/docker-container/templates/healthcheck/http.yml.j2' %}
{% endfilter %}
healthcheck:
test: ["CMD-SHELL", "wget -q --spider http://127.0.0.1:{{ container_port }}/robots.txt || wget -q --spider http://127.0.0.1:{{ container_port }}/ || exit 1"]
interval: 30s
timeout: 5s
retries: 10
start_period: 120s
{% include 'roles/docker-container/templates/networks.yml.j2' %}
@@ -64,6 +69,12 @@ x-environment: &shopware
# @todo Activate for swarm deploy
# deploy:
# replicas: {{ SHOPWARE_WORKER_REPLICAS }}
healthcheck:
test: ["CMD", "php", "-v"]
interval: 30s
timeout: 5s
retries: 3
start_period: 10s
{% include 'roles/docker-container/templates/networks.yml.j2' %}
@@ -80,6 +91,13 @@ x-environment: &shopware
depends_on:
init:
condition: service_completed_successfully
healthcheck:
test: ["CMD", "php", "-v"]
interval: 30s
timeout: 5s
retries: 3
start_period: 10s
{% include 'roles/docker-container/templates/networks.yml.j2' %}
@@ -99,6 +117,12 @@ x-environment: &shopware
depends_on:
init:
condition: service_completed_successfully
healthcheck:
test: ["CMD-SHELL", "curl -fsSL http://127.0.0.1:{{ SHOPWARE_OPENSEARCH_PORT }}/_cluster/health || exit 1"]
interval: 30s
timeout: 10s
retries: 5
start_period: 60s
{% include 'roles/docker-container/templates/networks.yml.j2' %}
{% endif %}

View File

@@ -1,13 +1,16 @@
# DOMAIN/URL
DOMAIN={{ domains | get_domain(application_id) }}
DOMAIN={{ SHOPWARE_DOMAIN }}
APP_URL="{{ domains | get_url(application_id, WEB_PROTOCOL) }}"
APP_DEBUG="{{ MODE_DEBUG | ternary(1, 0) }}"
# Shopware
APP_ENV={{ 'dev' if (ENVIRONMENT | lower) == 'development' else 'prod' }}
#TRUSTED_PROXIES=127.0.0.1
INSTANCE_ID={{ application_id }}
# Proxy
TRUSTED_PROXIES="{{ networks.internet.values() | select | join(',') }},127.0.0.1,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
TRUSTED_HOSTS="{{ SHOPWARE_DOMAIN }}"
# Database
DATABASE_URL="mysql://{{ database_username }}:{{ database_password }}@{{ database_host }}:{{ database_port }}/{{ database_name }}"
@@ -22,10 +25,10 @@ CACHE_URL="file://cache"
{% if SHOPWARE_OPENSEARCH_ENABLED %}
# Search
ELASTICSEARCH_URL="http://opensearch:9200"
OPENSEARCH_URL="http://opensearch:9200"
ELASTICSEARCH_URL="http://opensearch:{{ SHOPWARE_OPENSEARCH_PORT }}"
OPENSEARCH_URL="http://opensearch:{{ SHOPWARE_OPENSEARCH_PORT }}"
OPENSEARCH_HOST="opensearch"
OPENSEARCH_PORT_NUMBER="9200"
OPENSEARCH_PORT_NUMBER="{{ SHOPWARE_OPENSEARCH_PORT }}"
OPENSEARCH_INITIAL_ADMIN_PASSWORD="{{ users.administrator.password }}"
{% endif %}

View File

@@ -7,6 +7,8 @@ entity_name: "{{ application_id | get_entity_name }}"
container_port: "{{ applications | get_app_conf(application_id, 'docker.services.web.port') }}"
docker_compose_flush_handlers: true
SHOPWARE_DOMAIN: "{{ domains | get_domain(application_id) }}"
# Shopware container/image vars
SHOPWARE_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.shopware.version') }}"
SHOPWARE_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.shopware.image') }}"
@@ -22,22 +24,22 @@ SHOPWARE_WORKER_CONTAINER: "{{ applications | get_app_conf(application_id,
SHOPWARE_SCHED_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.scheduler.name') }}"
SHOPWARE_INIT_HOST: "{{ [ docker_compose.directories.volumes, 'init.sh' ] | path_join }}"
SHOPWARE_INIT_DOCKER: "/usr/local/bin/init.sh"
SHOPWARE_FRAMEWORK_HOST: "{{ [ docker_compose.directories.config, 'framework.yaml' ] | path_join }}"
SHOPWARE_FRAMEWORK_DOCKER: "/var/www/html/config/packages/framework.yaml"
# Entrypoints & replicas
SHOPWARE_WORKER_ENTRYPOINT: "{{ applications | get_app_conf(application_id, 'docker.services.worker.entrypoint') }}"
SHOPWARE_SCHED_ENTRYPOINT: "{{ applications | get_app_conf(application_id, 'docker.services.scheduler.entrypoint') }}"
SHOPWARE_WORKER_REPLICAS: "{{ applications | get_app_conf(application_id, 'docker.services.worker.replicas') }}"
# Search/Cache
# Redis Cache
SHOPWARE_REDIS_ENABLED: "{{ applications | get_app_conf(application_id, 'docker.services.redis.enabled') }}"
SHOPWARE_REDIS_ADDRESS: "redis:6379"
# Opensearch
SHOPWARE_OPENSEARCH_PORT: "9200"
SHOPWARE_OPENSEARCH_ENABLED: "{{ applications | get_app_conf(application_id, 'docker.services.opensearch.enabled') }}"
SHOPWARE_OPENSEARCH_ENGINE: "{{ applications | get_app_conf(application_id, 'docker.services.opensearch.engine') }}"
SHOPWARE_OPENSEARCH_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.opensearch.image') }}"
SHOPWARE_OPENSEARCH_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.opensearch.version') }}"
SHOPWARE_OPENSEARCH_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.opensearch.name') }}"
SHOPWARE_OPENSEARCH_MEM_RESERVATION: "{{ applications | get_app_conf(application_id, 'docker.services.opensearch.mem_reservation') }}"
SHOPWARE_OPENSEARCH_MEM_LIMIT: "{{ applications | get_app_conf(application_id, 'docker.services.opensearch.mem_limit') }}"
# IAM (true if either OIDC or LDAP is enabled)
SHOPWARE_IAM_ENABLED: "{{ applications | get_app_conf(application_id, 'features.oidc') or applications | get_app_conf(application_id, 'features.ldap') }}"

View File

@@ -52,6 +52,10 @@ docker:
name: wordpress
backup:
no_stop_required: true
cpus: 1
mem_reservation: 0.5g
mem_limit: 1.5g
pids_limit: 512
volumes:
data: wordpress_data
rbac:

View File

@@ -37,7 +37,7 @@ BUILTIN_FILTERS: Set[str] = {
"type_debug", "json_query", "mandatory", "hash", "checksum",
"lower", "upper", "capitalize", "unique", "dict2items", "items2dict",
"password_hash", "path_join", "product", "quote", "split", "ternary", "to_nice_yaml",
"tojson", "to_nice_json",
"tojson", "to_nice_json", "human_to_bytes",
# Date/time-ish

View File

@@ -0,0 +1,80 @@
# tests/unit/filter_plugins/test_node_autosize.py
import unittest
from unittest.mock import patch
# Module under test
import filter_plugins.node_autosize as na
try:
from ansible.errors import AnsibleFilterError # type: ignore
except Exception:
AnsibleFilterError = Exception
class TestNodeAutosizeFilter(unittest.TestCase):
    """Unit tests for the node_autosize filter plugin."""

    APP_ID = "web-app-nextcloud"
    SERVICE = "whiteboard"

    def setUp(self):
        # Minimal applications structure shaped like real inventory data.
        self.applications = {self.APP_ID: {"docker": {"services": {self.SERVICE: {}}}}}
        self.application_id = self.APP_ID
        self.service_name = self.SERVICE
        # Patch get_app_conf (imported from module_utils.config_utils) inside the plugin
        # so the tests never need a real app configuration.
        self._patcher = patch("filter_plugins.node_autosize.get_app_conf")
        self.mock_get_app_conf = self._patcher.start()

    def tearDown(self):
        self._patcher.stop()

    def _set_mem_limit(self, value):
        """Helper: make the mocked get_app_conf return *value* as the mem_limit."""
        def fake_get_app_conf(applications, application_id, config_path, strict=True, default=None, **_kwargs):
            assert application_id == self.application_id
            assert config_path == f"docker.services.{self.service_name}.mem_limit"
            return value
        self.mock_get_app_conf.side_effect = fake_get_app_conf

    def _size(self):
        """Helper: invoke the filter with the standard test identifiers."""
        return na.node_max_old_space_size(self.applications, self.application_id, self.service_name)

    # --- Tests for node_max_old_space_size (MB) ---

    def test_512m_below_minimum_raises(self):
        # 512 MB < default min_mb (768) -> the guardrail must raise.
        self._set_mem_limit("512m")
        with self.assertRaises(AnsibleFilterError):
            self._size()

    def test_2g_caps_to_minimum_768(self):
        # 35% of 2g is 700 MB, below the 768 MB minimum -> minimum wins.
        self._set_mem_limit("2g")
        self.assertEqual(self._size(), 768)

    def test_8g_uses_35_percent_without_hitting_hardcap(self):
        # 8g -> 8000 MB * 0.35 = 2800 MB, safely under the 3072 MB hard cap.
        self._set_mem_limit("8g")
        self.assertEqual(self._size(), 2800)

    def test_16g_hits_hardcap_3072(self):
        # 35% of 16g would be 5600 MB; clamped to the 3072 MB hard cap.
        self._set_mem_limit("16g")
        self.assertEqual(self._size(), 3072)

    def test_numeric_bytes_input(self):
        # Raw byte counts are accepted: 2 GiB = 2147483648 bytes ≈ 2147 MB.
        self._set_mem_limit(2147483648)
        # 35% of ~2147 MB is ~751 MB -> the 768 MB minimum wins.
        self.assertEqual(self._size(), 768)

    def test_invalid_unit_raises_error(self):
        self._set_mem_limit("12x")  # "x" is not a recognized size suffix
        with self.assertRaises(AnsibleFilterError):
            self._size()

    def test_missing_mem_limit_raises_error(self):
        self._set_mem_limit(None)
        with self.assertRaises(AnsibleFilterError):
            self._size()
# Allow running this test module directly: `python test_node_autosize.py`
if __name__ == "__main__":
    unittest.main()