Mirror of https://github.com/kevinveenbirkenbach/computer-playbook.git (synced 2025-11-25 22:42:06 +00:00)
Refactor JVM memory filters, add Redis sizing and Docker cleanup service
- Replace jvm_filters with unified memory_filters (JVM + Redis helpers)
- Add redis_maxmemory_mb filter and unit tests
- Introduce sys-ctl-cln-docker role (systemd-based Docker prune + anon volumes)
- Refactor disk space health check to a Python script and wire SIZE_PERCENT_CLEANUP_DISC_SPACE
- Adjust schedules and services for Docker cleanup and disk space health

See discussion: https://chatgpt.com/share/6925c1c5-ee38-800f-84b6-da29ccfa7537
filter_plugins/jvm_filters.py (deleted, 77 lines)
@@ -1,77 +0,0 @@
from __future__ import annotations

import sys, os, re
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from ansible.errors import AnsibleFilterError
from module_utils.config_utils import get_app_conf
from module_utils.entity_name_utils import get_entity_name

_UNIT_RE = re.compile(r'^\s*(\d+(?:\.\d+)?)\s*([kKmMgGtT]?[bB]?)?\s*$')
_FACTORS = {
    '': 1, 'b': 1,
    'k': 1024, 'kb': 1024,
    'm': 1024**2, 'mb': 1024**2,
    'g': 1024**3, 'gb': 1024**3,
    't': 1024**4, 'tb': 1024**4,
}

def _to_bytes(v: str) -> int:
    if v is None:
        raise AnsibleFilterError("jvm_filters: size value is None")
    s = str(v).strip()
    m = _UNIT_RE.match(s)
    if not m:
        raise AnsibleFilterError(f"jvm_filters: invalid size '{v}'")
    num, unit = m.group(1), (m.group(2) or '').lower()
    try:
        val = float(num)
    except ValueError as e:
        raise AnsibleFilterError(f"jvm_filters: invalid numeric size '{v}'") from e
    factor = _FACTORS.get(unit)
    if factor is None:
        raise AnsibleFilterError(f"jvm_filters: unknown unit in '{v}'")
    return int(val * factor)

def _to_mb(v: str) -> int:
    return max(0, _to_bytes(v) // (1024 * 1024))

def _svc(app_id: str) -> str:
    return get_entity_name(app_id)

def _mem_limit_mb(apps: dict, app_id: str) -> int:
    svc = _svc(app_id)
    raw = get_app_conf(apps, app_id, f"docker.services.{svc}.mem_limit")
    mb = _to_mb(raw)
    if mb <= 0:
        raise AnsibleFilterError(f"jvm_filters: mem_limit for '{svc}' must be > 0 MB (got '{raw}')")
    return mb

def _mem_res_mb(apps: dict, app_id: str) -> int:
    svc = _svc(app_id)
    raw = get_app_conf(apps, app_id, f"docker.services.{svc}.mem_reservation")
    mb = _to_mb(raw)
    if mb <= 0:
        raise AnsibleFilterError(f"jvm_filters: mem_reservation for '{svc}' must be > 0 MB (got '{raw}')")
    return mb

def jvm_max_mb(apps: dict, app_id: str) -> int:
    """Xmx = min( floor(0.7*limit), limit-1024, 12288 ) with floor at 1024 MB."""
    limit_mb = _mem_limit_mb(apps, app_id)
    c1 = (limit_mb * 7) // 10
    c2 = max(0, limit_mb - 1024)
    c3 = 12288
    return max(1024, min(c1, c2, c3))

def jvm_min_mb(apps: dict, app_id: str) -> int:
    """Xms = min( floor(Xmx/2), mem_reservation, Xmx ) with floor at 512 MB."""
    xmx = jvm_max_mb(apps, app_id)
    res = _mem_res_mb(apps, app_id)
    return max(512, min(xmx // 2, res, xmx))

class FilterModule(object):
    def filters(self):
        return {
            "jvm_max_mb": jvm_max_mb,
            "jvm_min_mb": jvm_min_mb,
        }
filter_plugins/memory_filters.py (new file, 179 lines)
@@ -0,0 +1,179 @@
from __future__ import annotations

import sys, os, re
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from ansible.errors import AnsibleFilterError
from module_utils.config_utils import get_app_conf
from module_utils.entity_name_utils import get_entity_name

# Regex and unit conversion table
_UNIT_RE = re.compile(r'^\s*(\d+(?:\.\d+)?)\s*([kKmMgGtT]?[bB]?)?\s*$')
_FACTORS = {
    '': 1, 'b': 1,
    'k': 1024, 'kb': 1024,
    'm': 1024**2, 'mb': 1024**2,
    'g': 1024**3, 'gb': 1024**3,
    't': 1024**4, 'tb': 1024**4,
}

# ------------------------------------------------------
# Helpers: unit conversion
# ------------------------------------------------------

def _to_bytes(v: str) -> int:
    """Convert a human-readable size string (e.g., '2g', '512m') to bytes."""
    if v is None:
        raise AnsibleFilterError("memory_filters: size value is None")

    s = str(v).strip()
    m = _UNIT_RE.match(s)
    if not m:
        raise AnsibleFilterError(f"memory_filters: invalid size '{v}'")

    num, unit = m.group(1), (m.group(2) or '').lower()

    try:
        val = float(num)
    except ValueError as e:
        raise AnsibleFilterError(f"memory_filters: invalid numeric size '{v}'") from e

    factor = _FACTORS.get(unit)
    if factor is None:
        raise AnsibleFilterError(f"memory_filters: unknown unit in '{v}'")

    return int(val * factor)


def _to_mb(v: str) -> int:
    """Convert human-readable size to megabytes."""
    return max(0, _to_bytes(v) // (1024 * 1024))


# ------------------------------------------------------
# JVM-specific helpers
# ------------------------------------------------------

def _svc(app_id: str) -> str:
    """Resolve the internal service name for JVM-based applications."""
    return get_entity_name(app_id)


def _mem_limit_mb(apps: dict, app_id: str) -> int:
    """Resolve mem_limit for the JVM service of the given application."""
    svc = _svc(app_id)
    raw = get_app_conf(apps, app_id, f"docker.services.{svc}.mem_limit")
    mb = _to_mb(raw)

    if mb <= 0:
        raise AnsibleFilterError(
            f"memory_filters: mem_limit for '{svc}' must be > 0 MB (got '{raw}')"
        )
    return mb


def _mem_res_mb(apps: dict, app_id: str) -> int:
    """Resolve mem_reservation for the JVM service of the given application."""
    svc = _svc(app_id)
    raw = get_app_conf(apps, app_id, f"docker.services.{svc}.mem_reservation")
    mb = _to_mb(raw)

    if mb <= 0:
        raise AnsibleFilterError(
            f"memory_filters: mem_reservation for '{svc}' must be > 0 MB (got '{raw}')"
        )
    return mb


def jvm_max_mb(apps: dict, app_id: str) -> int:
    """
    Compute recommended JVM Xmx in MB using:

        Xmx = min(
            floor(0.7 * mem_limit),
            mem_limit - 1024,
            12288
        )

    with a lower bound of 1024 MB.
    """
    limit_mb = _mem_limit_mb(apps, app_id)
    c1 = (limit_mb * 7) // 10
    c2 = max(0, limit_mb - 1024)
    c3 = 12288

    return max(1024, min(c1, c2, c3))


def jvm_min_mb(apps: dict, app_id: str) -> int:
    """
    Compute recommended JVM Xms in MB using:

        Xms = min(
            floor(Xmx / 2),
            mem_reservation,
            Xmx
        )

    with a lower bound of 512 MB.
    """
    xmx = jvm_max_mb(apps, app_id)
    res = _mem_res_mb(apps, app_id)

    return max(512, min(xmx // 2, res, xmx))


# ------------------------------------------------------
# Redis-specific helpers (always service name "redis")
# ------------------------------------------------------

def _redis_mem_limit_mb(apps: dict, app_id: str, default_mb: int = 256) -> int:
    """
    Resolve mem_limit for the Redis service of an application.
    Unlike JVM-based services, Redis always uses the service name "redis".

    If no mem_limit is defined, fall back to default_mb.
    """
    raw = get_app_conf(
        apps,
        app_id,
        "docker.services.redis.mem_limit",
        strict=False,
        default=f"{default_mb}m",
    )

    mb = _to_mb(raw)

    if mb <= 0:
        raise AnsibleFilterError(
            f"memory_filters: mem_limit for 'redis' must be > 0 MB (got '{raw}')"
        )

    return mb


def redis_maxmemory_mb(
    apps: dict,
    app_id: str,
    factor: float = 0.8,
    min_mb: int = 64
) -> int:
    """
    Compute recommended Redis `maxmemory` in MB.

    * factor: fraction of allowed memory used for Redis data (default 0.8)
    * min_mb: minimum floor value (default 64 MB)

        maxmemory = max(min_mb, floor(factor * mem_limit))
    """
    limit_mb = _redis_mem_limit_mb(apps, app_id)
    return max(min_mb, int(limit_mb * factor))


# ------------------------------------------------------
# Filter module
# ------------------------------------------------------

class FilterModule(object):
    def filters(self):
        return {
            "jvm_max_mb": jvm_max_mb,
            "jvm_min_mb": jvm_min_mb,
            "redis_maxmemory_mb": redis_maxmemory_mb,
        }
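As a quick sanity check of the sizing rules documented in jvm_max_mb/jvm_min_mb, the standalone sketch below reproduces the same arithmetic for an assumed mem_limit of 8g and mem_reservation of 6g; the helper name is illustrative and not part of the plugin.

# Standalone sketch of the documented JVM sizing rules (illustrative only).
def sketch_jvm_sizing(limit_mb: int, reservation_mb: int) -> tuple[int, int]:
    # Xmx = min(floor(0.7 * limit), limit - 1024, 12288), floored at 1024 MB
    xmx = max(1024, min((limit_mb * 7) // 10, max(0, limit_mb - 1024), 12288))
    # Xms = min(floor(Xmx / 2), reservation, Xmx), floored at 512 MB
    xms = max(512, min(xmx // 2, reservation_mb, xmx))
    return xmx, xms

# mem_limit = 8g (8192 MB), mem_reservation = 6g (6144 MB)
print(sketch_jvm_sizing(8192, 6144))  # -> (5734, 2867), matching the unit tests below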
@@ -9,6 +9,7 @@ SYS_SERVICE_CLEANUP_BACKUPS: "{{ 'sys-ctl-cln-bkps' | get_se
 SYS_SERVICE_CLEANUP_BACKUPS_FAILED: "{{ 'sys-ctl-cln-faild-bkps' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_CLEANUP_ANONYMOUS_VOLUMES: "{{ 'sys-ctl-cln-anon-volumes' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_CLEANUP_DISC_SPACE: "{{ 'sys-ctl-cln-disc-space' | get_service_name(SOFTWARE_NAME) }}"
+SYS_SERVICE_CLEANUP_DOCKER: "{{ 'sys-ctl-cln-docker' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_OPTIMIZE_DRIVE: "{{ 'svc-opt-ssd-hdd' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_BACKUP_RMT_2_LOC: "{{ 'svc-bkp-rmt-2-loc' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_BACKUP_DOCKER_2_LOC: "{{ 'sys-ctl-bkp-docker-2-loc' | get_service_name(SOFTWARE_NAME) }}"
@@ -32,7 +32,8 @@ SYS_SCHEDULE_HEALTH_MSMTP: "*-*-* 00:00:00"
 SYS_SCHEDULE_CLEANUP_CERTS: "*-*-* 20:00"            # Deletes and revokes unused certs once per day
 SYS_SCHEDULE_CLEANUP_FAILED_BACKUPS: "*-*-* 21:00"   # Clean up failed docker backups once per day
 SYS_SCHEDULE_CLEANUP_BACKUPS: "*-*-* 22:00"          # Cleanup backups once per day, MUST be called before disc space cleanup
-SYS_SCHEDULE_CLEANUP_DISC_SPACE: "*-*-* 23:00"       # Cleanup disc space once per day
+SYS_SCHEDULE_CLEANUP_DOCKER: "*-*-* 23:00"           # Cleanup docker anonymous volumes and prune once per day
+SYS_SCHEDULE_CLEANUP_DISC_SPACE: "*-*-* 23:30"       # Cleanup disc space once per day
 
 ### Schedule for repair services
 SYS_SCHEDULE_REPAIR_BTRFS_AUTO_BALANCER: "Sat *-*-01..07 00:00:00"  # Execute btrfs auto balancer every first Saturday of a month
@@ -9,6 +9,16 @@
       driver: journald
     volumes:
       - redis:/data
+    # Keep data in memory only and prevent a huge redis volume
+    command:
+      - redis-server
+      - --appendonly
+      - "no"
+      - --save
+      - ""
+      - --maxmemory {{ applications | redis_maxmemory_mb(application_id, 0.8, RESOURCE_MEM_LIMIT | int ) }}mb
+      - --maxmemory-policy
+      - "allkeys-lru"
     healthcheck:
       test: ["CMD", "redis-cli", "ping"]
       interval: 1s
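To illustrate what the --maxmemory expression above renders to, the sketch below applies the filter's documented formula, max(min_mb, floor(factor * mem_limit)), to an assumed docker.services.redis.mem_limit of 1g; the value passed for min_mb via RESOURCE_MEM_LIMIT is assumed here to be the default of 64.

# Sketch of the documented Redis sizing formula (illustrative, not the plugin itself).
def sketch_redis_maxmemory(limit_mb: int, factor: float = 0.8, min_mb: int = 64) -> int:
    return max(min_mb, int(limit_mb * factor))

# Assumed: mem_limit = "1g" (1024 MB) -> renders as "--maxmemory 819mb"
print(sketch_redis_maxmemory(1024))  # -> 819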
@@ -28,8 +28,8 @@ if [ "$force_freeing" = true ]; then
 {% endif %}
 
 if command -v docker >/dev/null 2>&1 ; then
-  echo "cleaning up docker" &&
-  docker system prune -f || exit 3
+  echo "cleaning up docker (prune + anonymous volumes) via systemd service" &&
+  systemctl start {{ SYS_SERVICE_CLEANUP_DOCKER }} || exit 3
 
 nextcloud_application_container="{{ applications | get_app_conf('web-app-nextcloud', 'docker.services.nextcloud.name') }}"
 if [ -n "$nextcloud_application_container" ] && [ "$(docker ps -a -q -f name=$nextcloud_application_container)" ] ; then
roles/sys-ctl-cln-docker/README.md (new file, 47 lines)
@@ -0,0 +1,47 @@
# Cleanup Docker Resources

## Description

This role performs a complete cleanup of Docker resources by invoking a systemd-managed script.
It removes unused Docker images, stopped containers, networks, build cache, and anonymous volumes.
The cleanup is fully automated and can run on a schedule or be triggered manually.

## Overview

Optimized for maintaining a clean and efficient Docker environment, this role:

* Loads and triggers the anonymous volume cleanup role.
* Installs a systemd service and timer for Docker pruning.
* Deploys a cleanup script that invokes:
  * The anonymous volume cleanup service.
  * `docker system prune -a -f` to remove unused Docker resources.
* Allows forced execution during maintenance runs (`MODE_CLEANUP`).

## Purpose

The primary purpose of this role is to prevent storage bloat caused by unused Docker images, volumes, and build artifacts.
Regular pruning ensures:

* Reduced disk usage
* Improved system performance
* Faster CI/CD and container deployments
* More predictable Docker engine behavior

## Features

* **Anonymous Volume Cleanup:** Integrates with `sys-ctl-cln-anon-volumes` to remove stale volumes.
* **Full Docker Prune:** Executes `docker system prune -a -f` to reclaim space.
* **Systemd Integration:** Registers a systemd unit and timer for automated cleanup.
* **Scheduled Execution:** Runs daily (or as configured) based on `SYS_SCHEDULE_CLEANUP_DOCKER`.
* **Force Execution Mode:** When `MODE_CLEANUP=true`, cleanup is executed immediately.
* **Safe Execution:** Includes validation for missing services and Docker availability.

## Script Behavior

The cleanup script (see the sketch after this list for the same flow in code form):

1. Checks whether the anonymous volume cleanup service is defined and available.
2. Starts the service if present.
3. Runs `docker system prune -a -f` if Docker is installed.
4. Stops execution immediately on errors (`set -e` behavior).
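For readers who prefer code to prose, the flow in the list above boils down to the following Python sketch; the shipped implementation is the POSIX shell template later in this commit, and the anon_volumes_unit parameter here is only illustrative.

# Illustrative sketch of the cleanup flow described above (the real implementation
# is the shell template roles/sys-ctl-cln-docker/templates/script.sh.j2).
import subprocess

def cleanup_docker(anon_volumes_unit: str) -> None:
    # 1) Trigger the anonymous-volume cleanup systemd service.
    subprocess.run(["systemctl", "start", anon_volumes_unit], check=True)
    # 2) Prune unused images, containers, networks and build cache.
    subprocess.run(["docker", "system", "prune", "-a", "-f"], check=True)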
roles/sys-ctl-cln-docker/meta/main.yml (new file, 27 lines)
@@ -0,0 +1,27 @@
---
galaxy_info:
  author: "Kevin Veen-Birkenbach"
  description: >
    Cleans up anonymous Docker volumes and performs a full `docker system prune -a -f`
    via a dedicated systemd service.
  license: "Infinito.Nexus NonCommercial License"
  license_url: "https://s.infinito.nexus/license"
  company: |
    Kevin Veen-Birkenbach
    Consulting & Coaching Solutions
    https://www.veen.world
  min_ansible_version: "2.9"
  platforms:
    - name: Linux
      versions:
        - all
  galaxy_tags:
    - docker
    - cleanup
    - prune
    - automation
    - maintenance
    - system
  repository: "https://s.infinito.nexus/code"
  issue_tracker_url: "https://s.infinito.nexus/issues"
  documentation: "https://docs.infinito.nexus"
roles/sys-ctl-cln-docker/tasks/main.yml (new file, 23 lines)
@@ -0,0 +1,23 @@
- block:
    - name: Load role to delete anonymous volumes
      include_role:
        name: sys-ctl-cln-anon-volumes
      vars:
        system_service_force_flush: true
      when: run_once_sys_ctl_cln_anon_volumes is not defined

    - name: "Register Docker prune system service"
      include_role:
        name: sys-service
      vars:
        system_service_timer_enabled: true
        system_service_on_calendar: "{{ SYS_SCHEDULE_CLEANUP_DOCKER }}"
        system_service_copy_files: true
        system_service_tpl_exec_start: "{{ system_service_script_exec }}"
        system_service_tpl_exec_start_pre: ""
        system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
        system_service_force_linear_sync: false
        system_service_force_flush: "{{ MODE_CLEANUP }}"

    - include_tasks: utils/run_once.yml
  when: run_once_sys_ctl_cln_docker is not defined
roles/sys-ctl-cln-docker/templates/script.sh.j2 (new file, 10 lines)
@@ -0,0 +1,10 @@
#!/bin/sh
# Cleans up anonymous Docker volumes and performs a full Docker system prune.

set -e

echo "Cleaning up anonymous Docker volumes via systemd service..."
systemctl start {{ SYS_SERVICE_CLEANUP_ANONYMOUS_VOLUMES }} || exit 1
echo "Pruning Docker system resources (images, containers, networks, build cache)..."
docker system prune -a -f || exit 2
echo "Docker prune cleanup finished."
roles/sys-ctl-cln-docker/vars/main.yml (new file, 1 line)
@@ -0,0 +1 @@
system_service_id: "sys-ctl-cln-docker"
roles/sys-ctl-hlth-disc-space/files/script.py (new file, 58 lines)
@@ -0,0 +1,58 @@
#!/usr/bin/env python3
import argparse
import subprocess
import sys

def get_disk_usage_percentages():
    """
    Returns a list of filesystem usage percentages as integers.
    Equivalent to: df --output=pcent | sed 1d | tr -d '%'
    """
    result = subprocess.run(
        ["df", "--output=pcent"],
        capture_output=True,
        text=True,
        check=True
    )

    lines = result.stdout.strip().split("\n")[1:]  # Skip header
    percentages = []

    for line in lines:
        value = line.strip().replace("%", "")
        if value.isdigit():
            percentages.append(int(value))

    return percentages


def main():
    parser = argparse.ArgumentParser(
        description="Check disk usage and report if any filesystem exceeds the given threshold."
    )

    parser.add_argument(
        "minimum_percent_cleanup_disk_space",
        type=int,
        help="Disk usage percentage threshold; any filesystem above this value triggers a warning."
    )

    args = parser.parse_args()
    threshold = args.minimum_percent_cleanup_disk_space

    print("Checking disk space usage...")
    subprocess.run(["df"])  # Show the same df output as the original script

    errors = 0
    percentages = get_disk_usage_percentages()

    for usage in percentages:
        if usage > threshold:
            print(f"WARNING: {usage}% exceeds the limit of {threshold}%.")
            errors += 1

    sys.exit(errors)


if __name__ == "__main__":
    main()
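The exit-code contract is worth spelling out: the script exits with the number of filesystems whose usage exceeds the threshold, so any non-zero status marks the systemd health check as failed and lets its OnFailure hook (wired below to the disc space cleanup service) fire. A tiny sketch with made-up usage values:

# Sketch of the exit-code contract: one error per filesystem above the threshold.
threshold = 80
usages = [10, 60, 90, 95]  # hypothetical values from df --output=pcent
errors = sum(1 for u in usages if u > threshold)
print(errors)  # -> 2, which main() would pass to sys.exit()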
@@ -1,15 +0,0 @@
#!/bin/sh
# @param $1 mimimum free disc space
errors=0
minimum_percent_cleanup_disc_space="$1"
echo "checking disc space use..."
df
for disc_use_percent in $(df --output=pcent | sed 1d)
do
  disc_use_percent_number=$(echo "$disc_use_percent" | sed "s/%//")
  if [ "$disc_use_percent_number" -gt "$minimum_percent_cleanup_disc_space" ]; then
    echo "WARNING: $disc_use_percent_number exceeds the limit of $minimum_percent_cleanup_disc_space%."
    errors+=1;
  fi
done
exit $errors;
@@ -8,4 +8,5 @@
   vars:
     system_service_on_calendar: "{{ SYS_SCHEDULE_HEALTH_DISC_SPACE }}"
     system_service_timer_enabled: true
+    system_service_tpl_exec_start: "{{ system_service_script_exec }} {{ SIZE_PERCENT_CLEANUP_DISC_SPACE }}"
     system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }} {{ SYS_SERVICE_CLEANUP_DISC_SPACE }}"
@@ -16,14 +16,11 @@
   include_tasks: "02_reset.yml"
   when: MODE_RESET | bool
 
-- name: "Load cleanup tasks when MODE_CLEANUP or MODE_RESET is enabled"
-  include_tasks: "03_cleanup.yml"
-  when: MODE_CLEANUP | bool or MODE_RESET | bool
-
 - name: Include backup, repair and health services for docker
   include_role:
     name: "{{ item }}"
   loop:
+    - sys-ctl-cln-docker
     - sys-ctl-bkp-docker-2-loc
     - sys-ctl-hlth-docker-container
     - sys-ctl-hlth-docker-volumes
@@ -1,12 +0,0 @@
- block:
    - name: Load role to delete anonymous volumes
      include_role:
        name: sys-ctl-cln-anon-volumes
      vars:
        system_service_force_flush: true
    - include_tasks: utils/run_once.yml
  when: run_once_sys_ctl_cln_anon_volumes is not defined

- name: Prune Docker resources
  become: true
  ansible.builtin.command: docker system prune -f
@@ -1,123 +0,0 @@
import unittest
from unittest.mock import patch

# Import the filter module
# Path relative to the project; adjust the import path if necessary
import importlib
jvm_filters = importlib.import_module("filter_plugins.jvm_filters")


class TestJvmFilters(unittest.TestCase):
    def setUp(self):
        # Dummy applications dict – content does not matter because get_app_conf is mocked
        self.apps = {"whatever": True}
        self.app_id = "web-app-confluence"  # entity_name will be mocked

    # -----------------------------
    # Helpers
    # -----------------------------
    def _with_conf(self, mem_limit: str, mem_res: str):
        """
        Patch get_app_conf/get_entity_name accordingly.
        """
        patches = [
            patch("filter_plugins.jvm_filters.get_entity_name", return_value="confluence"),
            patch(
                "filter_plugins.jvm_filters.get_app_conf",
                side_effect=lambda apps, app_id, key, required=True: (
                    mem_limit if key.endswith(".mem_limit")
                    else mem_res if key.endswith(".mem_reservation")
                    else None
                ),
            ),
        ]
        ctxs = [p.start() for p in patches]
        self.addCleanup(lambda: [p.stop() for p in patches])
        return ctxs

    # -----------------------------
    # Tests: jvm_max_mb / jvm_min_mb sizing
    # -----------------------------
    def test_sizing_8g_limit_6g_reservation(self):
        # mem_limit=8g → candidates: 70% via int math (8*1024)*7//10 = 8192*7//10 = 5734
        # limit-1024 = 8192-1024 = 7168
        # 12288
        # → Xmx = min(5734, 7168, 12288) = 5734 → floor at 1024 keeps 5734
        # Xms = min(Xmx//2=2867, res=6144, Xmx=5734) = 2867 (>=512)
        self._with_conf("8g", "6g")
        xmx = jvm_filters.jvm_max_mb(self.apps, self.app_id)
        xms = jvm_filters.jvm_min_mb(self.apps, self.app_id)
        self.assertEqual(xmx, 5734)
        self.assertEqual(xms, 2867)

    def test_sizing_6g_limit_4g_reservation(self):
        # limit=6g → 70%: (6144*7)//10 = 4300, limit-1024=5120, 12288 → Xmx=4300
        # Xms=min(4300//2=2150, 4096, 4300)=2150
        self._with_conf("6g", "4g")
        xmx = jvm_filters.jvm_max_mb(self.apps, self.app_id)
        xms = jvm_filters.jvm_min_mb(self.apps, self.app_id)
        self.assertEqual(xmx, 4300)
        self.assertEqual(xms, 2150)

    def test_sizing_16g_limit_12g_reservation_cap_12288(self):
        # limit=16g → 70%: (16384*7)//10 = 11468, limit-1024=15360, cap=12288 → Xmx=min(11468,15360,12288)=11468
        # Xms=min(11468//2=5734, 12288 (12g), 11468) = 5734
        self._with_conf("16g", "12g")
        xmx = jvm_filters.jvm_max_mb(self.apps, self.app_id)
        xms = jvm_filters.jvm_min_mb(self.apps, self.app_id)
        self.assertEqual(xmx, 11468)
        self.assertEqual(xms, 5734)

    def test_floor_small_limit_results_in_min_1024(self):
        # limit=1g → 70%: 716, limit-1024=0, 12288 → min=0 → floor → 1024
        self._with_conf("1g", "512m")
        xmx = jvm_filters.jvm_max_mb(self.apps, self.app_id)
        self.assertEqual(xmx, 1024)

    def test_floor_small_reservation_results_in_min_512(self):
        # limit is large enough, but the reservation is very small → Xms floored to 512
        self._with_conf("4g", "128m")
        xms = jvm_filters.jvm_min_mb(self.apps, self.app_id)
        self.assertEqual(xms, 512)

    # -----------------------------
    # Tests: failure cases / validation
    # -----------------------------
    def test_invalid_unit_raises(self):
        with patch("filter_plugins.jvm_filters.get_entity_name", return_value="confluence"), \
             patch("filter_plugins.jvm_filters.get_app_conf", side_effect=lambda apps, app_id, key, required=True:
                   "8Q" if key.endswith(".mem_limit") else "4g"):
            with self.assertRaises(jvm_filters.AnsibleFilterError):
                jvm_filters.jvm_max_mb(self.apps, self.app_id)

    def test_zero_limit_raises(self):
        with patch("filter_plugins.jvm_filters.get_entity_name", return_value="confluence"), \
             patch("filter_plugins.jvm_filters.get_app_conf", side_effect=lambda apps, app_id, key, required=True:
                   "0" if key.endswith(".mem_limit") else "4g"):
            with self.assertRaises(jvm_filters.AnsibleFilterError):
                jvm_filters.jvm_max_mb(self.apps, self.app_id)

    def test_zero_reservation_raises(self):
        with patch("filter_plugins.jvm_filters.get_entity_name", return_value="confluence"), \
             patch("filter_plugins.jvm_filters.get_app_conf", side_effect=lambda apps, app_id, key, required=True:
                   "8g" if key.endswith(".mem_limit") else "0"):
            with self.assertRaises(jvm_filters.AnsibleFilterError):
                jvm_filters.jvm_min_mb(self.apps, self.app_id)

    def test_entity_name_is_derived_not_passed(self):
        # Ensure that get_entity_name() is called and no external parameter is needed
        with patch("filter_plugins.jvm_filters.get_entity_name", return_value="confluence") as mock_entity, \
             patch("filter_plugins.jvm_filters.get_app_conf", side_effect=lambda apps, app_id, key, required=True:
                   "8g" if key.endswith(".mem_limit") else "6g"):
            xmx = jvm_filters.jvm_max_mb(self.apps, self.app_id)
            xms = jvm_filters.jvm_min_mb(self.apps, self.app_id)
            self.assertGreater(xmx, 0)
            self.assertGreater(xms, 0)
            self.assertEqual(mock_entity.call_count, 3)
            for call in mock_entity.call_args_list:
                self.assertEqual(call.args[0], self.app_id)


if __name__ == "__main__":
    unittest.main()
tests/unit/filter_plugins/test_memory_filters.py (new file, 225 lines)
@@ -0,0 +1,225 @@
import unittest
from unittest.mock import patch

import importlib
memory_filters = importlib.import_module("filter_plugins.memory_filters")


class TestMemoryFilters(unittest.TestCase):
    def setUp(self):
        # Dummy applications dict – content does not matter because get_app_conf is mocked
        self.apps = {"whatever": True}
        self.app_id = "web-app-confluence"  # entity_name will be mocked

    # -----------------------------
    # Helpers
    # -----------------------------
    def _with_conf(self, mem_limit: str, mem_res: str):
        """
        Patch get_app_conf/get_entity_name so that mem_limit and mem_reservation
        can be controlled in tests.
        """
        patches = [
            patch("filter_plugins.memory_filters.get_entity_name", return_value="confluence"),
            patch(
                "filter_plugins.memory_filters.get_app_conf",
                side_effect=lambda apps, app_id, key, required=True, **kwargs: (
                    mem_limit if key.endswith(".mem_limit")
                    else mem_res if key.endswith(".mem_reservation")
                    else None
                ),
            ),
        ]
        mocks = [p.start() for p in patches]
        self.addCleanup(lambda: [p.stop() for p in patches])
        return mocks

    # -----------------------------
    # Tests: jvm_max_mb / jvm_min_mb sizing
    # -----------------------------
    def test_sizing_8g_limit_6g_reservation(self):
        # mem_limit = 8g
        # candidates:
        #   70%:          (8 * 1024) * 7 // 10 = 5734
        #   limit - 1024: 8192 - 1024 = 7168
        #   cap:          12288
        # -> Xmx = 5734
        # Xms = min(5734 // 2 = 2867, 6144, 5734) = 2867
        self._with_conf("8g", "6g")
        xmx = memory_filters.jvm_max_mb(self.apps, self.app_id)
        xms = memory_filters.jvm_min_mb(self.apps, self.app_id)
        self.assertEqual(xmx, 5734)
        self.assertEqual(xms, 2867)

    def test_sizing_6g_limit_4g_reservation(self):
        # mem_limit = 6g
        #   70%:          (6144 * 7) // 10 = 4300
        #   limit - 1024: 6144 - 1024 = 5120
        #   cap:          12288
        # -> Xmx = 4300
        # Xms = min(4300 // 2 = 2150, 4096, 4300) = 2150
        self._with_conf("6g", "4g")
        xmx = memory_filters.jvm_max_mb(self.apps, self.app_id)
        xms = memory_filters.jvm_min_mb(self.apps, self.app_id)
        self.assertEqual(xmx, 4300)
        self.assertEqual(xms, 2150)

    def test_sizing_16g_limit_12g_reservation_cap_12288(self):
        # mem_limit = 16g
        #   70%:          (16384 * 7) // 10 = 11468
        #   limit - 1024: 16384 - 1024 = 15360
        #   cap:          12288
        # -> Xmx = 11468
        # Xms = min(11468 // 2 = 5734, 12288, 11468) = 5734
        self._with_conf("16g", "12g")
        xmx = memory_filters.jvm_max_mb(self.apps, self.app_id)
        xms = memory_filters.jvm_min_mb(self.apps, self.app_id)
        self.assertEqual(xmx, 11468)
        self.assertEqual(xms, 5734)

    def test_floor_small_limit_results_in_min_1024(self):
        # mem_limit = 1g
        #   70%: ~716 MB, limit - 1024 = 0, cap: 12288
        # -> min candidates = 0 => floored to 1024 MB
        self._with_conf("1g", "512m")
        xmx = memory_filters.jvm_max_mb(self.apps, self.app_id)
        self.assertEqual(xmx, 1024)

    def test_floor_small_reservation_results_in_min_512(self):
        # mem_limit is large enough, but reservation is tiny -> floored to 512
        self._with_conf("4g", "128m")
        xms = memory_filters.jvm_min_mb(self.apps, self.app_id)
        self.assertEqual(xms, 512)

    # -----------------------------
    # Tests: JVM failure cases / validation
    # -----------------------------
    def test_invalid_unit_raises(self):
        with patch("filter_plugins.memory_filters.get_entity_name", return_value="confluence"), \
             patch("filter_plugins.memory_filters.get_app_conf",
                   side_effect=lambda apps, app_id, key, required=True, **kwargs:
                   "8Q" if key.endswith(".mem_limit") else "4g"):
            with self.assertRaises(memory_filters.AnsibleFilterError):
                memory_filters.jvm_max_mb(self.apps, self.app_id)

    def test_zero_limit_raises(self):
        with patch("filter_plugins.memory_filters.get_entity_name", return_value="confluence"), \
             patch("filter_plugins.memory_filters.get_app_conf",
                   side_effect=lambda apps, app_id, key, required=True, **kwargs:
                   "0" if key.endswith(".mem_limit") else "4g"):
            with self.assertRaises(memory_filters.AnsibleFilterError):
                memory_filters.jvm_max_mb(self.apps, self.app_id)

    def test_zero_reservation_raises(self):
        with patch("filter_plugins.memory_filters.get_entity_name", return_value="confluence"), \
             patch("filter_plugins.memory_filters.get_app_conf",
                   side_effect=lambda apps, app_id, key, required=True, **kwargs:
                   "8g" if key.endswith(".mem_limit") else "0"):
            with self.assertRaises(memory_filters.AnsibleFilterError):
                memory_filters.jvm_min_mb(self.apps, self.app_id)

    def test_entity_name_is_derived_not_passed(self):
        """
        Ensure get_entity_name() is called internally and the app_id is not
        passed around manually from the template.
        """
        with patch("filter_plugins.memory_filters.get_entity_name", return_value="confluence") as mock_entity, \
             patch("filter_plugins.memory_filters.get_app_conf",
                   side_effect=lambda apps, app_id, key, required=True, **kwargs:
                   "8g" if key.endswith(".mem_limit") else "6g"):
            xmx = memory_filters.jvm_max_mb(self.apps, self.app_id)
            xms = memory_filters.jvm_min_mb(self.apps, self.app_id)
            self.assertGreater(xmx, 0)
            self.assertGreater(xms, 0)
            self.assertEqual(mock_entity.call_count, 3)
            for call in mock_entity.call_args_list:
                self.assertEqual(call.args[0], self.app_id)

    # -----------------------------
    # Tests: redis_maxmemory_mb
    # -----------------------------
    def test_redis_maxmemory_default_factor_uses_80_percent_of_limit(self):
        # mem_limit = 1g → 1024 MB
        # factor = 0.8 → int(1024 * 0.8) = 819
        self._with_conf("1g", "512m")
        maxmem = memory_filters.redis_maxmemory_mb(self.apps, self.app_id)
        self.assertEqual(maxmem, 819)

    def test_redis_maxmemory_custom_factor_and_min_mb(self):
        # mem_limit = 1g → 1024 MB
        # factor = 0.5 → 512 MB
        # min_mb = 128 → result stays 512
        self._with_conf("1g", "512m")
        maxmem = memory_filters.redis_maxmemory_mb(
            self.apps,
            self.app_id,
            factor=0.5,
            min_mb=128,
        )
        self.assertEqual(maxmem, 512)

    def test_redis_maxmemory_honors_minimum_floor(self):
        # mem_limit = 32m → 32 MB
        # factor = 0.8 → int(32 * 0.8) = 25 < min_mb(64)
        # → result = 64
        self._with_conf("32m", "16m")
        maxmem = memory_filters.redis_maxmemory_mb(self.apps, self.app_id)
        self.assertEqual(maxmem, 64)

    def test_redis_maxmemory_zero_limit_raises(self):
        # mem_limit = 0 → must raise AnsibleFilterError
        self._with_conf("0", "512m")
        with self.assertRaises(memory_filters.AnsibleFilterError):
            memory_filters.redis_maxmemory_mb(self.apps, self.app_id)

    def test_redis_maxmemory_invalid_unit_raises(self):
        # mem_limit = "8Q" → invalid unit → must raise
        self._with_conf("8Q", "512m")
        with self.assertRaises(memory_filters.AnsibleFilterError):
            memory_filters.redis_maxmemory_mb(self.apps, self.app_id)

    def test_redis_maxmemory_does_not_call_get_entity_name(self):
        """
        Ensure redis_maxmemory_mb does NOT rely on entity name resolution
        (it should always use the hard-coded 'redis' service name).
        """
        patches = [
            patch("filter_plugins.memory_filters.get_entity_name"),
            patch(
                "filter_plugins.memory_filters.get_app_conf",
                side_effect=lambda apps, app_id, key, required=True, **kwargs: (
                    "4g" if key.endswith(".mem_limit") else "2g"
                ),
            ),
        ]
        mocks = [p.start() for p in patches]
        self.addCleanup(lambda: [p.stop() for p in patches])

        entity_mock = mocks[0]

        maxmem = memory_filters.redis_maxmemory_mb(self.apps, self.app_id)
        # 4g → 4096 MB, factor 0.8 → 3276
        self.assertEqual(maxmem, 3276)
        entity_mock.assert_not_called()

    def test_redis_maxmemory_uses_default_when_mem_limit_missing(self):
        """
        When docker.services.redis.mem_limit is not configured, the filter
        should fall back to its internal default (256m).
        """
        def fake_get_app_conf(apps, app_id, key, required=True, **kwargs):
            # Simulate missing mem_limit: return the provided default
            if key.endswith(".mem_limit"):
                return kwargs.get("default")
            return None

        with patch("filter_plugins.memory_filters.get_app_conf", side_effect=fake_get_app_conf), \
             patch("filter_plugins.memory_filters.get_entity_name", return_value="confluence"):
            maxmem = memory_filters.redis_maxmemory_mb(self.apps, self.app_id)

        # default_mb = 256 → factor 0.8 → floor(256 * 0.8) = 204
        self.assertEqual(maxmem, 204)


if __name__ == "__main__":
    unittest.main()
tests/unit/roles/sys-ctl-hlth-disc-space/files/script.py (new file, 123 lines)
@@ -0,0 +1,123 @@
#!/usr/bin/env python3
import io
import os
import sys
import unittest
import pathlib
import contextlib
import importlib.util
from types import SimpleNamespace
from unittest import mock


def load_target_module():
    """
    Load the target script (roles/sys-ctl-hlth-disc-space/files/script.py)
    via its file path so that dashes in the directory name are not an issue.
    """
    # tests/unit/roles/sys-ctl-hlth-disc-space/files/script.py
    test_file_path = pathlib.Path(__file__).resolve()
    repo_root = test_file_path.parents[4]  # go up: files -> ... -> unit -> tests -> <root>

    script_path = repo_root / "roles" / "sys-ctl-hlth-disc-space" / "files" / "script.py"
    if not script_path.is_file():
        raise FileNotFoundError(f"Target script not found at: {script_path}")

    spec = importlib.util.spec_from_file_location("disk_space_script", script_path)
    module = importlib.util.module_from_spec(spec)
    assert spec.loader is not None
    spec.loader.exec_module(module)
    return module


# Load the module once for all tests
SCRIPT_MODULE = load_target_module()


class TestDiskSpaceScript(unittest.TestCase):
    def test_get_disk_usage_percentages_parses_output(self):
        """
        Ensure get_disk_usage_percentages parses 'df --output=pcent' correctly
        and returns integer percentages without the '%' sign.
        """
        # Fake df output, including header line and various spacings
        fake_df_output = "Use%\n 10%\n 50%\n100%\n"

        with mock.patch.object(
            SCRIPT_MODULE.subprocess,
            "run",
            return_value=SimpleNamespace(stdout=fake_df_output, returncode=0),
        ):
            result = SCRIPT_MODULE.get_disk_usage_percentages()

        self.assertEqual(result, [10, 50, 100])

    def test_main_exits_zero_when_below_threshold(self):
        """
        If all filesystems are below or equal to the threshold,
        main() should exit with status code 0.
        """
        # First call: 'df' (printing only) -> we don't care about stdout here
        df_print_cp = SimpleNamespace(stdout="Filesystem ...\n", returncode=0)
        # Second call: 'df --output=pcent'
        df_pcent_cp = SimpleNamespace(stdout="Use%\n 10%\n 50%\n 80%\n", returncode=0)

        def fake_run(args, capture_output=False, text=False, check=False):
            # Decide which fake result to return based on the arguments
            if args == ["df", "--output=pcent"]:
                return df_pcent_cp
            elif args == ["df"]:
                return df_print_cp
            else:
                raise AssertionError(f"Unexpected subprocess.run args: {args}")

        with mock.patch.object(SCRIPT_MODULE.subprocess, "run", side_effect=fake_run):
            with mock.patch.object(sys, "argv", ["script.py", "80"]):
                with mock.patch.object(SCRIPT_MODULE.sys, "exit", side_effect=SystemExit) as mock_exit:
                    # Capture stdout to avoid clutter in test output
                    with contextlib.redirect_stdout(io.StringIO()):
                        with self.assertRaises(SystemExit):
                            SCRIPT_MODULE.main()

        # Expect no filesystem above 80% -> exit code 0
        mock_exit.assert_called_once_with(0)

    def test_main_exits_with_error_count_and_prints_warnings(self):
        """
        If some filesystems exceed the threshold, main() should:
        - Print a warning for each filesystem that exceeds it
        - Exit with a status code equal to the number of such filesystems
        """
        df_print_cp = SimpleNamespace(stdout="Filesystem ...\n", returncode=0)
        # Two filesystems above threshold (90%, 95%), one below (60%)
        df_pcent_cp = SimpleNamespace(stdout="Use%\n 60%\n 90%\n 95%\n", returncode=0)

        def fake_run(args, capture_output=False, text=False, check=False):
            if args == ["df", "--output=pcent"]:
                return df_pcent_cp
            elif args == ["df"]:
                return df_print_cp
            else:
                raise AssertionError(f"Unexpected subprocess.run args: {args}")

        with mock.patch.object(SCRIPT_MODULE.subprocess, "run", side_effect=fake_run):
            with mock.patch.object(sys, "argv", ["script.py", "80"]):
                with mock.patch.object(SCRIPT_MODULE.sys, "exit", side_effect=SystemExit) as mock_exit:
                    buffer = io.StringIO()
                    with contextlib.redirect_stdout(buffer):
                        with self.assertRaises(SystemExit):
                            SCRIPT_MODULE.main()

        # Expect exit code 2 (two filesystems over 80%)
        mock_exit.assert_called_once_with(2)

        output = buffer.getvalue()
        self.assertIn("Checking disk space usage...", output)
        self.assertIn("WARNING: 90% exceeds the limit of 80%.", output)
        self.assertIn("WARNING: 95% exceeds the limit of 80%.", output)
        # Ensure the "below threshold" value does not produce a warning
        self.assertNotIn("60% exceeds the limit of 80%.", output)


if __name__ == "__main__":
    unittest.main()
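One possible way to run the new filter tests locally (not part of this commit) is to load the test module by file path, mirroring the load_target_module() pattern above, from the repository root so that filter_plugins and module_utils resolve.

# Suggested local test run (illustrative): execute from the repository root.
import importlib.util
import os
import sys
import unittest

sys.path.insert(0, os.getcwd())  # make filter_plugins / module_utils importable

spec = importlib.util.spec_from_file_location(
    "test_memory_filters", "tests/unit/filter_plugins/test_memory_filters.py"
)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)

unittest.TextTestRunner(verbosity=2).run(
    unittest.defaultTestLoader.loadTestsFromModule(mod)
)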