Mirror of https://github.com/kevinveenbirkenbach/computer-playbook.git (synced 2025-11-09 22:56:29 +00:00)

Compare commits: d25da76117 ... feature/xw (308 commits)
.github/workflows/test-cli.yml (vendored, 6 changed lines)

@@ -21,12 +21,12 @@ jobs:
       - name: Clean build artifacts
         run: |
-          docker run --rm infinito:latest make clean
+          docker run --rm infinito:latest infinito make clean

       - name: Generate project outputs
         run: |
-          docker run --rm infinito:latest make build
+          docker run --rm infinito:latest infinito make build

       - name: Run tests
         run: |
-          docker run --rm infinito:latest make test
+          docker run --rm infinito:latest infinito make test

@@ -59,11 +59,4 @@ RUN INFINITO_PATH=$(pkgmgr path infinito) && \
     ln -sf "$INFINITO_PATH"/main.py /usr/local/bin/infinito && \
     chmod +x /usr/local/bin/infinito

-# 10) Run integration tests
-# This needed to be deactivated becaus it doesn't work with gitthub workflow
-#RUN INFINITO_PATH=$(pkgmgr path infinito) && \
-#    cd "$INFINITO_PATH" && \
-#    make test
-
-ENTRYPOINT ["infinito"]
-CMD ["--help"]
+CMD sh -c "infinito --help && exec tail -f /dev/null"
Makefile (2 changed lines)

@@ -73,7 +73,7 @@ messy-test:
 	@echo "🧪 Running Python tests…"
 	PYTHONPATH=. python -m unittest discover -s tests
 	@echo "📑 Checking Ansible syntax…"
-	ansible-playbook playbook.yml --syntax-check
+	ansible-playbook -i localhost, -c local $(foreach f,$(wildcard group_vars/all/*.yml),-e @$(f)) playbook.yml --syntax-check

 install: build
 	@echo "⚙️ Install complete."
ansible.cfg (12 changed lines)

@@ -1,5 +1,6 @@
 [defaults]
 # --- Performance & Behavior ---
+pipelining = True
 forks = 25
 strategy = linear
 gathering = smart

@@ -14,19 +15,14 @@ stdout_callback = yaml
 callbacks_enabled = profile_tasks,timer

 # --- Plugin paths ---
 filter_plugins = ./filter_plugins
 lookup_plugins = ./lookup_plugins
 module_utils = ./module_utils

 [ssh_connection]
-# Multiplexing: safer socket path in HOME instead of /tmp
-ssh_args = -o ControlMaster=auto -o ControlPersist=20s -o ControlPath=~/.ssh/ansible-%h-%p-%r \
-           -o ServerAliveInterval=15 -o ServerAliveCountMax=3 -o StrictHostKeyChecking=accept-new \
-           -o PreferredAuthentications=publickey,password,keyboard-interactive
-
-# Pipelining boosts speed; works fine if sudoers does not enforce "requiretty"
+ssh_args = -o ControlMaster=auto -o ControlPersist=20s -o ControlPath=~/.ssh/ansible-%h-%p-%r -o ServerAliveInterval=15 -o ServerAliveCountMax=3 -o StrictHostKeyChecking=accept-new -o PreferredAuthentications=publickey,password,keyboard-interactive
 pipelining = True
-scp_if_ssh = smart
+transfer_method = smart

 [persistent_connection]
 connect_timeout = 30
@@ -1,3 +0,0 @@
-# Todo
-- Test this script. It's just a draft. Checkout https://chatgpt.com/c/681d9e2b-7b28-800f-aef8-4f1427e9021d
-- Solve bugs in show_vault_variables.py
@@ -83,6 +83,13 @@ class DefaultsGenerator:
             print(f"Error during rendering: {e}", file=sys.stderr)
             sys.exit(1)

+        # Sort applications by application key for stable output
+        apps = result.get("defaults_applications", {})
+        if isinstance(apps, dict) and apps:
+            result["defaults_applications"] = {
+                k: apps[k] for k in sorted(apps.keys())
+            }
+
         # Write output
         self.output_file.parent.mkdir(parents=True, exist_ok=True)
         with self.output_file.open("w", encoding="utf-8") as f:
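The sorting added above only reorders keys before the YAML dump. A one-line illustration with invented application keys (not taken from the repository):

```python
# Illustration only; the keys are made up.
apps = {"web-app-nextcloud": {}, "svc-db-postgres": {}, "web-app-gitea": {}}
stable = {k: apps[k] for k in sorted(apps.keys())}
print(list(stable))  # ['svc-db-postgres', 'web-app-gitea', 'web-app-nextcloud']
```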
@@ -220,6 +220,10 @@ def main():
         print(f"Error building user entries: {e}", file=sys.stderr)
         sys.exit(1)

+    # Sort users by key for deterministic output
+    if isinstance(users, dict) and users:
+        users = OrderedDict(sorted(users.items()))
+
     # Convert OrderedDict into plain dict for YAML
     default_users = {'default_users': users}
     plain_data = dictify(default_users)
cli/deploy.py (257 changed lines)

@@ -5,6 +5,9 @@ import subprocess
 import os
 import datetime
 import sys
+import re
+from typing import Optional, Dict, Any, List


 def run_ansible_playbook(
     inventory,

@@ -13,21 +16,20 @@ def run_ansible_playbook(
     allowed_applications=None,
     password_file=None,
     verbose=0,
-    skip_tests=False,
-    skip_validation=False,
     skip_build=False,
-    cleanup=False,
+    skip_tests=False,
     logs=False
 ):
     start_time = datetime.datetime.now()
     print(f"\n▶️ Script started at: {start_time.isoformat()}\n")

-    if cleanup:
+    # Cleanup is now handled via MODE_CLEANUP
+    if modes.get("MODE_CLEANUP", False):
         cleanup_command = ["make", "clean-keep-logs"] if logs else ["make", "clean"]
-        print("\n🧹 Cleaning up project (" + " ".join(cleanup_command) +")...\n")
+        print("\n🧹 Cleaning up project (" + " ".join(cleanup_command) + ")...\n")
         subprocess.run(cleanup_command, check=True)
     else:
-        print("\n⚠️ Skipping build as requested.\n")
+        print("\n⚠️ Skipping cleanup as requested.\n")

     if not skip_build:
         print("\n🛠️ Building project (make messy-build)...\n")

@@ -38,26 +40,24 @@ def run_ansible_playbook(
     script_dir = os.path.dirname(os.path.realpath(__file__))
     playbook = os.path.join(os.path.dirname(script_dir), "playbook.yml")

-    # Inventory validation step
-    if not skip_validation:
+    # Inventory validation is controlled via MODE_ASSERT
+    if modes.get("MODE_ASSERT", None) is False:
+        print("\n⚠️ Skipping inventory validation as requested.\n")
+    elif "MODE_ASSERT" not in modes or modes["MODE_ASSERT"] is True:
         print("\n🔍 Validating inventory before deployment...\n")
         try:
             subprocess.run(
-                [sys.executable,
-                os.path.join(script_dir, "validate/inventory.py"),
-                os.path.dirname(inventory)
+                [
+                    sys.executable,
+                    os.path.join(script_dir, "validate", "inventory.py"),
+                    os.path.dirname(inventory),
                 ],
-                check=True
+                check=True,
             )
         except subprocess.CalledProcessError:
-            print(
-                "\n❌ Inventory validation failed. Deployment aborted.\n",
-                file=sys.stderr
-            )
+            print("\n❌ Inventory validation failed. Deployment aborted.\n", file=sys.stderr)
             sys.exit(1)
-    else:
-        print("\n⚠️ Skipping inventory validation as requested.\n")

     if not skip_tests:
         print("\n🧪 Running tests (make messy-test)...\n")
         subprocess.run(["make", "messy-test"], check=True)

@@ -93,25 +93,136 @@ def run_ansible_playbook(
     duration = end_time - start_time
     print(f"⏱️ Total execution time: {duration}\n")


 def validate_application_ids(inventory, app_ids):
     """
     Abort the script if any application IDs are invalid, with detailed reasons.
     """
     from module_utils.valid_deploy_id import ValidDeployId

     validator = ValidDeployId()
     invalid = validator.validate(inventory, app_ids)
     if invalid:
         print("\n❌ Detected invalid application_id(s):\n")
         for app_id, status in invalid.items():
             reasons = []
-            if not status['in_roles']:
+            if not status["in_roles"]:
                 reasons.append("not defined in roles (infinito)")
-            if not status['in_inventory']:
+            if not status["in_inventory"]:
                 reasons.append("not found in inventory file")
             print(f"  - {app_id}: " + ", ".join(reasons))
         sys.exit(1)


+MODE_LINE_RE = re.compile(
+    r"""^\s*(?P<key>[A-Z0-9_]+)\s*:\s*(?P<value>.+?)\s*(?:#\s*(?P<cmt>.*))?\s*$"""
+)
+
+
+def _parse_bool_literal(text: str) -> Optional[bool]:
+    t = text.strip().lower()
+    if t in ("true", "yes", "on"):
+        return True
+    if t in ("false", "no", "off"):
+        return False
+    return None
+
+
+def load_modes_from_yaml(modes_yaml_path: str) -> List[Dict[str, Any]]:
+    """
+    Parse group_vars/all/01_modes.yml line-by-line to recover:
+      - name (e.g., MODE_TEST)
+      - default (True/False/None if templated/unknown)
+      - help (from trailing # comment, if present)
+    """
+    modes = []
+    if not os.path.exists(modes_yaml_path):
+        raise FileNotFoundError(f"Modes file not found: {modes_yaml_path}")
+
+    with open(modes_yaml_path, "r", encoding="utf-8") as fh:
+        for line in fh:
+            line = line.rstrip()
+            if not line or line.lstrip().startswith("#"):
+                continue
+            m = MODE_LINE_RE.match(line)
+            if not m:
+                continue
+            key = m.group("key")
+            val = m.group("value").strip()
+            cmt = (m.group("cmt") or "").strip()
+
+            if not key.startswith("MODE_"):
+                continue
+
+            default_bool = _parse_bool_literal(val)
+            modes.append(
+                {
+                    "name": key,
+                    "default": default_bool,
+                    "help": cmt or f"Toggle {key}",
+                }
+            )
+    return modes
+
+
+def add_dynamic_mode_args(
+    parser: argparse.ArgumentParser, modes_meta: List[Dict[str, Any]]
+) -> Dict[str, Dict[str, Any]]:
+    """
+    Add argparse options based on modes metadata.
+    Returns a dict mapping mode name -> { 'dest': <argparse_dest>, 'default': <bool/None>, 'kind': 'bool_true'|'bool_false'|'explicit' }.
+    """
+    spec: Dict[str, Dict[str, Any]] = {}
+    for m in modes_meta:
+        name = m["name"]
+        default = m["default"]
+        desc = m["help"]
+        short = name.replace("MODE_", "").lower()
+
+        if default is True:
+            opt = f"--skip-{short}"
+            dest = f"skip_{short}"
+            help_txt = desc or f"Skip/disable {short} (default: enabled)"
+            parser.add_argument(opt, action="store_true", help=help_txt, dest=dest)
+            spec[name] = {"dest": dest, "default": True, "kind": "bool_true"}
+        elif default is False:
+            opt = f"--{short}"
+            dest = short
+            help_txt = desc or f"Enable {short} (default: disabled)"
+            parser.add_argument(opt, action="store_true", help=help_txt, dest=dest)
+            spec[name] = {"dest": dest, "default": False, "kind": "bool_false"}
+        else:
+            opt = f"--{short}"
+            dest = short
+            help_txt = desc or f"Set {short} explicitly (true/false). If omitted, keep inventory default."
+            parser.add_argument(opt, choices=["true", "false"], help=help_txt, dest=dest)
+            spec[name] = {"dest": dest, "default": None, "kind": "explicit"}
+
+    return spec
+
+
+def build_modes_from_args(
+    spec: Dict[str, Dict[str, Any]], args_namespace: argparse.Namespace
+) -> Dict[str, Any]:
+    """
+    Using the argparse results and the spec, compute the `modes` dict to pass to Ansible.
+    """
+    modes: Dict[str, Any] = {}
+    for mode_name, info in spec.items():
+        dest = info["dest"]
+        kind = info["kind"]
+        val = getattr(args_namespace, dest, None)
+
+        if kind == "bool_true":
+            modes[mode_name] = False if val else True
+        elif kind == "bool_false":
+            modes[mode_name] = True if val else False
+        else:
+            if val is not None:
+                modes[mode_name] = True if val == "true" else False
+    return modes
+
+
 def main():
     parser = argparse.ArgumentParser(
         description="Run the central Ansible deployment script to manage infrastructure, updates, and tests."

@@ -119,88 +230,74 @@ def main():

     parser.add_argument(
         "inventory",
-        help="Path to the inventory file (INI or YAML) containing hosts and variables."
+        help="Path to the inventory file (INI or YAML) containing hosts and variables.",
     )
     parser.add_argument(
-        "-l", "--limit",
-        help="Restrict execution to a specific host or host group from the inventory."
+        "-l",
+        "--limit",
+        help="Restrict execution to a specific host or host group from the inventory.",
     )
     parser.add_argument(
-        "-T", "--host-type",
+        "-T",
+        "--host-type",
         choices=["server", "desktop"],
         default="server",
-        help="Specify whether the target is a server or a personal computer. Affects role selection and variables."
+        help="Specify whether the target is a server or a personal computer. Affects role selection and variables.",
    )
     parser.add_argument(
-        "-r", "--reset", action="store_true",
-        help="Reset all Infinito.Nexus files and configurations, and run the entire playbook (not just individual roles)."
+        "-p",
+        "--password-file",
+        help="Path to the file containing the Vault password. If not provided, prompts for the password interactively.",
     )
     parser.add_argument(
-        "-t", "--test", action="store_true",
-        help="Run test routines instead of production tasks. Useful for local testing and CI pipelines."
+        "-B",
+        "--skip-build",
+        action="store_true",
+        help="Skip running 'make build' before deployment.",
     )
     parser.add_argument(
-        "-u", "--update", action="store_true",
-        help="Enable the update procedure to bring software and roles up to date."
+        "-t",
+        "--skip-tests",
+        action="store_true",
+        help="Skip running 'make messy-tests' before deployment.",
     )
     parser.add_argument(
-        "-b", "--backup", action="store_true",
-        help="Perform a full backup of critical data and configurations before the update process."
-    )
-    parser.add_argument(
-        "-c", "--cleanup", action="store_true",
-        help="Clean up unused files and outdated configurations after all tasks are complete. Also cleans up the repository before the deployment procedure."
-    )
-    parser.add_argument(
-        "-d", "--debug", action="store_true",
-        help="Enable detailed debug output for Ansible and this script."
-    )
-    parser.add_argument(
-        "-p", "--password-file",
-        help="Path to the file containing the Vault password. If not provided, prompts for the password interactively."
-    )
-    parser.add_argument(
-        "-s", "--skip-tests", action="store_true",
-        help="Skip running 'make test' even if tests are normally enabled."
-    )
-    parser.add_argument(
-        "-V", "--skip-validation", action="store_true",
-        help="Skip inventory validation before deployment."
-    )
-    parser.add_argument(
-        "-B", "--skip-build", action="store_true",
-        help="Skip running 'make build' before deployment."
-    )
-    parser.add_argument(
-        "-i", "--id",
+        "-i",
+        "--id",
         nargs="+",
         default=[],
         dest="id",
-        help="List of application_id's for partial deploy. If not set, all application IDs defined in the inventory will be executed."
+        help="List of application_id's for partial deploy. If not set, all application IDs defined in the inventory will be executed.",
     )
     parser.add_argument(
-        "-v", "--verbose", action="count", default=0,
-        help="Increase verbosity level. Multiple -v flags increase detail (e.g., -vvv for maximum log output)."
+        "-v",
+        "--verbose",
+        action="count",
+        default=0,
+        help="Increase verbosity level. Multiple -v flags increase detail (e.g., -vvv for maximum log output).",
     )
     parser.add_argument(
-        "--logs", action="store_true",
-        help="Keep the CLI logs during cleanup command"
+        "--logs",
+        action="store_true",
+        help="Keep the CLI logs during cleanup command",
     )

+    # ---- Dynamically add mode flags from group_vars/all/01_modes.yml ----
+    script_dir = os.path.dirname(os.path.realpath(__file__))
+    repo_root = os.path.dirname(script_dir)
+    modes_yaml_path = os.path.join(repo_root, "group_vars", "all", "01_modes.yml")
+    modes_meta = load_modes_from_yaml(modes_yaml_path)
+    modes_spec = add_dynamic_mode_args(parser, modes_meta)
+
     args = parser.parse_args()
     validate_application_ids(args.inventory, args.id)

-    modes = {
-        "MODE_RESET": args.reset,
-        "MODE_TEST": args.test,
-        "MODE_UPDATE": args.update,
-        "MODE_BACKUP": args.backup,
-        "MODE_CLEANUP": args.cleanup,
-        "MODE_LOGS": args.logs,
-        "MODE_DEBUG": args.debug,
-        "MODE_ASSERT": not args.skip_validation,
-        "host_type": args.host_type
-    }
+    # Build modes from dynamic args
+    modes = build_modes_from_args(modes_spec, args)
+
+    # Additional non-dynamic flags
+    modes["MODE_LOGS"] = args.logs
+    modes["host_type"] = args.host_type

     run_ansible_playbook(
         inventory=args.inventory,

@@ -209,11 +306,9 @@ def main():
         allowed_applications=args.id,
         password_file=args.password_file,
         verbose=args.verbose,
-        skip_tests=args.skip_tests,
-        skip_validation=args.skip_validation,
         skip_build=args.skip_build,
-        cleanup=args.cleanup,
-        logs=args.logs
+        skip_tests=args.skip_tests,
+        logs=args.logs,
     )
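To see what the new dynamic mode flags are built from, the sketch below reuses the `MODE_LINE_RE` regex and `_parse_bool_literal` helper exactly as added above, applied to a line copied from `group_vars/all/01_modes.yml` (shown at the end of this diff). It is a standalone illustration, not part of the change itself.

```python
import re
from typing import Optional

# Copied verbatim from the diff above.
MODE_LINE_RE = re.compile(
    r"""^\s*(?P<key>[A-Z0-9_]+)\s*:\s*(?P<value>.+?)\s*(?:#\s*(?P<cmt>.*))?\s*$"""
)

def _parse_bool_literal(text: str) -> Optional[bool]:
    t = text.strip().lower()
    if t in ("true", "yes", "on"):
        return True
    if t in ("false", "no", "off"):
        return False
    return None

# Line taken from group_vars/all/01_modes.yml
m = MODE_LINE_RE.match("MODE_UPDATE: true       # Executes updates")
print(m.group("key"), _parse_bool_literal(m.group("value")), m.group("cmt"))
# -> MODE_UPDATE True Executes updates
```

Per `add_dynamic_mode_args`, a default of `true` then yields a `--skip-<name>` switch (here `--skip-update`), a default of `false` yields an enable switch (for example `--debug`), and a templated or unknown default gets an explicit `true`/`false` choice.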
docker-compose.yml (new file, 60 lines)

@@ -0,0 +1,60 @@
+version: "3.9"
+
+services:
+  infinito:
+    build:
+      context: .
+      dockerfile: Dockerfile
+      network: host
+    pull_policy: never
+    container_name: infinito_nexus
+    image: infinito_nexus
+    restart: unless-stopped
+    volumes:
+      - data:/var/lib/docker/volumes/
+      - backups:/Backups/
+      - letsencrypt:/etc/letsencrypt/
+    ports:
+      # --- Mail services (classic + secure) ---
+      - "${BIND_IP:-127.0.0.1}:25:25"      # SMTP
+      - "${BIND_IP:-127.0.0.1}:110:110"    # POP3
+      - "${BIND_IP:-127.0.0.1}:143:143"    # IMAP
+      - "${BIND_IP:-127.0.0.1}:465:465"    # SMTPS
+      - "${BIND_IP:-127.0.0.1}:587:587"    # Submission (SMTP)
+      - "${BIND_IP:-127.0.0.1}:993:993"    # IMAPS (bound to public IP)
+      - "${BIND_IP:-127.0.0.1}:995:995"    # POP3S
+      - "${BIND_IP:-127.0.0.1}:4190:4190"  # Sieve (ManageSieve)
+
+      # --- Web / API services ---
+      - "${BIND_IP:-127.0.0.1}:80:80"      # HTTP
+      - "${BIND_IP:-127.0.0.1}:443:443"    # HTTPS
+      - "${BIND_IP:-127.0.0.1}:8448:8448"  # Matrix federation port
+
+      # --- TURN / STUN (UDP + TCP) ---
+      - "${BIND_IP:-127.0.0.1}:3478-3480:3478-3480/udp"  # TURN/STUN UDP
+      - "${BIND_IP:-127.0.0.1}:3478-3480:3478-3480"      # TURN/STUN TCP
+
+      # --- Streaming / RTMP ---
+      - "${BIND_IP:-127.0.0.1}:1935:1935"  # Peertube
+
+      # --- Custom / application ports ---
+      - "${BIND_IP:-127.0.0.1}:2201:2201"  # Gitea
+      - "${BIND_IP:-127.0.0.1}:2202:2202"  # Gitlab
+      - "${BIND_IP:-127.0.0.1}:2203:22"    # SSH
+      - "${BIND_IP:-127.0.0.1}:33552:33552"
+
+      # --- Consecutive ranges ---
+      - "${BIND_IP:-127.0.0.1}:48081-48083:48081-48083"
+      - "${BIND_IP:-127.0.0.1}:48087:48087"
+
+volumes:
+  data:
+  backups:
+  letsencrypt:
+
+networks:
+  default:
+    driver: bridge
+    ipam:
+      driver: default
+      config:
+        - subnet: ${SUBNET:-172.30.0.0/24}
+          gateway: ${GATEWAY:-172.30.0.1}
@@ -15,7 +15,7 @@ Follow these guides to install and configure Infinito.Nexus:
 - **Networking & VPN** - Configure `WireGuard`, `OpenVPN`, and `Nginx Reverse Proxy`.

 ## Managing & Updating Infinito.Nexus 🔄
-- Regularly update services using `update-docker`, `update-pacman`, or `update-apt`.
+- Regularly update services using `update-pacman`, or `update-apt`.
 - Monitor system health with `sys-ctl-hlth-btrfs`, `sys-ctl-hlth-webserver`, and `sys-ctl-hlth-docker-container`.
 - Automate system maintenance with `sys-lock`, `sys-ctl-cln-bkps`, and `sys-ctl-rpr-docker-hard`.
env.sample (new file, 3 lines)

@@ -0,0 +1,3 @@
+BIND_IP=127.0.0.1
+SUBNET=172.30.0.0/24
+GATEWAY=172.30.0.1
filter_plugins/active_docker.py (new file, 79 lines)

@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+"""
+Ansible filter to count active docker services for current host.
+
+Active means:
+- application key is in group_names
+- application key matches prefix regex (default: ^(web-|svc-).* )
+- under applications[app]['docker']['services'] each service is counted if:
+    - 'enabled' is True, OR
+    - 'enabled' is missing/undefined (treated as active)
+
+Returns an integer. If ensure_min_one=True, returns at least 1.
+"""
+
+import re
+from typing import Any, Dict, Mapping, Iterable
+
+
+def _is_mapping(x: Any) -> bool:
+    # be liberal: Mapping covers dict-like; fallback to dict check
+    try:
+        return isinstance(x, Mapping)
+    except Exception:
+        return isinstance(x, dict)
+
+
+def active_docker_container_count(applications: Mapping[str, Any],
+                                  group_names: Iterable[str],
+                                  prefix_regex: str = r'^(web-|svc-).*',
+                                  ensure_min_one: bool = False) -> int:
+    if not _is_mapping(applications):
+        return 1 if ensure_min_one else 0
+
+    group_set = set(group_names or [])
+    try:
+        pattern = re.compile(prefix_regex)
+    except re.error:
+        pattern = re.compile(r'^(web-|svc-).*')  # fallback
+
+    count = 0
+
+    for app_key, app_val in applications.items():
+        # host selection + name prefix
+        if app_key not in group_set:
+            continue
+        if not pattern.match(str(app_key)):
+            continue
+
+        docker = app_val.get('docker') if _is_mapping(app_val) else None
+        services = docker.get('services') if _is_mapping(docker) else None
+        if not _is_mapping(services):
+            # sometimes roles define a single service name string; ignore
+            continue
+
+        for _svc_name, svc_cfg in services.items():
+            if not _is_mapping(svc_cfg):
+                # allow shorthand like: service: {} or image string -> counts as enabled
+                count += 1
+                continue
+            enabled = svc_cfg.get('enabled', True)
+            if isinstance(enabled, bool):
+                if enabled:
+                    count += 1
+            else:
+                # non-bool enabled -> treat "truthy" as enabled
+                if bool(enabled):
+                    count += 1
+
+    if ensure_min_one and count < 1:
+        return 1
+    return count
+
+
+class FilterModule(object):
+    def filters(self):
+        return {
+            # usage: {{ applications | active_docker_container_count(group_names) }}
+            'active_docker_container_count': active_docker_container_count,
+        }
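For a quick sanity check, the snippet below calls the new filter function directly (assuming the repository root is on `sys.path` so that `filter_plugins.active_docker` is importable). The application names and service layout are invented for illustration and are not taken from any inventory in this diff.

```python
from filter_plugins.active_docker import active_docker_container_count

# Hypothetical data: two matching applications in group_names, one that is
# skipped because its key does not match the ^(web-|svc-) prefix.
apps = {
    "web-app-nextcloud": {"docker": {"services": {"nextcloud": {"enabled": True}, "cron": {}}}},
    "svc-db-postgres":   {"docker": {"services": {"postgres": {"enabled": False}}}},
    "sys-timer":         {"docker": {"services": {"timer": {}}}},
}
group_names = ["web-app-nextcloud", "svc-db-postgres", "sys-timer"]

print(active_docker_container_count(apps, group_names))
# -> 2  (nextcloud + cron count; 'enabled' missing means active, postgres is disabled)
```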
@@ -10,9 +10,23 @@ from module_utils.config_utils import get_app_conf
 from module_utils.get_url import get_url


+def _dedup_preserve(seq):
+    """Return a list with stable order and unique items."""
+    seen = set()
+    out = []
+    for x in seq:
+        if x not in seen:
+            seen.add(x)
+            out.append(x)
+    return out
+
+
 class FilterModule(object):
     """
-    Custom filters for Content Security Policy generation and CSP-related utilities.
+    Jinja filters for building a robust, CSP3-aware Content-Security-Policy header.
+    Safari/CSP2 compatibility is ensured by merging the -elem/-attr variants into the base
+    directives (style-src, script-src). We intentionally do NOT mirror back into -elem/-attr
+    to allow true CSP3 granularity on modern browsers.
     """

     def filters(self):

@@ -61,11 +75,14 @@ class FilterModule(object):
         """
         Returns CSP flag tokens (e.g., "'unsafe-eval'", "'unsafe-inline'") for a directive,
         merging sane defaults with app config.
-        Default: 'unsafe-inline' is enabled for style-src and style-src-elem.
+
+        Defaults:
+          - For styles we enable 'unsafe-inline' by default (style-src, style-src-elem, style-src-attr),
+            because many apps rely on inline styles / style attributes.
+          - For scripts we do NOT enable 'unsafe-inline' by default.
         """
-        # Defaults that apply to all apps
         default_flags = {}
-        if directive in ('style-src', 'style-src-elem'):
+        if directive in ('style-src', 'style-src-elem', 'style-src-attr'):
             default_flags = {'unsafe-inline': True}

         configured = get_app_conf(

@@ -76,7 +93,6 @@ class FilterModule(object):
             {}
         )

-        # Merge defaults with configured flags (configured overrides defaults)
         merged = {**default_flags, **configured}

         tokens = []

@@ -131,77 +147,148 @@ class FilterModule(object):
     ):
         """
         Builds the Content-Security-Policy header value dynamically based on application settings.
-        - Flags (e.g., 'unsafe-eval', 'unsafe-inline') are read from server.csp.flags.<directive>,
-          with sane defaults applied in get_csp_flags (always 'unsafe-inline' for style-src and style-src-elem).
-        - Inline hashes are read from server.csp.hashes.<directive>.
-        - Whitelists are read from server.csp.whitelist.<directive>.
-        - Inline hashes are added only if the final tokens do NOT include 'unsafe-inline'.
+
+        Key points:
+          - CSP3-aware: supports base/elem/attr for styles and scripts.
+          - Safari/CSP2 fallback: base directives (style-src, script-src) always include
+            the union of their -elem/-attr variants.
+          - We do NOT mirror back into -elem/-attr; finer CSP3 rules remain effective
+            on modern browsers if you choose to use them.
+          - If the app explicitly disables a token on the *base* (e.g. style-src.unsafe-inline: false),
+            that token is removed from the merged base even if present in elem/attr.
+          - Inline hashes are added ONLY if that directive does NOT include 'unsafe-inline'.
+          - Whitelists/flags/hashes read from:
+              server.csp.whitelist.<directive>
+              server.csp.flags.<directive>
+              server.csp.hashes.<directive>
+          - “Smart defaults”:
+              * internal CDN for style/script elem and connect
+              * Matomo endpoints (if feature enabled) for script-elem/connect
+              * Simpleicons (if feature enabled) for connect
+              * reCAPTCHA (if feature enabled) for script-elem/frame-src
+              * frame-ancestors extended for desktop/logout/keycloak if enabled
         """
         try:
             directives = [
-                'default-src',      # Fallback source list for content types not explicitly listed
-                'connect-src',      # Allowed URLs for XHR, WebSockets, EventSource, fetch()
-                'frame-ancestors',  # Who may embed this page
-                'frame-src',        # Sources for nested browsing contexts (e.g., <iframe>)
-                'script-src',       # Sources for script execution
-                'script-src-elem',  # Sources for <script> elements
-                'style-src',        # Sources for inline styles and <style>/<link> elements
-                'style-src-elem',   # Sources for <style> and <link rel="stylesheet">
-                'font-src',         # Sources for fonts
-                'worker-src',       # Sources for workers
-                'manifest-src',     # Sources for web app manifests
-                'media-src',        # Sources for audio and video
+                'default-src',
+                'connect-src',
+                'frame-ancestors',
+                'frame-src',
+                'script-src',
+                'script-src-elem',
+                'script-src-attr',
+                'style-src',
+                'style-src-elem',
+                'style-src-attr',
+                'font-src',
+                'worker-src',
+                'manifest-src',
+                'media-src',
             ]

-            parts = []
+            tokens_by_dir = {}
+            explicit_flags_by_dir = {}

             for directive in directives:
+                # Collect explicit flags (to later respect explicit "False" on base during merge)
+                explicit_flags = get_app_conf(
+                    applications,
+                    application_id,
+                    'server.csp.flags.' + directive,
+                    False,
+                    {}
+                )
+                explicit_flags_by_dir[directive] = explicit_flags
+
                 tokens = ["'self'"]

-                # 1) Load flags (includes defaults from get_csp_flags)
+                # 1) Flags (with sane defaults)
                 flags = self.get_csp_flags(applications, application_id, directive)
                 tokens += flags

-                # 2) Allow fetching from internal CDN by default for selected directives
-                if directive in ['script-src-elem', 'connect-src', 'style-src-elem']:
+                # 2) Internal CDN defaults for selected directives
+                if directive in ('script-src-elem', 'connect-src', 'style-src-elem', 'style-src'):
                     tokens.append(get_url(domains, 'web-svc-cdn', web_protocol))

-                # 3) Matomo integration if feature is enabled
-                if directive in ['script-src-elem', 'connect-src']:
+                # 3) Matomo (if enabled)
+                if directive in ('script-src-elem', 'connect-src'):
                     if self.is_feature_enabled(applications, matomo_feature_name, application_id):
                         tokens.append(get_url(domains, 'web-app-matomo', web_protocol))

-                # 4) ReCaptcha integration (scripts + frames) if feature is enabled
+                # 4) Simpleicons (if enabled) – typically used via connect-src (fetch)
+                if directive == 'connect-src':
+                    if self.is_feature_enabled(applications, 'simpleicons', application_id):
+                        tokens.append(get_url(domains, 'web-svc-simpleicons', web_protocol))
+
+                # 5) reCAPTCHA (if enabled) – scripts + frames
                 if self.is_feature_enabled(applications, 'recaptcha', application_id):
-                    if directive in ['script-src-elem', 'frame-src']:
+                    if directive in ('script-src-elem', 'frame-src'):
                         tokens.append('https://www.gstatic.com')
                         tokens.append('https://www.google.com')

-                # 5) Frame ancestors handling (desktop + logout support)
+                # 6) Frame ancestors (desktop + logout)
                 if directive == 'frame-ancestors':
                     if self.is_feature_enabled(applications, 'desktop', application_id):
-                        # Allow being embedded by the desktop app domain (and potentially its parent)
+                        # Allow being embedded by the desktop app domain's site
                         domain = domains.get('web-app-desktop')[0]
                         sld_tld = ".".join(domain.split(".")[-2:])  # e.g., example.com
                         tokens.append(f"{sld_tld}")
                     if self.is_feature_enabled(applications, 'logout', application_id):
-                        # Allow embedding via logout proxy and Keycloak app
                         tokens.append(get_url(domains, 'web-svc-logout', web_protocol))
                         tokens.append(get_url(domains, 'web-app-keycloak', web_protocol))

-                # 6) Custom whitelist entries
+                # 7) Custom whitelist
                 tokens += self.get_csp_whitelist(applications, application_id, directive)

-                # 7) Add inline content hashes ONLY if final tokens do NOT include 'unsafe-inline'
-                # (Check tokens, not flags, to include defaults and later modifications.)
+                # 8) Inline hashes (only if this directive does NOT include 'unsafe-inline')
                 if "'unsafe-inline'" not in tokens:
                     for snippet in self.get_csp_inline_content(applications, application_id, directive):
                         tokens.append(self.get_csp_hash(snippet))

-                # Append directive
-                parts.append(f"{directive} {' '.join(tokens)};")
+                tokens_by_dir[directive] = _dedup_preserve(tokens)

-            # 8) Static img-src directive (kept permissive for data/blob and any host)
+            # ----------------------------------------------------------
+            # CSP3 families → ensure CSP2 fallback (Safari-safe)
+            # Merge style/script families so base contains union of elem/attr.
+            # Respect explicit disables on the base (e.g. unsafe-inline=False).
+            # Do NOT mirror back into elem/attr (keep granularity).
+            # ----------------------------------------------------------
+            def _strip_if_disabled(unioned_tokens, explicit_flags, name):
+                """
+                Remove a token (e.g. 'unsafe-inline') from the unioned token list
+                if it is explicitly disabled in the base directive flags.
+                """
+                if isinstance(explicit_flags, dict) and explicit_flags.get(name) is False:
+                    tok = f"'{name}'"
+                    return [t for t in unioned_tokens if t != tok]
+                return unioned_tokens
+
+            def merge_family(base_key, elem_key, attr_key):
+                base = tokens_by_dir.get(base_key, [])
+                elem = tokens_by_dir.get(elem_key, [])
+                attr = tokens_by_dir.get(attr_key, [])
+                union = _dedup_preserve(base + elem + attr)
+
+                # Respect explicit disables on the base
+                explicit_base = explicit_flags_by_dir.get(base_key, {})
+                # The most relevant flags for script/style:
+                for flag_name in ('unsafe-inline', 'unsafe-eval'):
+                    union = _strip_if_disabled(union, explicit_base, flag_name)
+
+                tokens_by_dir[base_key] = union  # write back only to base
+
+            merge_family('style-src', 'style-src-elem', 'style-src-attr')
+            merge_family('script-src', 'script-src-elem', 'script-src-attr')
+
+            # ----------------------------------------------------------
+            # Assemble header
+            # ----------------------------------------------------------
+            parts = []
+            for directive in directives:
+                if directive in tokens_by_dir:
+                    parts.append(f"{directive} {' '.join(tokens_by_dir[directive])};")
+
+            # Keep permissive img-src for data/blob + any host (as before)
             parts.append("img-src * data: blob:;")

             return ' '.join(parts)
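The Safari/CSP2 fallback described in the new docstring can be illustrated with hand-written token lists. Only `_dedup_preserve` and the union-into-base step below mirror the code above; the directive values themselves (including `https://cdn.example.org`) are made up for the example.

```python
def _dedup_preserve(seq):
    # Same helper as added in the diff: stable order, unique items.
    seen, out = set(), []
    for x in seq:
        if x not in seen:
            seen.add(x)
            out.append(x)
    return out

tokens_by_dir = {
    "style-src":      ["'self'"],
    "style-src-elem": ["'self'", "'unsafe-inline'", "https://cdn.example.org"],
    "style-src-attr": ["'self'", "'unsafe-inline'"],
}

# Base directive is widened to the union of its -elem/-attr variants;
# the variants themselves are left untouched (CSP3 granularity kept).
union = _dedup_preserve(
    tokens_by_dir["style-src"]
    + tokens_by_dir["style-src-elem"]
    + tokens_by_dir["style-src-attr"]
)
tokens_by_dir["style-src"] = union
print(union)  # ["'self'", "'unsafe-inline'", 'https://cdn.example.org']
```

Browsers that only understand CSP2 read the widened `style-src`, while CSP3-capable browsers still apply the narrower `-elem`/`-attr` rules.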
@@ -7,7 +7,7 @@ class FilterModule(object):
     def filters(self):
         return {'domain_mappings': self.domain_mappings}

-    def domain_mappings(self, apps, PRIMARY_DOMAIN):
+    def domain_mappings(self, apps, primary_domain, auto_build_alias):
         """
         Build a flat list of redirect mappings for all apps:
           - source: each alias domain

@@ -43,7 +43,7 @@ class FilterModule(object):
             domains_cfg = cfg.get('server',{}).get('domains',{})
             entry = domains_cfg.get('canonical')
             if entry is None:
-                canonical_map[app_id] = [default_domain(app_id, PRIMARY_DOMAIN)]
+                canonical_map[app_id] = [default_domain(app_id, primary_domain)]
             elif isinstance(entry, dict):
                 canonical_map[app_id] = list(entry.values())
             elif isinstance(entry, list):

@@ -61,11 +61,11 @@ class FilterModule(object):
                 alias_map[app_id] = []
                 continue
             if isinstance(domains_cfg, dict) and not domains_cfg:
-                alias_map[app_id] = [default_domain(app_id, PRIMARY_DOMAIN)]
+                alias_map[app_id] = [default_domain(app_id, primary_domain)]
                 continue

             aliases = parse_entry(domains_cfg, 'aliases', app_id) or []
-            default = default_domain(app_id, PRIMARY_DOMAIN)
+            default = default_domain(app_id, primary_domain)
             has_aliases = 'aliases' in domains_cfg
             has_canonical = 'canonical' in domains_cfg

@@ -74,7 +74,7 @@ class FilterModule(object):
                 aliases.append(default)
             elif has_canonical:
                 canon = canonical_map.get(app_id, [])
-                if default not in canon and default not in aliases:
+                if default not in canon and default not in aliases and auto_build_alias:
                     aliases.append(default)

             alias_map[app_id] = aliases

@@ -84,7 +84,7 @@ class FilterModule(object):
         mappings = []
         for app_id, sources in alias_map.items():
             canon_list = canonical_map.get(app_id, [])
-            target = canon_list[0] if canon_list else default_domain(app_id, PRIMARY_DOMAIN)
+            target = canon_list[0] if canon_list else default_domain(app_id, primary_domain)
             for src in sources:
                 if src == target:
                     # skip self-redirects
@@ -4,7 +4,7 @@ class FilterModule(object):
     def filters(self):
         return {'generate_all_domains': self.generate_all_domains}

-    def generate_all_domains(self, domains_dict, include_www=True):
+    def generate_all_domains(self, domains_dict, include_www:bool=True):
         """
         Transform a dict of domains (values: str, list, dict) into a flat list,
         optionally add 'www.' prefixes, dedupe and sort alphabetically.
@@ -20,9 +20,10 @@ def get_docker_paths(application_id: str, path_docker_compose_instances: str) ->
             'config': f"{base}config/",
         },
         'files': {
             'env': f"{base}.env/env",
             'docker_compose': f"{base}docker-compose.yml",
+            'docker_compose_override': f"{base}docker-compose.override.yml",
             'dockerfile': f"{base}Dockerfile",
         }
     }
filter_plugins/jvm_filters.py (new file, 77 lines)

@@ -0,0 +1,77 @@
+from __future__ import annotations
+
+import sys, os, re
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+
+from ansible.errors import AnsibleFilterError
+from module_utils.config_utils import get_app_conf
+from module_utils.entity_name_utils import get_entity_name
+
+_UNIT_RE = re.compile(r'^\s*(\d+(?:\.\d+)?)\s*([kKmMgGtT]?[bB]?)?\s*$')
+_FACTORS = {
+    '': 1, 'b': 1,
+    'k': 1024, 'kb': 1024,
+    'm': 1024**2, 'mb': 1024**2,
+    'g': 1024**3, 'gb': 1024**3,
+    't': 1024**4, 'tb': 1024**4,
+}
+
+def _to_bytes(v: str) -> int:
+    if v is None:
+        raise AnsibleFilterError("jvm_filters: size value is None")
+    s = str(v).strip()
+    m = _UNIT_RE.match(s)
+    if not m:
+        raise AnsibleFilterError(f"jvm_filters: invalid size '{v}'")
+    num, unit = m.group(1), (m.group(2) or '').lower()
+    try:
+        val = float(num)
+    except ValueError as e:
+        raise AnsibleFilterError(f"jvm_filters: invalid numeric size '{v}'") from e
+    factor = _FACTORS.get(unit)
+    if factor is None:
+        raise AnsibleFilterError(f"jvm_filters: unknown unit in '{v}'")
+    return int(val * factor)
+
+def _to_mb(v: str) -> int:
+    return max(0, _to_bytes(v) // (1024 * 1024))
+
+def _svc(app_id: str) -> str:
+    return get_entity_name(app_id)
+
+def _mem_limit_mb(apps: dict, app_id: str) -> int:
+    svc = _svc(app_id)
+    raw = get_app_conf(apps, app_id, f"docker.services.{svc}.mem_limit")
+    mb = _to_mb(raw)
+    if mb <= 0:
+        raise AnsibleFilterError(f"jvm_filters: mem_limit for '{svc}' must be > 0 MB (got '{raw}')")
+    return mb
+
+def _mem_res_mb(apps: dict, app_id: str) -> int:
+    svc = _svc(app_id)
+    raw = get_app_conf(apps, app_id, f"docker.services.{svc}.mem_reservation")
+    mb = _to_mb(raw)
+    if mb <= 0:
+        raise AnsibleFilterError(f"jvm_filters: mem_reservation for '{svc}' must be > 0 MB (got '{raw}')")
+    return mb
+
+def jvm_max_mb(apps: dict, app_id: str) -> int:
+    """Xmx = min( floor(0.7*limit), limit-1024, 12288 ) with floor at 1024 MB."""
+    limit_mb = _mem_limit_mb(apps, app_id)
+    c1 = (limit_mb * 7) // 10
+    c2 = max(0, limit_mb - 1024)
+    c3 = 12288
+    return max(1024, min(c1, c2, c3))
+
+def jvm_min_mb(apps: dict, app_id: str) -> int:
+    """Xms = min( floor(Xmx/2), mem_reservation, Xmx ) with floor at 512 MB."""
+    xmx = jvm_max_mb(apps, app_id)
+    res = _mem_res_mb(apps, app_id)
+    return max(512, min(xmx // 2, res, xmx))
+
+class FilterModule(object):
+    def filters(self):
+        return {
+            "jvm_max_mb": jvm_max_mb,
+            "jvm_min_mb": jvm_min_mb,
+        }
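A worked example of the Xmx/Xms rules above, using assumed container sizes (mem_limit "6g" and mem_reservation "2g" are invented values; the arithmetic is copied from jvm_max_mb/jvm_min_mb rather than calling them, since those need a full applications dict and get_app_conf):

```python
limit_mb = 6 * 1024   # mem_limit: "6g"  -> 6144 MB (assumed)
res_mb   = 2 * 1024   # mem_reservation: "2g" -> 2048 MB (assumed)

# Xmx = min(floor(0.7 * limit), limit - 1024, 12288), floored at 1024 MB
xmx = max(1024, min((limit_mb * 7) // 10, max(0, limit_mb - 1024), 12288))

# Xms = min(floor(Xmx / 2), mem_reservation, Xmx), floored at 512 MB
xms = max(512, min(xmx // 2, res_mb, xmx))

print(xmx, xms)  # 4300 2048
```

So a 6 GB container gets roughly a 4.2 GB heap ceiling, and the initial heap is capped by the 2 GB reservation.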
filter_plugins/resource_filter.py (new file, 40 lines)
@@ -0,0 +1,40 @@
# filter_plugins/resource_filter.py
from __future__ import annotations

import sys, os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from module_utils.config_utils import get_app_conf, AppConfigKeyError, ConfigEntryNotSetError  # noqa: F401
from module_utils.entity_name_utils import get_entity_name

from ansible.errors import AnsibleFilterError


def resource_filter(
    applications: dict,
    application_id: str,
    key: str,
    service_name: str,
    hard_default,
):
    """
    Lookup order:
      1) docker.services.<service_name or get_entity_name(application_id)>.<key>
      2) hard_default (mandatory)

    - service_name may be "" → will resolve to get_entity_name(application_id).
    - hard_default is mandatory (no implicit None).
    - required=False always.
    """
    try:
        primary_service = service_name if service_name != "" else get_entity_name(application_id)
        return get_app_conf(applications, application_id, f"docker.services.{primary_service}.{key}", False, hard_default)
    except (AppConfigKeyError, ConfigEntryNotSetError) as e:
        raise AnsibleFilterError(str(e))


class FilterModule(object):
    def filters(self):
        return {
            "resource_filter": resource_filter,
        }
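Side note: the lookup order documented in the docstring can be illustrated with a small self-contained sketch. Plain dicts stand in for `get_app_conf` and `get_entity_name`; the application id and values are invented.

```python
# Simplified illustration of the lookup order only — not the real implementation.
applications = {
    "web-app-example": {"docker": {"services": {"example": {"cpus": "1.5"}}}},
}

def lookup(apps, app_id, key, service_name, hard_default):
    svc = service_name or "example"  # stand-in for get_entity_name(app_id)
    value = (apps.get(app_id, {}).get("docker", {})
                 .get("services", {}).get(svc, {}).get(key))
    return hard_default if value is None else value

print(lookup(applications, "web-app-example", "cpus", "", "0.5"))      # "1.5" (configured value wins)
print(lookup(applications, "web-app-example", "pids_limit", "", 512))  # 512 (falls back to hard_default)
```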
@@ -29,8 +29,13 @@ WEB_PORT: "{{ 443 if WEB_PROTOCOL == 'https' else 80 }}" # Defaul
 # Websocket
 WEBSOCKET_PROTOCOL: "{{ 'wss' if WEB_PROTOCOL == 'https' else 'ws' }}"
 
+# WWW-Redirect to None WWW-Domains enabled
+WWW_REDIRECT_ENABLED: "{{ ('web-opt-rdr-www' in group_names) | bool }}"
+
+AUTO_BUILD_ALIASES: False     # If enabled it creates an alias domain for each web application by the entity name, recommended to set to false to safge domain space
+
 # Domain
 PRIMARY_DOMAIN: "localhost"   # Primary Domain of the server
 
 DNS_PROVIDER: cloudflare      # The DNS Provider\Registrar for the domain
 
@@ -55,7 +60,7 @@ DOCKER_WHITELISTET_ANON_VOLUMES: []
 
 # Asyn Confitguration
 ASYNC_ENABLED: "{{ not MODE_DEBUG | bool }}"                # Activate async, deactivated for debugging
-ASYNC_TIME: "{{ 300 if ASYNC_ENABLED | bool else omit }}"   # Run for mnax 5min
+ASYNC_TIME: "{{ 300 if ASYNC_ENABLED | bool else omit }}"   # Run for max 5min
 ASYNC_POLL: "{{ 0 if ASYNC_ENABLED | bool else 10 }}"       # Don't wait for task
 
 # default value if not set via CLI (-e) or in playbook vars
@@ -71,8 +76,9 @@ _applications_nextcloud_oidc_flavor: >-
       False,
       'oidc_login'
       if applications
-      | get_app_conf('web-app-nextcloud','features.ldap',False, True)
-      else 'sociallogin'
+      | get_app_conf('web-app-nextcloud','features.ldap',False, True, True)
+      else 'sociallogin',
+      True
     )
 }}
 
@@ -81,4 +87,4 @@ _applications_nextcloud_oidc_flavor: >-
 RBAC:
   GROUP:
     NAME:  "/roles"    # Name of the group which holds the RBAC roles
     CLAIM: "groups"    # Name of the claim containing the RBAC groups
@@ -1,10 +1,10 @@
 # Mode
 
 # The following modes can be combined with each other
-MODE_TEST: false                       # Executes test routines instead of productive routines
+MODE_DUMMY: false                      # Executes dummy/test routines instead of productive routines
 MODE_UPDATE: true                      # Executes updates
 MODE_DEBUG: false                      # This enables debugging in ansible and in the apps, You SHOULD NOT enable this on production servers
 MODE_RESET: false                      # Cleans up all Infinito.Nexus files. It's necessary to run to whole playbook and not particial roles when using this function.
-MODE_BACKUP: "{{ MODE_UPDATE }}"       # Activates the backup before the update procedure
-MODE_CLEANUP: "{{ MODE_DEBUG }}"       # Cleanup unused files and configurations
-MODE_ASSERT: "{{ MODE_DEBUG }}"        # Executes validation tasks during the run.
+MODE_CLEANUP: true                     # Cleanup unused files and configurations
+MODE_ASSERT: "{{ MODE_DEBUG | bool }}" # Executes validation tasks during the run.
+MODE_BACKUP: true                      # Executes the Backup before the deployment
@@ -29,4 +29,31 @@ NGINX:
   IMAGE: "/tmp/cache_nginx_image/"   # Directory which nginx uses to cache images
   USER: "http"                       # Default nginx user in ArchLinux
+
+# Effective CPUs (float) across proxy and the current app
+WEBSERVER_CPUS_EFFECTIVE: >-
+  {{
+    [
+      (applications | resource_filter('svc-prx-openresty', 'cpus', service_name | default(''), RESOURCE_CPUS)) | float,
+      (applications | resource_filter(application_id, 'cpus', service_name | default(''), RESOURCE_CPUS)) | float
+    ] | min
+  }}
+
+# Nginx requires an integer for worker_processes:
+# - if cpus < 1 → 1
+# - else → floor to int
+WEBSERVER_WORKER_PROCESSES: >-
+  {{
+    1 if (WEBSERVER_CPUS_EFFECTIVE | float) < 1
+    else (WEBSERVER_CPUS_EFFECTIVE | float | int)
+  }}
+
+# worker_connections from pids_limit (use the smaller one), with correct key/defaults
+WEBSERVER_WORKER_CONNECTIONS: >-
+  {{
+    [
+      (applications | resource_filter('svc-prx-openresty', 'pids_limit', service_name | default(''), RESOURCE_PIDS_LIMIT)) | int,
+      (applications | resource_filter(application_id, 'pids_limit', service_name | default(''), RESOURCE_PIDS_LIMIT)) | int
+    ] | min
+  }}
+
 # @todo It propably makes sense to distinguish between target and source mount path, so that the config files can be stored in the openresty volumes folder
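Side note: the three webserver variables above reduce to simple min/floor arithmetic. A minimal sketch of that math, with invented example numbers (not the real Jinja evaluation):

```python
# Illustrative only: mirrors the Jinja expressions for webserver sizing.
def cpus_effective(proxy_cpus: float, app_cpus: float) -> float:
    return min(proxy_cpus, app_cpus)           # smaller of proxy and app share

def worker_processes(cpus: float) -> int:
    return 1 if cpus < 1 else int(cpus)        # at least 1, otherwise floor to int

def worker_connections(proxy_pids: int, app_pids: int) -> int:
    return min(proxy_pids, app_pids)           # take the smaller pids_limit

print(worker_processes(cpus_effective(2.0, 0.75)))  # 1  (below one full CPU)
print(worker_processes(cpus_effective(4.0, 2.5)))   # 2  (floor of 2.5)
print(worker_connections(512, 2048))                # 512
```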
@@ -5,14 +5,15 @@
 SYS_SERVICE_SUFFIX: ".{{ SOFTWARE_NAME | lower }}.service"
 
 ## Names
+SYS_SERVICE_CLEANUP_BACKUPS:            "{{ 'sys-ctl-cln-bkps' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_CLEANUP_BACKUPS_FAILED:     "{{ 'sys-ctl-cln-faild-bkps' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_CLEANUP_ANONYMOUS_VOLUMES:  "{{ 'sys-ctl-cln-anon-volumes' | get_service_name(SOFTWARE_NAME) }}"
+SYS_SERVICE_CLEANUP_DISC_SPACE:         "{{ 'sys-ctl-cln-disc-space' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_OPTIMIZE_DRIVE:             "{{ 'svc-opt-ssd-hdd' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_BACKUP_RMT_2_LOC:           "{{ 'svc-bkp-rmt-2-loc' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_BACKUP_DOCKER_2_LOC:        "{{ 'sys-ctl-bkp-docker-2-loc' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_REPAIR_DOCKER_SOFT:         "{{ 'sys-ctl-rpr-docker-soft' | get_service_name(SOFTWARE_NAME) }}"
 SYS_SERVICE_REPAIR_DOCKER_HARD:         "{{ 'sys-ctl-rpr-docker-hard' | get_service_name(SOFTWARE_NAME) }}"
-SYS_SERVICE_UPDATE_DOCKER:              "{{ 'update-docker' | get_service_name(SOFTWARE_NAME) }}"
 
 ## On Failure
 SYS_SERVICE_ON_FAILURE_COMPOSE: "{{ ('sys-ctl-alm-compose@') | get_service_name(SOFTWARE_NAME, False) }}%n.service"
@@ -46,8 +47,7 @@ SYS_SERVICE_GROUP_MANIPULATION: >
     SYS_SERVICE_GROUP_CLEANUP +
     SYS_SERVICE_GROUP_REPAIR +
     SYS_SERVICE_GROUP_OPTIMIZATION +
-    SYS_SERVICE_GROUP_MAINTANANCE +
-    [ SYS_SERVICE_UPDATE_DOCKER ]
+    SYS_SERVICE_GROUP_MAINTANANCE
   ) | sort
 }}
 
@@ -1,4 +1,3 @@
-
 # Service Timers
 
 ## Meta
@@ -6,12 +5,12 @@ SYS_TIMER_ALL_ENABLED: "{{ MODE_DEBUG }}" # Runtime Var
 
 ## Server Tact Variables
 
-HOURS_SERVER_AWAKE: "0..23"            # Ours in which the server is "awake" (100% working). Rest of the time is reserved for maintanance
+HOURS_SERVER_AWAKE: "6..23"            # Ours in which the server is "awake" (100% working). Rest of the time is reserved for maintanance
 RANDOMIZED_DELAY_SEC: "5min"           # Random delay for systemd timers to avoid peak loads.
 
 ## Timeouts for all services
 SYS_TIMEOUT_DOCKER_RPR_HARD: "10min"
 SYS_TIMEOUT_DOCKER_RPR_SOFT: "{{ SYS_TIMEOUT_DOCKER_RPR_HARD }}"
 SYS_TIMEOUT_CLEANUP_SERVICES: "15min"
 SYS_TIMEOUT_DOCKER_UPDATE: "20min"
 SYS_TIMEOUT_STORAGE_OPTIMIZER: "{{ SYS_TIMEOUT_DOCKER_UPDATE }}"
@@ -24,29 +23,29 @@ SYS_SCHEDULE_HEALTH_BTRFS: "*-*-* 00:00:00"
 SYS_SCHEDULE_HEALTH_JOURNALCTL: "*-*-* 00:00:00"                              # Check once per day the journalctl for errors
 SYS_SCHEDULE_HEALTH_DISC_SPACE: "*-*-* 06,12,18,00:00:00"                     # Check four times per day if there is sufficient disc space
 SYS_SCHEDULE_HEALTH_DOCKER_CONTAINER: "*-*-* {{ HOURS_SERVER_AWAKE }}:00:00"  # Check once per hour if the docker containers are healthy
-SYS_SCHEDULE_HEALTH_DOCKER_VOLUMES: "*-*-* {{ HOURS_SERVER_AWAKE }}:15:00"    # Check once per hour if the docker volumes are healthy
-SYS_SCHEDULE_HEALTH_CSP_CRAWLER: "*-*-* {{ HOURS_SERVER_AWAKE }}:30:00"       # Check once per hour if all CSP are fullfilled available
-SYS_SCHEDULE_HEALTH_NGINX: "*-*-* {{ HOURS_SERVER_AWAKE }}:45:00"             # Check once per hour if all webservices are available
+SYS_SCHEDULE_HEALTH_DOCKER_VOLUMES: "*-*-* {{ HOURS_SERVER_AWAKE }}:00:00"    # Check once per hour if the docker volumes are healthy
+SYS_SCHEDULE_HEALTH_CSP_CRAWLER: "*-*-* {{ HOURS_SERVER_AWAKE }}:00:00"       # Check once per hour if all CSP are fullfilled available
+SYS_SCHEDULE_HEALTH_NGINX: "*-*-* {{ HOURS_SERVER_AWAKE }}:00:00"             # Check once per hour if all webservices are available
 SYS_SCHEDULE_HEALTH_MSMTP: "*-*-* 00:00:00"                                   # Check once per day SMTP Server
 
 ### Schedule for cleanup tasks
-SYS_SCHEDULE_CLEANUP_BACKUPS: "*-*-* 00,06,12,18:30:00"     # Cleanup backups every 6 hours, MUST be called before disc space cleanup
-SYS_SCHEDULE_CLEANUP_DISC_SPACE: "*-*-* 07,13,19,01:30:00"  # Cleanup disc space every 6 hours
-SYS_SCHEDULE_CLEANUP_CERTS: "*-*-* 12,00:45:00"             # Deletes and revokes unused certs
-SYS_SCHEDULE_CLEANUP_FAILED_BACKUPS: "*-*-* 12:00:00"       # Clean up failed docker backups every noon
+SYS_SCHEDULE_CLEANUP_CERTS: "*-*-* 20:00"                   # Deletes and revokes unused certs once per day
+SYS_SCHEDULE_CLEANUP_FAILED_BACKUPS: "*-*-* 21:00"          # Clean up failed docker backups once per day
+SYS_SCHEDULE_CLEANUP_BACKUPS: "*-*-* 22:00"                 # Cleanup backups once per day, MUST be called before disc space cleanup
+SYS_SCHEDULE_CLEANUP_DISC_SPACE: "*-*-* 23:00"              # Cleanup disc space once per day
 
 ### Schedule for repair services
 SYS_SCHEDULE_REPAIR_BTRFS_AUTO_BALANCER: "Sat *-*-01..07 00:00:00"  # Execute btrfs auto balancer every first Saturday of a month
-SYS_SCHEDULE_REPAIR_DOCKER_HARD: "Sun *-*-* 08:00:00"               # Restart docker instances every Sunday at 8:00 AM
+SYS_SCHEDULE_REPAIR_DOCKER_HARD: "Sun *-*-* 00:00:00"               # Restart docker instances every Sunday
 
 ### Schedule for backup tasks
-SYS_SCHEDULE_BACKUP_DOCKER_TO_LOCAL: "*-*-* 03:30:00"
-SYS_SCHEDULE_BACKUP_REMOTE_TO_LOCAL: "*-*-* 21:30:00"
+SYS_SCHEDULE_BACKUP_REMOTE_TO_LOCAL: "*-*-* 00:30:00"               # Pull Backup of the previous day
+SYS_SCHEDULE_BACKUP_DOCKER_TO_LOCAL: "*-*-* 01:00:00"               # Backup the current day
 
 ### Schedule for Maintenance Tasks
-SYS_SCHEDULE_MAINTANANCE_LETSENCRYPT_RENEW: "*-*-* 12,00:30:00"     # Renew Mailu certificates twice per day
-SYS_SCHEDULE_MAINTANANCE_LETSENCRYPT_DEPLOY: "*-*-* 13,01:30:00"    # Deploy letsencrypt certificates twice per day to docker containers
-SYS_SCHEDULE_MAINTANANCE_NEXTCLOUD: "22"                            # Do nextcloud maintanace between 22:00 and 02:00
+SYS_SCHEDULE_MAINTANANCE_LETSENCRYPT_RENEW: "*-*-* 10,22:00:00"     # Renew Mailu certificates twice per day
+SYS_SCHEDULE_MAINTANANCE_LETSENCRYPT_DEPLOY: "*-*-* 11,23:00:00"    # Deploy letsencrypt certificates twice per day to docker containers
+SYS_SCHEDULE_MAINTANANCE_NEXTCLOUD: "21"                            # Do nextcloud maintanace between 21:00 and 01:00
 
 ### Animation
 SYS_SCHEDULE_ANIMATION_KEYBOARD_COLOR: "*-*-* *:*:00"               # Change the keyboard color every minute
@@ -102,6 +102,18 @@ defaults_networks:
     subnet: 192.168.103.208/28
   web-app-bridgy-fed:
     subnet: 192.168.103.224/28
+  web-app-xwiki:
+    subnet: 192.168.103.240/28
+  web-app-openwebui:
+    subnet: 192.168.104.0/28
+  web-app-flowise:
+    subnet: 192.168.104.16/28
+  web-app-minio:
+    subnet: 192.168.104.32/28
+  web-svc-coturn:
+    subnet: 192.168.104.48/28
+  web-app-mini-qr:
+    subnet: 192.168.104.64/28
 
   # /24 Networks / 254 Usable Clients
   web-app-bigbluebutton:
@@ -114,3 +126,5 @@ defaults_networks:
     subnet: 192.168.201.0/24
   svc-db-openldap:
     subnet: 192.168.202.0/24
+  svc-ai-ollama:
+    subnet: 192.168.203.0/24   # Big network to bridge applications into ai
@@ -75,20 +75,35 @@ ports:
     web-app-bluesky_view:      8051
     web-app-magento:           8052
     web-app-bridgy-fed:        8053
+    web-app-xwiki:             8054
+    web-app-openwebui:         8055
+    web-app-flowise:           8056
+    web-app-minio_api:         8057
+    web-app-minio_console:     8058
+    web-app-mini-qr:           8059
     web-app-bigbluebutton:     48087  # This port is predefined by bbb. @todo Try to change this to a 8XXX port
   public:
     # The following ports should be changed to 22 on the subdomain via stream mapping
     ssh:
       web-app-gitea:           2201
       web-app-gitlab:          2202
     ldaps:
      svc-db-openldap:          636
-    stun:
+    stun_turn:
       web-app-bigbluebutton:   3478   # Not sure if it's right placed here or if it should be moved to localhost section
       # Occupied by BBB: 3479
       web-app-nextcloud:       3480
-    turn:
-      web-app-bigbluebutton:   5349   # Not sure if it's right placed here or if it should be moved to localhost section
-      web-app-nextcloud:       5350   # Not used yet
+      web-svc-coturn:          3481
+    stun_turn_tls:
+      web-app-bigbluebutton:   5349   # Not sure if it's right placed here or if it should be moved to localhost section
+      web-app-nextcloud:       5350   # Not used yet
+      web-svc-coturn:          5351
     federation:
       web-app-matrix_synapse:  8448
+    relay_port_ranges:
+      web-svc-coturn_start:          20000
+      web-svc-coturn_end:            39999
+      web-app-bigbluebutton_start:   40000
+      web-app-bigbluebutton_end:     49999
+      web-app-nextcloud_start:       50000
+      web-app-nextcloud_end:         59999
@@ -3,4 +3,3 @@ BACKUPS_FOLDER_PATH: "/Backups/" # Path to the backups folder
 # Storage Space-Related Configurations
 SIZE_PERCENT_MAXIMUM_BACKUP: 75        # Maximum storage space in percent for backups
 SIZE_PERCENT_CLEANUP_DISC_SPACE: 85    # Threshold for triggering cleanup actions
-SIZE_PERCENT_DISC_SPACE_WARNING: 90    # Warning threshold in percent for free disk space
group_vars/all/17_ai.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
# URL of Local Ollama Container
OLLAMA_BASE_LOCAL_URL: "http://{{ applications | get_app_conf('svc-ai-ollama', 'docker.services.ollama.name') }}:{{ applications | get_app_conf('svc-ai-ollama', 'docker.services.ollama.port') }}"
OLLAMA_LOCAL_ENABLED:  "{{ applications | get_app_conf(application_id, 'features.local_ai') }}"

group_vars/all/18_resource.yml (new file, 47 lines)
@@ -0,0 +1,47 @@
# Host resources
RESOURCE_HOST_CPUS: "{{ ansible_processor_vcpus | int }}"
RESOURCE_HOST_MEM:  "{{ (ansible_memtotal_mb | int) // 1024 }}"

# Reserve for OS
RESOURCE_HOST_RESERVE_CPU: 2
RESOURCE_HOST_RESERVE_MEM: 4

# Available for apps
RESOURCE_AVAIL_CPUS: "{{ (RESOURCE_HOST_CPUS | int) - (RESOURCE_HOST_RESERVE_CPU | int) }}"
RESOURCE_AVAIL_MEM:  "{{ (RESOURCE_HOST_MEM | int) - (RESOURCE_HOST_RESERVE_MEM | int) }}"

# Count active docker services (only roles starting with web- or svc-; service counts if enabled==true OR enabled is undefined)
RESOURCE_ACTIVE_DOCKER_CONTAINER_COUNT: >-
  {{
    applications
    | active_docker_container_count(group_names, '^(web-|svc-).*', ensure_min_one=True)
  }}

# Per-container fair share (numbers!), later we append 'g' only for the string fields in compose
RESOURCE_CPUS_NUM: >-
  {{
    [
      (
        ((RESOURCE_AVAIL_CPUS | float) / (RESOURCE_ACTIVE_DOCKER_CONTAINER_COUNT | float))
        | round(2)
      ),
      0.5
    ] | max
  }}

RESOURCE_MEM_RESERVATION_NUM: >-
  {{
    (((RESOURCE_AVAIL_MEM | float) / (RESOURCE_ACTIVE_DOCKER_CONTAINER_COUNT | float)) * 0.7)
    | round(1)
  }}
RESOURCE_MEM_LIMIT_NUM: >-
  {{
    (((RESOURCE_AVAIL_MEM | float) / (RESOURCE_ACTIVE_DOCKER_CONTAINER_COUNT | float)) * 1.0)
    | round(1)
  }}

# Final strings with units for compose defaults (keep numbers above for math elsewhere if needed)
RESOURCE_CPUS:            "{{ RESOURCE_CPUS_NUM }}"
RESOURCE_MEM_RESERVATION: "{{ RESOURCE_MEM_RESERVATION_NUM }}g"
RESOURCE_MEM_LIMIT:       "{{ RESOURCE_MEM_LIMIT_NUM }}g"
RESOURCE_PIDS_LIMIT:      512
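Side note: a worked example of the per-container fair share, with assumed host figures (8 vCPUs, 16 GiB RAM, 7 active containers) rather than real inventory values:

```python
# Illustrative restatement of the fair-share math above (all numbers are assumptions).
host_cpus, host_mem_gb = 8, 16
avail_cpus = host_cpus - 2          # RESOURCE_HOST_RESERVE_CPU
avail_mem  = host_mem_gb - 4        # RESOURCE_HOST_RESERVE_MEM
containers = 7                      # RESOURCE_ACTIVE_DOCKER_CONTAINER_COUNT

cpus_num            = max(round(avail_cpus / containers, 2), 0.5)   # never below 0.5 CPU
mem_reservation_num = round(avail_mem / containers * 0.7, 1)        # 70% of the share
mem_limit_num       = round(avail_mem / containers * 1.0, 1)        # full share as hard limit

print(cpus_num, f"{mem_reservation_num}g", f"{mem_limit_num}g")     # 0.86 1.2g 1.7g
```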
@@ -6,6 +6,7 @@ __metaclass__ = type
 import os
 import subprocess
 import time
+from datetime import datetime
 
 class CertUtils:
     _domain_cert_mapping = None
@@ -22,6 +23,30 @@ class CertUtils:
         except subprocess.CalledProcessError:
             return ""
 
+    @staticmethod
+    def run_openssl_dates(cert_path):
+        """
+        Returns (not_before_ts, not_after_ts) as POSIX timestamps or (None, None) on failure.
+        """
+        try:
+            output = subprocess.check_output(
+                ['openssl', 'x509', '-in', cert_path, '-noout', '-startdate', '-enddate'],
+                universal_newlines=True
+            )
+            nb, na = None, None
+            for line in output.splitlines():
+                line = line.strip()
+                if line.startswith('notBefore='):
+                    nb = line.split('=', 1)[1].strip()
+                elif line.startswith('notAfter='):
+                    na = line.split('=', 1)[1].strip()
+            def _parse(openssl_dt):
+                # OpenSSL format example: "Oct 10 12:34:56 2025 GMT"
+                return int(datetime.strptime(openssl_dt, "%b %d %H:%M:%S %Y %Z").timestamp())
+            return (_parse(nb) if nb else None, _parse(na) if na else None)
+        except Exception:
+            return (None, None)
+
     @staticmethod
     def extract_sans(cert_text):
         dns_entries = []
@@ -59,7 +84,6 @@ class CertUtils:
         else:
             return domain == san
 
-
     @classmethod
     def build_snapshot(cls, cert_base_path):
         snapshot = []
@@ -82,6 +106,17 @@ class CertUtils:
 
     @classmethod
     def refresh_cert_mapping(cls, cert_base_path, debug=False):
+        """
+        Build mapping: SAN -> list of entries
+        entry = {
+            'folder': str,
+            'cert_path': str,
+            'mtime': float,
+            'not_before': int|None,
+            'not_after': int|None,
+            'is_wildcard': bool
+        }
+        """
         cert_files = cls.list_cert_files(cert_base_path)
         mapping = {}
         for cert_path in cert_files:
@@ -90,46 +125,82 @@ class CertUtils:
                 continue
             sans = cls.extract_sans(cert_text)
             folder = os.path.basename(os.path.dirname(cert_path))
+            try:
+                mtime = os.stat(cert_path).st_mtime
+            except FileNotFoundError:
+                mtime = 0.0
+            nb, na = cls.run_openssl_dates(cert_path)
+
             for san in sans:
-                if san not in mapping:
-                    mapping[san] = folder
+                entry = {
+                    'folder': folder,
+                    'cert_path': cert_path,
+                    'mtime': mtime,
+                    'not_before': nb,
+                    'not_after': na,
+                    'is_wildcard': san.startswith('*.'),
+                }
+                mapping.setdefault(san, []).append(entry)
 
         cls._domain_cert_mapping = mapping
         if debug:
-            print(f"[DEBUG] Refreshed domain-to-cert mapping: {mapping}")
+            print(f"[DEBUG] Refreshed domain-to-cert mapping (counts): "
+                  f"{ {k: len(v) for k, v in mapping.items()} }")
 
     @classmethod
     def ensure_cert_mapping(cls, cert_base_path, debug=False):
         if cls._domain_cert_mapping is None or cls.snapshot_changed(cert_base_path):
             cls.refresh_cert_mapping(cert_base_path, debug)
 
+    @staticmethod
+    def _score_entry(entry):
+        """
+        Return tuple used for sorting newest-first:
+        (not_before or -inf, mtime)
+        """
+        nb = entry.get('not_before')
+        mtime = entry.get('mtime', 0.0)
+        return (nb if nb is not None else -1, mtime)
+
     @classmethod
     def find_cert_for_domain(cls, domain, cert_base_path, debug=False):
         cls.ensure_cert_mapping(cert_base_path, debug)
 
-        exact_match = None
-        wildcard_match = None
+        candidates_exact = []
+        candidates_wild = []
 
-        for san, folder in cls._domain_cert_mapping.items():
+        for san, entries in cls._domain_cert_mapping.items():
             if san == domain:
-                exact_match = folder
-                break
-            if san.startswith('*.'):
+                candidates_exact.extend(entries)
+            elif san.startswith('*.'):
                 base = san[2:]
                 if domain.count('.') == base.count('.') + 1 and domain.endswith('.' + base):
-                    wildcard_match = folder
+                    candidates_wild.extend(entries)
 
-        if exact_match:
-            if debug:
-                print(f"[DEBUG] Exact match for {domain} found in {exact_match}")
-            return exact_match
-
-        if wildcard_match:
-            if debug:
-                print(f"[DEBUG] Wildcard match for {domain} found in {wildcard_match}")
-            return wildcard_match
+        def _pick_newest(entries):
+            if not entries:
+                return None
+            # newest by (not_before, mtime)
+            best = max(entries, key=cls._score_entry)
+            return best
+
+        best_exact = _pick_newest(candidates_exact)
+        best_wild = _pick_newest(candidates_wild)
+
+        if best_exact and debug:
+            print(f"[DEBUG] Best exact match for {domain}: {best_exact['folder']} "
+                  f"(not_before={best_exact['not_before']}, mtime={best_exact['mtime']})")
+        if best_wild and debug:
+            print(f"[DEBUG] Best wildcard match for {domain}: {best_wild['folder']} "
+                  f"(not_before={best_wild['not_before']}, mtime={best_wild['mtime']})")
+
+        # Prefer exact if it exists; otherwise wildcard
+        chosen = best_exact or best_wild
+
+        if chosen:
+            return chosen['folder']
 
         if debug:
             print(f"[DEBUG] No certificate folder found for {domain}")
 
         return None
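Side note: the wildcard rule and the newest-first scoring introduced above can be exercised in isolation. The domains, folders and timestamps below are invented; only the comparison logic mirrors the change.

```python
# Illustrative only: mirrors the SAN wildcard check and the (not_before, mtime) scoring.
def wildcard_covers(san: str, domain: str) -> bool:
    if not san.startswith('*.'):
        return domain == san
    base = san[2:]
    # exactly one extra label, and the domain must end with ".<base>"
    return domain.count('.') == base.count('.') + 1 and domain.endswith('.' + base)

print(wildcard_covers('*.example.org', 'www.example.org'))   # True
print(wildcard_covers('*.example.org', 'a.b.example.org'))   # False (two extra labels)

entries = [
    {'folder': 'old',  'not_before': 1_700_000_000, 'mtime': 10.0},
    {'folder': 'new',  'not_before': 1_760_000_000, 'mtime': 5.0},
    {'folder': 'noNB', 'not_before': None,          'mtime': 99.0},
]

def score(entry):
    nb = entry['not_before']
    return (nb if nb is not None else -1, entry['mtime'])

print(max(entries, key=score)['folder'])                      # 'new' — newest not_before wins
```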
@@ -24,7 +24,7 @@ class ConfigEntryNotSetError(AppConfigKeyError):
     pass
 
 
-def get_app_conf(applications, application_id, config_path, strict=True, default=None):
+def get_app_conf(applications, application_id, config_path, strict=True, default=None, skip_missing_app=False):
     # Path to the schema file for this application
     schema_path = os.path.join('roles', application_id, 'schema', 'main.yml')
 
@@ -133,6 +133,9 @@ def get_app_conf(applications, application_id, config_path, strict=True, default
     try:
         obj = applications[application_id]
     except KeyError:
+        if skip_missing_app:
+            # Simply return default instead of failing
+            return default if default is not None else False
         raise AppConfigKeyError(
             f"Application ID '{application_id}' not found in applications dict.\n"
             f"path_trace: {path_trace}\n"
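Side note: the effect of the new `skip_missing_app` flag can be summarised with a small sketch. This is a simplified stand-in, not the real `get_app_conf`, and the application ids are invented.

```python
# Simplified sketch of the skip_missing_app behaviour (illustrative only).
class AppConfigKeyError(KeyError):
    pass

def get_conf(applications, application_id, default=None, skip_missing_app=False):
    try:
        return applications[application_id]
    except KeyError:
        if skip_missing_app:
            # Return the default instead of failing when the app is not deployed at all
            return default if default is not None else False
        raise AppConfigKeyError(f"Application ID '{application_id}' not found")

apps = {"web-app-nextcloud": {"features": {"ldap": True}}}
print(get_conf(apps, "web-app-missing", default=True, skip_missing_app=True))  # True (default)
print(get_conf(apps, "web-app-missing", skip_missing_app=True))                # False (no default)
```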
@@ -142,7 +142,8 @@ class InventoryManager:
         """
         if algorithm == "random_hex":
             return secrets.token_hex(64)
+        if algorithm == "random_hex_32":
+            return secrets.token_hex(32)
         if algorithm == "sha256":
             return hashlib.sha256(secrets.token_bytes(32)).hexdigest()
         if algorithm == "sha1":
@@ -1,10 +1,10 @@
-- name: Execute {{ SOFTWARE_NAME }} Play
+- name: "Execute {{ SOFTWARE_NAME }} Play"
   hosts: all
   tasks:
     - name: "Load 'constructor' tasks"
       include_tasks: "tasks/stages/01_constructor.yml"
-    - name: "Load '{{host_type}}' tasks"
-      include_tasks: "tasks/stages/02_{{host_type}}.yml"
+    - name: "Load '{{ host_type }}' tasks"
+      include_tasks: "tasks/stages/02_{{ host_type }}.yml"
     - name: "Load 'destructor' tasks"
       include_tasks: "tasks/stages/03_destructor.yml"
   become: true
@@ -3,4 +3,7 @@ collections:
   - name: community.general
   - name: hetzner.hcloud
 yay:
   - python-simpleaudio
+  - python-numpy
+pacman:
+  - ansible
@@ -1,3 +0,0 @@
-# Todos
-- Use at all applications the ansible role name as application_id
-- Implement filter_plugins/get_infinito_path.py
@@ -71,11 +71,6 @@ roles:
     description: "OS & package updates"
     icon: "fas fa-sync"
     invokable: true
-  pkgmgr:
-    title: "Package Manager Helpers"
-    description: "Helpers for package managers and unified install flows."
-    icon: "fas fa-box-open"
-    invokable: false
   drv:
     title: "Drivers"
     description: "Roles for installing and configuring hardware drivers—covering printers, graphics, input devices, and other peripheral support."
@@ -153,6 +148,16 @@ roles:
     description: "Network setup (DNS, Let's Encrypt HTTP, WireGuard, etc.)"
     icon: "fas fa-globe"
     invokable: true
+  ai:
+    title: "AI Services"
+    description: "Core AI building blocks—model serving, OpenAI-compatible gateways, vector databases, orchestration, and chat UIs."
+    icon: "fas fa-brain"
+    invokable: true
+  bkp:
+    title: "Backup Services"
+    description: "Service-level backup and recovery components—handling automated data snapshots, remote backups, synchronization services, and backup orchestration across databases, files, and containers."
+    icon: "fas fa-database"
+    invokable: true
   user:
     title: "Users & Access"
     description: "User accounts & access control"
@@ -5,8 +5,8 @@
 
 - name: Link homefolders to cloud
   ansible.builtin.file:
-    src: "{{nextcloud_cloud_directory}}{{item}}"
-    dest: "{{nextcloud_user_home_directory}}{{item}}"
+    src: "{{nextcloud_cloud_directory}}{{ item }}"
+    dest: "{{nextcloud_user_home_directory}}{{ item }}"
     owner: "{{ users[desktop_username].username }}"
     group: "{{ users[desktop_username].username }}"
     state: link
@@ -1,11 +1,11 @@
 ---
 - name: Setup locale.gen
   template:
     src: locale.gen.j2
     dest: /etc/locale.gen
 
 - name: Setup locale.conf
   template:
     src: locale.conf.j2
     dest: /etc/locale.conf
 
@@ -127,7 +127,7 @@
 #de_BE@euro ISO-8859-15
 #de_CH.UTF-8 UTF-8
 #de_CH ISO-8859-1
-de_DE.UTF-8 UTF-8
+#de_DE.UTF-8 UTF-8
 #de_DE ISO-8859-1
 #de_DE@euro ISO-8859-15
 #de_IT.UTF-8 UTF-8
roles/dev-yay/defaults/main.yml (new file, 4 lines)
@@ -0,0 +1,4 @@
AUR_HELPER: yay
AUR_BUILDER_USER: aur_builder
AUR_BUILDER_GROUP: wheel
AUR_BUILDER_SUDOERS_PATH: /etc/sudoers.d/11-install-aur_builder
@@ -6,42 +6,53 @@
     - dev-git
     - dev-base-devel
 
-- name: install yay
+- name: Install yay build prerequisites
   community.general.pacman:
     name:
       - base-devel
      - patch
     state: present
 
-- name: Create the `aur_builder` user
+- name: Create the AUR builder user
   become: true
   ansible.builtin.user:
-    name: aur_builder
+    name: "{{ AUR_BUILDER_USER }}"
     create_home: yes
-    group: wheel
+    group: "{{ AUR_BUILDER_GROUP }}"
 
-- name: Allow the `aur_builder` user to run `sudo pacman` without a password
+- name: Allow AUR builder to run pacman without password
   become: true
   ansible.builtin.lineinfile:
-    path: /etc/sudoers.d/11-install-aur_builder
-    line: 'aur_builder ALL=(ALL) NOPASSWD: /usr/bin/pacman'
+    path: "{{ AUR_BUILDER_SUDOERS_PATH }}"
+    line: '{{ AUR_BUILDER_USER }} ALL=(ALL) NOPASSWD: /usr/bin/pacman'
     create: yes
     validate: 'visudo -cf %s'
 
 - name: Clone yay from AUR
   become: true
-  become_user: aur_builder
+  become_user: "{{ AUR_BUILDER_USER }}"
   git:
     repo: https://aur.archlinux.org/yay.git
-    dest: /home/aur_builder/yay
+    dest: "/home/{{ AUR_BUILDER_USER }}/yay"
     clone: yes
     update: yes
 
 - name: Build and install yay
   become: true
-  become_user: aur_builder
+  become_user: "{{ AUR_BUILDER_USER }}"
   shell: |
-    cd /home/aur_builder/yay
+    cd /home/{{ AUR_BUILDER_USER }}/yay
     makepkg -si --noconfirm
   args:
     creates: /usr/bin/yay
+
+- name: upgrade the system using yay, only act on AUR packages.
+  become: true
+  become_user: "{{ AUR_BUILDER_USER }}"
+  kewlfft.aur.aur:
+    upgrade: yes
+    use: "{{ AUR_HELPER }}"
+    aur_only: yes
+  when: MODE_UPDATE | bool
+
+- include_tasks: utils/run_once.yml
@@ -1,5 +1,3 @@
 - block:
     - include_tasks: 01_core.yml
-    - set_fact:
-        run_once_dev_yay: true
   when: run_once_dev_yay is not defined
@@ -1,3 +1,3 @@
-docker_compose_skipp_file_creation: false   # If set to true the file creation will be skipped
+docker_compose_file_creation_enabled: true  # If set to true the file creation will be skipped
 docker_pull_git_repository: false           # Activates docker repository download and routine
 docker_compose_flush_handlers: false        # Set to true in the vars/main.yml of the including role to autoflush after docker compose routine
@@ -9,7 +9,6 @@
   listen:
     - docker compose up
     - docker compose restart
-    - docker compose just up
   when: MODE_ASSERT | bool
 
 - name: docker compose pull
@@ -41,9 +40,8 @@
   listen:
     - docker compose up
     - docker compose restart
-    - docker compose just up
 
 - name: Build docker compose
   shell: |
     set -euo pipefail
     docker compose build || {
@@ -77,7 +75,6 @@
     DOCKER_CLIENT_TIMEOUT: 600
   listen:
     - docker compose up
-    - docker compose just up # @todo replace later just up by up when code is refactored, build atm is also listening to up
 
 - name: docker compose restart
   command:
@@ -1,15 +1,18 @@
 - name: Set default docker_repository_path
   set_fact:
-    docker_repository_path: "{{docker_compose.directories.services}}repository/"
+    docker_repository_path: "{{ [ docker_compose.directories.services, 'repository/' ] | path_join }}"
 
 - name: pull docker repository
   git:
     repo: "{{ docker_repository_address }}"
     dest: "{{ docker_repository_path }}"
     version: "{{ docker_repository_branch | default('main') }}"
-    depth: 1
-    update: yes
-    recursive: yes
+    single_branch: yes
+    depth: 1
+    update: yes
+    recursive: yes
+    force: yes
+    accept_hostkey: yes
   notify:
     - docker compose build
     - docker compose up
@@ -6,8 +6,8 @@
     - "{{ application_id | abs_role_path_by_application_id }}/templates/Dockerfile.j2"
     - "{{ application_id | abs_role_path_by_application_id }}/files/Dockerfile"
   notify:
-    - docker compose up
     - docker compose build
+    - docker compose up
   register: create_dockerfile_result
   failed_when:
     - create_dockerfile_result is failed
@@ -28,6 +28,21 @@
     - env_template is failed
     - "'Could not find or access' not in env_template.msg"
 
+- name: "Create (optional) '{{ docker_compose.files.docker_compose_override }}'"
+  template:
+    src: "{{ item }}"
+    dest: "{{ docker_compose.files.docker_compose_override }}"
+    mode: '770'
+    force: yes
+  notify: docker compose up
+  register: docker_compose_override_template
+  loop:
+    - "{{ application_id | abs_role_path_by_application_id }}/templates/docker-compose.override.yml.j2"
+    - "{{ application_id | abs_role_path_by_application_id }}/files/docker-compose.override.yml"
+  failed_when:
+    - docker_compose_override_template is failed
+    - "'Could not find or access' not in docker_compose_override_template.msg"
+
 - name: "Create (obligatoric) '{{ docker_compose.files.docker_compose }}'"
   template:
     src: "docker-compose.yml.j2"
@@ -24,7 +24,7 @@
     include_tasks: "04_files.yml"
   - name: "Ensure that {{ docker_compose.directories.instance }} is up"
     include_tasks: "05_ensure_up.yml"
-  when: not docker_compose_skipp_file_creation | bool
+  when: docker_compose_file_creation_enabled | bool
 
 - name: "flush docker compose for '{{ application_id }}'"
   meta: flush_handlers
@@ -1,5 +1,6 @@
 {# This template needs to be included in docker-compose.yml #}
 networks:
+{# Central RDMS-Database Network #}
 {% if
     (applications | get_app_conf(application_id, 'features.central_database', False) and database_type is defined) or
     application_id in ['svc-db-mariadb','svc-db-postgres']
@@ -7,6 +8,7 @@ networks:
   {{ applications | get_app_conf('svc-db-' ~ database_type, 'docker.network') }}:
     external: true
 {% endif %}
+{# Central LDAP Network #}
 {% if
     applications | get_app_conf(application_id, 'features.ldap', False) and
     applications | get_app_conf('svc-db-openldap', 'network.docker', False)
@@ -14,7 +16,13 @@ networks:
   {{ applications | get_app_conf('svc-db-openldap', 'docker.network') }}:
     external: true
 {% endif %}
-{% if not application_id.startswith('svc-db-') %}
+{# Central AI Network #}
+{% if applications | get_app_conf(application_id, 'features.local_ai', False) %}
+  {{ applications | get_app_conf('svc-ai-ollama', 'docker.network') }}:
+    external: true
+{% endif %}
+{# Default Network #}
+{% if not application_id.startswith('svc-db-') and not application_id.startswith('svc-ai-') %}
   default:
 {% if
     application_id in networks.local and
@@ -25,7 +33,7 @@ networks:
     ipam:
       driver: default
       config:
-        - subnet: {{networks.local[application_id].subnet}}
+        - subnet: {{ networks.local[application_id].subnet }}
 {% endif %}
 {% endif %}
 {{ "\n" }}
@@ -1,4 +1,6 @@
 users:
   blackhole:
     description: "Everything what will be send to this user will disapear"
     username: "blackhole"
+    roles:
+      - mail-bot
@@ -1,2 +1,4 @@
 # @See https://chatgpt.com/share/67a23d18-fb54-800f-983c-d6d00752b0b4
 docker_compose: "{{ application_id | get_docker_paths(PATH_DOCKER_COMPOSE_INSTANCES) }}"
+docker_compose_command_base: "docker compose --env-file {{ docker_compose.files.env }}"
+docker_compose_command_exec: "{{ docker_compose_command_base }} exec"
@@ -1,11 +1,13 @@
 {# Base for docker services #}
 
-    restart: {{ DOCKER_RESTART_POLICY }}
+    restart: {{ docker_restart_policy | default(DOCKER_RESTART_POLICY) }}
 {% if application_id | has_env %}
     env_file:
       - "{{ docker_compose.files.env }}"
 {% endif %}
     logging:
       driver: journald
+{% filter indent(4) %}
+{% include 'roles/docker-container/templates/resource.yml.j2' %}
+{% endfilter %}
 {{ "\n" }}
roles/docker-container/templates/build.yml.j2 (new file, 6 lines)
@@ -0,0 +1,6 @@
{# integrate it into service sections to be build by Dockerfile #}
pull_policy: never
build:
  context: .
  dockerfile: Dockerfile
{# pass Arguments here #}
@@ -1,15 +1,25 @@
 {# This template needs to be included in docker-compose.yml containers #}
     networks:
+{# Central RDMS-Database Network #}
 {% if
     (applications | get_app_conf(application_id, 'features.central_database', False) and database_type is defined) or
     application_id in ['svc-db-mariadb','svc-db-postgres']
 %}
       {{ applications | get_app_conf('svc-db-' ~ database_type, 'docker.network') }}:
+{% if application_id in ['svc-db-mariadb','svc-db-postgres'] %}
+        aliases:
+          - {{ database_type }}
 {% endif %}
+{% endif %}
+{# Central LDAP Network #}
 {% if applications | get_app_conf(application_id, 'features.ldap', False) and applications | get_app_conf('svc-db-openldap', 'network.docker') %}
       {{ applications | get_app_conf('svc-db-openldap', 'docker.network') }}:
 {% endif %}
-{% if application_id != 'svc-db-openldap' %}
+{# Central AI Network #}
+{% if applications | get_app_conf(application_id, 'features.local_ai', False) %}
+      {{ applications | get_app_conf('svc-ai-ollama', 'docker.network') }}:
+{% endif %}
+{% if not application_id.startswith('svc-db-') and not application_id.startswith('svc-ai-') %}
       default:
 {% endif %}
 {{ "\n" }}
roles/docker-container/templates/resource.yml.j2 (new file, 4 lines)
@@ -0,0 +1,4 @@
cpus:            {{ applications | resource_filter(application_id, 'cpus', service_name | default(''), RESOURCE_CPUS) }}
mem_reservation: {{ applications | resource_filter(application_id, 'mem_reservation', service_name | default(''), RESOURCE_MEM_RESERVATION) }}
mem_limit:       {{ applications | resource_filter(application_id, 'mem_limit', service_name | default(''), RESOURCE_MEM_LIMIT) }}
pids_limit:      {{ applications | resource_filter(application_id, 'pids_limit', service_name | default(''), RESOURCE_PIDS_LIMIT) }}
@@ -4,7 +4,7 @@
       run_once_pkgmgr_install: true
   when: run_once_pkgmgr_install is not defined
 
-- name: update {{ package_name }}
+- name: "update {{ package_name }}"
   ansible.builtin.shell: |
     source ~/.venvs/pkgmgr/bin/activate
     pkgmgr update {{ package_name }} --dependencies --clone-mode https
@@ -43,3 +43,7 @@
     chdir: "{{ PKGMGR_INSTALL_PATH }}"
     executable: /bin/bash
   become: true
+
+- name: "Update all repositories with pkgmgr"
+  command: "pkgmgr pull --all"
+  when: MODE_UPDATE | bool
roles/svc-ai-ollama/README.md (new file, 23 lines)
@@ -0,0 +1,23 @@
# Ollama

## Description

**Ollama** is a local model server that runs open LLMs on your hardware and exposes a simple HTTP API. It’s the backbone for privacy-first AI: prompts and data stay on your machines.

## Overview

After the first model pull, Ollama serves models to clients like Open WebUI (for chat) and Flowise (for workflows). Models are cached locally for quick reuse and can run fully offline when required.

## Features

* Run popular open models (chat, code, embeddings) locally
* Simple, predictable HTTP API for developers
* Local caching to avoid repeated downloads
* Works seamlessly with Open WebUI and Flowise
* Offline-capable for air-gapped deployments

## Further Resources

* Ollama — [https://ollama.com](https://ollama.com)
* Ollama Model Library — [https://ollama.com/library](https://ollama.com/library)
roles/svc-ai-ollama/config/main.yml (new file, 22 lines)
@@ -0,0 +1,22 @@
features:
  local_ai: true                # Needs to be set so that network is loaded
docker:
  services:
    ollama:
      backup:
        no_stop_required: true
      image: ollama/ollama
      version: latest
      name: ollama
      port: 11434
      cpus: "4.0"
      mem_reservation: "6g"
      mem_limit: "8g"
      pids_limit: 2048
  volumes:
    models: "ollama_models"
  network: "ollama"
preload_models:
  - "llama3:latest"
  - "mistral:latest"
  - "nomic-embed-text:latest"
roles/svc-ai-ollama/meta/main.yml (new file, 25 lines)
@@ -0,0 +1,25 @@
---
galaxy_info:
  author: "Kevin Veen-Birkenbach"
  description: "Installs Ollama — a local model server for running open LLMs with a simple HTTP API."
  license: "Infinito.Nexus NonCommercial License"
  license_url: "https://s.infinito.nexus/license"
  company: |
    Kevin Veen-Birkenbach
    Consulting & Coaching Solutions
    https://www.veen.world
  galaxy_tags:
    - ai
    - llm
    - inference
    - offline
    - privacy
    - self-hosted
    - ollama
  repository: "https://s.infinito.nexus/code"
  issue_tracker_url: "https://s.infinito.nexus/issues"
  documentation: "https://s.infinito.nexus/code/"
  logo:
    class: "fa-solid fa-microchip"
run_after: []
dependencies: []
roles/svc-ai-ollama/tasks/01_core.yml (new file, 38 lines)
@@ -0,0 +1,38 @@
- name: create docker network for Ollama, so that other applications can access it
  community.docker.docker_network:
    name: "{{ OLLAMA_NETWORK }}"
    state: present
    ipam_config:
      - subnet: "{{ networks.local[application_id].subnet }}"

- name: Include dependency 'sys-svc-docker'
  include_role:
    name: sys-svc-docker
  when: run_once_sys_svc_docker is not defined

- name: "include docker-compose role"
  include_role:
    name: docker-compose
  vars:
    docker_compose_flush_handlers: true

- name: Pre-pull Ollama models
  vars:
    _cmd: "docker exec -i {{ OLLAMA_CONTAINER }} ollama pull {{ model }}"
  shell: "{{ _cmd }}"
  register: pull_result
  loop: "{{ OLLAMA_PRELOAD_MODELS }}"
  loop_control:
    loop_var: model
  async: "{{ ASYNC_TIME if ASYNC_ENABLED | bool else omit }}"
  poll: "{{ ASYNC_POLL if ASYNC_ENABLED | bool else omit }}"
  changed_when: >
    (not (ASYNC_ENABLED | bool)) and (
      'downloaded' in (pull_result.stdout | default('')) or
      'pulling manifest' in (pull_result.stdout | default(''))
    )
  failed_when: >
    (pull_result.rc | default(0)) != 0 and
    ('up to date' not in (pull_result.stdout | default('')))

- include_tasks: utils/run_once.yml
roles/svc-ai-ollama/tasks/main.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
- block:
    - include_tasks: 01_core.yml
      vars:
        flush_handlers: true
  when: run_once_svc_ai_ollama is not defined
roles/svc-ai-ollama/templates/docker-compose.yml.j2 (new file, 17 lines)
@@ -0,0 +1,17 @@
{% include 'roles/docker-compose/templates/base.yml.j2' %}

  ollama:
{% include 'roles/docker-container/templates/base.yml.j2' %}
    image: {{ OLLAMA_IMAGE }}:{{ OLLAMA_VERSION }}
    container_name: {{ OLLAMA_CONTAINER }}
    expose:
      - "{{ OLLAMA_PORT }}"
    volumes:
      - ollama_models:/root/.ollama
{% include 'roles/docker-container/templates/networks.yml.j2' %}

{% include 'roles/docker-compose/templates/networks.yml.j2' %}

{% include 'roles/docker-compose/templates/volumes.yml.j2' %}
  ollama_models:
    name: {{ OLLAMA_VOLUME }}
roles/svc-ai-ollama/vars/main.yml (new file, 16 lines)
@@ -0,0 +1,16 @@
# General
application_id: "svc-ai-ollama"

# Docker
docker_compose_flush_handlers: true

# Ollama
# https://ollama.com/
OLLAMA_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.ollama.version') }}"
OLLAMA_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.ollama.image') }}"
OLLAMA_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.ollama.name') }}"
OLLAMA_PORT: "{{ applications | get_app_conf(application_id, 'docker.services.ollama.port') }}"
OLLAMA_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.models') }}"
OLLAMA_NETWORK: "{{ applications | get_app_conf(application_id, 'docker.network') }}"
OLLAMA_PRELOAD_MODELS: "{{ applications | get_app_conf(application_id, 'preload_models') }}"
roles/svc-bkp-rmt-2-loc/__init__.py (new file, 0 lines)
roles/svc-bkp-rmt-2-loc/files/__init__.py (new file, 0 lines)

roles/svc-bkp-rmt-2-loc/files/pull-specific-host.py (new file, 132 lines)
@@ -0,0 +1,132 @@
#!/usr/bin/env python3
import argparse
import os
import subprocess
import time
import sys


def run_command(command, capture_output=True, check=False, shell=True):
    """Run a shell command and return its output as string."""
    try:
        result = subprocess.run(
            command,
            capture_output=capture_output,
            shell=shell,
            text=True,
            check=check
        )
        return result.stdout.strip()
    except subprocess.CalledProcessError as e:
        if capture_output:
            print(e.stdout)
            print(e.stderr)
        raise


def pull_backups(hostname: str):
    print(f"pulling backups from: {hostname}")
    errors = 0

    print("loading meta data...")
    remote_host = f"backup@{hostname}"
    print(f"host address: {remote_host}")

    remote_machine_id = run_command(f'ssh "{remote_host}" sha256sum /etc/machine-id')[:64]
    print(f"remote machine id: {remote_machine_id}")

    general_backup_machine_dir = f"/Backups/{remote_machine_id}/"
    print(f"backup dir: {general_backup_machine_dir}")

    try:
        remote_backup_types = run_command(
            f'ssh "{remote_host}" "find {general_backup_machine_dir} -maxdepth 1 -type d -execdir basename {{}} ;"'
        ).splitlines()
        print(f"backup types: {' '.join(remote_backup_types)}")
    except subprocess.CalledProcessError:
        sys.exit(1)

    for backup_type in remote_backup_types:
        if backup_type == remote_machine_id:
            continue

        print(f"backup type: {backup_type}")

        general_backup_type_dir = f"{general_backup_machine_dir}{backup_type}/"
        general_versions_dir = general_backup_type_dir

        # local previous version
        try:
            local_previous_version_dir = run_command(f"ls -d {general_versions_dir}* | tail -1")
        except subprocess.CalledProcessError:
            local_previous_version_dir = ""
        print(f"last local backup: {local_previous_version_dir}")

        # remote versions
        remote_backup_versions = run_command(
            f'ssh "{remote_host}" "ls -d /Backups/{remote_machine_id}/backup-docker-to-local/*"'
        ).splitlines()
        print(f"remote backup versions: {' '.join(remote_backup_versions)}")

        remote_last_backup_dir = remote_backup_versions[-1] if remote_backup_versions else ""
        print(f"last remote backup: {remote_last_backup_dir}")

        remote_source_path = f"{remote_host}:{remote_last_backup_dir}/"
        print(f"source path: {remote_source_path}")

        local_backup_destination_path = remote_last_backup_dir
        print(f"backup destination: {local_backup_destination_path}")

        print("creating local backup destination folder...")
        os.makedirs(local_backup_destination_path, exist_ok=True)

        rsync_command = (
            f'rsync -abP --delete --delete-excluded --rsync-path="sudo rsync" '
            f'--link-dest="{local_previous_version_dir}" "{remote_source_path}" "{local_backup_destination_path}"'
        )
        print("starting backup...")
        print(f"executing: {rsync_command}")

        retry_count = 0
        max_retries = 12
        retry_delay = 300  # 5 minutes
        last_retry_start = 0
        max_retry_duration = 43200  # 12 hours

        rsync_exit_code = 1
        while retry_count < max_retries:
            print(f"Retry attempt: {retry_count + 1}")
            if retry_count > 0:
                current_time = int(time.time())
                last_retry_duration = current_time - last_retry_start
                if last_retry_duration >= max_retry_duration:
                    print("Last retry took more than 12 hours, increasing max retries to 12.")
                    max_retries = 12
            last_retry_start = int(time.time())
            rsync_exit_code = os.system(rsync_command)
            if rsync_exit_code == 0:
                break
            retry_count += 1
            time.sleep(retry_delay)

        if rsync_exit_code != 0:
            print(f"Error: rsync failed after {max_retries} attempts")
            errors += 1

    sys.exit(errors)


def main():
    parser = argparse.ArgumentParser(
        description="Pull backups from a remote backup host via rsync."
    )
    parser.add_argument(
        "hostname",
        help="Hostname from which backup should be pulled"
    )
    args = parser.parse_args()
    pull_backups(args.hostname)


if __name__ == "__main__":
    main()
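Usage sketch (an assumption, not shown in the diff): once the deploy task later in this changeset has copied the script to DOCKER_BACKUP_REMOTE_2_LOCAL_SCRIPT, it can be invoked once per provider host; the hostname below is only an example value.

    - name: Pull backups from a single provider host (example)
      command: "python {{ DOCKER_BACKUP_REMOTE_2_LOCAL_SCRIPT }} backup.example.org"
      become: true
      register: pull_run
      changed_when: pull_run.rc == 0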
@@ -1,85 +0,0 @@
#!/bin/bash
# @param $1 hostname from which backup should be pulled

echo "pulling backups from: $1" &&

# error counter
errors=0 &&

echo "loading meta data..." &&

remote_host="backup@$1" &&
echo "host address: $remote_host" &&

remote_machine_id="$( (ssh "$remote_host" sha256sum /etc/machine-id) | head -c 64 )" &&
echo "remote machine id: $remote_machine_id" &&

general_backup_machine_dir="/Backups/$remote_machine_id/" &&
echo "backup dir: $general_backup_machine_dir" &&

remote_backup_types="$(ssh "$remote_host" "find $general_backup_machine_dir -maxdepth 1 -type d -execdir basename {} ;")" &&
echo "backup types: $remote_backup_types" || exit 1

for backup_type in $remote_backup_types; do
  if [ "$backup_type" != "$remote_machine_id" ]; then
    echo "backup type: $backup_type" &&

    general_backup_type_dir="$general_backup_machine_dir""$backup_type/" &&
    general_versions_dir="$general_backup_type_dir" &&
    local_previous_version_dir="$(ls -d $general_versions_dir* | tail -1)" &&
    echo "last local backup: $local_previous_version_dir" &&

    remote_backup_versions="$(ssh "$remote_host" ls -d "$general_backup_type_dir"\*)" &&
    echo "remote backup versions: $remote_backup_versions" &&

    remote_last_backup_dir=$(echo "$remote_backup_versions" | tail -1) &&
    echo "last remote backup: $remote_last_backup_dir" &&

    remote_source_path="$remote_host:$remote_last_backup_dir/" &&
    echo "source path: $remote_source_path" &&

    local_backup_destination_path=$remote_last_backup_dir &&
    echo "backup destination: $local_backup_destination_path" &&

    echo "creating local backup destination folder..." &&
    mkdir -vp "$local_backup_destination_path" &&

    echo "starting backup..."
    rsync_command='rsync -abP --delete --delete-excluded --rsync-path="sudo rsync" --link-dest="'$local_previous_version_dir'" "'$remote_source_path'" "'$local_backup_destination_path'"'

    echo "executing: $rsync_command"

    retry_count=0
    max_retries=12
    retry_delay=300 # Retry delay in seconds (5 minutes)
    last_retry_start=0
    max_retry_duration=43200 # Maximum duration for a single retry attempt (12 hours)

    while [[ $retry_count -lt $max_retries ]]; do
      echo "Retry attempt: $((retry_count + 1))"
      if [[ $retry_count -gt 0 ]]; then
        current_time=$(date +%s)
        last_retry_duration=$((current_time - last_retry_start))
        if [[ $last_retry_duration -ge $max_retry_duration ]]; then
          echo "Last retry took more than 12 hours, increasing max retries to 12."
          max_retries=12
        fi
      fi
      last_retry_start=$(date +%s)
      eval "$rsync_command"
      rsync_exit_code=$?
      if [[ $rsync_exit_code -eq 0 ]]; then
        break
      fi
      retry_count=$((retry_count + 1))
      sleep $retry_delay
    done

    if [[ $rsync_exit_code -ne 0 ]]; then
      echo "Error: rsync failed after $max_retries attempts"
      ((errors += 1))
    fi
  fi
done
exit $errors;
@@ -10,15 +10,15 @@
   - include_tasks: utils/run_once.yml
     when: run_once_svc_bkp_rmt_2_loc is not defined
 
-- name: "create {{ DOCKER_BACKUP_REMOTE_2_LOCAL_DIR }}"
+- name: "Create Directory '{{ DOCKER_BACKUP_REMOTE_2_LOCAL_DIR }}'"
   file:
     path: "{{ DOCKER_BACKUP_REMOTE_2_LOCAL_DIR }}"
     state: directory
     mode: "0755"
 
-- name: create svc-bkp-rmt-2-loc.sh
+- name: "Deploy '{{ DOCKER_BACKUP_REMOTE_2_LOCAL_SCRIPT }}'"
   copy:
-    src: svc-bkp-rmt-2-loc.sh
+    src: "{{ DOCKER_BACKUP_REMOTE_2_LOCAL_FILE }}"
     dest: "{{ DOCKER_BACKUP_REMOTE_2_LOCAL_SCRIPT }}"
     mode: "0755"
 
@@ -3,6 +3,6 @@
 hosts="{{ DOCKER_BACKUP_REMOTE_2_LOCAL_BACKUP_PROVIDERS | join(' ') }}";
 errors=0
 for host in $hosts; do
-    bash {{ DOCKER_BACKUP_REMOTE_2_LOCAL_SCRIPT }} $host || ((errors+=1));
+    python {{ DOCKER_BACKUP_REMOTE_2_LOCAL_SCRIPT }} $host || ((errors+=1));
 done;
 exit $errors;
@@ -1,5 +1,9 @@
+# General
 application_id: svc-bkp-rmt-2-loc
 system_service_id: "{{ application_id }}"
 
+# Role Specific
 DOCKER_BACKUP_REMOTE_2_LOCAL_DIR: '{{ PATH_ADMINISTRATOR_SCRIPTS }}{{ application_id }}/'
-DOCKER_BACKUP_REMOTE_2_LOCAL_SCRIPT: "{{ DOCKER_BACKUP_REMOTE_2_LOCAL_DIR }}svc-bkp-rmt-2-loc.sh"
+DOCKER_BACKUP_REMOTE_2_LOCAL_FILE: 'pull-specific-host.py'
+DOCKER_BACKUP_REMOTE_2_LOCAL_SCRIPT: "{{ [ DOCKER_BACKUP_REMOTE_2_LOCAL_DIR , DOCKER_BACKUP_REMOTE_2_LOCAL_FILE ] | path_join }}"
 DOCKER_BACKUP_REMOTE_2_LOCAL_BACKUP_PROVIDERS: "{{ applications | get_app_conf(application_id, 'backup_providers') }}"
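For illustration only (the base path is an assumption): with PATH_ADMINISTRATOR_SCRIPTS set to /opt/scripts/, the path_join above composes DOCKER_BACKUP_REMOTE_2_LOCAL_SCRIPT to /opt/scripts/svc-bkp-rmt-2-loc/pull-specific-host.py, which a quick debug task can confirm.

    - debug:
        msg: "{{ [ DOCKER_BACKUP_REMOTE_2_LOCAL_DIR , DOCKER_BACKUP_REMOTE_2_LOCAL_FILE ] | path_join }}"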
@@ -1,11 +1,16 @@
 docker:
   services:
     mariadb:
       version: "latest"
       image: "mariadb"
       name: "mariadb"
       backup:
         database_routine: true
+      # Performance variables aren't used yet, but will be as soon as a Dockerfile is implemented
+      cpus: "2.0"
+      mem_reservation: "2g"
+      mem_limit: "4g"
+      pids_limit: 1024
   network: "mariadb"
   volumes:
     data: "mariadb_data"
@@ -5,9 +5,14 @@ network:
 docker:
   services:
     openldap:
-      image: "bitnami/openldap"
+      image: "bitnamilegacy/openldap"
       name: "openldap"
       version: "latest"
+      cpus: 1.25
+      # Optimized for up to 5k users
+      mem_reservation: 1g
+      mem_limit: 1.5g
+      pids_limit: 1024
   network: "openldap"
   volumes:
     data: "openldap_data"
roles/svc-db-openldap/handlers/main.yml (new file, 40 lines)
@@ -0,0 +1,40 @@
- name: Load memberof module from file in OpenLDAP container
  shell: >
    docker exec -i {{ OPENLDAP_CONTAINER }} ldapmodify -Y EXTERNAL -H ldapi:/// -f "{{ [ OPENLDAP_LDIF_PATH_DOCKER, 'configuration/01_member_of_configuration.ldif' ] | path_join }}"
  listen:
    - "Import configuration LDIF files"
  # @todo Remove the following ignore_errors when setting up a new server.
  # Just here because debugging would take too much time.
  ignore_errors: true

- name: Refint Module Activation for OpenLDAP
  shell: >
    docker exec -i {{ OPENLDAP_CONTAINER }} ldapadd -Y EXTERNAL -H ldapi:/// -f "{{ [ OPENLDAP_LDIF_PATH_DOCKER, 'configuration/02_member_of_configuration.ldif' ] | path_join }}"
  listen:
    - "Import configuration LDIF files"
  register: ldapadd_result
  failed_when: ldapadd_result.rc not in [0, 68]
  # @todo Remove the following ignore_errors when setting up a new server.
  # Just here because debugging would take too much time.
  ignore_errors: true

- name: Refint Overlay Configuration for OpenLDAP
  shell: >
    docker exec -i {{ OPENLDAP_CONTAINER }} ldapmodify -Y EXTERNAL -H ldapi:/// -f "{{ [ OPENLDAP_LDIF_PATH_DOCKER, 'configuration/03_member_of_configuration.ldif' ] | path_join }}"
  listen:
    - "Import configuration LDIF files"
  register: ldapadd_result
  failed_when: ldapadd_result.rc not in [0, 68]
  # @todo Remove the following ignore_errors when setting up a new server.
  # Just here because debugging would take too much time.
  ignore_errors: true

- name: "Import users, groups, etc. to LDAP"
  shell: >
    docker exec -i {{ OPENLDAP_CONTAINER }} ldapadd -x -D "{{ LDAP.DN.ADMINISTRATOR.DATA }}" -w "{{ LDAP.BIND_CREDENTIAL }}" -c -f "{{ [ OPENLDAP_LDIF_PATH_DOCKER, 'groups', (item | basename | regex_replace('\.j2$', '')) ] | path_join }}"
  register: ldapadd_result
  changed_when: "'adding new entry' in ldapadd_result.stdout"
  failed_when: ldapadd_result.rc not in [0, 20, 68, 65]
  listen:
    - "Import groups LDIF files"
  loop: "{{ query('fileglob', role_path ~ '/templates/ldif/groups/*.j2') | sort }}"
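These handlers hang off two shared listen topics rather than individual handler names. A hedged sketch of how a task elsewhere in the role might trigger the configuration import (the template task below is an assumption, not part of this diff):

    - name: Render configuration LDIF files
      template:
        src: "{{ item }}"
        dest: "{{ OPENLDAP_LDIF_PATH_HOST }}configuration/{{ item | basename | regex_replace('\\.j2$', '') }}"
      loop: "{{ query('fileglob', role_path ~ '/templates/ldif/configuration/*.j2') | sort }}"
      notify: "Import configuration LDIF files"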
@@ -3,7 +3,7 @@
 
 - name: "Query available LDAP databases"
   shell: |
-    docker exec {{ openldap_name }} \
+    docker exec {{ OPENLDAP_CONTAINER }} \
     ldapsearch -Y EXTERNAL -H ldapi:/// -LLL -b cn=config "(olcDatabase=*)" dn
   register: ldap_databases
 
@@ -27,13 +27,13 @@
 
 - name: "Generate hash for Database Admin password"
   shell: |
-    docker exec {{ openldap_name }} \
+    docker exec {{ OPENLDAP_CONTAINER }} \
     slappasswd -s "{{ LDAP.BIND_CREDENTIAL }}"
   register: database_admin_pw_hash
 
 - name: "Reset Database Admin password in LDAP (olcRootPW)"
   shell: |
-    docker exec -i {{ openldap_name }} ldapmodify -Y EXTERNAL -H ldapi:/// <<EOF
+    docker exec -i {{ OPENLDAP_CONTAINER }} ldapmodify -Y EXTERNAL -H ldapi:/// <<EOF
     dn: {{ data_backend_dn }}
     changetype: modify
     replace: olcRootPW
@@ -42,13 +42,13 @@
 
 - name: "Generate hash for Configuration Admin password"
   shell: |
-    docker exec {{ openldap_name }} \
+    docker exec {{ OPENLDAP_CONTAINER }} \
     slappasswd -s "{{ applications | get_app_conf(application_id, 'credentials.administrator_password', True) }}"
   register: config_admin_pw_hash
 
 - name: "Reset Configuration Admin password in LDAP (olcRootPW)"
   shell: |
-    docker exec -i {{ openldap_name }} ldapmodify -Y EXTERNAL -H ldapi:/// <<EOF
+    docker exec -i {{ OPENLDAP_CONTAINER }} ldapmodify -Y EXTERNAL -H ldapi:/// <<EOF
     dn: {{ config_backend_dn }}
     changetype: modify
     replace: olcRootPW
@@ -4,7 +4,7 @@
 - name: Ensure LDAP users exist
   community.general.ldap_entry:
     dn: "{{ LDAP.USER.ATTRIBUTES.ID }}={{ item.key }},{{ LDAP.DN.OU.USERS }}"
-    server_uri: "{{ openldap_server_uri }}"
+    server_uri: "{{ OPENLDAP_SERVER_URI }}"
     bind_dn: "{{ LDAP.DN.ADMINISTRATOR.DATA }}"
     bind_pw: "{{ LDAP.BIND_CREDENTIAL }}"
     objectClass: "{{ LDAP.USER.OBJECTS.STRUCTURAL }}"
@@ -30,7 +30,7 @@
 - name: Ensure required objectClass values and mail address are present
   community.general.ldap_attrs:
     dn: "{{ LDAP.USER.ATTRIBUTES.ID }}={{ item.key }},{{ LDAP.DN.OU.USERS }}"
-    server_uri: "{{ openldap_server_uri }}"
+    server_uri: "{{ OPENLDAP_SERVER_URI }}"
     bind_dn: "{{ LDAP.DN.ADMINISTRATOR.DATA }}"
     bind_pw: "{{ LDAP.BIND_CREDENTIAL }}"
     attributes:
@@ -46,7 +46,7 @@
 - name: "Ensure container for application roles exists"
   community.general.ldap_entry:
     dn: "{{ LDAP.DN.OU.ROLES }}"
-    server_uri: "{{ openldap_server_uri }}"
+    server_uri: "{{ OPENLDAP_SERVER_URI }}"
     bind_dn: "{{ LDAP.DN.ADMINISTRATOR.DATA }}"
     bind_pw: "{{ LDAP.BIND_CREDENTIAL }}"
     objectClass: organizationalUnit
@@ -1,6 +1,6 @@
 - name: Gather all users with their current objectClass list
   community.general.ldap_search:
-    server_uri: "{{ openldap_server_uri }}"
+    server_uri: "{{ OPENLDAP_SERVER_URI }}"
     bind_dn: "{{ LDAP.DN.ADMINISTRATOR.DATA }}"
     bind_pw: "{{ LDAP.BIND_CREDENTIAL }}"
     dn: "{{ LDAP.DN.OU.USERS }}"
@@ -14,16 +14,16 @@
 
 - name: Add only missing auxiliary classes
   community.general.ldap_attrs:
-    server_uri: "{{ openldap_server_uri }}"
+    server_uri: "{{ OPENLDAP_SERVER_URI }}"
     bind_dn: "{{ LDAP.DN.ADMINISTRATOR.DATA }}"
     bind_pw: "{{ LDAP.BIND_CREDENTIAL }}"
     dn: "{{ item.dn }}"
     attributes:
       objectClass: "{{ missing_auxiliary }}"
     state: present
   async: "{{ ASYNC_TIME if ASYNC_ENABLED | bool else omit }}"
   poll: "{{ ASYNC_POLL if ASYNC_ENABLED | bool else omit }}"
   loop: "{{ ldap_users_with_classes.results }}"
   loop_control:
     label: "{{ item.dn }}"
   vars:
@@ -1,7 +1,7 @@
-- name: "Create LDIF files at {{ openldap_ldif_host_path }}{{ folder }}"
+- name: "Create LDIF files at {{ OPENLDAP_LDIF_PATH_HOST }}{{ folder }}"
   template:
     src: "{{ item }}"
-    dest: "{{ openldap_ldif_host_path }}{{ folder }}/{{ item | basename | regex_replace('\\.j2$', '') }}"
+    dest: "{{ OPENLDAP_LDIF_PATH_HOST }}{{ folder }}/{{ item | basename | regex_replace('\\.j2$', '') }}"
     mode: "0770"
   loop: >-
     {{
@@ -1,25 +1,25 @@
 ---
 
 - name: "include docker-compose role"
   include_role:
     name: docker-compose
 
 - name: Create {{ domains | get_domain(application_id) }}.conf if LDAP is exposed to internet
   template:
     src: "nginx.stream.conf.j2"
     dest: "{{ NGINX.DIRECTORIES.STREAMS }}{{ domains | get_domain(application_id) }}.conf"
   notify: restart openresty
-  when: applications | get_app_conf(application_id, 'network.public', True) | bool
+  when: OPENLDAP_NETWORK_SWITCH_PUBLIC | bool
 
 - name: Remove {{ domains | get_domain(application_id) }}.conf if LDAP is not exposed to internet
   file:
     path: "{{ NGINX.DIRECTORIES.STREAMS }}{{ domains | get_domain(application_id) }}.conf"
     state: absent
-  when: not applications | get_app_conf(application_id, 'network.public', True) | bool
+  when: not OPENLDAP_NETWORK_SWITCH_PUBLIC | bool
 
 - name: create docker network for LDAP, so that other applications can access it
   community.docker.docker_network:
-    name: "{{ openldap_network }}"
+    name: "{{ OPENLDAP_NETWORK }}"
     state: present
     ipam_config:
       - subnet: "{{ networks.local[application_id].subnet }}"
@@ -37,23 +37,23 @@
 - name: "Reset LDAP Credentials"
   include_tasks: 01_credentials.yml
   when:
-    - applications | get_app_conf(application_id, 'network.local')
+    - OPENLDAP_NETWORK_SWITCH_LOCAL | bool
-    - applications | get_app_conf(application_id, 'provisioning.credentials', True)
+    - applications | get_app_conf(application_id, 'provisioning.credentials')
 
-- name: "create directory {{openldap_ldif_host_path}}{{item}}"
+- name: "create directory {{ OPENLDAP_LDIF_PATH_HOST }}{{ item }}"
   file:
-    path: "{{openldap_ldif_host_path}}{{item}}"
+    path: "{{ OPENLDAP_LDIF_PATH_HOST }}{{ item }}"
     state: directory
     mode: "0755"
-  loop: "{{openldap_ldif_types}}"
+  loop: "{{ OPENLDAP_LDIF_TYPES }}"
 
 - name: "Import LDIF Configuration"
-  include_tasks: ldifs_creation.yml
+  include_tasks: _ldifs_creation.yml
   loop:
     - configuration
   loop_control:
     loop_var: folder
-  when: applications | get_app_conf(application_id, 'provisioning.configuration', True)
+  when: applications | get_app_conf(application_id, 'provisioning.configuration')
 
 - name: flush LDIF handlers
   meta: flush_handlers
@@ -66,20 +66,22 @@
 
 - name: "Include Schemas (if enabled)"
   include_tasks: 02_schemas.yml
-  when: applications | get_app_conf(application_id, 'provisioning.schemas', True)
+  when: applications | get_app_conf(application_id, 'provisioning.schemas')
 
 - name: "Import LDAP Entries (if enabled)"
   include_tasks: 03_users.yml
-  when: applications | get_app_conf(application_id, 'provisioning.users', True)
+  when: applications | get_app_conf(application_id, 'provisioning.users')
 
 - name: "Import LDIF Data (if enabled)"
-  include_tasks: ldifs_creation.yml
+  include_tasks: _ldifs_creation.yml
   loop:
     - groups
   loop_control:
     loop_var: folder
-  when: applications | get_app_conf(application_id, 'provisioning.groups', True)
+  when: applications | get_app_conf(application_id, 'provisioning.groups')
 
+- meta: flush_handlers
 
 - name: "Add Objects to all users"
   include_tasks: 04_update.yml
-  when: applications | get_app_conf(application_id, 'provisioning.update', True)
+  when: applications | get_app_conf(application_id, 'provisioning.update')
@@ -13,9 +13,9 @@
     - "( 1.3.6.1.4.1.99999.2 NAME '{{ LDAP.USER.OBJECTS.AUXILIARY.NEXTCLOUD_USER }}' DESC 'Auxiliary class for Nextcloud attributes' AUXILIARY MAY ( {{ LDAP.USER.ATTRIBUTES.NEXTCLOUD_QUOTA }} ) )"
   command: >
     ldapsm
-      -s {{ openldap_server_uri }}
-      -D '{{ openldap_bind_dn }}'
-      -W '{{ openldap_bind_pw }}'
+      -s {{ OPENLDAP_SERVER_URI }}
+      -D '{{ OPENLDAP_BIND_DN }}'
+      -W '{{ OPENLDAP_BIND_PW }}'
       -n {{ schema_name }}
       {% for at in attribute_defs %}
       -a "{{ at }}"
@@ -21,9 +21,9 @@
 
   command: >
     ldapsm
-      -s {{ openldap_server_uri }}
-      -D '{{ openldap_bind_dn }}'
-      -W '{{ openldap_bind_pw }}'
+      -s {{ OPENLDAP_SERVER_URI }}
+      -D '{{ OPENLDAP_BIND_DN }}'
+      -W '{{ OPENLDAP_BIND_PW }}'
       -n {{ schema_name }}
       {% for at in attribute_defs %}
      -a "{{ at }}"
@@ -1,20 +1,20 @@
 {% include 'roles/docker-compose/templates/base.yml.j2' %}
 
   application:
-    image: "{{ openldap_image }}:{{ openldap_version }}"
-    container_name: "{{ openldap_name }}"
+    image: "{{ OPENLDAP_IMAGE }}:{{ OPENLDAP_VERSION }}"
+    container_name: "{{ OPENLDAP_CONTAINER }}"
 {% include 'roles/docker-container/templates/base.yml.j2' %}
-{% if openldap_network_expose_local %}
+{% if OPENLDAP_NETWORK_EXPOSE_LOCAL | bool %}
     ports:
-      - 127.0.0.1:{{ports.localhost.ldap['svc-db-openldap']}}:{{openldap_docker_port_open}}
+      - 127.0.0.1:{{ports.localhost.ldap['svc-db-openldap']}}:{{ OPENLDAP_DOCKER_PORT_OPEN }}
 {% endif %}
     volumes:
       - 'data:/bitnami/openldap'
-      - '{{openldap_ldif_host_path}}:{{ openldap_ldif_docker_path }}:ro'
+      - '{{ OPENLDAP_LDIF_PATH_HOST }}:{{ OPENLDAP_LDIF_PATH_DOCKER }}:ro'
     healthcheck:
       test: >
         bash -c '
-          ldapsearch -x -H ldap://localhost:{{ openldap_docker_port_open }} \
+          ldapsearch -x -H ldap://localhost:{{ OPENLDAP_DOCKER_PORT_OPEN }} \
           -D "{{ LDAP.DN.ADMINISTRATOR.DATA }}" -w "{{ LDAP.BIND_CREDENTIAL }}" -b "{{ LDAP.DN.ROOT }}" > /dev/null \
           && ldapsearch -Y EXTERNAL -H ldapi:/// \
           -b cn=config "(&(objectClass=olcOverlayConfig)(olcOverlay=memberof))" \
@@ -24,6 +24,6 @@
 
 {% include 'roles/docker-compose/templates/volumes.yml.j2' %}
   data:
-    name: "{{ openldap_volume }}"
+    name: "{{ OPENLDAP_VOLUME }}"
 
 {% include 'roles/docker-compose/templates/networks.yml.j2' %}
@@ -3,24 +3,24 @@
 
 # GENERAL
 ## Admin (Data)
 LDAP_ADMIN_USERNAME= {{ applications | get_app_conf(application_id, 'users.administrator.username') }} # LDAP database admin user.
 LDAP_ADMIN_PASSWORD= {{ LDAP.BIND_CREDENTIAL }} # LDAP database admin password.
 
 ## Users
 LDAP_USERS= ' ' # Comma separated list of LDAP users to create in the default LDAP tree. Default: user01,user02
 LDAP_PASSWORDS= ' ' # Comma separated list of passwords to use for LDAP users. Default: bitnami1,bitnami2
 LDAP_ROOT= {{ LDAP.DN.ROOT }} # LDAP baseDN (or suffix) of the LDAP tree. Default: dc=example,dc=org
 
 ## Admin (Config)
-LDAP_ADMIN_DN= {{LDAP.DN.ADMINISTRATOR.DATA}}
+LDAP_ADMIN_DN= {{ LDAP.DN.ADMINISTRATOR.DATA }}
 LDAP_CONFIG_ADMIN_ENABLED= yes
 LDAP_CONFIG_ADMIN_USERNAME= {{ applications | get_app_conf(application_id, 'users.administrator.username') }}
 LDAP_CONFIG_ADMIN_PASSWORD= {{ applications | get_app_conf(application_id, 'credentials.administrator_password') }}
 
 # Network
-LDAP_PORT_NUMBER= {{openldap_docker_port_open}} # Route to default port
+LDAP_PORT_NUMBER= {{ OPENLDAP_DOCKER_PORT_OPEN }} # Route to default port
 LDAP_ENABLE_TLS= no # Using nginx proxy for tls
-LDAP_LDAPS_PORT_NUMBER= {{openldap_docker_port_secure}} # Port used for TLS secure traffic. Priviledged port is supported (e.g. 636). Default: 1636 (non privileged port).
+LDAP_LDAPS_PORT_NUMBER= {{ OPENLDAP_DOCKER_PORT_SECURE }} # Port used for TLS secure traffic. Priviledged port is supported (e.g. 636). Default: 1636 (non privileged port).
 
 # Security
 LDAP_ALLOW_ANON_BINDING= no # Allow anonymous bindings to the LDAP server. Default: yes.
@@ -1,30 +0,0 @@
{#
@todo: activate
{% for dn, entry in (applications | build_ldap_role_entries(users, ldap)).items() %}

dn: {{ dn }}
{% for oc in entry.objectClass %}
objectClass: {{ oc }}
{% endfor %}
{% if entry.ou is defined %}
ou: {{ entry.ou }}
{% else %}
cn: {{ entry.cn }}
{% endif %}
{% if entry.gidNumber is defined %}
gidNumber: {{ entry.gidNumber }}
{% endif %}
description: {{ entry.description }}
{% if entry.memberUid is defined %}
{% for uid in entry.memberUid %}
memberUid: {{ uid }}
{% endfor %}
{% endif %}
{% if entry.member is defined %}
{% for m in entry.member %}
member: {{ m }}
{% endfor %}
{% endif %}

{% endfor %}
#}
@@ -1,4 +1,4 @@
-{% for dn, entry in (applications | build_ldap_role_entries(users, ldap)).items() %}
+{% for dn, entry in (applications | build_ldap_role_entries(users, LDAP)).items() %}
 
 dn: {{ dn }}
 {% for oc in entry.objectClass %}
@@ -1,24 +1,27 @@
 application_id: "svc-db-openldap"
 
 # LDAP Variables
-openldap_docker_port_secure: 636
-openldap_docker_port_open: 389
-openldap_server_uri: "ldap://127.0.0.1:{{ ports.localhost.ldap[application_id] }}"
-openldap_bind_dn: "{{ LDAP.DN.ADMINISTRATOR.CONFIGURATION }}"
-openldap_bind_pw: "{{ applications | get_app_conf(application_id, 'credentials.administrator_password', True) }}"
+OPENLDAP_DOCKER_PORT_SECURE: 636
+OPENLDAP_DOCKER_PORT_OPEN: 389
+OPENLDAP_SERVER_URI: "ldap://127.0.0.1:{{ ports.localhost.ldap[application_id] }}"
+OPENLDAP_BIND_DN: "{{ LDAP.DN.ADMINISTRATOR.CONFIGURATION }}"
+OPENLDAP_BIND_PW: "{{ applications | get_app_conf(application_id, 'credentials.administrator_password') }}"
 
 # LDIF Variables
-openldap_ldif_host_path: "{{ docker_compose.directories.volumes }}ldif/"
-openldap_ldif_docker_path: "/tmp/ldif/"
-openldap_ldif_types:
+OPENLDAP_LDIF_PATH_HOST: "{{ docker_compose.directories.volumes }}ldif/"
+OPENLDAP_LDIF_PATH_DOCKER: "/tmp/ldif/"
+OPENLDAP_LDIF_TYPES:
   - configuration
   - groups
-  - schema # Don't know if this is still needed, it's now setup via tasks
 
-openldap_name: "{{ applications | get_app_conf(application_id, 'docker.services.openldap.name', True) }}"
-openldap_image: "{{ applications | get_app_conf(application_id, 'docker.services.openldap.image', True) }}"
-openldap_version: "{{ applications | get_app_conf(application_id, 'docker.services.openldap.version', True) }}"
-openldap_volume: "{{ applications | get_app_conf(application_id, 'docker.volumes.data', True) }}"
-openldap_network: "{{ applications | get_app_conf(application_id, 'docker.network', True) }}"
-
-openldap_network_expose_local: "{{ applications | get_app_conf(application_id, 'network.public', True) | bool or applications | get_app_conf(application_id, 'network.local') | bool }}"
+# Container
+OPENLDAP_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.openldap.name') }}"
+OPENLDAP_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.openldap.image') }}"
+OPENLDAP_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.openldap.version') }}"
+OPENLDAP_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.data') }}"
+OPENLDAP_NETWORK: "{{ applications | get_app_conf(application_id, 'docker.network') }}"
+
+# Network
+OPENLDAP_NETWORK_SWITCH_PUBLIC: "{{ applications | get_app_conf(application_id, 'network.public') }}"
+OPENLDAP_NETWORK_SWITCH_LOCAL: "{{ applications | get_app_conf(application_id, 'network.local') }}"
+OPENLDAP_NETWORK_EXPOSE_LOCAL: "{{ OPENLDAP_NETWORK_SWITCH_PUBLIC | bool or OPENLDAP_NETWORK_SWITCH_LOCAL | bool }}"
@@ -2,13 +2,17 @@ docker:
   services:
     postgres:
       # Postgis is necessary for mobilizon
       image: postgis/postgis
       name: postgres
       # Please set a version in your inventory file!
       # Rolling release isn't recommended
-      version: "latest"
+      version: "17-3.5"
       backup:
         database_routine: true
+      cpus: "2.0"
+      mem_reservation: "4g"
+      mem_limit: "6g"
+      pids_limit: 1024
   volumes:
     data: "postgres_data"
   network: "postgres"
@@ -25,3 +25,5 @@
   community.general.pacman:
     name: python-psycopg2
     state: present
+
+- include_tasks: utils/run_once.yml
@@ -1,12 +1,11 @@
 - block:
     - include_tasks: 01_core.yml
-    - include_tasks: utils/run_once.yml
   vars:
     # Force the flush of the pg handler on the first run
     flush_handlers: true
   when: run_once_svc_db_postgres is not defined
 
-- include_tasks: "{{ playbook_dir }}/tasks/utils/load_handlers.yml"
+- include_tasks: "{{ [ playbook_dir, 'tasks/utils/load_handlers.yml' ] | path_join }}"
   # Necessary because docker handlers are overwritten by condition
   vars:
     handler_role_name: "docker-compose"
@@ -5,7 +5,7 @@ RUN apt-get update \
     && apt-get install -y --no-install-recommends \
        build-essential \
        git \
-       postgresql-server-dev-all \
+       postgresql-server-dev-{{ POSTGRES_VERSION_MAJOR | default('all', true) }} \
     && git clone https://github.com/pgvector/pgvector.git /tmp/pgvector \
     && cd /tmp/pgvector \
     && make \
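Building pgvector against the matching server headers does not by itself enable the extension in any database. A hedged follow-up sketch using the community.postgresql collection (connection details below are assumptions, not part of this diff):

    - name: Enable the pgvector extension in the application database
      community.postgresql.postgresql_ext:
        name: vector
        db: "{{ database_name }}"
        login_host: 127.0.0.1
        port: "{{ POSTGRES_PORT }}"
        login_user: postgres
        login_password: "{{ POSTGRES_PASSWORD }}"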
@@ -3,10 +3,7 @@
   postgres:
     container_name: "{{ POSTGRES_CONTAINER }}"
     image: "{{ POSTGRES_CUSTOM_IMAGE_NAME }}"
-    build:
-      context: .
-      dockerfile: Dockerfile
-      pull_policy: never
+{{ lookup('template', 'roles/docker-container/templates/build.yml.j2') | indent(4) }}
     command:
       - "postgres"
       - "-c"
@@ -1,5 +1,6 @@
 # General
 application_id: svc-db-postgres
+entity_name: "{{ application_id | get_entity_name }}"
 
 # Docker
 docker_compose_flush_handlers: true
@@ -9,11 +10,12 @@ database_type: "{{ application_id | get_entity_name }}"
 
 ## Postgres
 POSTGRES_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.data') }}"
-POSTGRES_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.name') }}"
+POSTGRES_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.' ~ entity_name ~ '.name') }}"
-POSTGRES_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.image') }}"
+POSTGRES_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.' ~ entity_name ~ '.image') }}"
-POSTGRES_SUBNET: "{{ networks.local['svc-db-postgres'].subnet }}"
+POSTGRES_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.' ~ entity_name ~ '.version') }}"
+POSTGRES_VERSION_MAJOR: "{{ POSTGRES_VERSION | regex_replace('^([0-9]+).*', '\\1') }}"
 POSTGRES_NETWORK_NAME: "{{ applications | get_app_conf(application_id, 'docker.network') }}"
-POSTGRES_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.postgres.version') }}"
+POSTGRES_SUBNET: "{{ networks.local['svc-db-postgres'].subnet }}"
 POSTGRES_PASSWORD: "{{ applications | get_app_conf(application_id, 'credentials.POSTGRES_PASSWORD') }}"
 POSTGRES_PORT: "{{ database_port | default(ports.localhost.database[ application_id ]) }}"
 POSTGRES_INIT: "{{ database_username is defined and database_password is defined and database_name is defined }}"
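The regex above keeps only the leading digits of the configured version so the Dockerfile can pick the matching postgresql-server-dev package. Illustration with assumed input values: "17-3.5" becomes "17" and "16.4" becomes "16"; an empty result would fall back to postgresql-server-dev-all via default('all', true).

    - debug:
        msg: "{{ '17-3.5' | regex_replace('^([0-9]+).*', '\\1') }}"  # prints "17"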
@@ -16,4 +16,12 @@
       retries: 30
     networks:
       - default
+{% macro include_resource_for(svc, indent=4) -%}
+{% set service_name = svc -%}
+{%- set _snippet -%}
+{% include 'roles/docker-container/templates/resource.yml.j2' %}
+{%- endset -%}
+{{ _snippet | indent(indent, true) }}
+{%- endmacro %}
+{{ include_resource_for('redis') }}
 {{ "\n" }}
@@ -13,6 +13,6 @@
 - include_role:
     name: sys-service
   vars:
     system_service_on_calendar: "{{ SYS_SCHEDULE_ANIMATION_KEYBOARD_COLOR }}"
     system_service_timer_enabled: true
     persistent: true
roles/svc-opt-swapfile/tasks/01_core.yml (new file, 14 lines)
@@ -0,0 +1,14 @@
- name: Install '{{ SWAPFILE_PKG }}'
  include_role:
    name: pkgmgr-install
  vars:
    package_name: "{{ SWAPFILE_PKG }}"
  when: run_once_pkgmgr_install is not defined

- name: Execute create swapfile script
  shell: "{{ SWAPFILE_PKG }} '{{ SWAPFILE_SIZE }}'"
  become: true
  async: "{{ ASYNC_TIME if ASYNC_ENABLED | bool else omit }}"
  poll: "{{ ASYNC_POLL if ASYNC_ENABLED | bool else omit }}"

- include_tasks: utils/run_once.yml
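The swapfile size is read from the application configuration. A hypothetical inventory entry consumed through get_app_conf might look like this (the key name appears in the vars change in this diff, the value and the surrounding structure are assumptions):

    applications:
      svc-opt-swapfile:
        swapfile_size: "8G"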
@@ -1,17 +1,3 @@
 - block:
-    - name: Include dependency 'pkgmgr-install'
-      include_role:
-        name: pkgmgr-install
-      when: run_once_pkgmgr_install is not defined
-    - include_tasks: utils/run_once.yml
+    - include_tasks: 01_core.yml
   when: run_once_svc_opt_swapfile is not defined
-
-- name: "pkgmgr install"
-  include_role:
-    name: pkgmgr-install
-  vars:
-    package_name: swap-forge
-
-- name: Execute create swapfile script
-  shell: swap-forge "{{swapfile_size}}"
-  become: true
@@ -1,2 +1,4 @@
 application_id: "svc-opt-swapfile"
-swapfile_size: "{{ applications | get_app_conf(application_id, 'swapfile_size') }}"
+
+SWAPFILE_SIZE: "{{ applications | get_app_conf(application_id, 'swapfile_size') }}"
+SWAPFILE_PKG: "swap-forge"
@@ -1,7 +1,10 @@
 docker:
   services:
     openresty:
       name: "openresty"
+      cpus: 0.5
+      mem_reservation: 1g
+      mem_limit: 2g
   volumes:
     www: "/var/www/"
     nginx: "/etc/nginx/"
@@ -1,6 +1,6 @@
 - block:
     - name: "For '{{ application_id }}': Load docker-compose"
       include_role:
         name: docker-compose
       vars:
         docker_compose_flush_handlers: true
@@ -13,7 +13,7 @@ get_backup_types="find /Backups/$hashed_machine_id/ -maxdepth 1 -type d -execdir
 
 
 # @todo This configuration is not scalable yet. If other backup services than sys-ctl-bkp-docker-2-loc are integrated, this logic needs to be optimized
-get_version_directories="ls -d /Backups/$hashed_machine_id/sys-ctl-bkp-docker-2-loc/*"
+get_version_directories="ls -d /Backups/$hashed_machine_id/backup-docker-to-local/*"
 last_version_directory="$($get_version_directories | tail -1)"
 rsync_command="sudo rsync --server --sender -blogDtpre.iLsfxCIvu . $last_version_directory/"
 
@@ -3,30 +3,6 @@
     name: backup
     create_home: yes
 
-- name: create .ssh directory
-  file:
-    path: /home/backup/.ssh
-    state: directory
-    owner: backup
-    group: backup
-    mode: '0700'
-
-- name: create /home/backup/.ssh/authorized_keys
-  template:
-    src: "authorized_keys.j2"
-    dest: /home/backup/.ssh/authorized_keys
-    owner: backup
-    group: backup
-    mode: '0644'
-
-- name: create /home/backup/ssh-wrapper.sh
-  copy:
-    src: "ssh-wrapper.sh"
-    dest: /home/backup/ssh-wrapper.sh
-    owner: backup
-    group: backup
-    mode: '0700'
-
 - name: grant backup sudo rights
   copy:
     src: "backup"
@@ -35,3 +11,9 @@
     owner: root
     group: root
   notify: sshd restart
+
+- include_tasks: 02_permissions_ssh.yml
+
+- include_tasks: 03_permissions_folders.yml
+
+- include_tasks: utils/run_once.yml
roles/sys-bkp-provider-user/tasks/02_permissions_ssh.yml (new file, 23 lines)
@@ -0,0 +1,23 @@
- name: create .ssh directory
  file:
    path: /home/backup/.ssh
    state: directory
    owner: backup
    group: backup
    mode: '0700'

- name: create /home/backup/.ssh/authorized_keys
  template:
    src: "authorized_keys.j2"
    dest: /home/backup/.ssh/authorized_keys
    owner: backup
    group: backup
    mode: '0644'

- name: create /home/backup/ssh-wrapper.sh
  copy:
    src: "ssh-wrapper.sh"
    dest: /home/backup/ssh-wrapper.sh
    owner: backup
    group: backup
    mode: '0700'
Some files were not shown because too many files have changed in this diff.