Mirror of https://github.com/kevinveenbirkenbach/computer-playbook.git (synced 2025-12-14 13:05:25 +00:00)
Compare commits
220 Commits
cb483f60d1 ... v0.2.0
| SHA1 | Author | Date | Message |
|---|---|---|---|
| 57ec936d30 | |||
| f143ce258c | |||
| 060ae45c7d | |||
| a3ba40edb6 | |||
| f9825ac4fc | |||
| 9d66910120 | |||
| f067e4ee9a | |||
| 486a98af3e | |||
| eca7ffea36 | |||
| 8bcda51528 | |||
| cb4e750317 | |||
| ecfdac6764 | |||
| 9fb372be52 | |||
| 0cf6674ab5 | |||
| 0d18d86243 | |||
| bf0134b9c5 | |||
| 2ed58ceffc | |||
| 5a488b9300 | |||
| 19b417602f | |||
| 8e4ee723d7 | |||
| d0aac64c67 | |||
| 1d7f1d4bb2 | |||
| 986f959696 | |||
| f2ace362bc | |||
| 8c64f91a6d | |||
| 86dd36930f | |||
| 5f0dfa616f | |||
| f147bbcc93 | |||
| a0b6ff490d | |||
| 91e93a5cc8 | |||
| 9051ba179f | |||
| d0efc77b6c | |||
| 73ec220847 | |||
| e5a3c1f0e2 | |||
| 4dbf58e082 | |||
| db68f16e19 | |||
| 5999763b4e | |||
| fc0bdbea9a | |||
| a731b5cd4b | |||
| 651038a7c5 | |||
| 7847d5fddc | |||
| 657d4a064d | |||
| 770107ff1e | |||
| 093a44b59c | |||
| 845b6e7cc1 | |||
| c128bd9c73 | |||
| bb4391d083 | |||
| 5c67c42911 | |||
| fbeed5793f | |||
| c0980e91c0 | |||
| 27c399123b | |||
| 7b262cf46e | |||
| 8fad77f17f | |||
| 91d5ba35d1 | |||
| 84d3c8fc16 | |||
| 9182d14175 | |||
| b914fb9789 | |||
| 298ba80664 | |||
| c5e294e567 | |||
| 40c40e2000 | |||
| 031775c096 | |||
| 9ecfd8a762 | |||
| 379b1d420e | |||
| 13d47766b7 | |||
| 1d0f26617a | |||
| 6f676462ea | |||
| 794bd4487a | |||
| 3e02fc0190 | |||
| 4a4c3c741c | |||
| a31ef07718 | |||
| d12f7a10a6 | |||
| e22893bdcb | |||
| 716ebef33b | |||
| a6ed047765 | |||
| 629e6194f9 | |||
| e9b0760d08 | |||
| 1cee61d00c | |||
| f06460db16 | |||
| cde3991c95 | |||
| 1eaeae2187 | |||
| 94f97ed1f3 | |||
| 46174125bc | |||
| 8a453be4b9 | |||
| f21bf5d459 | |||
| de121338cf | |||
| 20ecb62afe | |||
| 63f6723106 | |||
| 48cd7743b5 | |||
| 4b5ba892ba | |||
| 5b18f39ccd | |||
| d0d24547c2 | |||
| fe496d4800 | |||
| 4ee208ffff | |||
| 0756e9d06e | |||
| c0e26275f8 | |||
| 13b10ff85b | |||
| 5320a5d20c | |||
| 1064afc0dc | |||
| 8314d7e6a6 | |||
| 116c20d61d | |||
| 81c4d407a5 | |||
| e09f561f0b | |||
| 72ede9414b | |||
| 81304934d5 | |||
| c9f959058b | |||
| d55ab2a2d7 | |||
| f57ccb2dd7 | |||
| 1e1f8e56e0 | |||
| edec4f3722 | |||
| 8d3874f432 | |||
| 8008afe0de | |||
| 0cb9b08e8f | |||
| fff7d261a2 | |||
| e53abbec3b | |||
| 5a523cfe24 | |||
| f19f64b1cd | |||
| e6d6cb639c | |||
| 007963044b | |||
| 5a65410dd8 | |||
| 7d0489e312 | |||
| f363d36a36 | |||
| ce7347f70b | |||
| ece4f493d3 | |||
| 86760a4be7 | |||
| 26dfab147d | |||
| 3b3725cbd1 | |||
| 7fa6b2d770 | |||
| 9314cab664 | |||
| 03bea763f1 | |||
| 1129b943fc | |||
| e754df7e5c | |||
| 880d0ab1d6 | |||
| 654131ab89 | |||
| 4dd1769225 | |||
| c2a181edd4 | |||
| 2132356f02 | |||
| 3dddda39f6 | |||
| 3912e9b217 | |||
| 4337b63c2f | |||
| 0287652774 | |||
| 03a8819483 | |||
| 9dd48b7a8e | |||
| e72e5d3e37 | |||
| 21a1c99fda | |||
| 9f4bbd4066 | |||
| 7eac9374bc | |||
| 6fda85788a | |||
| 6abd4b0373 | |||
| 63ad5e807b | |||
| 9ce21b7242 | |||
| ba26e5a53b | |||
| 05ff3d3d61 | |||
| 873607246c | |||
| ac5fdeafd2 | |||
| cc817f3967 | |||
| 4f48cf633b | |||
| bbebf7964d | |||
| c5afae42cf | |||
| d7e0123af2 | |||
| 3fe83f26d5 | |||
| bee833feb4 | |||
| b80cfbdc9d | |||
| 6d6b0fdea6 | |||
| d97d34a822 | |||
| 4b2c7eef88 | |||
| e6cb0cbed9 | |||
| 254a685b05 | |||
| 9cbb74b7c8 | |||
| 62d20fbb71 | |||
| da8dc3b53a | |||
| 287cccf6cb | |||
| 61ee993ff1 | |||
| 2e490ed238 | |||
| c11ea9b699 | |||
| 2f5ead2212 | |||
| 13e74a86a6 | |||
| 962c68fdab | |||
| f8899e9493 | |||
| 9c65bd4839 | |||
| aca2da885d | |||
| d6422a7881 | |||
| 8cf3dbd5bf | |||
| dfa5e26582 | |||
| a312f353fb | |||
| e333c9d85b | |||
| 854e6902d3 | |||
| cc1ed2b125 | |||
| 28caa495e7 | |||
| 19de04c475 | |||
| 002f8de3ec | |||
| 68a8128d38 | |||
| 36f9573fdf | |||
| 493d5bbbda | |||
| 2fcbae8fc7 | |||
| 02f38d60db | |||
| d66ad37c5d | |||
| 0c16f9c43c | |||
| 7330aeb8ec | |||
| d3aad632c0 | |||
| d1bad3d7a6 | |||
| 43056a8b92 | |||
| 0bf286f62a | |||
| df8390f386 | |||
| 48557b06e3 | |||
| 1cff5778d3 | |||
| 60e2c972d6 | |||
| 637de6a190 | |||
| f5efbce205 | |||
| d6f3618d70 | |||
| 773655efb5 | |||
| 7bc9f7abd9 | |||
| ec7b8662dd | |||
| d1ccfd9cdd | |||
| d61c81634c | |||
| 265f815b48 | |||
| f8e5110730 | |||
| 37b213f96a | |||
| 5ef525eac9 | |||
| 295ae7e477 | |||
| c67ccc1df6 |
@@ -4,10 +4,11 @@ on:
   push:
     branches:
       - master
+      - main
   pull_request:
 
 jobs:
-  build-and-test:
+  test-code:
     runs-on: ubuntu-latest
     timeout-minutes: 15
 
@@ -17,16 +18,16 @@ jobs:
 
       - name: Build Docker image
         run: |
-          docker build -t infinito:latest .
+          docker build --network=host -t infinito:latest .
 
       - name: Clean build artifacts
         run: |
-          docker run --rm infinito:latest infinito make clean
+          docker run --network=host --rm infinito:latest infinito make clean
 
       - name: Generate project outputs
         run: |
-          docker run --rm infinito:latest infinito make build
+          docker run --network=host --rm infinito:latest infinito make build
 
       - name: Run tests
         run: |
-          docker run --rm infinito:latest infinito make test
+          docker run --network=host --rm infinito:latest infinito make test
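For local debugging, the job above can be reproduced outside CI with a few subprocess calls. This is a minimal sketch (not part of the repository) that assumes Docker is installed and the repository root is the working directory, mirroring the four commands in the diff.

```python
import subprocess

IMAGE = "infinito:latest"

def run(cmd: list[str]) -> None:
    """Echo and execute one CI command, failing fast on errors."""
    print("+", " ".join(cmd))
    subprocess.run(cmd, check=True)

run(["docker", "build", "--network=host", "-t", IMAGE, "."])
for target in ("clean", "build", "test"):
    run(["docker", "run", "--network=host", "--rm", IMAGE, "infinito", "make", target])
```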
83  .github/workflows/test-deploy.yml (vendored, new file)
@@ -0,0 +1,83 @@
name: Build & Test Infinito.Nexus CLI in Docker Container

on:
  push:
    branches:
      - main
      - master
      - develop
      - "*"
  pull_request:

jobs:
  test-deploy:
    runs-on: ubuntu-latest
    timeout-minutes: 240

    env:
      EXCLUDED_ROLES: >
        drv-lid-switch,
        svc-db-memcached,
        svc-db-redis,
        svc-net-wireguard-core,
        svc-net-wireguard-firewalled,
        svc-net-wireguard-plain,
        svc-bkp-loc-2-usb,
        svc-bkp-rmt-2-loc,
        svc-opt-keyboard-color,
        svc-opt-ssd-hdd,
        web-app-bridgy-fed,
        web-app-oauth2-proxy,
        web-app-postmarks,
        web-app-elk,
        web-app-syncope,
        web-app-socialhome,
        web-svc-xmpp,

      INFINITO_IMAGE: infinito:latest

    steps:
      - name: Main Checkout repository
        uses: actions/checkout@v4

      - name: Show Docker version
        run: docker version

      # 1) First deploy: normal + debug (with build)
      - name: First deploy (normal + debug)
        run: |
          python -m cli.deploy.container run --image "$INFINITO_IMAGE" --build -- \
            --exclude "$EXCLUDED_ROLES" \
            --vars '{"MASK_CREDENTIALS_IN_LOGS": false}' \
            --authorized-keys "ssh-ed25519 AAAA_TEST_DUMMY_KEY github-ci-dummy@infinito" \
            -- \
            -T server \
            --debug \
            --skip-cleanup \
            --skip-tests

      # 2) Second deploy: reset + debug (no build, reuse the existing image)
      - name: Second deploy (--reset --debug)
        run: |
          python -m cli.deploy.container run --image "$INFINITO_IMAGE" -- \
            --exclude "$EXCLUDED_ROLES" \
            --vars '{"MASK_CREDENTIALS_IN_LOGS": false}' \
            --authorized-keys "ssh-ed25519 AAAA_TEST_DUMMY_KEY github-ci-dummy@infinito" \
            -- \
            -T server \
            --reset \
            --debug \
            --skip-cleanup \
            --skip-tests

      # 3) Third deploy: async deploy – no debug
      - name: Third deploy (async deploy – no debug)
        run: |
          python -m cli.deploy.container run --image "$INFINITO_IMAGE" -- \
            --exclude "$EXCLUDED_ROLES" \
            --vars '{"MASK_CREDENTIALS_IN_LOGS": false}' \
            --authorized-keys "ssh-ed25519 AAAA_TEST_DUMMY_KEY github-ci-dummy@infinito" \
            -- \
            -T server \
            --skip-cleanup \
            --skip-tests
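Note that EXCLUDED_ROLES is a YAML folded scalar, so the value handed to --exclude is one long space-joined string with a trailing comma. How cli.deploy.container parses it is not shown in this diff; a plausible normalization, as a sketch only:

```python
import os

# Assumption: the deploy CLI ultimately wants a clean list of role names.
raw = os.environ.get("EXCLUDED_ROLES", "")
excluded = [part.strip() for part in raw.split(",") if part.strip()]
# e.g. ['drv-lid-switch', 'svc-db-memcached', ..., 'web-svc-xmpp']
```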
14  CHANGELOG.md (new file)
@@ -0,0 +1,14 @@
## [0.2.0] - 2025-12-10

* Added full Nix installer integration with dynamic upstream SHA256 verification, OS-specific installation paths, template-driven configuration, and updated pkgmgr integration.

## [0.1.1] - 2025-12-10

* PKGMGR will now be pulled again

## [0.1.0] - 2025-12-09

* Added Nix support role
22  Dockerfile
@@ -1,6 +1,6 @@
|
||||
FROM archlinux:latest
|
||||
|
||||
# 1) Update system and install build/runtime deps
|
||||
# 1) Packages incl. docker (so the docker CLI is available inside the container)
|
||||
RUN pacman -Syu --noconfirm \
|
||||
base-devel \
|
||||
git \
|
||||
@@ -10,15 +10,16 @@ RUN pacman -Syu --noconfirm \
|
||||
alsa-lib \
|
||||
go \
|
||||
rsync \
|
||||
docker \
|
||||
&& pacman -Scc --noconfirm
|
||||
|
||||
# 2) Stub out systemctl & yay so post-install hooks and AUR calls never fail
|
||||
# 2) Stub systemctl & yay
|
||||
RUN printf '#!/bin/sh\nexit 0\n' > /usr/bin/systemctl \
|
||||
&& chmod +x /usr/bin/systemctl \
|
||||
&& printf '#!/bin/sh\nexit 0\n' > /usr/bin/yay \
|
||||
&& chmod +x /usr/bin/yay
|
||||
|
||||
# 3) Build & install python-simpleaudio from AUR manually (as non-root)
|
||||
# 3) python-simpleaudio from the AUR
|
||||
RUN useradd -m aur_builder \
|
||||
&& su aur_builder -c "git clone https://aur.archlinux.org/python-simpleaudio.git /home/aur_builder/psa && \
|
||||
cd /home/aur_builder/psa && \
|
||||
@@ -26,35 +27,32 @@ RUN useradd -m aur_builder \
|
||||
&& pacman -U --noconfirm /home/aur_builder/psa/*.pkg.tar.zst \
|
||||
&& rm -rf /home/aur_builder/psa
|
||||
|
||||
# 4) Clone Kevin’s Package Manager and create its venv
|
||||
# 4) pkgmgr + venv
|
||||
ENV PKGMGR_REPO=/opt/package-manager \
|
||||
PKGMGR_VENV=/root/.venvs/pkgmgr
|
||||
|
||||
RUN git clone https://github.com/kevinveenbirkenbach/package-manager.git $PKGMGR_REPO \
|
||||
&& python -m venv $PKGMGR_VENV \
|
||||
&& $PKGMGR_VENV/bin/pip install --upgrade pip \
|
||||
# install pkgmgr’s own deps + the ansible Python library so infinito import yaml & ansible.plugins.lookup work
|
||||
&& $PKGMGR_VENV/bin/pip install --no-cache-dir -r $PKGMGR_REPO/requirements.txt ansible \
|
||||
# drop a thin wrapper so `pkgmgr` always runs inside that venv
|
||||
&& printf '#!/bin/sh\n. %s/bin/activate\nexec python %s/main.py "$@"\n' \
|
||||
"$PKGMGR_VENV" "$PKGMGR_REPO" > /usr/local/bin/pkgmgr \
|
||||
&& chmod +x /usr/local/bin/pkgmgr
|
||||
|
||||
# 5) Ensure pkgmgr venv bin and user-local bin are on PATH
|
||||
ENV PATH="$PKGMGR_VENV/bin:/root/.local/bin:${PATH}"
|
||||
|
||||
# 6) Copy local Infinito.Nexus source into the image for override
|
||||
# 6) Copy in the Infinito.Nexus source
|
||||
COPY . /opt/infinito-src
|
||||
|
||||
# 7) Install Infinito.Nexus via pkgmgr (clone-mode https)
|
||||
RUN pkgmgr install infinito --clone-mode https
|
||||
# 7) Infinito via pkgmgr (shallow)
|
||||
RUN pkgmgr install infinito --clone-mode shallow
|
||||
|
||||
# 8) Override installed Infinito.Nexus with local source and clean ignored files
|
||||
# 8) Override with the local source
|
||||
RUN INFINITO_PATH=$(pkgmgr path infinito) && \
|
||||
rm -rf "$INFINITO_PATH"/* && \
|
||||
rsync -a --delete --exclude='.git' /opt/infinito-src/ "$INFINITO_PATH"/
|
||||
|
||||
# 9) Symlink the infinito script into /usr/local/bin so ENTRYPOINT works
|
||||
# 9) Symlink
|
||||
RUN INFINITO_PATH=$(pkgmgr path infinito) && \
|
||||
ln -sf "$INFINITO_PATH"/main.py /usr/local/bin/infinito && \
|
||||
chmod +x /usr/local/bin/infinito
|
||||
|
||||
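Step 4 is the piece that keeps pkgmgr pinned to its venv: expanding the printf with the two ENV values gives a three-line wrapper. The expected content, plus a quick way to inspect it in the built image (a sketch, assuming the infinito:latest tag used by the workflows):

```python
# Expected /usr/local/bin/pkgmgr, given PKGMGR_VENV=/root/.venvs/pkgmgr and
# PKGMGR_REPO=/opt/package-manager:
#   #!/bin/sh
#   . /root/.venvs/pkgmgr/bin/activate
#   exec python /opt/package-manager/main.py "$@"
import subprocess

subprocess.run(
    ["docker", "run", "--rm", "infinito:latest", "cat", "/usr/local/bin/pkgmgr"],
    check=True,
)
```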
4  Makefile
@@ -11,7 +11,7 @@ INCLUDE_GROUPS := $(shell python3 main.py meta categories invokable -s "-" --no-
 INCLUDES_OUT_DIR := ./tasks/groups
 
 # Compute extra users as before
-EXTRA_USERS := $(shell \
+RESERVED_USERNAMES := $(shell \
 	find $(ROLES_DIR) -maxdepth 1 -type d -printf '%f\n' \
 	| sed -E 's/.*-//' \
 	| grep -E -x '[a-z0-9]+' \
@@ -50,7 +50,7 @@ messy-build: dockerignore
 	python3 $(USERS_SCRIPT) \
 		--roles-dir $(ROLES_DIR) \
 		--output $(USERS_OUT) \
-		--extra-users "$(EXTRA_USERS)"
+		--reserved-usernames "$(RESERVED_USERNAMES)"
 	@echo "✅ Users defaults written to $(USERS_OUT)\n"
 
 	@echo "🔧 Generating applications defaults → $(APPLICATIONS_OUT)…"
@@ -11,7 +11,7 @@ deprecation_warnings = True
 interpreter_python = auto_silent
 
 # --- Output & Profiling ---
-stdout_callback = yaml
+stdout_callback = ansible.builtin.default
 callbacks_enabled = profile_tasks,timer
 
 # --- Plugin paths ---
@@ -27,3 +27,6 @@ transfer_method = smart
 [persistent_connection]
 connect_timeout = 30
 command_timeout = 60
+
+#[callback_default]
+#result_format = yaml
@@ -58,22 +58,28 @@ class DefaultsGenerator:
|
||||
continue
|
||||
|
||||
config_data = load_yaml_file(config_file)
|
||||
if config_data:
|
||||
try:
|
||||
gid_number = self.gid_lookup.run([application_id], roles_dir=str(self.roles_dir))[0]
|
||||
except Exception as e:
|
||||
print(f"Warning: failed to determine gid for '{application_id}': {e}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
if not config_data:
|
||||
# Empty or null config → still register the application with empty defaults
|
||||
self.log(f"Empty config for {role_name}, adding empty defaults for '{application_id}'")
|
||||
result["defaults_applications"][application_id] = {}
|
||||
continue
|
||||
|
||||
config_data["group_id"] = gid_number
|
||||
result["defaults_applications"][application_id] = config_data
|
||||
# Existing non-empty config: keep current behavior
|
||||
try:
|
||||
gid_number = self.gid_lookup.run([application_id], roles_dir=str(self.roles_dir))[0]
|
||||
except Exception as e:
|
||||
print(f"Warning: failed to determine gid for '{application_id}': {e}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
# Inject users mapping as Jinja2 references
|
||||
users_meta = load_yaml_file(role_dir / "users" / "main.yml")
|
||||
users_data = users_meta.get("users", {})
|
||||
transformed = {user: f"{{{{ users[\"{user}\"] }}}}" for user in users_data}
|
||||
if transformed:
|
||||
result["defaults_applications"][application_id]["users"] = transformed
|
||||
config_data["group_id"] = gid_number
|
||||
result["defaults_applications"][application_id] = config_data
|
||||
|
||||
# Inject users mapping as Jinja2 references (unchanged)
|
||||
users_meta = load_yaml_file(role_dir / "users" / "main.yml")
|
||||
users_data = users_meta.get("users", {})
|
||||
transformed = {user: f"{{{{ users[\"{user}\"] }}}}" for user in users_data}
|
||||
if transformed:
|
||||
result["defaults_applications"][application_id]["users"] = transformed
|
||||
|
||||
# Render placeholders in entire result context
|
||||
self.log("Starting placeholder rendering...")
|
||||
@@ -102,6 +108,95 @@ class DefaultsGenerator:
|
||||
rel = self.output_file
|
||||
print(f"✅ Generated: {rel}")
|
||||
|
||||
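The new branch boils down to one rule: a config/main.yml that exists but is empty (or an explicit {}) still registers the application, just with empty defaults. A standalone sketch of that rule (hypothetical helper, not part of the generator):

```python
import yaml
from pathlib import Path

def defaults_for(config_file: Path) -> dict:
    """An empty file, a whitespace-only file, and an explicit {} all map to {}."""
    data = yaml.safe_load(config_file.read_text(encoding="utf-8"))
    return data if data else {}
```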
def test_empty_config_mapping_adds_empty_defaults(self):
|
||||
"""
|
||||
If a role has vars/main.yml and config/main.yml exists but contains an
|
||||
empty mapping ({}), the generator must still emit an empty-dict entry
|
||||
for that application_id.
|
||||
"""
|
||||
role_empty_cfg = self.roles_dir / "role-empty-config"
|
||||
(role_empty_cfg / "vars").mkdir(parents=True, exist_ok=True)
|
||||
(role_empty_cfg / "config").mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# application_id is defined…
|
||||
(role_empty_cfg / "vars" / "main.yml").write_text(
|
||||
"application_id: emptycfg\n",
|
||||
encoding="utf-8",
|
||||
)
|
||||
# …but config is an explicit empty mapping
|
||||
(role_empty_cfg / "config" / "main.yml").write_text(
|
||||
"{}\n",
|
||||
encoding="utf-8",
|
||||
)
|
||||
|
||||
result = subprocess.run(
|
||||
[
|
||||
"python3",
|
||||
str(self.script_path),
|
||||
"--roles-dir",
|
||||
str(self.roles_dir),
|
||||
"--output-file",
|
||||
str(self.output_file),
|
||||
],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
self.assertEqual(result.returncode, 0, msg=result.stderr)
|
||||
|
||||
data = yaml.safe_load(self.output_file.read_text())
|
||||
apps = data.get("defaults_applications", {})
|
||||
|
||||
self.assertIn("emptycfg", apps)
|
||||
self.assertEqual(
|
||||
apps["emptycfg"],
|
||||
{},
|
||||
msg="Role with {} config should produce an empty defaults mapping",
|
||||
)
|
||||
|
||||
def test_empty_config_file_adds_empty_defaults(self):
|
||||
"""
|
||||
If a role has vars/main.yml and config/main.yml exists but is an empty
|
||||
file (or only whitespace), the generator must still emit an empty-dict
|
||||
entry for that application_id.
|
||||
"""
|
||||
role_empty_file = self.roles_dir / "role-empty-config-file"
|
||||
(role_empty_file / "vars").mkdir(parents=True, exist_ok=True)
|
||||
(role_empty_file / "config").mkdir(parents=True, exist_ok=True)
|
||||
|
||||
(role_empty_file / "vars" / "main.yml").write_text(
|
||||
"application_id: emptyfileapp\n",
|
||||
encoding="utf-8",
|
||||
)
|
||||
# Create an empty file (no YAML content at all)
|
||||
(role_empty_file / "config" / "main.yml").write_text(
|
||||
"",
|
||||
encoding="utf-8",
|
||||
)
|
||||
|
||||
result = subprocess.run(
|
||||
[
|
||||
"python3",
|
||||
str(self.script_path),
|
||||
"--roles-dir",
|
||||
str(self.roles_dir),
|
||||
"--output-file",
|
||||
str(self.output_file),
|
||||
],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
self.assertEqual(result.returncode, 0, msg=result.stderr)
|
||||
|
||||
data = yaml.safe_load(self.output_file.read_text())
|
||||
apps = data.get("defaults_applications", {})
|
||||
|
||||
self.assertIn("emptyfileapp", apps)
|
||||
self.assertEqual(
|
||||
apps["emptyfileapp"],
|
||||
{},
|
||||
msg="Role with empty config file should produce an empty defaults mapping",
|
||||
)
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Generate defaults_applications YAML...")
|
||||
parser.add_argument("--roles-dir", default="roles", help="Path to the roles directory")
|
||||
|
||||
@@ -70,6 +70,7 @@ def build_users(defs, primary_domain, start_id, become_pwd):
|
||||
description = overrides.get('description')
|
||||
roles = overrides.get('roles', [])
|
||||
password = overrides.get('password', become_pwd)
|
||||
reserved = overrides.get('reserved', False)
|
||||
|
||||
# Determine UID and GID
|
||||
if 'uid' in overrides:
|
||||
@@ -89,6 +90,9 @@ def build_users(defs, primary_domain, start_id, become_pwd):
|
||||
if description is not None:
|
||||
entry['description'] = description
|
||||
|
||||
if reserved:
|
||||
entry['reserved'] = reserved
|
||||
|
||||
users[key] = entry
|
||||
|
||||
# Ensure uniqueness of usernames and emails
|
||||
@@ -180,8 +184,8 @@ def parse_args():
|
||||
help='Starting UID/GID number (default: 1001).'
|
||||
)
|
||||
parser.add_argument(
|
||||
'--extra-users', '-e',
|
||||
help='Comma-separated list of additional usernames to include.',
|
||||
'--reserved-usernames', '-e',
|
||||
help='Comma-separated list of usernames to reserve.',
|
||||
default=None
|
||||
)
|
||||
return parser.parse_args()
|
||||
@@ -198,17 +202,21 @@ def main():
|
||||
print(f"Error merging user definitions: {e}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
# Add extra users if specified
|
||||
if args.extra_users:
|
||||
for name in args.extra_users.split(','):
|
||||
# Add reserved users if specified
|
||||
if args.reserved_usernames:
|
||||
for name in args.reserved_usernames.split(','):
|
||||
user_key = name.strip()
|
||||
if not user_key:
|
||||
continue
|
||||
if user_key in definitions:
|
||||
print(f"Warning: extra user '{user_key}' already defined; skipping.", file=sys.stderr)
|
||||
print(
|
||||
f"Warning: reserved user '{user_key}' already defined; skipping (not changing existing definition).",
|
||||
file=sys.stderr
|
||||
)
|
||||
else:
|
||||
definitions[user_key] = {}
|
||||
|
||||
# Mark user as reserved
|
||||
definitions[user_key]["reserved"] = True
|
||||
try:
|
||||
users = build_users(
|
||||
definitions,
|
||||
|
||||
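In effect, --reserved-usernames only adds names that are not already defined and flags them as reserved, so downstream consumers can tell placeholder accounts from real ones. A compact sketch of that merge (illustrative names; assumes the reserved flag is meant only for newly added entries):

```python
def merge_reserved(definitions: dict, reserved_csv: str) -> dict:
    """Add missing usernames from a comma-separated list and mark them reserved."""
    for name in (part.strip() for part in reserved_csv.split(",")):
        if not name or name in definitions:
            continue  # keep existing definitions untouched
        definitions[name] = {"reserved": True}
    return definitions

merge_reserved({}, "backup,monitor")
# -> {'backup': {'reserved': True}, 'monitor': {'reserved': True}}
```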
@@ -6,168 +6,347 @@ import json
|
||||
import re
|
||||
from typing import List, Dict, Any, Set
|
||||
|
||||
from module_utils.role_dependency_resolver import RoleDependencyResolver
|
||||
|
||||
# Regex used to ignore Jinja expressions inside include/import statements
|
||||
JINJA_PATTERN = re.compile(r'{{.*}}')
|
||||
ALL_DEP_TYPES = ['run_after', 'dependencies', 'include_tasks', 'import_tasks', 'include_role', 'import_role']
|
||||
ALL_DIRECTIONS = ['to', 'from']
|
||||
ALL_KEYS = [f"{dep}_{dir}" for dep in ALL_DEP_TYPES for dir in ALL_DIRECTIONS]
|
||||
|
||||
# All dependency types the graph builder supports
|
||||
ALL_DEP_TYPES = [
|
||||
"run_after",
|
||||
"dependencies",
|
||||
"include_tasks",
|
||||
"import_tasks",
|
||||
"include_role",
|
||||
"import_role",
|
||||
]
|
||||
|
||||
# Graph directions: outgoing edges ("to") vs incoming edges ("from")
|
||||
ALL_DIRECTIONS = ["to", "from"]
|
||||
|
||||
# Combined keys: e.g. "include_role_to", "dependencies_from", etc.
|
||||
ALL_KEYS = [f"{dep}_{direction}" for dep in ALL_DEP_TYPES for direction in ALL_DIRECTIONS]
|
||||
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Helpers for locating meta and task files
|
||||
# ------------------------------------------------------------
|
||||
|
||||
def find_role_meta(roles_dir: str, role: str) -> str:
|
||||
path = os.path.join(roles_dir, role, 'meta', 'main.yml')
|
||||
"""Return path to meta/main.yml of a role or raise FileNotFoundError."""
|
||||
path = os.path.join(roles_dir, role, "meta", "main.yml")
|
||||
if not os.path.isfile(path):
|
||||
raise FileNotFoundError(f"Metadata not found for role: {role}")
|
||||
return path
|
||||
|
||||
|
||||
def find_role_tasks(roles_dir: str, role: str) -> str:
|
||||
path = os.path.join(roles_dir, role, 'tasks', 'main.yml')
|
||||
"""Return path to tasks/main.yml of a role or raise FileNotFoundError."""
|
||||
path = os.path.join(roles_dir, role, "tasks", "main.yml")
|
||||
if not os.path.isfile(path):
|
||||
raise FileNotFoundError(f"Tasks not found for role: {role}")
|
||||
return path
|
||||
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Parsers for meta and tasks
|
||||
# ------------------------------------------------------------
|
||||
|
||||
def load_meta(path: str) -> Dict[str, Any]:
|
||||
with open(path, 'r') as f:
|
||||
"""
|
||||
Load metadata from meta/main.yml.
|
||||
Returns a dict with:
|
||||
- galaxy_info
|
||||
- run_after
|
||||
- dependencies
|
||||
"""
|
||||
with open(path, "r") as f:
|
||||
data = yaml.safe_load(f) or {}
|
||||
|
||||
galaxy_info = data.get('galaxy_info', {}) or {}
|
||||
galaxy_info = data.get("galaxy_info", {}) or {}
|
||||
return {
|
||||
'galaxy_info': galaxy_info,
|
||||
'run_after': galaxy_info.get('run_after', []) or [],
|
||||
'dependencies': data.get('dependencies', []) or []
|
||||
"galaxy_info": galaxy_info,
|
||||
"run_after": galaxy_info.get("run_after", []) or [],
|
||||
"dependencies": data.get("dependencies", []) or [],
|
||||
}
|
||||
|
||||
|
||||
def load_tasks(path: str, dep_type: str) -> List[str]:
|
||||
with open(path, 'r') as f:
|
||||
"""
|
||||
Parse include_tasks/import_tasks from tasks/main.yml.
|
||||
Only accepts simple, non-Jinja names.
|
||||
"""
|
||||
with open(path, "r") as f:
|
||||
data = yaml.safe_load(f) or []
|
||||
|
||||
included_roles = []
|
||||
roles: List[str] = []
|
||||
|
||||
for task in data:
|
||||
if not isinstance(task, dict):
|
||||
continue
|
||||
|
||||
if dep_type in task:
|
||||
entry = task[dep_type]
|
||||
if isinstance(entry, dict):
|
||||
entry = entry.get('name', '')
|
||||
if entry and not JINJA_PATTERN.search(entry):
|
||||
included_roles.append(entry)
|
||||
entry = entry.get("name", "")
|
||||
if isinstance(entry, str) and entry and not JINJA_PATTERN.search(entry):
|
||||
roles.append(entry)
|
||||
|
||||
return included_roles
|
||||
return roles
|
||||
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Graph builder using precomputed caches (fast)
|
||||
# ------------------------------------------------------------
|
||||
|
||||
def build_single_graph(
|
||||
start_role: str,
|
||||
dep_type: str,
|
||||
direction: str,
|
||||
roles_dir: str,
|
||||
max_depth: int
|
||||
max_depth: int,
|
||||
caches: Dict[str, Any],
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Build a graph (nodes + links) for one role, one dep_type, one direction.
|
||||
Uses only precomputed in-memory caches, no filesystem access.
|
||||
|
||||
caches structure:
|
||||
caches["meta"][role] -> meta information
|
||||
caches["deps"][dep_type][role] -> outgoing targets
|
||||
caches["rev"][dep_type][target] -> set of source roles
|
||||
"""
|
||||
|
||||
nodes: Dict[str, Dict[str, Any]] = {}
|
||||
links: List[Dict[str, str]] = []
|
||||
|
||||
meta_cache = caches["meta"]
|
||||
deps_cache = caches["deps"]
|
||||
rev_cache = caches["rev"]
|
||||
|
||||
# --------------------------------------------------------
|
||||
# Ensure a role exists as a node
|
||||
# --------------------------------------------------------
|
||||
def ensure_node(role: str):
|
||||
if role in nodes:
|
||||
return
|
||||
|
||||
# Try retrieving cached meta; fallback: lazy load
|
||||
meta = meta_cache.get(role)
|
||||
if meta is None:
|
||||
try:
|
||||
meta = load_meta(find_role_meta(roles_dir, role))
|
||||
meta_cache[role] = meta
|
||||
except FileNotFoundError:
|
||||
meta = {"galaxy_info": {}}
|
||||
|
||||
galaxy_info = meta.get("galaxy_info", {}) or {}
|
||||
|
||||
node = {
|
||||
"id": role,
|
||||
**galaxy_info,
|
||||
"doc_url": f"https://docs.infinito.nexus/roles/{role}/README.html",
|
||||
"source_url": f"https://github.com/kevinveenbirkenbach/infinito-nexus/tree/master/roles/{role}",
|
||||
}
|
||||
nodes[role] = node
|
||||
|
||||
# --------------------------------------------------------
|
||||
# Outgoing edges: role -> targets
|
||||
# --------------------------------------------------------
|
||||
def outgoing(role: str) -> List[str]:
|
||||
return deps_cache.get(dep_type, {}).get(role, []) or []
|
||||
|
||||
# --------------------------------------------------------
|
||||
# Incoming edges: sources -> role
|
||||
# --------------------------------------------------------
|
||||
def incoming(role: str) -> Set[str]:
|
||||
return rev_cache.get(dep_type, {}).get(role, set())
|
||||
|
||||
# --------------------------------------------------------
|
||||
# DFS traversal
|
||||
# --------------------------------------------------------
|
||||
def traverse(role: str, depth: int, path: Set[str]):
|
||||
if role not in nodes:
|
||||
meta = load_meta(find_role_meta(roles_dir, role))
|
||||
node = {'id': role}
|
||||
node.update(meta['galaxy_info'])
|
||||
node['doc_url'] = f"https://docs.infinito.nexus/roles/{role}/README.html"
|
||||
node['source_url'] = f"https://s.infinito.nexus/code/tree/master/roles/{role}"
|
||||
nodes[role] = node
|
||||
ensure_node(role)
|
||||
|
||||
if max_depth > 0 and depth >= max_depth:
|
||||
return
|
||||
|
||||
neighbors = []
|
||||
if dep_type in ['run_after', 'dependencies']:
|
||||
meta = load_meta(find_role_meta(roles_dir, role))
|
||||
neighbors = meta.get(dep_type, [])
|
||||
else:
|
||||
try:
|
||||
neighbors = load_tasks(find_role_tasks(roles_dir, role), dep_type)
|
||||
except FileNotFoundError:
|
||||
neighbors = []
|
||||
if direction == "to":
|
||||
for tgt in outgoing(role):
|
||||
ensure_node(tgt)
|
||||
links.append({"source": role, "target": tgt, "type": dep_type})
|
||||
if tgt not in path:
|
||||
traverse(tgt, depth + 1, path | {tgt})
|
||||
|
||||
if direction == 'to':
|
||||
for tgt in neighbors:
|
||||
links.append({'source': role, 'target': tgt, 'type': dep_type})
|
||||
if tgt in path:
|
||||
continue
|
||||
traverse(tgt, depth + 1, path | {tgt})
|
||||
else: # direction == "from"
|
||||
for src in incoming(role):
|
||||
ensure_node(src)
|
||||
links.append({"source": src, "target": role, "type": dep_type})
|
||||
if src not in path:
|
||||
traverse(src, depth + 1, path | {src})
|
||||
|
||||
else: # direction == 'from'
|
||||
for other in os.listdir(roles_dir):
|
||||
try:
|
||||
other_neighbors = []
|
||||
if dep_type in ['run_after', 'dependencies']:
|
||||
meta_o = load_meta(find_role_meta(roles_dir, other))
|
||||
other_neighbors = meta_o.get(dep_type, [])
|
||||
else:
|
||||
other_neighbors = load_tasks(find_role_tasks(roles_dir, other), dep_type)
|
||||
traverse(start_role, 0, {start_role})
|
||||
|
||||
if role in other_neighbors:
|
||||
links.append({'source': other, 'target': role, 'type': dep_type})
|
||||
if other in path:
|
||||
continue
|
||||
traverse(other, depth + 1, path | {other})
|
||||
return {"nodes": list(nodes.values()), "links": links}
|
||||
|
||||
except FileNotFoundError:
|
||||
continue
|
||||
|
||||
traverse(start_role, depth=0, path={start_role})
|
||||
return {'nodes': list(nodes.values()), 'links': links}
|
||||
|
||||
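To make the caches contract concrete, here is a toy example (hypothetical role names) that exercises only the in-memory path of build_single_graph:

```python
caches = {
    "meta": {"role-a": {"galaxy_info": {}}, "role-b": {"galaxy_info": {}}},
    "deps": {dep: {} for dep in ALL_DEP_TYPES},
    "rev":  {dep: {} for dep in ALL_DEP_TYPES},
}
caches["deps"]["include_role"]["role-a"] = ["role-b"]  # role-a includes role-b
caches["rev"]["include_role"]["role-b"] = {"role-a"}   # reverse index

graph = build_single_graph(
    start_role="role-a", dep_type="include_role", direction="to",
    roles_dir="roles", max_depth=0, caches=caches,
)
# graph["links"] == [{"source": "role-a", "target": "role-b", "type": "include_role"}]
```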
# ------------------------------------------------------------
|
||||
# Build all graph variants for one role
|
||||
# ------------------------------------------------------------
|
||||
|
||||
def build_mappings(
|
||||
start_role: str,
|
||||
roles_dir: str,
|
||||
max_depth: int
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Build all 12 graph variants (6 dep types × 2 directions).
|
||||
Accelerated version:
|
||||
- One-time scan of all metadata
|
||||
- One-time scan of all include_role/import_role
|
||||
- One-time scan of include_tasks/import_tasks
|
||||
- Build reverse-index tables
|
||||
- Then generate all graphs purely from memory
|
||||
"""
|
||||
|
||||
result: Dict[str, Any] = {}
|
||||
for key in ALL_KEYS:
|
||||
dep_type, direction = key.rsplit('_', 1)
|
||||
|
||||
roles = [
|
||||
r for r in os.listdir(roles_dir)
|
||||
if os.path.isdir(os.path.join(roles_dir, r))
|
||||
]
|
||||
|
||||
# Pre-caches
|
||||
meta_cache: Dict[str, Dict[str, Any]] = {}
|
||||
deps_cache: Dict[str, Dict[str, List[str]]] = {dep: {} for dep in ALL_DEP_TYPES}
|
||||
rev_cache: Dict[str, Dict[str, Set[str]]] = {dep: {} for dep in ALL_DEP_TYPES}
|
||||
|
||||
resolver = RoleDependencyResolver(roles_dir)
|
||||
|
||||
# --------------------------------------------------------
|
||||
# Step 1: Preload meta-based deps (run_after, dependencies)
|
||||
# --------------------------------------------------------
|
||||
for role in roles:
|
||||
try:
|
||||
result[key] = build_single_graph(start_role, dep_type, direction, roles_dir, max_depth)
|
||||
meta = load_meta(find_role_meta(roles_dir, role))
|
||||
except FileNotFoundError:
|
||||
continue
|
||||
|
||||
meta_cache[role] = meta
|
||||
|
||||
for dep_key in ["run_after", "dependencies"]:
|
||||
values = meta.get(dep_key, []) or []
|
||||
if isinstance(values, list) and values:
|
||||
deps_cache[dep_key][role] = values
|
||||
|
||||
for tgt in values:
|
||||
if isinstance(tgt, str) and tgt.strip():
|
||||
rev_cache[dep_key].setdefault(tgt.strip(), set()).add(role)
|
||||
|
||||
# --------------------------------------------------------
|
||||
# Step 2: Preload include_role/import_role (resolver)
|
||||
# --------------------------------------------------------
|
||||
for role in roles:
|
||||
role_path = os.path.join(roles_dir, role)
|
||||
inc, imp = resolver._scan_tasks(role_path)
|
||||
|
||||
if inc:
|
||||
inc_list = sorted(inc)
|
||||
deps_cache["include_role"][role] = inc_list
|
||||
for tgt in inc_list:
|
||||
rev_cache["include_role"].setdefault(tgt, set()).add(role)
|
||||
|
||||
if imp:
|
||||
imp_list = sorted(imp)
|
||||
deps_cache["import_role"][role] = imp_list
|
||||
for tgt in imp_list:
|
||||
rev_cache["import_role"].setdefault(tgt, set()).add(role)
|
||||
|
||||
# --------------------------------------------------------
|
||||
# Step 3: Preload include_tasks/import_tasks
|
||||
# --------------------------------------------------------
|
||||
for role in roles:
|
||||
try:
|
||||
tasks_path = find_role_tasks(roles_dir, role)
|
||||
except FileNotFoundError:
|
||||
continue
|
||||
|
||||
for dep_key in ["include_tasks", "import_tasks"]:
|
||||
values = load_tasks(tasks_path, dep_key)
|
||||
if values:
|
||||
deps_cache[dep_key][role] = values
|
||||
|
||||
for tgt in values:
|
||||
rev_cache[dep_key].setdefault(tgt, set()).add(role)
|
||||
|
||||
caches = {
|
||||
"meta": meta_cache,
|
||||
"deps": deps_cache,
|
||||
"rev": rev_cache,
|
||||
}
|
||||
|
||||
# --------------------------------------------------------
|
||||
# Step 4: Build all graphs from caches
|
||||
# --------------------------------------------------------
|
||||
for key in ALL_KEYS:
|
||||
dep_type, direction = key.rsplit("_", 1)
|
||||
try:
|
||||
result[key] = build_single_graph(
|
||||
start_role=start_role,
|
||||
dep_type=dep_type,
|
||||
direction=direction,
|
||||
roles_dir=roles_dir,
|
||||
max_depth=max_depth,
|
||||
caches=caches,
|
||||
)
|
||||
except Exception:
|
||||
result[key] = {'nodes': [], 'links': []}
|
||||
result[key] = {"nodes": [], "links": []}
|
||||
|
||||
return result
|
||||
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Output helper
|
||||
# ------------------------------------------------------------
|
||||
|
||||
def output_graph(graph_data: Any, fmt: str, start: str, key: str):
|
||||
base = f"{start}_{key}"
|
||||
if fmt == 'console':
|
||||
if fmt == "console":
|
||||
print(f"--- {base} ---")
|
||||
print(yaml.safe_dump(graph_data, sort_keys=False))
|
||||
elif fmt in ('yaml', 'json'):
|
||||
|
||||
else:
|
||||
path = f"{base}.{fmt}"
|
||||
with open(path, 'w') as f:
|
||||
if fmt == 'yaml':
|
||||
with open(path, "w") as f:
|
||||
if fmt == "yaml":
|
||||
yaml.safe_dump(graph_data, f, sort_keys=False)
|
||||
else:
|
||||
json.dump(graph_data, f, indent=2)
|
||||
print(f"Wrote {path}")
|
||||
else:
|
||||
raise ValueError(f"Unknown format: {fmt}")
|
||||
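Usage is straightforward; with a hypothetical role name, the yaml format writes one file per graph key:

```python
output_graph({"nodes": [], "links": []}, "yaml", "web-app-demo", "include_role_to")
# -> writes web-app-demo_include_role_to.yaml and prints "Wrote web-app-demo_include_role_to.yaml"
```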
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# CLI entrypoint
|
||||
# ------------------------------------------------------------
|
||||
|
||||
def main():
|
||||
script_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
default_roles_dir = os.path.abspath(os.path.join(script_dir, '..', '..', 'roles'))
|
||||
default_roles_dir = os.path.abspath(os.path.join(script_dir, "..", "..", "roles"))
|
||||
|
||||
parser = argparse.ArgumentParser(description="Generate dependency graphs")
|
||||
parser.add_argument('-r', '--role', required=True, help="Starting role name")
|
||||
parser.add_argument('-D', '--depth', type=int, default=0, help="Max recursion depth")
|
||||
parser.add_argument('-o', '--output', choices=['yaml', 'json', 'console'], default='console')
|
||||
parser.add_argument('--roles-dir', default=default_roles_dir, help="Roles directory")
|
||||
parser.add_argument("-r", "--role", required=True, help="Starting role name")
|
||||
parser.add_argument("-D", "--depth", type=int, default=0, help="Max recursion depth")
|
||||
parser.add_argument("-o", "--output", choices=["yaml", "json", "console"], default="console")
|
||||
parser.add_argument("--roles-dir", default=default_roles_dir, help="Roles directory")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
graphs = build_mappings(args.role, args.roles_dir, args.depth)
|
||||
|
||||
for key in ALL_KEYS:
|
||||
graph_data = graphs.get(key, {'nodes': [], 'links': []})
|
||||
graph_data = graphs.get(key, {"nodes": [], "links": []})
|
||||
output_graph(graph_data, args.output, args.role, key)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
||||
@@ -2,19 +2,76 @@
|
||||
import os
|
||||
import argparse
|
||||
import json
|
||||
from typing import Dict, Any
|
||||
from typing import Dict, Any, Optional, Iterable, Tuple
|
||||
from concurrent.futures import ProcessPoolExecutor, as_completed
|
||||
|
||||
from cli.build.graph import build_mappings, output_graph
|
||||
from module_utils.role_dependency_resolver import RoleDependencyResolver
|
||||
|
||||
|
||||
def find_roles(roles_dir: str):
|
||||
def find_roles(roles_dir: str) -> Iterable[Tuple[str, str]]:
|
||||
"""
|
||||
Yield (role_name, role_path) for all roles in the given roles_dir.
|
||||
"""
|
||||
for entry in os.listdir(roles_dir):
|
||||
path = os.path.join(roles_dir, entry)
|
||||
if os.path.isdir(path):
|
||||
yield entry, path
|
||||
|
||||
|
||||
def process_role(
|
||||
role_name: str,
|
||||
roles_dir: str,
|
||||
depth: int,
|
||||
shadow_folder: Optional[str],
|
||||
output: str,
|
||||
preview: bool,
|
||||
verbose: bool,
|
||||
no_include_role: bool, # currently unused, kept for CLI compatibility
|
||||
no_import_role: bool, # currently unused, kept for CLI compatibility
|
||||
no_dependencies: bool, # currently unused, kept for CLI compatibility
|
||||
no_run_after: bool, # currently unused, kept for CLI compatibility
|
||||
) -> None:
|
||||
"""
|
||||
Worker function: build graphs and (optionally) write meta/tree.json for a single role.
|
||||
|
||||
Note:
|
||||
This version no longer adds a custom top-level "dependencies" bucket.
|
||||
Only the graphs returned by build_mappings() are written.
|
||||
"""
|
||||
role_path = os.path.join(roles_dir, role_name)
|
||||
|
||||
if verbose:
|
||||
print(f"[worker] Processing role: {role_name}")
|
||||
|
||||
# Build the full graph structure (all dep types / directions) for this role
|
||||
graphs: Dict[str, Any] = build_mappings(
|
||||
start_role=role_name,
|
||||
roles_dir=roles_dir,
|
||||
max_depth=depth,
|
||||
)
|
||||
|
||||
# Preview mode: dump graphs to console instead of writing tree.json
|
||||
if preview:
|
||||
for key, data in graphs.items():
|
||||
if verbose:
|
||||
print(f"[worker] Previewing graph '{key}' for role '{role_name}'")
|
||||
# In preview mode we always output as console
|
||||
output_graph(data, "console", role_name, key)
|
||||
return
|
||||
|
||||
# Non-preview: write meta/tree.json for this role
|
||||
if shadow_folder:
|
||||
tree_file = os.path.join(shadow_folder, role_name, "meta", "tree.json")
|
||||
else:
|
||||
tree_file = os.path.join(role_path, "meta", "tree.json")
|
||||
|
||||
os.makedirs(os.path.dirname(tree_file), exist_ok=True)
|
||||
with open(tree_file, "w", encoding="utf-8") as f:
|
||||
json.dump(graphs, f, indent=2)
|
||||
|
||||
print(f"Wrote {tree_file}")
|
||||
|
||||
|
||||
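Since the custom top-level "dependencies" bucket is gone, the written tree.json contains exactly the twelve graph variants returned by build_mappings(). A quick structural check, as a sketch with a hypothetical role path:

```python
import json
from cli.build.graph import ALL_DEP_TYPES, ALL_DIRECTIONS

with open("roles/role-a/meta/tree.json", encoding="utf-8") as fh:
    tree = json.load(fh)

expected = {f"{dep}_{direction}" for dep in ALL_DEP_TYPES for direction in ALL_DIRECTIONS}
assert set(tree) == expected  # 6 dependency types x 2 directions
assert all({"nodes", "links"} <= set(graph) for graph in tree.values())
```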
def main():
|
||||
script_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
default_roles_dir = os.path.abspath(os.path.join(script_dir, "..", "..", "roles"))
|
||||
@@ -22,24 +79,67 @@ def main():
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Generate all graphs for each role and write meta/tree.json"
|
||||
)
|
||||
parser.add_argument("-d", "--role_dir", default=default_roles_dir,
|
||||
help=f"Path to roles directory (default: {default_roles_dir})")
|
||||
parser.add_argument("-D", "--depth", type=int, default=0,
|
||||
help="Max recursion depth (>0) or <=0 to stop on cycle")
|
||||
parser.add_argument("-o", "--output", choices=["yaml", "json", "console"],
|
||||
default="json", help="Output format")
|
||||
parser.add_argument("-p", "--preview", action="store_true",
|
||||
help="Preview graphs to console instead of writing files")
|
||||
parser.add_argument("-s", "--shadow-folder", type=str, default=None,
|
||||
help="If set, writes tree.json to this shadow folder instead of the role's actual meta/ folder")
|
||||
parser.add_argument("-v", "--verbose", action="store_true", help="Enable verbose logging")
|
||||
parser.add_argument(
|
||||
"-d",
|
||||
"--role_dir",
|
||||
default=default_roles_dir,
|
||||
help=f"Path to roles directory (default: {default_roles_dir})",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-D",
|
||||
"--depth",
|
||||
type=int,
|
||||
default=0,
|
||||
help="Max recursion depth (>0) or <=0 to stop on cycle",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-o",
|
||||
"--output",
|
||||
choices=["yaml", "json", "console"],
|
||||
default="json",
|
||||
help="Output format for preview mode",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-p",
|
||||
"--preview",
|
||||
action="store_true",
|
||||
help="Preview graphs to console instead of writing files",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-s",
|
||||
"--shadow-folder",
|
||||
type=str,
|
||||
default=None,
|
||||
help="If set, writes tree.json to this shadow folder instead of the role's actual meta/ folder",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-v",
|
||||
"--verbose",
|
||||
action="store_true",
|
||||
help="Enable verbose logging",
|
||||
)
|
||||
|
||||
# Toggles
|
||||
parser.add_argument("--no-include-role", action="store_true", help="Do not scan include_role")
|
||||
parser.add_argument("--no-import-role", action="store_true", help="Do not scan import_role")
|
||||
parser.add_argument("--no-dependencies", action="store_true", help="Do not read meta/main.yml dependencies")
|
||||
parser.add_argument("--no-run-after", action="store_true",
|
||||
help="Do not read galaxy_info.run_after from meta/main.yml")
|
||||
# Toggles (kept for CLI compatibility, currently only meaningful for future extensions)
|
||||
parser.add_argument(
|
||||
"--no-include-role",
|
||||
action="store_true",
|
||||
help="Reserved: do not include include_role in custom dependency bucket",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--no-import-role",
|
||||
action="store_true",
|
||||
help="Reserved: do not include import_role in custom dependency bucket",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--no-dependencies",
|
||||
action="store_true",
|
||||
help="Reserved: do not include meta dependencies in custom dependency bucket",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--no-run-after",
|
||||
action="store_true",
|
||||
help="Reserved: do not include run_after in custom dependency bucket",
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
@@ -50,54 +150,53 @@ def main():
|
||||
print(f"Preview mode: {args.preview}")
|
||||
print(f"Shadow folder: {args.shadow_folder}")
|
||||
|
||||
resolver = RoleDependencyResolver(args.role_dir)
|
||||
roles = [role_name for role_name, _ in find_roles(args.role_dir)]
|
||||
|
||||
for role_name, role_path in find_roles(args.role_dir):
|
||||
if args.verbose:
|
||||
print(f"Processing role: {role_name}")
|
||||
# For preview, run sequentially to avoid completely interleaved output.
|
||||
if args.preview:
|
||||
for role_name in roles:
|
||||
process_role(
|
||||
role_name=role_name,
|
||||
roles_dir=args.role_dir,
|
||||
depth=args.depth,
|
||||
shadow_folder=args.shadow_folder,
|
||||
output=args.output,
|
||||
preview=True,
|
||||
verbose=args.verbose,
|
||||
no_include_role=args.no_include_role,
|
||||
no_import_role=args.no_import_role,
|
||||
no_dependencies=args.no_dependencies,
|
||||
no_run_after=args.no_run_after,
|
||||
)
|
||||
return
|
||||
|
||||
graphs: Dict[str, Any] = build_mappings(
|
||||
start_role=role_name,
|
||||
roles_dir=args.role_dir,
|
||||
max_depth=args.depth
|
||||
)
|
||||
# Non-preview: roles are processed in parallel
|
||||
with ProcessPoolExecutor() as executor:
|
||||
futures = {
|
||||
executor.submit(
|
||||
process_role,
|
||||
role_name,
|
||||
args.role_dir,
|
||||
args.depth,
|
||||
args.shadow_folder,
|
||||
args.output,
|
||||
False, # preview=False in parallel mode
|
||||
args.verbose,
|
||||
args.no_include_role,
|
||||
args.no_import_role,
|
||||
args.no_dependencies,
|
||||
args.no_run_after,
|
||||
): role_name
|
||||
for role_name in roles
|
||||
}
|
||||
|
||||
# Direct deps (depth=1) – captured separately for the buckets
|
||||
inc_roles, imp_roles = resolver._scan_tasks(role_path)
|
||||
meta_deps = resolver._extract_meta_dependencies(role_path)
|
||||
run_after = set()
|
||||
if not args.no_run_after:
|
||||
run_after = resolver._extract_meta_run_after(role_path)
|
||||
|
||||
if any([not args.no_include_role and inc_roles,
|
||||
not args.no_import_role and imp_roles,
|
||||
not args.no_dependencies and meta_deps,
|
||||
not args.no_run_after and run_after]):
|
||||
deps_root = graphs.setdefault("dependencies", {})
|
||||
if not args.no_include_role and inc_roles:
|
||||
deps_root["include_role"] = sorted(inc_roles)
|
||||
if not args.no_import_role and imp_roles:
|
||||
deps_root["import_role"] = sorted(imp_roles)
|
||||
if not args.no_dependencies and meta_deps:
|
||||
deps_root["dependencies"] = sorted(meta_deps)
|
||||
if not args.no_run_after and run_after:
|
||||
deps_root["run_after"] = sorted(run_after)
|
||||
graphs["dependencies"] = deps_root
|
||||
|
||||
if args.preview:
|
||||
for key, data in graphs.items():
|
||||
if args.verbose:
|
||||
print(f"Previewing graph '{key}' for role '{role_name}'")
|
||||
output_graph(data, "console", role_name, key)
|
||||
else:
|
||||
if args.shadow_folder:
|
||||
tree_file = os.path.join(args.shadow_folder, role_name, "meta", "tree.json")
|
||||
else:
|
||||
tree_file = os.path.join(role_path, "meta", "tree.json")
|
||||
os.makedirs(os.path.dirname(tree_file), exist_ok=True)
|
||||
with open(tree_file, "w", encoding="utf-8") as f:
|
||||
json.dump(graphs, f, indent=2)
|
||||
print(f"Wrote {tree_file}")
|
||||
for future in as_completed(futures):
|
||||
role_name = futures[future]
|
||||
try:
|
||||
future.result()
|
||||
except Exception as exc:
|
||||
# Do not crash the whole run; report the failing role instead.
|
||||
print(f"[ERROR] Role '{role_name}' failed: {exc}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
@@ -9,6 +9,14 @@ Usage example:
|
||||
--inventory-file host_vars/echoserver.yml \
|
||||
--vault-password-file .pass/echoserver.txt \
|
||||
--set credentials.database_password=mysecret
|
||||
|
||||
With snippet mode (no file changes, just YAML output):
|
||||
|
||||
infinito create credentials \
|
||||
--role-path roles/web-app-akaunting \
|
||||
--inventory-file host_vars/echoserver.yml \
|
||||
--vault-password-file .pass/echoserver.txt \
|
||||
--snippet
|
||||
"""
|
||||
|
||||
import argparse
|
||||
@@ -92,7 +100,14 @@ def to_vault_block(vault_handler: VaultHandler, value: Union[str, Any], label: s
|
||||
Return a ruamel scalar tagged as !vault. If the input value is already
|
||||
vault-encrypted (string contains $ANSIBLE_VAULT or is a !vault scalar), reuse/wrap.
|
||||
Otherwise, encrypt plaintext via ansible-vault.
|
||||
|
||||
Special rule:
|
||||
- Empty strings ("") are NOT encrypted and are returned as plain "".
|
||||
"""
|
||||
# Empty strings should not be encrypted
|
||||
if isinstance(value, str) and value == "":
|
||||
return ""
|
||||
|
||||
# Already a ruamel !vault scalar → reuse
|
||||
if _is_ruamel_vault(value):
|
||||
return value
|
||||
@@ -105,7 +120,6 @@ def to_vault_block(vault_handler: VaultHandler, value: Union[str, Any], label: s
|
||||
snippet = vault_handler.encrypt_string(str(value), label)
|
||||
return _make_vault_scalar_from_text(snippet)
|
||||
|
||||
|
||||
def parse_overrides(pairs: list[str]) -> Dict[str, str]:
|
||||
"""
|
||||
Parse --set key=value pairs into a dict.
|
||||
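For instance (illustrative values), both spellings used elsewhere in this script end up as plain dict keys:

```python
parse_overrides(["credentials.database_password=mysecret", "ansible_become_password=s3cret"])
# -> {'credentials.database_password': 'mysecret', 'ansible_become_password': 's3cret'}
```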
@@ -139,6 +153,23 @@ def main() -> int:
|
||||
"-y", "--yes", action="store_true",
|
||||
help="Non-interactive: assume 'yes' for all overwrite confirmations when --force is used"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--snippet",
|
||||
action="store_true",
|
||||
help=(
|
||||
"Do not modify the inventory file. Instead, print a YAML snippet with "
|
||||
"the generated credentials to stdout. The snippet contains only the "
|
||||
"application's credentials (and ansible_become_password if provided)."
|
||||
),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--allow-empty-plain",
|
||||
action="store_true",
|
||||
help=(
|
||||
"Allow 'plain' credentials in the schema without an explicit --set override. "
|
||||
"Missing plain values will be set to an empty string before encryption."
|
||||
),
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
overrides = parse_overrides(args.set)
|
||||
@@ -148,36 +179,82 @@ def main() -> int:
|
||||
role_path=Path(args.role_path),
|
||||
inventory_path=Path(args.inventory_file),
|
||||
vault_pw=args.vault_password_file,
|
||||
overrides=overrides
|
||||
overrides=overrides,
|
||||
allow_empty_plain=args.allow_empty_plain,
|
||||
)
|
||||
|
||||
# 1) Load existing inventory with ruamel (round-trip)
|
||||
yaml_rt = YAML(typ="rt")
|
||||
yaml_rt.preserve_quotes = True
|
||||
|
||||
# Get schema-applied structure (defaults etc.) for *non-destructive* merge
|
||||
schema_inventory: Dict[str, Any] = manager.apply_schema()
|
||||
schema_apps = schema_inventory.get("applications", {})
|
||||
schema_app_block = schema_apps.get(manager.app_id, {})
|
||||
schema_creds = schema_app_block.get("credentials", {}) if isinstance(schema_app_block, dict) else {}
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# SNIPPET MODE: only build a YAML fragment and print to stdout, no file I/O
|
||||
# -------------------------------------------------------------------------
|
||||
if args.snippet:
|
||||
# Build a minimal structure:
|
||||
# applications:
|
||||
# <app_id>:
|
||||
# credentials:
|
||||
# key: !vault |
|
||||
# ...
|
||||
# ansible_become_password: !vault | ...
|
||||
snippet_data = CommentedMap()
|
||||
apps_snip = ensure_map(snippet_data, "applications")
|
||||
app_block_snip = ensure_map(apps_snip, manager.app_id)
|
||||
creds_snip = ensure_map(app_block_snip, "credentials")
|
||||
|
||||
for key, default_val in schema_creds.items():
|
||||
# Priority: --set exact key → default from schema → empty string
|
||||
ov = overrides.get(f"credentials.{key}", None)
|
||||
if ov is None:
|
||||
ov = overrides.get(key, None)
|
||||
|
||||
if ov is not None:
|
||||
value_for_key: Union[str, Any] = ov
|
||||
else:
|
||||
if _is_vault_encrypted(default_val):
|
||||
creds_snip[key] = to_vault_block(manager.vault_handler, default_val, key)
|
||||
continue
|
||||
value_for_key = "" if default_val is None else str(default_val)
|
||||
|
||||
creds_snip[key] = to_vault_block(manager.vault_handler, value_for_key, key)
|
||||
|
||||
# Optional ansible_become_password only if provided via overrides
|
||||
if "ansible_become_password" in overrides:
|
||||
snippet_data["ansible_become_password"] = to_vault_block(
|
||||
manager.vault_handler,
|
||||
overrides["ansible_become_password"],
|
||||
"ansible_become_password",
|
||||
)
|
||||
|
||||
yaml_rt.dump(snippet_data, sys.stdout)
|
||||
return 0
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# DEFAULT MODE: modify the inventory file on disk (previous behavior)
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
# 1) Load existing inventory with ruamel (round-trip)
|
||||
with open(args.inventory_file, "r", encoding="utf-8") as f:
|
||||
data = yaml_rt.load(f) # CommentedMap or None
|
||||
if data is None:
|
||||
data = CommentedMap()
|
||||
|
||||
# 2) Get schema-applied structure (defaults etc.) for *non-destructive* merge
|
||||
schema_inventory: Dict[str, Any] = manager.apply_schema()
|
||||
|
||||
# 3) Ensure structural path exists
|
||||
# 2) Ensure structural path exists
|
||||
apps = ensure_map(data, "applications")
|
||||
app_block = ensure_map(apps, manager.app_id)
|
||||
creds = ensure_map(app_block, "credentials")
|
||||
|
||||
# 4) Determine defaults we could add
|
||||
schema_apps = schema_inventory.get("applications", {})
|
||||
schema_app_block = schema_apps.get(manager.app_id, {})
|
||||
schema_creds = schema_app_block.get("credentials", {}) if isinstance(schema_app_block, dict) else {}
|
||||
|
||||
# 5) Add ONLY missing credential keys
|
||||
# 3) Add ONLY missing credential keys (respect existing values)
|
||||
newly_added_keys = set()
|
||||
for key, default_val in schema_creds.items():
|
||||
if key in creds:
|
||||
# existing → do not touch (preserve plaintext/vault/formatting/comments)
|
||||
# Existing → do not touch (preserve plaintext/vault/formatting/comments)
|
||||
continue
|
||||
|
||||
# Value to use for the new key
|
||||
@@ -200,7 +277,7 @@ def main() -> int:
|
||||
creds[key] = to_vault_block(manager.vault_handler, value_for_new_key, key)
|
||||
newly_added_keys.add(key)
|
||||
|
||||
# 6) ansible_become_password: only add if missing;
|
||||
# 4) ansible_become_password: only add if missing;
|
||||
# never rewrite an existing one unless --force (+ confirm/--yes) and override provided.
|
||||
if "ansible_become_password" not in data:
|
||||
val = overrides.get("ansible_become_password", None)
|
||||
@@ -216,7 +293,7 @@ def main() -> int:
|
||||
manager.vault_handler, overrides["ansible_become_password"], "ansible_become_password"
|
||||
)
|
||||
|
||||
# 7) Overrides for existing credential keys (only with --force)
|
||||
# 5) Overrides for existing credential keys (only with --force)
|
||||
if args.force:
|
||||
for ov_key, ov_val in overrides.items():
|
||||
# Accept both 'credentials.key' and bare 'key'
|
||||
@@ -228,7 +305,7 @@ def main() -> int:
|
||||
if args.yes or ask_for_confirmation(key):
|
||||
creds[key] = to_vault_block(manager.vault_handler, ov_val, key)
|
||||
|
||||
# 8) Write back with ruamel (preserve formatting & comments)
|
||||
# 6) Write back with ruamel (preserve formatting & comments)
|
||||
with open(args.inventory_file, "w", encoding="utf-8") as f:
|
||||
yaml_rt.dump(data, f)
|
||||
|
||||
|
||||
1173  cli/create/inventory.py (new file; diff suppressed because it is too large)
316  cli/deploy.py
@@ -1,316 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import argparse
|
||||
import subprocess
|
||||
import os
|
||||
import datetime
|
||||
import sys
|
||||
import re
|
||||
from typing import Optional, Dict, Any, List
|
||||
|
||||
|
||||
def run_ansible_playbook(
|
||||
inventory,
|
||||
modes,
|
||||
limit=None,
|
||||
allowed_applications=None,
|
||||
password_file=None,
|
||||
verbose=0,
|
||||
skip_build=False,
|
||||
skip_tests=False,
|
||||
logs=False
|
||||
):
|
||||
start_time = datetime.datetime.now()
|
||||
print(f"\n▶️ Script started at: {start_time.isoformat()}\n")
|
||||
|
||||
# Cleanup is now handled via MODE_CLEANUP
|
||||
if modes.get("MODE_CLEANUP", False):
|
||||
cleanup_command = ["make", "clean-keep-logs"] if logs else ["make", "clean"]
|
||||
print("\n🧹 Cleaning up project (" + " ".join(cleanup_command) + ")...\n")
|
||||
subprocess.run(cleanup_command, check=True)
|
||||
else:
|
||||
print("\n⚠️ Skipping cleanup as requested.\n")
|
||||
|
||||
if not skip_build:
|
||||
print("\n🛠️ Building project (make messy-build)...\n")
|
||||
subprocess.run(["make", "messy-build"], check=True)
|
||||
else:
|
||||
print("\n⚠️ Skipping build as requested.\n")
|
||||
|
||||
script_dir = os.path.dirname(os.path.realpath(__file__))
|
||||
playbook = os.path.join(os.path.dirname(script_dir), "playbook.yml")
|
||||
|
||||
# Inventory validation is controlled via MODE_ASSERT
|
||||
if modes.get("MODE_ASSERT", None) is False:
|
||||
print("\n⚠️ Skipping inventory validation as requested.\n")
|
||||
elif "MODE_ASSERT" not in modes or modes["MODE_ASSERT"] is True:
|
||||
print("\n🔍 Validating inventory before deployment...\n")
|
||||
try:
|
||||
subprocess.run(
|
||||
[
|
||||
sys.executable,
|
||||
os.path.join(script_dir, "validate", "inventory.py"),
|
||||
os.path.dirname(inventory),
|
||||
],
|
||||
check=True,
|
||||
)
|
||||
except subprocess.CalledProcessError:
|
||||
print("\n❌ Inventory validation failed. Deployment aborted.\n", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
if not skip_tests:
|
||||
print("\n🧪 Running tests (make messy-test)...\n")
|
||||
subprocess.run(["make", "messy-test"], check=True)
|
||||
|
||||
# Build ansible-playbook command
|
||||
cmd = ["ansible-playbook", "-i", inventory, playbook]
|
||||
|
||||
if limit:
|
||||
cmd.extend(["--limit", limit])
|
||||
|
||||
if allowed_applications:
|
||||
joined = ",".join(allowed_applications)
|
||||
cmd.extend(["-e", f"allowed_applications={joined}"])
|
||||
|
||||
for key, value in modes.items():
|
||||
val = str(value).lower() if isinstance(value, bool) else str(value)
|
||||
cmd.extend(["-e", f"{key}={val}"])
|
||||
|
||||
if password_file:
|
||||
cmd.extend(["--vault-password-file", password_file])
|
||||
else:
|
||||
cmd.extend(["--ask-vault-pass"])
|
||||
|
||||
if verbose:
|
||||
cmd.append("-" + "v" * verbose)
|
||||
|
||||
print("\n🚀 Launching Ansible Playbook...\n")
|
||||
subprocess.run(cmd, check=True)
|
||||
|
||||
end_time = datetime.datetime.now()
|
||||
print(f"\n✅ Script ended at: {end_time.isoformat()}\n")
|
||||
|
||||
duration = end_time - start_time
|
||||
print(f"⏱️ Total execution time: {duration}\n")
|
||||
|
||||
|
||||
def validate_application_ids(inventory, app_ids):
|
||||
"""
|
||||
Abort the script if any application IDs are invalid, with detailed reasons.
|
||||
"""
|
||||
from module_utils.valid_deploy_id import ValidDeployId
|
||||
|
||||
validator = ValidDeployId()
|
||||
invalid = validator.validate(inventory, app_ids)
|
||||
if invalid:
|
||||
print("\n❌ Detected invalid application_id(s):\n")
|
||||
for app_id, status in invalid.items():
|
||||
reasons = []
|
||||
if not status["in_roles"]:
|
||||
reasons.append("not defined in roles (infinito)")
|
||||
if not status["in_inventory"]:
|
||||
reasons.append("not found in inventory file")
|
||||
print(f" - {app_id}: " + ", ".join(reasons))
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
MODE_LINE_RE = re.compile(
|
||||
r"""^\s*(?P<key>[A-Z0-9_]+)\s*:\s*(?P<value>.+?)\s*(?:#\s*(?P<cmt>.*))?\s*$"""
|
||||
)
|
||||
|
||||
|
||||
def _parse_bool_literal(text: str) -> Optional[bool]:
|
||||
t = text.strip().lower()
|
||||
if t in ("true", "yes", "on"):
|
||||
return True
|
||||
if t in ("false", "no", "off"):
|
||||
return False
|
||||
return None
|
||||
|
||||
|
||||
def load_modes_from_yaml(modes_yaml_path: str) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Parse group_vars/all/01_modes.yml line-by-line to recover:
|
||||
- name (e.g., MODE_TEST)
|
||||
- default (True/False/None if templated/unknown)
|
||||
- help (from trailing # comment, if present)
|
||||
"""
|
||||
modes = []
|
||||
if not os.path.exists(modes_yaml_path):
|
||||
raise FileNotFoundError(f"Modes file not found: {modes_yaml_path}")
|
||||
|
||||
with open(modes_yaml_path, "r", encoding="utf-8") as fh:
|
||||
for line in fh:
|
||||
line = line.rstrip()
|
||||
if not line or line.lstrip().startswith("#"):
|
||||
continue
|
||||
m = MODE_LINE_RE.match(line)
|
||||
if not m:
|
||||
continue
|
||||
key = m.group("key")
|
||||
val = m.group("value").strip()
|
||||
cmt = (m.group("cmt") or "").strip()
|
||||
|
||||
if not key.startswith("MODE_"):
|
||||
continue
|
||||
|
||||
default_bool = _parse_bool_literal(val)
|
||||
modes.append(
|
||||
{
|
||||
"name": key,
|
||||
"default": default_bool,
|
||||
"help": cmt or f"Toggle {key}",
|
||||
}
|
||||
)
|
||||
return modes
|
||||
|
||||
|
||||
def add_dynamic_mode_args(
|
||||
parser: argparse.ArgumentParser, modes_meta: List[Dict[str, Any]]
|
||||
) -> Dict[str, Dict[str, Any]]:
|
||||
"""
|
||||
Add argparse options based on modes metadata.
|
||||
Returns a dict mapping mode name -> { 'dest': <argparse_dest>, 'default': <bool/None>, 'kind': 'bool_true'|'bool_false'|'explicit' }.
|
||||
"""
|
||||
spec: Dict[str, Dict[str, Any]] = {}
|
||||
for m in modes_meta:
|
||||
name = m["name"]
|
||||
default = m["default"]
|
||||
desc = m["help"]
|
||||
short = name.replace("MODE_", "").lower()
|
||||
|
||||
if default is True:
|
||||
opt = f"--skip-{short}"
|
||||
dest = f"skip_{short}"
|
||||
help_txt = desc or f"Skip/disable {short} (default: enabled)"
|
||||
parser.add_argument(opt, action="store_true", help=help_txt, dest=dest)
|
||||
spec[name] = {"dest": dest, "default": True, "kind": "bool_true"}
|
||||
elif default is False:
|
||||
opt = f"--{short}"
|
||||
dest = short
|
||||
help_txt = desc or f"Enable {short} (default: disabled)"
|
||||
parser.add_argument(opt, action="store_true", help=help_txt, dest=dest)
|
||||
spec[name] = {"dest": dest, "default": False, "kind": "bool_false"}
|
||||
else:
|
||||
opt = f"--{short}"
|
||||
dest = short
|
||||
help_txt = desc or f"Set {short} explicitly (true/false). If omitted, keep inventory default."
|
||||
parser.add_argument(opt, choices=["true", "false"], help=help_txt, dest=dest)
|
||||
spec[name] = {"dest": dest, "default": None, "kind": "explicit"}
|
||||
|
||||
return spec
|
||||
|
||||
|
||||
def build_modes_from_args(
|
||||
spec: Dict[str, Dict[str, Any]], args_namespace: argparse.Namespace
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Using the argparse results and the spec, compute the `modes` dict to pass to Ansible.
|
||||
"""
|
||||
modes: Dict[str, Any] = {}
|
||||
for mode_name, info in spec.items():
|
||||
dest = info["dest"]
|
||||
kind = info["kind"]
|
||||
val = getattr(args_namespace, dest, None)
|
||||
|
||||
if kind == "bool_true":
|
||||
modes[mode_name] = False if val else True
|
||||
elif kind == "bool_false":
|
||||
modes[mode_name] = True if val else False
|
||||
else:
|
||||
if val is not None:
|
||||
modes[mode_name] = True if val == "true" else False
|
||||
return modes
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Run the central Ansible deployment script to manage infrastructure, updates, and tests."
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"inventory",
|
||||
help="Path to the inventory file (INI or YAML) containing hosts and variables.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-l",
|
||||
"--limit",
|
||||
help="Restrict execution to a specific host or host group from the inventory.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-T",
|
||||
"--host-type",
|
||||
choices=["server", "desktop"],
|
||||
default="server",
|
||||
help="Specify whether the target is a server or a personal computer. Affects role selection and variables.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-p",
|
||||
"--password-file",
|
||||
help="Path to the file containing the Vault password. If not provided, prompts for the password interactively.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-B",
|
||||
"--skip-build",
|
||||
action="store_true",
|
||||
help="Skip running 'make build' before deployment.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-t",
|
||||
"--skip-tests",
|
||||
action="store_true",
|
||||
help="Skip running 'make messy-tests' before deployment.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-i",
|
||||
"--id",
|
||||
nargs="+",
|
||||
default=[],
|
||||
dest="id",
|
||||
help="List of application_id's for partial deploy. If not set, all application IDs defined in the inventory will be executed.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-v",
|
||||
"--verbose",
|
||||
action="count",
|
||||
default=0,
|
||||
help="Increase verbosity level. Multiple -v flags increase detail (e.g., -vvv for maximum log output).",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--logs",
|
||||
action="store_true",
|
||||
help="Keep the CLI logs during cleanup command",
|
||||
)
|
||||
|
||||
# ---- Dynamically add mode flags from group_vars/all/01_modes.yml ----
|
||||
script_dir = os.path.dirname(os.path.realpath(__file__))
|
||||
repo_root = os.path.dirname(script_dir)
|
||||
modes_yaml_path = os.path.join(repo_root, "group_vars", "all", "01_modes.yml")
|
||||
modes_meta = load_modes_from_yaml(modes_yaml_path)
|
||||
modes_spec = add_dynamic_mode_args(parser, modes_meta)
|
||||
|
||||
args = parser.parse_args()
|
||||
validate_application_ids(args.inventory, args.id)
|
||||
|
||||
# Build modes from dynamic args
|
||||
modes = build_modes_from_args(modes_spec, args)
|
||||
|
||||
# Additional non-dynamic flags
|
||||
modes["MODE_LOGS"] = args.logs
|
||||
modes["host_type"] = args.host_type
|
||||
|
||||
run_ansible_playbook(
|
||||
inventory=args.inventory,
|
||||
modes=modes,
|
||||
limit=args.limit,
|
||||
allowed_applications=args.id,
|
||||
password_file=args.password_file,
|
||||
verbose=args.verbose,
|
||||
skip_build=args.skip_build,
|
||||
skip_tests=args.skip_tests,
|
||||
logs=args.logs,
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
cli/deploy/__init__.py (new empty file)

cli/deploy/container.py (new file, 373 lines)
@@ -0,0 +1,373 @@
# cli/deploy/container.py
|
||||
import argparse
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
import uuid
|
||||
from typing import List, Tuple
|
||||
|
||||
|
||||
WORKDIR_DEFAULT = "/opt/infinito-src"
|
||||
|
||||
|
||||
def ensure_image(image: str, rebuild: bool = False, no_cache: bool = False) -> None:
|
||||
"""
|
||||
Handle Docker image creation rules:
|
||||
- rebuild=True => always rebuild
|
||||
- rebuild=False & image missing => build once
|
||||
- no_cache=True => add '--no-cache' to docker build
|
||||
"""
|
||||
build_args = ["docker", "build", "--network=host", "--pull"]
|
||||
if no_cache:
|
||||
build_args.append("--no-cache")
|
||||
build_args += ["-t", image, "."]
|
||||
|
||||
if rebuild:
|
||||
print(f">>> Forcing rebuild of Docker image '{image}'...")
|
||||
subprocess.run(build_args, check=True)
|
||||
print(f">>> Docker image '{image}' rebuilt (forced).")
|
||||
return
|
||||
|
||||
print(f">>> Checking if Docker image '{image}' exists...")
|
||||
result = subprocess.run(
|
||||
["docker", "image", "inspect", image],
|
||||
stdout=subprocess.DEVNULL,
|
||||
stderr=subprocess.DEVNULL,
|
||||
)
|
||||
if result.returncode == 0:
|
||||
print(f">>> Docker image '{image}' already exists.")
|
||||
return
|
||||
|
||||
print(f">>> Docker image '{image}' not found. Building it...")
|
||||
subprocess.run(build_args, check=True)
|
||||
print(f">>> Docker image '{image}' successfully built.")
|
||||
|
||||
|
||||
def docker_exec(
|
||||
container: str,
|
||||
args: List[str],
|
||||
workdir: str | None = None,
|
||||
check: bool = True,
|
||||
) -> subprocess.CompletedProcess:
|
||||
"""
|
||||
Helper to run `docker exec` with optional working directory.
|
||||
"""
|
||||
cmd = ["docker", "exec"]
|
||||
if workdir:
|
||||
cmd += ["-w", workdir]
|
||||
cmd.append(container)
|
||||
cmd += args
|
||||
|
||||
return subprocess.run(cmd, check=check)
|
||||
|
||||
|
||||
def wait_for_inner_docker(container: str, timeout: int = 60) -> None:
|
||||
"""
|
||||
Poll `docker exec <container> docker info` until inner dockerd is ready.
|
||||
"""
|
||||
print(">>> Waiting for inner Docker daemon inside CI container...")
|
||||
for _ in range(timeout):
|
||||
result = subprocess.run(
|
||||
["docker", "exec", container, "docker", "info"],
|
||||
stdout=subprocess.DEVNULL,
|
||||
stderr=subprocess.DEVNULL,
|
||||
)
|
||||
if result.returncode == 0:
|
||||
print(">>> Inner Docker daemon is UP.")
|
||||
return
|
||||
time.sleep(1)
|
||||
|
||||
raise RuntimeError("Inner Docker daemon did not become ready in time")
|
||||
|
||||
|
||||
def start_ci_container(
|
||||
image: str,
|
||||
build: bool,
|
||||
rebuild: bool,
|
||||
no_cache: bool,
|
||||
name: str | None = None,
|
||||
) -> str:
|
||||
"""
|
||||
Start a CI container running dockerd inside.
|
||||
|
||||
Returns the container name.
|
||||
"""
|
||||
if build or rebuild:
|
||||
ensure_image(image, rebuild=rebuild, no_cache=no_cache)
|
||||
|
||||
if not name:
|
||||
name = f"infinito-ci-{uuid.uuid4().hex[:8]}"
|
||||
|
||||
print(f">>> Starting CI container '{name}' with inner dockerd...")
|
||||
subprocess.run(
|
||||
[
|
||||
"docker",
|
||||
"run",
|
||||
"-d",
|
||||
"--name",
|
||||
name,
|
||||
"--network=host",
|
||||
"--privileged",
|
||||
"--cgroupns=host",
|
||||
image,
|
||||
"dockerd",
|
||||
"--debug",
|
||||
"--host=unix:///var/run/docker.sock",
|
||||
"--storage-driver=vfs",
|
||||
],
|
||||
check=True,
|
||||
)
|
||||
|
||||
wait_for_inner_docker(name)
|
||||
print(f">>> CI container '{name}' started and inner dockerd is ready.")
|
||||
return name
|
||||
|
||||
|
||||
def run_in_container(
|
||||
image: str,
|
||||
build: bool,
|
||||
rebuild: bool,
|
||||
no_cache: bool,
|
||||
inventory_args: List[str],
|
||||
deploy_args: List[str],
|
||||
name: str | None = None,
|
||||
) -> None:
|
||||
"""
|
||||
Full CI "run" mode:
|
||||
- start CI container with dockerd
|
||||
- run cli.create.inventory (with forwarded inventory_args)
|
||||
- ensure CI vault password file
|
||||
- run cli.deploy.dedicated (with forwarded deploy_args)
|
||||
- always remove container at the end
|
||||
"""
|
||||
container_name = None
|
||||
try:
|
||||
container_name = start_ci_container(
|
||||
image=image,
|
||||
build=build,
|
||||
rebuild=rebuild,
|
||||
no_cache=no_cache,
|
||||
name=name,
|
||||
)
|
||||
|
||||
# 1) Create CI inventory
|
||||
print(">>> Creating CI inventory inside container (cli.create.inventory)...")
|
||||
inventory_cmd: List[str] = [
|
||||
"python3",
|
||||
"-m",
|
||||
"cli.create.inventory",
|
||||
"inventories/github-ci",
|
||||
"--host",
|
||||
"localhost",
|
||||
"--ssl-disabled",
|
||||
]
|
||||
inventory_cmd.extend(inventory_args)
|
||||
|
||||
docker_exec(
|
||||
container_name,
|
||||
inventory_cmd,
|
||||
workdir=WORKDIR_DEFAULT,
|
||||
check=True,
|
||||
)
|
||||
|
||||
# 2) Ensure vault password file exists
|
||||
print(">>> Ensuring CI vault password file exists...")
|
||||
docker_exec(
|
||||
container_name,
|
||||
[
|
||||
"sh",
|
||||
"-c",
|
||||
"mkdir -p inventories/github-ci && "
|
||||
"[ -f inventories/github-ci/.password ] || "
|
||||
"printf '%s\n' 'ci-vault-password' > inventories/github-ci/.password",
|
||||
],
|
||||
workdir=WORKDIR_DEFAULT,
|
||||
check=True,
|
||||
)
|
||||
|
||||
# 3) Run dedicated deploy
|
||||
print(">>> Running cli.deploy.dedicated inside CI container...")
|
||||
cmd = [
|
||||
"python3",
|
||||
"-m",
|
||||
"cli.deploy.dedicated",
|
||||
"inventories/github-ci/servers.yml",
|
||||
"-p",
|
||||
"inventories/github-ci/.password",
|
||||
*deploy_args,
|
||||
]
|
||||
result = docker_exec(container_name, cmd, workdir=WORKDIR_DEFAULT, check=False)
|
||||
|
||||
if result.returncode != 0:
|
||||
raise subprocess.CalledProcessError(result.returncode, cmd)
|
||||
|
||||
print(">>> Deployment finished successfully inside CI container.")
|
||||
|
||||
finally:
|
||||
if container_name:
|
||||
print(f">>> Cleaning up CI container '{container_name}'...")
|
||||
subprocess.run(
|
||||
["docker", "rm", "-f", container_name],
|
||||
stdout=subprocess.DEVNULL,
|
||||
stderr=subprocess.DEVNULL,
|
||||
)
|
||||
|
||||
|
||||
def stop_container(name: str) -> None:
|
||||
print(f">>> Stopping container '{name}'...")
|
||||
subprocess.run(["docker", "stop", name], check=True)
|
||||
print(f">>> Container '{name}' stopped.")
|
||||
|
||||
|
||||
def remove_container(name: str) -> None:
|
||||
print(f">>> Removing container '{name}'...")
|
||||
subprocess.run(["docker", "rm", "-f", name], check=True)
|
||||
print(f">>> Container '{name}' removed.")
|
||||
|
||||
|
||||
def exec_in_container(name: str, cmd_args: List[str], workdir: str | None = WORKDIR_DEFAULT) -> int:
|
||||
if not cmd_args:
|
||||
print("Error: exec mode requires a command to run inside the container.", file=sys.stderr)
|
||||
return 1
|
||||
|
||||
print(f">>> Executing command in container '{name}': {' '.join(cmd_args)}")
|
||||
result = docker_exec(name, cmd_args, workdir=workdir, check=False)
|
||||
return result.returncode
|
||||
|
||||
|
||||
def split_inventory_and_deploy_args(rest: List[str]) -> Tuple[List[str], List[str]]:
|
||||
"""
|
||||
Split remaining arguments into:
|
||||
- inventory_args: passed to cli.create.inventory
|
||||
- deploy_args: passed to cli.deploy.dedicated
|
||||
|
||||
Convention:
|
||||
- [inventory-args ...] -- [deploy-args ...]
|
||||
- If no '--' is present: inventory_args = [], deploy_args = all rest.
|
||||
"""
|
||||
if not rest:
|
||||
return [], []
|
||||
|
||||
if "--" in rest:
|
||||
idx = rest.index("--")
|
||||
inventory_args = rest[:idx]
|
||||
deploy_args = rest[idx + 1 :]
|
||||
else:
|
||||
inventory_args = []
|
||||
deploy_args = rest
|
||||
|
||||
return inventory_args, deploy_args
|
||||
|
||||
|
||||
def main() -> int:
|
||||
# Capture raw arguments without program name
|
||||
raw_argv = sys.argv[1:]
|
||||
|
||||
# Split container-args vs forwarded args using first "--"
|
||||
if "--" in raw_argv:
|
||||
sep_index = raw_argv.index("--")
|
||||
container_argv = raw_argv[:sep_index]
|
||||
rest = raw_argv[sep_index + 1:]
|
||||
else:
|
||||
container_argv = raw_argv
|
||||
rest = []
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
prog="infinito-deploy-container",
|
||||
description=(
|
||||
"Run Ansible deploy inside an infinito Docker image with an inner "
|
||||
"Docker daemon (dockerd + vfs) and auto-generated CI inventory.\n\n"
|
||||
"Usage (run mode):\n"
|
||||
" python -m cli.deploy.container run [container-opts] -- \\\n"
|
||||
" [inventory-args ...] -- [deploy-args ...]\n\n"
|
||||
"Example:\n"
|
||||
" python -m cli.deploy.container run --build -- \\\n"
|
||||
" --include svc-db-mariadb -- \\\n"
|
||||
" -T server --debug\n"
|
||||
)
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"mode",
|
||||
choices=["run", "start", "stop", "exec", "remove"],
|
||||
help="Container mode: run, start, stop, exec, remove."
|
||||
)
|
||||
|
||||
parser.add_argument("--image", default=os.environ.get("INFINITO_IMAGE", "infinito:latest"))
|
||||
parser.add_argument("--build", action="store_true")
|
||||
parser.add_argument("--rebuild", action="store_true")
|
||||
parser.add_argument("--no-cache", action="store_true")
|
||||
parser.add_argument("--name")
|
||||
|
||||
# Parse only container-level arguments
|
||||
args = parser.parse_args(container_argv)
|
||||
|
||||
mode = args.mode
|
||||
|
||||
# --- RUN MODE ---
|
||||
if mode == "run":
|
||||
inventory_args, deploy_args = split_inventory_and_deploy_args(rest)
|
||||
|
||||
if not deploy_args:
|
||||
print(
|
||||
"Error: missing deploy arguments in run mode.\n"
|
||||
"Use: container run [opts] -- [inventory args] -- [deploy args]",
|
||||
file=sys.stderr
|
||||
)
|
||||
return 1
|
||||
|
||||
try:
|
||||
run_in_container(
|
||||
image=args.image,
|
||||
build=args.build,
|
||||
rebuild=args.rebuild,
|
||||
no_cache=args.no_cache,
|
||||
inventory_args=inventory_args,
|
||||
deploy_args=deploy_args,
|
||||
name=args.name,
|
||||
)
|
||||
except subprocess.CalledProcessError as exc:
|
||||
print(f"[ERROR] Deploy failed with exit code {exc.returncode}", file=sys.stderr)
|
||||
return exc.returncode
|
||||
|
||||
return 0
|
||||
|
||||
# --- START MODE ---
|
||||
if mode == "start":
|
||||
try:
|
||||
name = start_ci_container(
|
||||
image=args.image,
|
||||
build=args.build,
|
||||
rebuild=args.rebuild,
|
||||
no_cache=args.no_cache,
|
||||
name=args.name,
|
||||
)
|
||||
except Exception as exc:
|
||||
print(f"[ERROR] {exc}", file=sys.stderr)
|
||||
return 1
|
||||
|
||||
print(f">>> Started CI container: {name}")
|
||||
return 0
|
||||
|
||||
# For stop/remove/exec, a container name is mandatory
|
||||
if not args.name:
|
||||
print(f"Error: '{mode}' requires --name", file=sys.stderr)
|
||||
return 1
|
||||
|
||||
if mode == "stop":
|
||||
stop_container(args.name)
|
||||
return 0
|
||||
|
||||
if mode == "remove":
|
||||
remove_container(args.name)
|
||||
return 0
|
||||
|
||||
if mode == "exec":
|
||||
return exec_in_container(args.name, rest)
|
||||
|
||||
print(f"Unknown mode: {mode}", file=sys.stderr)
|
||||
return 1
|
||||
|
||||
if __name__ == "__main__":
|
||||
raise SystemExit(main())
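A minimal sketch of the two-level "--" convention in run mode, assuming the repository root is on sys.path so the helper is importable; no Docker is involved, and the option values mirror the usage example in the parser description above.

from cli.deploy.container import split_inventory_and_deploy_args

# argv remainder after the first "--" of:
#   python -m cli.deploy.container run --build -- --include svc-db-mariadb -- -T server --debug
rest = ["--include", "svc-db-mariadb", "--", "-T", "server", "--debug"]

inventory_args, deploy_args = split_inventory_and_deploy_args(rest)
print(inventory_args)  # ['--include', 'svc-db-mariadb']  -> forwarded to cli.create.inventory
print(deploy_args)     # ['-T', 'server', '--debug']      -> forwarded to cli.deploy.dedicated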
cli/deploy/dedicated.py (new file, 367 lines)
@@ -0,0 +1,367 @@
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Infinito.Nexus Deploy CLI
|
||||
|
||||
This script is the main entrypoint for running the Ansible playbook with
|
||||
dynamic MODE_* flags, automatic inventory validation, and optional build/test
|
||||
phases. It supports partial deployments, dynamic MODE flag generation,
|
||||
inventory validation, and structured execution flow.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import subprocess
|
||||
import os
|
||||
import datetime
|
||||
import sys
|
||||
import re
|
||||
from typing import Optional, Dict, Any, List
|
||||
|
||||
|
||||
# --------------------------------------------------------------------------------------
|
||||
# Path resolution
|
||||
# --------------------------------------------------------------------------------------
|
||||
|
||||
# Current file: .../cli/deploy/dedicated.py
|
||||
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) # → cli/deploy
|
||||
CLI_ROOT = os.path.dirname(SCRIPT_DIR) # → cli
|
||||
REPO_ROOT = os.path.dirname(CLI_ROOT) # → project root
|
||||
|
||||
|
||||
# --------------------------------------------------------------------------------------
|
||||
# Main execution logic
|
||||
# --------------------------------------------------------------------------------------
|
||||
|
||||
def run_ansible_playbook(
|
||||
inventory: str,
|
||||
modes: Dict[str, Any],
|
||||
limit: Optional[str] = None,
|
||||
allowed_applications: Optional[List[str]] = None,
|
||||
password_file: Optional[str] = None,
|
||||
verbose: int = 0,
|
||||
skip_build: bool = False,
|
||||
skip_tests: bool = False,
|
||||
logs: bool = False,
|
||||
diff: bool = False,
|
||||
) -> None:
|
||||
"""Run ansible-playbook with the given parameters and execution modes."""
|
||||
start_time = datetime.datetime.now()
|
||||
print(f"\n▶️ Script started at: {start_time.isoformat()}\n")
|
||||
|
||||
# ---------------------------------------------------------
|
||||
# 1) Cleanup Phase
|
||||
# ---------------------------------------------------------
|
||||
if modes.get("MODE_CLEANUP", False):
|
||||
cleanup_cmd = ["make", "clean-keep-logs"] if logs else ["make", "clean"]
|
||||
print(f"\n🧹 Running cleanup ({' '.join(cleanup_cmd)})...\n")
|
||||
subprocess.run(cleanup_cmd, check=True)
|
||||
else:
|
||||
print("\n🧹 Cleanup skipped (MODE_CLEANUP not set or False)\n")
|
||||
|
||||
# ---------------------------------------------------------
|
||||
# 2) Build Phase
|
||||
# ---------------------------------------------------------
|
||||
if not skip_build:
|
||||
print("\n🛠️ Running project build (make messy-build)...\n")
|
||||
subprocess.run(["make", "messy-build"], check=True)
|
||||
else:
|
||||
print("\n🛠️ Build skipped (--skip-build)\n")
|
||||
|
||||
# The Ansible playbook is located in the repo root
|
||||
playbook_path = os.path.join(REPO_ROOT, "playbook.yml")
|
||||
|
||||
# ---------------------------------------------------------
|
||||
# 3) Inventory Validation Phase
|
||||
# ---------------------------------------------------------
|
||||
if modes.get("MODE_ASSERT", None) is False:
|
||||
print("\n🔍 Inventory assertion explicitly disabled (MODE_ASSERT=false)\n")
|
||||
else:
|
||||
print("\n🔍 Validating inventory before deployment...\n")
|
||||
validator_path = os.path.join(CLI_ROOT, "validate", "inventory.py")
|
||||
try:
|
||||
subprocess.run(
|
||||
[sys.executable, validator_path, os.path.dirname(inventory)],
|
||||
check=True,
|
||||
)
|
||||
except subprocess.CalledProcessError:
|
||||
print(
|
||||
"\n[ERROR] Inventory validation failed. Aborting deployment.\n",
|
||||
file=sys.stderr,
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
# ---------------------------------------------------------
|
||||
# 4) Test Phase
|
||||
# ---------------------------------------------------------
|
||||
if not skip_tests:
|
||||
print("\n🧪 Running tests (make messy-test)...\n")
|
||||
subprocess.run(["make", "messy-test"], check=True)
|
||||
else:
|
||||
print("\n🧪 Tests skipped (--skip-tests)\n")
|
||||
|
||||
# ---------------------------------------------------------
|
||||
# 5) Build ansible-playbook command
|
||||
# ---------------------------------------------------------
|
||||
cmd: List[str] = ["ansible-playbook", "-i", inventory, playbook_path]
|
||||
|
||||
# Limit hosts
|
||||
if limit:
|
||||
cmd.extend(["-l", limit])
|
||||
|
||||
# Allowed applications (partial deployment)
|
||||
if allowed_applications:
|
||||
joined = ",".join(allowed_applications)
|
||||
cmd.extend(["-e", f"allowed_applications={joined}"])
|
||||
|
||||
# MODE_* flags
|
||||
for key, value in modes.items():
|
||||
val = str(value).lower() if isinstance(value, bool) else str(value)
|
||||
cmd.extend(["-e", f"{key}={val}"])
|
||||
|
||||
# Vault password file
|
||||
if password_file:
|
||||
cmd.extend(["--vault-password-file", password_file])
|
||||
|
||||
# Enable diff mode
|
||||
if diff:
|
||||
cmd.append("--diff")
|
||||
|
||||
# MODE_DEBUG → enforce high verbosity
|
||||
if modes.get("MODE_DEBUG", False):
|
||||
verbose = max(verbose, 3)
|
||||
|
||||
# Verbosity flags
|
||||
if verbose:
|
||||
cmd.append("-" + "v" * verbose)
|
||||
|
||||
print("\n🚀 Launching Ansible Playbook...\n")
|
||||
result = subprocess.run(cmd)
|
||||
|
||||
if result.returncode != 0:
|
||||
print(
|
||||
f"\n[ERROR] ansible-playbook exited with status {result.returncode}\n",
|
||||
file=sys.stderr,
|
||||
)
|
||||
sys.exit(result.returncode)
|
||||
|
||||
end_time = datetime.datetime.now()
|
||||
|
||||
print(f"\n✅ Script ended at: {end_time.isoformat()}\n")
|
||||
print(f"⏱️ Total execution time: {end_time - start_time}\n")
|
||||
|
||||
|
||||
# --------------------------------------------------------------------------------------
|
||||
# Application ID validation
|
||||
# --------------------------------------------------------------------------------------
|
||||
|
||||
def validate_application_ids(inventory: str, app_ids: List[str]) -> None:
|
||||
"""Validate requested application IDs using ValidDeployId."""
|
||||
if not app_ids:
|
||||
return
|
||||
|
||||
from module_utils.valid_deploy_id import ValidDeployId
|
||||
|
||||
validator = ValidDeployId()
|
||||
invalid = validator.validate(inventory, app_ids)
|
||||
|
||||
if invalid:
|
||||
print("\n[ERROR] Some application_ids are invalid for this inventory:\n")
|
||||
for app_id, status in invalid.items():
|
||||
reasons = []
|
||||
if not status.get("allowed", True):
|
||||
reasons.append("not allowed by configuration")
|
||||
if not status.get("in_inventory", True):
|
||||
reasons.append("not present in inventory")
|
||||
print(f" - {app_id}: {', '.join(reasons)}")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
# --------------------------------------------------------------------------------------
|
||||
# MODE_* parsing logic
|
||||
# --------------------------------------------------------------------------------------
|
||||
|
||||
MODE_LINE_RE = re.compile(
|
||||
r"""^\s*(?P<key>[A-Z0-9_]+)\s*:\s*(?P<value>.+?)\s*(?:#\s*(?P<cmt>.*))?\s*$"""
|
||||
)
|
||||
|
||||
|
||||
def _parse_bool_literal(text: str) -> Optional[bool]:
|
||||
"""Convert simple true/false/yes/no/on/off into boolean."""
|
||||
t = text.strip().lower()
|
||||
if t in ("true", "yes", "on"):
|
||||
return True
|
||||
if t in ("false", "no", "off"):
|
||||
return False
|
||||
return None
|
||||
|
||||
|
||||
def load_modes_from_yaml(modes_yaml_path: str) -> List[Dict[str, Any]]:
|
||||
"""Load MODE_* definitions from YAML-like key/value file."""
|
||||
modes: List[Dict[str, Any]] = []
|
||||
|
||||
if not os.path.exists(modes_yaml_path):
|
||||
raise FileNotFoundError(f"Modes file not found: {modes_yaml_path}")
|
||||
|
||||
with open(modes_yaml_path, "r", encoding="utf-8") as fh:
|
||||
for line in fh:
|
||||
line = line.rstrip()
|
||||
if not line or line.lstrip().startswith("#"):
|
||||
continue
|
||||
|
||||
m = MODE_LINE_RE.match(line)
|
||||
if not m:
|
||||
continue
|
||||
|
||||
key = m.group("key")
|
||||
val = m.group("value").strip()
|
||||
cmt = (m.group("cmt") or "").strip()
|
||||
|
||||
if not key.startswith("MODE_"):
|
||||
continue
|
||||
|
||||
default_bool = _parse_bool_literal(val)
|
||||
|
||||
modes.append(
|
||||
{
|
||||
"name": key,
|
||||
"default": default_bool,
|
||||
"help": cmt or f"Toggle {key}",
|
||||
}
|
||||
)
|
||||
|
||||
return modes
|
||||
|
||||
|
||||
# --------------------------------------------------------------------------------------
|
||||
# Dynamic argparse mode injection
|
||||
# --------------------------------------------------------------------------------------
|
||||
|
||||
def add_dynamic_mode_args(
|
||||
parser: argparse.ArgumentParser, modes_meta: List[Dict[str, Any]]
|
||||
) -> Dict[str, Dict[str, Any]]:
|
||||
"""
|
||||
Add command-line arguments dynamically based on MODE_* metadata.
|
||||
"""
|
||||
|
||||
spec: Dict[str, Dict[str, Any]] = {}
|
||||
|
||||
for m in modes_meta:
|
||||
name = m["name"]
|
||||
default = m["default"]
|
||||
desc = m["help"]
|
||||
short = name.replace("MODE_", "").lower()
|
||||
|
||||
if default is True:
|
||||
# MODE_FOO: true → --skip-foo disables it
|
||||
opt = f"--skip-{short}"
|
||||
dest = f"skip_{short}"
|
||||
parser.add_argument(opt, action="store_true", dest=dest, help=desc)
|
||||
spec[name] = {"dest": dest, "default": True, "kind": "bool_true"}
|
||||
|
||||
elif default is False:
|
||||
# MODE_BAR: false → --bar enables it
|
||||
opt = f"--{short}"
|
||||
dest = short
|
||||
parser.add_argument(opt, action="store_true", dest=dest, help=desc)
|
||||
spec[name] = {"dest": dest, "default": False, "kind": "bool_false"}
|
||||
|
||||
else:
|
||||
# Explicit: MODE_XYZ: null → --xyz true|false
|
||||
opt = f"--{short}"
|
||||
dest = short
|
||||
parser.add_argument(opt, choices=["true", "false"], dest=dest, help=desc)
|
||||
spec[name] = {"dest": dest, "default": None, "kind": "explicit"}
|
||||
|
||||
return spec
|
||||
|
||||
|
||||
def build_modes_from_args(
|
||||
spec: Dict[str, Dict[str, Any]], args_namespace: argparse.Namespace
|
||||
) -> Dict[str, Any]:
|
||||
"""Resolve CLI arguments into a MODE_* dictionary."""
|
||||
modes: Dict[str, Any] = {}
|
||||
|
||||
for mode_name, info in spec.items():
|
||||
dest = info["dest"]
|
||||
kind = info["kind"]
|
||||
value = getattr(args_namespace, dest, None)
|
||||
|
||||
if kind == "bool_true":
|
||||
modes[mode_name] = False if value else True
|
||||
|
||||
elif kind == "bool_false":
|
||||
modes[mode_name] = True if value else False
|
||||
|
||||
else: # explicit
|
||||
if value is not None:
|
||||
modes[mode_name] = (value == "true")
|
||||
|
||||
return modes
|
||||
|
||||
|
||||
# --------------------------------------------------------------------------------------
|
||||
# Main entrypoint
|
||||
# --------------------------------------------------------------------------------------
|
||||
|
||||
def main() -> None:
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Deploy the Infinito.Nexus stack using ansible-playbook."
|
||||
)
|
||||
|
||||
# Standard arguments
|
||||
parser.add_argument("inventory", help="Path to the inventory file.")
|
||||
parser.add_argument("-l", "--limit", help="Limit execution to certain hosts or groups.")
|
||||
parser.add_argument(
|
||||
"-T", "--host-type", choices=["server", "desktop"], default="server",
|
||||
help="Specify target type: server or desktop."
|
||||
)
|
||||
parser.add_argument(
|
||||
"-p", "--password-file",
|
||||
help="Vault password file for encrypted variables."
|
||||
)
|
||||
parser.add_argument("-B", "--skip-build", action="store_true", help="Skip build phase.")
|
||||
parser.add_argument("-t", "--skip-tests", action="store_true", help="Skip test phase.")
|
||||
parser.add_argument(
|
||||
"-i", "--id", nargs="+", default=[], dest="id",
|
||||
help="List of application_ids for partial deployment."
|
||||
)
|
||||
parser.add_argument(
|
||||
"-v", "--verbose", action="count", default=0,
|
||||
help="Increase verbosity (e.g. -vvv)."
|
||||
)
|
||||
parser.add_argument("--logs", action="store_true", help="Keep logs during cleanup.")
|
||||
parser.add_argument("--diff", action="store_true", help="Enable Ansible diff mode.")
|
||||
|
||||
# Dynamic MODE_* parsing
|
||||
modes_yaml_path = os.path.join(REPO_ROOT, "group_vars", "all", "01_modes.yml")
|
||||
modes_meta = load_modes_from_yaml(modes_yaml_path)
|
||||
modes_spec = add_dynamic_mode_args(parser, modes_meta)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
# Validate application IDs
|
||||
validate_application_ids(args.inventory, args.id)
|
||||
|
||||
# Build final mode map
|
||||
modes = build_modes_from_args(modes_spec, args)
|
||||
modes["MODE_LOGS"] = args.logs
|
||||
modes["host_type"] = args.host_type
|
||||
|
||||
# Run playbook
|
||||
run_ansible_playbook(
|
||||
inventory=args.inventory,
|
||||
modes=modes,
|
||||
limit=args.limit,
|
||||
allowed_applications=args.id,
|
||||
password_file=args.password_file,
|
||||
verbose=args.verbose,
|
||||
skip_build=args.skip_build,
|
||||
skip_tests=args.skip_tests,
|
||||
logs=args.logs,
|
||||
diff=args.diff,
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
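A minimal sketch of how the dynamic MODE_* flags above turn into CLI options; the mode names and defaults below are illustrative, and the helpers are assumed importable from cli.deploy.dedicated with the repository root on sys.path.

import argparse
from cli.deploy.dedicated import add_dynamic_mode_args, build_modes_from_args

modes_meta = [
    {"name": "MODE_UPDATE", "default": True,  "help": "Run updates"},        # default true  -> --skip-update
    {"name": "MODE_DEBUG",  "default": False, "help": "Enable debug"},       # default false -> --debug
    {"name": "MODE_DUMMY",  "default": None,  "help": "Templated default"},  # unknown       -> --dummy true|false
]

parser = argparse.ArgumentParser()
spec = add_dynamic_mode_args(parser, modes_meta)
args = parser.parse_args(["--skip-update", "--debug"])
print(build_modes_from_args(spec, args))
# {'MODE_UPDATE': False, 'MODE_DEBUG': True}  (MODE_DUMMY omitted -> inventory default kept)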
@@ -20,6 +20,26 @@ def _dedup_preserve(seq):
|
||||
out.append(x)
|
||||
return out
|
||||
|
||||
def _sort_tokens(tokens):
|
||||
"""
|
||||
Return a deterministically ordered list of CSP tokens.
|
||||
- de-duplicates while preserving relative order
|
||||
- then sorts lexicographically
|
||||
- keeps 'self' as the first token if present
|
||||
"""
|
||||
uniq = _dedup_preserve(tokens)
|
||||
if not uniq:
|
||||
return uniq
|
||||
|
||||
# Lexicographically sort all tokens
|
||||
uniq = sorted(uniq)
|
||||
|
||||
# Ensure "'self'" is always first if present
|
||||
if "'self'" in uniq:
|
||||
uniq.remove("'self'")
|
||||
uniq.insert(0, "'self'")
|
||||
|
||||
return uniq
|
||||
|
||||
class FilterModule(object):
|
||||
"""
|
||||
@@ -202,31 +222,39 @@ class FilterModule(object):
|
||||
|
||||
tokens = ["'self'"]
|
||||
|
||||
# 1) Flags (with sane defaults)
|
||||
# Flags (with sane defaults)
|
||||
flags = self.get_csp_flags(applications, application_id, directive)
|
||||
tokens += flags
|
||||
|
||||
# 2) Internal CDN defaults for selected directives
|
||||
# Internal CDN defaults for selected directives
|
||||
if directive in ('script-src-elem', 'connect-src', 'style-src-elem', 'style-src'):
|
||||
tokens.append(get_url(domains, 'web-svc-cdn', web_protocol))
|
||||
|
||||
# 3) Matomo (if enabled)
|
||||
# Matomo (if enabled)
|
||||
if directive in ('script-src-elem', 'connect-src'):
|
||||
if self.is_feature_enabled(applications, matomo_feature_name, application_id):
|
||||
tokens.append(get_url(domains, 'web-app-matomo', web_protocol))
|
||||
|
||||
# 4) Simpleicons (if enabled) – typically used via connect-src (fetch)
|
||||
# Simpleicons (if enabled) – typically used via connect-src (fetch)
|
||||
if directive == 'connect-src':
|
||||
if self.is_feature_enabled(applications, 'simpleicons', application_id):
|
||||
tokens.append(get_url(domains, 'web-svc-simpleicons', web_protocol))
|
||||
|
||||
# 5) reCAPTCHA (if enabled) – scripts + frames
|
||||
# reCAPTCHA (if enabled) – scripts + frames
|
||||
if self.is_feature_enabled(applications, 'recaptcha', application_id):
|
||||
if directive in ('script-src-elem', 'frame-src'):
|
||||
tokens.append('https://www.gstatic.com')
|
||||
tokens.append('https://www.google.com')
|
||||
|
||||
# hCaptcha (if enabled) – scripts + frames
|
||||
if self.is_feature_enabled(applications, 'hcaptcha', application_id):
|
||||
if directive in ('script-src-elem',):
|
||||
tokens.append('https://www.hcaptcha.com')
|
||||
tokens.append('https://js.hcaptcha.com')
|
||||
if directive in ('frame-src',):
|
||||
tokens.append('https://newassets.hcaptcha.com/')
|
||||
|
||||
# 6) Frame ancestors (desktop + logout)
|
||||
# Frame ancestors (desktop + logout)
|
||||
if directive == 'frame-ancestors':
|
||||
if self.is_feature_enabled(applications, 'desktop', application_id):
|
||||
# Allow being embedded by the desktop app domain's site
|
||||
@@ -236,11 +264,17 @@ class FilterModule(object):
|
||||
if self.is_feature_enabled(applications, 'logout', application_id):
|
||||
tokens.append(get_url(domains, 'web-svc-logout', web_protocol))
|
||||
tokens.append(get_url(domains, 'web-app-keycloak', web_protocol))
|
||||
|
||||
# Logout support requires inline handlers (script-src-attr)
|
||||
if directive in ('script-src-attr','script-src-elem'):
|
||||
if self.is_feature_enabled(applications, 'logout', application_id):
|
||||
tokens.append("'unsafe-inline'")
|
||||
|
||||
# 7) Custom whitelist
|
||||
|
||||
# Custom whitelist
|
||||
tokens += self.get_csp_whitelist(applications, application_id, directive)
|
||||
|
||||
# 8) Inline hashes (only if this directive does NOT include 'unsafe-inline')
|
||||
# Inline hashes (only if this directive does NOT include 'unsafe-inline')
|
||||
if "'unsafe-inline'" not in tokens:
|
||||
for snippet in self.get_csp_inline_content(applications, application_id, directive):
|
||||
tokens.append(self.get_csp_hash(snippet))
|
||||
@@ -283,6 +317,10 @@ class FilterModule(object):
|
||||
# ----------------------------------------------------------
|
||||
# Assemble header
|
||||
# ----------------------------------------------------------
|
||||
# Sort tokens per directive for deterministic output
|
||||
for directive, toks in list(tokens_by_dir.items()):
|
||||
tokens_by_dir[directive] = _sort_tokens(toks)
|
||||
|
||||
parts = []
|
||||
for directive in directives:
|
||||
if directive in tokens_by_dir:
|
||||
|
||||
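The effect of the new _sort_tokens helper is easiest to see on a concrete token list; the tokens below are illustrative, and the call assumes the helper is in scope within the CSP filter plugin shown above.

tokens = ["https://cdn.example.org", "'self'", "'nonce-abc123'", "https://cdn.example.org"]
print(_sort_tokens(tokens))
# ["'self'", "'nonce-abc123'", 'https://cdn.example.org']
# duplicates dropped, tokens sorted lexicographically, "'self'" pinned to the front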
@@ -1,77 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import sys, os, re
|
||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
||||
|
||||
from ansible.errors import AnsibleFilterError
|
||||
from module_utils.config_utils import get_app_conf
|
||||
from module_utils.entity_name_utils import get_entity_name
|
||||
|
||||
_UNIT_RE = re.compile(r'^\s*(\d+(?:\.\d+)?)\s*([kKmMgGtT]?[bB]?)?\s*$')
|
||||
_FACTORS = {
|
||||
'': 1, 'b': 1,
|
||||
'k': 1024, 'kb': 1024,
|
||||
'm': 1024**2, 'mb': 1024**2,
|
||||
'g': 1024**3, 'gb': 1024**3,
|
||||
't': 1024**4, 'tb': 1024**4,
|
||||
}
|
||||
|
||||
def _to_bytes(v: str) -> int:
|
||||
if v is None:
|
||||
raise AnsibleFilterError("jvm_filters: size value is None")
|
||||
s = str(v).strip()
|
||||
m = _UNIT_RE.match(s)
|
||||
if not m:
|
||||
raise AnsibleFilterError(f"jvm_filters: invalid size '{v}'")
|
||||
num, unit = m.group(1), (m.group(2) or '').lower()
|
||||
try:
|
||||
val = float(num)
|
||||
except ValueError as e:
|
||||
raise AnsibleFilterError(f"jvm_filters: invalid numeric size '{v}'") from e
|
||||
factor = _FACTORS.get(unit)
|
||||
if factor is None:
|
||||
raise AnsibleFilterError(f"jvm_filters: unknown unit in '{v}'")
|
||||
return int(val * factor)
|
||||
|
||||
def _to_mb(v: str) -> int:
|
||||
return max(0, _to_bytes(v) // (1024 * 1024))
|
||||
|
||||
def _svc(app_id: str) -> str:
|
||||
return get_entity_name(app_id)
|
||||
|
||||
def _mem_limit_mb(apps: dict, app_id: str) -> int:
|
||||
svc = _svc(app_id)
|
||||
raw = get_app_conf(apps, app_id, f"docker.services.{svc}.mem_limit")
|
||||
mb = _to_mb(raw)
|
||||
if mb <= 0:
|
||||
raise AnsibleFilterError(f"jvm_filters: mem_limit for '{svc}' must be > 0 MB (got '{raw}')")
|
||||
return mb
|
||||
|
||||
def _mem_res_mb(apps: dict, app_id: str) -> int:
|
||||
svc = _svc(app_id)
|
||||
raw = get_app_conf(apps, app_id, f"docker.services.{svc}.mem_reservation")
|
||||
mb = _to_mb(raw)
|
||||
if mb <= 0:
|
||||
raise AnsibleFilterError(f"jvm_filters: mem_reservation for '{svc}' must be > 0 MB (got '{raw}')")
|
||||
return mb
|
||||
|
||||
def jvm_max_mb(apps: dict, app_id: str) -> int:
|
||||
"""Xmx = min( floor(0.7*limit), limit-1024, 12288 ) with floor at 1024 MB."""
|
||||
limit_mb = _mem_limit_mb(apps, app_id)
|
||||
c1 = (limit_mb * 7) // 10
|
||||
c2 = max(0, limit_mb - 1024)
|
||||
c3 = 12288
|
||||
return max(1024, min(c1, c2, c3))
|
||||
|
||||
def jvm_min_mb(apps: dict, app_id: str) -> int:
|
||||
"""Xms = min( floor(Xmx/2), mem_reservation, Xmx ) with floor at 512 MB."""
|
||||
xmx = jvm_max_mb(apps, app_id)
|
||||
res = _mem_res_mb(apps, app_id)
|
||||
return max(512, min(xmx // 2, res, xmx))
|
||||
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {
|
||||
"jvm_max_mb": jvm_max_mb,
|
||||
"jvm_min_mb": jvm_min_mb,
|
||||
}
filter_plugins/memory_filters.py (new file, 179 lines)
@@ -0,0 +1,179 @@
from __future__ import annotations
|
||||
|
||||
import sys, os, re
|
||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
||||
|
||||
from ansible.errors import AnsibleFilterError
|
||||
from module_utils.config_utils import get_app_conf
|
||||
from module_utils.entity_name_utils import get_entity_name
|
||||
|
||||
# Regex and unit conversion table
|
||||
_UNIT_RE = re.compile(r'^\s*(\d+(?:\.\d+)?)\s*([kKmMgGtT]?[bB]?)?\s*$')
|
||||
_FACTORS = {
|
||||
'': 1, 'b': 1,
|
||||
'k': 1024, 'kb': 1024,
|
||||
'm': 1024**2, 'mb': 1024**2,
|
||||
'g': 1024**3, 'gb': 1024**3,
|
||||
't': 1024**4, 'tb': 1024**4,
|
||||
}
|
||||
|
||||
# ------------------------------------------------------
|
||||
# Helpers: unit conversion
|
||||
# ------------------------------------------------------
|
||||
|
||||
def _to_bytes(v: str) -> int:
|
||||
"""Convert a human-readable size string (e.g., '2g', '512m') to bytes."""
|
||||
if v is None:
|
||||
raise AnsibleFilterError("memory_filters: size value is None")
|
||||
|
||||
s = str(v).strip()
|
||||
m = _UNIT_RE.match(s)
|
||||
if not m:
|
||||
raise AnsibleFilterError(f"memory_filters: invalid size '{v}'")
|
||||
|
||||
num, unit = m.group(1), (m.group(2) or '').lower()
|
||||
|
||||
try:
|
||||
val = float(num)
|
||||
except ValueError as e:
|
||||
raise AnsibleFilterError(f"memory_filters: invalid numeric size '{v}'") from e
|
||||
|
||||
factor = _FACTORS.get(unit)
|
||||
if factor is None:
|
||||
raise AnsibleFilterError(f"memory_filters: unknown unit in '{v}'")
|
||||
|
||||
return int(val * factor)
|
||||
|
||||
|
||||
def _to_mb(v: str) -> int:
|
||||
"""Convert human-readable size to megabytes."""
|
||||
return max(0, _to_bytes(v) // (1024 * 1024))
|
||||
|
||||
|
||||
# ------------------------------------------------------
|
||||
# JVM-specific helpers
|
||||
# ------------------------------------------------------
|
||||
|
||||
def _svc(app_id: str) -> str:
|
||||
"""Resolve the internal service name for JVM-based applications."""
|
||||
return get_entity_name(app_id)
|
||||
|
||||
|
||||
def _mem_limit_mb(apps: dict, app_id: str) -> int:
|
||||
"""Resolve mem_limit for the JVM service of the given application."""
|
||||
svc = _svc(app_id)
|
||||
raw = get_app_conf(apps, app_id, f"docker.services.{svc}.mem_limit")
|
||||
mb = _to_mb(raw)
|
||||
|
||||
if mb <= 0:
|
||||
raise AnsibleFilterError(
|
||||
f"memory_filters: mem_limit for '{svc}' must be > 0 MB (got '{raw}')"
|
||||
)
|
||||
return mb
|
||||
|
||||
|
||||
def _mem_res_mb(apps: dict, app_id: str) -> int:
|
||||
"""Resolve mem_reservation for the JVM service of the given application."""
|
||||
svc = _svc(app_id)
|
||||
raw = get_app_conf(apps, app_id, f"docker.services.{svc}.mem_reservation")
|
||||
mb = _to_mb(raw)
|
||||
|
||||
if mb <= 0:
|
||||
raise AnsibleFilterError(
|
||||
f"memory_filters: mem_reservation for '{svc}' must be > 0 MB (got '{raw}')"
|
||||
)
|
||||
return mb
|
||||
|
||||
|
||||
def jvm_max_mb(apps: dict, app_id: str) -> int:
|
||||
"""
|
||||
Compute recommended JVM Xmx in MB using:
|
||||
Xmx = min(
|
||||
floor(0.7 * mem_limit),
|
||||
mem_limit - 1024,
|
||||
12288
|
||||
)
|
||||
with a lower bound of 1024 MB.
|
||||
"""
|
||||
limit_mb = _mem_limit_mb(apps, app_id)
|
||||
c1 = (limit_mb * 7) // 10
|
||||
c2 = max(0, limit_mb - 1024)
|
||||
c3 = 12288
|
||||
|
||||
return max(1024, min(c1, c2, c3))
|
||||
|
||||
|
||||
def jvm_min_mb(apps: dict, app_id: str) -> int:
|
||||
"""
|
||||
Compute recommended JVM Xms in MB using:
|
||||
Xms = min(
|
||||
floor(Xmx / 2),
|
||||
mem_reservation,
|
||||
Xmx
|
||||
)
|
||||
with a lower bound of 512 MB.
|
||||
"""
|
||||
xmx = jvm_max_mb(apps, app_id)
|
||||
res = _mem_res_mb(apps, app_id)
|
||||
|
||||
return max(512, min(xmx // 2, res, xmx))
|
||||
|
||||
|
||||
# ------------------------------------------------------
|
||||
# Redis-specific helpers (always service name "redis")
|
||||
# ------------------------------------------------------
|
||||
|
||||
def _redis_mem_limit_mb(apps: dict, app_id: str, default_mb: int = 256) -> int:
|
||||
"""
|
||||
Resolve mem_limit for the Redis service of an application.
|
||||
Unlike JVM-based services, Redis always uses the service name "redis".
|
||||
|
||||
If no mem_limit is defined, fall back to default_mb.
|
||||
"""
|
||||
raw = get_app_conf(
|
||||
apps,
|
||||
app_id,
|
||||
"docker.services.redis.mem_limit",
|
||||
strict=False,
|
||||
default=f"{default_mb}m",
|
||||
)
|
||||
|
||||
mb = _to_mb(raw)
|
||||
|
||||
if mb <= 0:
|
||||
raise AnsibleFilterError(
|
||||
f"memory_filters: mem_limit for 'redis' must be > 0 MB (got '{raw}')"
|
||||
)
|
||||
|
||||
return mb
|
||||
|
||||
|
||||
def redis_maxmemory_mb(
|
||||
apps: dict,
|
||||
app_id: str,
|
||||
factor: float = 0.8,
|
||||
min_mb: int = 64
|
||||
) -> int:
|
||||
"""
|
||||
Compute recommended Redis `maxmemory` in MB.
|
||||
|
||||
* factor: fraction of allowed memory used for Redis data (default 0.8)
|
||||
* min_mb: minimum floor value (default 64 MB)
|
||||
|
||||
maxmemory = max(min_mb, floor(factor * mem_limit))
|
||||
"""
|
||||
limit_mb = _redis_mem_limit_mb(apps, app_id)
|
||||
return max(min_mb, int(limit_mb * factor))
|
||||
|
||||
|
||||
# ------------------------------------------------------
|
||||
# Filter module
|
||||
# ------------------------------------------------------
|
||||
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {
|
||||
"jvm_max_mb": jvm_max_mb,
|
||||
"jvm_min_mb": jvm_min_mb,
|
||||
"redis_maxmemory_mb": redis_maxmemory_mb,
|
||||
}
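A worked example of the JVM and Redis sizing rules above, with purely illustrative numbers; the arithmetic mirrors jvm_max_mb, jvm_min_mb and redis_maxmemory_mb and needs no repository imports.

mem_limit_mb = 4096        # e.g. docker.services.<svc>.mem_limit: "4g" (4 * 1024**3 bytes)
mem_reservation_mb = 2048  # e.g. docker.services.<svc>.mem_reservation: "2g"

xmx = max(1024, min((mem_limit_mb * 7) // 10, mem_limit_mb - 1024, 12288))
xms = max(512, min(xmx // 2, mem_reservation_mb, xmx))
print(xmx, xms)  # 2867 1433 -> suggested -Xmx / -Xms in MB

redis_limit_mb = 256       # fallback default when no redis mem_limit is configured
print(max(64, int(redis_limit_mb * 0.8)))  # 204 -> suggested Redis maxmemory in MB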
filter_plugins/node_autosize.py (new file, 141 lines)
@@ -0,0 +1,141 @@
# filter_plugins/node_autosize.py
|
||||
# Reuse app config to derive sensible Node.js heap sizes for containers.
|
||||
#
|
||||
# Usage example (Jinja):
|
||||
# {{ applications | node_max_old_space_size('web-app-nextcloud', 'whiteboard') }}
|
||||
#
|
||||
# Heuristics (defaults):
|
||||
# - candidate = 35% of mem_limit
|
||||
# - min = 768 MB (required minimum)
|
||||
# - cap = min(3072 MB, 60% of mem_limit)
|
||||
#
|
||||
# NEW: If mem_limit (container cgroup RAM) is smaller than min_mb, we raise an
|
||||
# exception — to prevent a misconfiguration where Node's heap could exceed the cgroup
|
||||
# and be OOM-killed.
|
||||
|
||||
from __future__ import annotations
|
||||
import re
|
||||
from ansible.errors import AnsibleFilterError
|
||||
|
||||
# Import the shared config resolver from module_utils
|
||||
try:
|
||||
from module_utils.config_utils import get_app_conf, AppConfigKeyError
|
||||
except Exception as e:
|
||||
raise AnsibleFilterError(
|
||||
f"Failed to import get_app_conf from module_utils.config_utils: {e}"
|
||||
)
|
||||
|
||||
_SIZE_RE = re.compile(r"^\s*(\d+(?:\.\d+)?)\s*([kmgtp]?i?b?)?\s*$", re.IGNORECASE)
|
||||
_MULT = {
|
||||
"": 1,
|
||||
"b": 1,
|
||||
"k": 10**3, "kb": 10**3,
|
||||
"m": 10**6, "mb": 10**6,
|
||||
"g": 10**9, "gb": 10**9,
|
||||
"t": 10**12, "tb": 10**12,
|
||||
"p": 10**15, "pb": 10**15,
|
||||
"kib": 1024,
|
||||
"mib": 1024**2,
|
||||
"gib": 1024**3,
|
||||
"tib": 1024**4,
|
||||
"pib": 1024**5,
|
||||
}
|
||||
|
||||
|
||||
def _to_bytes(val):
|
||||
"""Convert numeric or string memory limits (e.g. '512m', '2GiB') to bytes."""
|
||||
if val is None or val == "":
|
||||
return None
|
||||
if isinstance(val, (int, float)):
|
||||
return int(val)
|
||||
if not isinstance(val, str):
|
||||
raise AnsibleFilterError(f"Unsupported mem_limit type: {type(val).__name__}")
|
||||
m = _SIZE_RE.match(val)
|
||||
if not m:
|
||||
raise AnsibleFilterError(f"Unrecognized mem_limit string: {val!r}")
|
||||
num = float(m.group(1))
|
||||
unit = (m.group(2) or "").lower()
|
||||
if unit not in _MULT:
|
||||
raise AnsibleFilterError(f"Unknown unit in mem_limit: {unit!r}")
|
||||
return int(num * _MULT[unit])
|
||||
|
||||
|
||||
def _mb(bytes_val: int) -> int:
|
||||
"""Return decimal MB (10^6) as integer — Node expects MB units."""
|
||||
return int(round(bytes_val / 10**6))
|
||||
|
||||
|
||||
def _compute_old_space_mb(
|
||||
total_mb: int, pct: float, min_mb: int, hardcap_mb: int, safety_cap_pct: float
|
||||
) -> int:
|
||||
"""
|
||||
Compute Node.js old-space heap (MB) with safe minimum and cap handling.
|
||||
|
||||
NOTE: The calling function ensures total_mb >= min_mb; here we only
|
||||
apply the sizing heuristics and caps.
|
||||
"""
|
||||
candidate = int(total_mb * float(pct))
|
||||
safety_cap = int(total_mb * float(safety_cap_pct))
|
||||
final_cap = min(int(hardcap_mb), safety_cap)
|
||||
|
||||
# Enforce minimum first; only apply cap if it's above the minimum
|
||||
candidate = max(candidate, int(min_mb))
|
||||
if final_cap >= int(min_mb):
|
||||
candidate = min(candidate, final_cap)
|
||||
|
||||
# Never below a tiny hard floor
|
||||
return max(candidate, 128)
|
||||
|
||||
|
||||
def node_max_old_space_size(
|
||||
applications: dict,
|
||||
application_id: str,
|
||||
service_name: str,
|
||||
pct: float = 0.35,
|
||||
min_mb: int = 768,
|
||||
hardcap_mb: int = 3072,
|
||||
safety_cap_pct: float = 0.60,
|
||||
) -> int:
|
||||
"""
|
||||
Derive Node.js --max-old-space-size (MB) from the service's mem_limit in app config.
|
||||
|
||||
Looks up: docker.services.<service_name>.mem_limit for the given application_id.
|
||||
|
||||
Raises:
|
||||
AnsibleFilterError if mem_limit is missing/invalid OR if mem_limit (MB) < min_mb.
|
||||
"""
|
||||
try:
|
||||
mem_limit = get_app_conf(
|
||||
applications=applications,
|
||||
application_id=application_id,
|
||||
config_path=f"docker.services.{service_name}.mem_limit",
|
||||
strict=True,
|
||||
default=None,
|
||||
)
|
||||
except AppConfigKeyError as e:
|
||||
raise AnsibleFilterError(str(e))
|
||||
|
||||
if mem_limit in (None, False, ""):
|
||||
raise AnsibleFilterError(
|
||||
f"mem_limit not set for application '{application_id}', service '{service_name}'"
|
||||
)
|
||||
|
||||
total_bytes = _to_bytes(mem_limit)
|
||||
total_mb = _mb(total_bytes)
|
||||
|
||||
# NEW: guardrail — refuse to size a heap larger than the cgroup limit
|
||||
if total_mb < int(min_mb):
|
||||
raise AnsibleFilterError(
|
||||
f"mem_limit ({total_mb} MB) is below the required minimum heap ({int(min_mb)} MB) "
|
||||
f"for application '{application_id}', service '{service_name}'. "
|
||||
f"Increase mem_limit or lower min_mb."
|
||||
)
|
||||
|
||||
return _compute_old_space_mb(total_mb, pct, min_mb, hardcap_mb, safety_cap_pct)
|
||||
|
||||
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {
|
||||
"node_max_old_space_size": node_max_old_space_size,
|
||||
}
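A worked example of the old-space heuristic above, with illustrative numbers; the steps mirror _compute_old_space_mb and the mem_limit guardrail in node_max_old_space_size.

total_mb = 2000            # mem_limit "2g" -> 2 * 10**9 bytes -> 2000 decimal MB (>= min_mb, so no error raised)
pct, min_mb, hardcap_mb, safety_cap_pct = 0.35, 768, 3072, 0.60

candidate = int(total_mb * pct)                               # 700
final_cap = min(hardcap_mb, int(total_mb * safety_cap_pct))   # min(3072, 1200) = 1200
candidate = max(candidate, min_mb)                            # minimum wins: 768
if final_cap >= min_mb:
    candidate = min(candidate, final_cap)                     # still 768
print(max(candidate, 128))  # 768 -> --max-old-space-size=768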
filter_plugins/reserved_users.py (new file, 53 lines)
@@ -0,0 +1,53 @@
from ansible.errors import AnsibleFilterError
|
||||
import re
|
||||
|
||||
|
||||
def reserved_usernames(users_dict):
|
||||
"""
|
||||
Return a list of usernames where reserved: true.
|
||||
Usernames are regex-escaped to be safely embeddable.
|
||||
"""
|
||||
if not isinstance(users_dict, dict):
|
||||
raise AnsibleFilterError("reserved_usernames expects a dictionary.")
|
||||
|
||||
results = []
|
||||
|
||||
for _key, user in users_dict.items():
|
||||
if not isinstance(user, dict):
|
||||
continue
|
||||
if not user.get("reserved", False):
|
||||
continue
|
||||
username = user.get("username")
|
||||
if username:
|
||||
results.append(re.escape(str(username)))
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def non_reserved_users(users_dict):
|
||||
"""
|
||||
Return a dict of users where reserved != true.
|
||||
"""
|
||||
if not isinstance(users_dict, dict):
|
||||
raise AnsibleFilterError("non_reserved_users expects a dictionary.")
|
||||
|
||||
results = {}
|
||||
|
||||
for key, user in users_dict.items():
|
||||
if not isinstance(user, dict):
|
||||
continue
|
||||
if user.get("reserved", False):
|
||||
continue
|
||||
results[key] = user
|
||||
|
||||
return results
|
||||
|
||||
|
||||
class FilterModule(object):
|
||||
"""User filters for extracting reserved and non-reserved subsets."""
|
||||
|
||||
def filters(self):
|
||||
return {
|
||||
"reserved_usernames": reserved_usernames,
|
||||
"non_reserved_users": non_reserved_users,
|
||||
}
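A small usage sketch of the two user filters above; the user records are illustrative, and the functions are assumed importable from filter_plugins.reserved_users with the repository root on sys.path.

from filter_plugins.reserved_users import reserved_usernames, non_reserved_users

users = {
    "root":  {"username": "root",  "reserved": True},
    "alice": {"username": "alice"},
    "bot":   {"username": "bot.1", "reserved": True},
}
print(reserved_usernames(users))        # ['root', 'bot\\.1']  (usernames regex-escaped)
print(list(non_reserved_users(users)))  # ['alice']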
|
||||
@@ -22,12 +22,15 @@ HOST_TIME_FORMAT: "HH:mm"
|
||||
HOST_THOUSAND_SEPARATOR: "."
|
||||
HOST_DECIMAL_MARK: ","
|
||||
|
||||
# Encryption
|
||||
SSL_ENABLED: true
|
||||
|
||||
# Web
|
||||
WEB_PROTOCOL: "https" # Web protocol type. Use https or http. If you run local you need to change it to http
|
||||
WEB_PORT: "{{ 443 if WEB_PROTOCOL == 'https' else 80 }}" # Default port web applications will listen to
|
||||
WEB_PROTOCOL: "{{ 'https' if SSL_ENABLED | bool else 'http' }}" # Web protocol type (https or http), derived from SSL_ENABLED; set SSL_ENABLED to false for local http-only runs
|
||||
WEB_PORT: "{{ 443 if SSL_ENABLED | bool else 80 }}" # Default port web applications will listen to
|
||||
|
||||
# Websocket
|
||||
WEBSOCKET_PROTOCOL: "{{ 'wss' if WEB_PROTOCOL == 'https' else 'ws' }}"
|
||||
WEBSOCKET_PROTOCOL: "{{ 'wss' if SSL_ENABLED | bool else 'ws' }}"
|
||||
|
||||
# WWW-Redirect to None WWW-Domains enabled
|
||||
WWW_REDIRECT_ENABLED: "{{ ('web-opt-rdr-www' in group_names) | bool }}"
|
||||
@@ -35,11 +38,11 @@ WWW_REDIRECT_ENABLED: "{{ ('web-opt-rdr-www' in group_names) | bool }}"
|
||||
AUTO_BUILD_ALIASES: False # If enabled, an alias domain is created for each web application based on its entity name; recommended to set to false to save domain space
|
||||
|
||||
# Domain
|
||||
PRIMARY_DOMAIN: "localhost" # Primary Domain of the server
|
||||
PRIMARY_DOMAIN: "localhost" # Primary Domain of the server
|
||||
|
||||
DNS_PROVIDER: cloudflare # The DNS Provider\Registrar for the domain
|
||||
DNS_PROVIDER: "localhost" # The DNS Provider/Registrar for the domain -> Valid values: cloudflare
|
||||
|
||||
HOSTING_PROVIDER: hetzner # Provider which hosts the server
|
||||
HOSTING_PROVIDER: "localhost" # Provider which hosts the server -> hetzner
|
||||
|
||||
# Which ACME method to use: webroot, cloudflare, or hetzner
|
||||
CERTBOT_ACME_CHALLENGE_METHOD: "cloudflare"
|
||||
@@ -88,3 +91,33 @@ RBAC:
|
||||
GROUP:
|
||||
NAME: "/roles" # Name of the group which holds the RBAC roles
|
||||
CLAIM: "groups" # Name of the claim containing the RBAC groups
|
||||
|
||||
# You need to set both keys to enable them
|
||||
CAPTCHA:
|
||||
RECAPTCHA:
|
||||
KEY: ""
|
||||
SECRET: ""
|
||||
HCAPTCHA:
|
||||
KEY: ""
|
||||
SECRET: ""
|
||||
|
||||
RECAPTCHA_ENABLED: "{{ (CAPTCHA.RECAPTCHA.KEY | default('') | length > 0)
|
||||
and
|
||||
(CAPTCHA.RECAPTCHA.SECRET | default('') | length > 0) }}"
|
||||
|
||||
HCAPTCHA_ENABLED: "{{ (CAPTCHA.HCAPTCHA.KEY | default('') | length > 0)
|
||||
and
|
||||
(CAPTCHA.HCAPTCHA.SECRET | default('') | length > 0) }}"
|
||||
|
||||
# Applications which are always required
|
||||
WEBSERVER_CORE_APPLICATIONS:
|
||||
- web-svc-logout
|
||||
- web-svc-cdn
|
||||
|
||||
# Global flag for detecting containerized environments
|
||||
IS_CONTAINER: >-
|
||||
{{
|
||||
(ansible_virtualization_role | default('') == 'guest')
|
||||
and
|
||||
(ansible_virtualization_type | default('') in ['docker', 'podman', 'lxc', 'container'])
|
||||
}}
|
||||
|
||||
@@ -9,6 +9,7 @@ SYS_SERVICE_CLEANUP_BACKUPS: "{{ 'sys-ctl-cln-bkps' | get_se
|
||||
SYS_SERVICE_CLEANUP_BACKUPS_FAILED: "{{ 'sys-ctl-cln-faild-bkps' | get_service_name(SOFTWARE_NAME) }}"
|
||||
SYS_SERVICE_CLEANUP_ANONYMOUS_VOLUMES: "{{ 'sys-ctl-cln-anon-volumes' | get_service_name(SOFTWARE_NAME) }}"
|
||||
SYS_SERVICE_CLEANUP_DISC_SPACE: "{{ 'sys-ctl-cln-disc-space' | get_service_name(SOFTWARE_NAME) }}"
|
||||
SYS_SERVICE_CLEANUP_DOCKER: "{{ 'sys-ctl-cln-docker' | get_service_name(SOFTWARE_NAME) }}"
|
||||
SYS_SERVICE_OPTIMIZE_DRIVE: "{{ 'svc-opt-ssd-hdd' | get_service_name(SOFTWARE_NAME) }}"
|
||||
SYS_SERVICE_BACKUP_RMT_2_LOC: "{{ 'svc-bkp-rmt-2-loc' | get_service_name(SOFTWARE_NAME) }}"
|
||||
SYS_SERVICE_BACKUP_DOCKER_2_LOC: "{{ 'sys-ctl-bkp-docker-2-loc' | get_service_name(SOFTWARE_NAME) }}"
|
||||
|
||||
@@ -32,7 +32,8 @@ SYS_SCHEDULE_HEALTH_MSMTP: "*-*-* 00:00:00"
|
||||
SYS_SCHEDULE_CLEANUP_CERTS: "*-*-* 20:00" # Deletes and revokes unused certs once per day
|
||||
SYS_SCHEDULE_CLEANUP_FAILED_BACKUPS: "*-*-* 21:00" # Clean up failed docker backups once per day
|
||||
SYS_SCHEDULE_CLEANUP_BACKUPS: "*-*-* 22:00" # Cleanup backups once per day, MUST be called before disc space cleanup
|
||||
SYS_SCHEDULE_CLEANUP_DISC_SPACE: "*-*-* 23:00" # Cleanup disc space once per day
|
||||
SYS_SCHEDULE_CLEANUP_DOCKER: "*-*-* 23:00" # Cleanup docker anonymous volumes and prune ones per day
|
||||
SYS_SCHEDULE_CLEANUP_DISC_SPACE: "*-*-* 23:30" # Cleanup disc space once per day
|
||||
|
||||
### Schedule for repair services
|
||||
SYS_SCHEDULE_REPAIR_BTRFS_AUTO_BALANCER: "Sat *-*-01..07 00:00:00" # Execute btrfs auto balancer every first Saturday of a month
|
||||
|
||||
@@ -114,6 +114,16 @@ defaults_networks:
|
||||
subnet: 192.168.104.48/28
|
||||
web-app-mini-qr:
|
||||
subnet: 192.168.104.64/28
|
||||
web-app-shopware:
|
||||
subnet: 192.168.104.80/28
|
||||
web-svc-onlyoffice:
|
||||
subnet: 192.168.104.96/28
|
||||
web-app-suitecrm:
|
||||
subnet: 192.168.104.112/28
|
||||
web-app-littlejs:
|
||||
subnet: 192.168.104.128/28
|
||||
web-app-roulette-wheel:
|
||||
subnet: 192.168.104.144/28
|
||||
|
||||
# /24 Networks / 254 Usable Clients
|
||||
web-app-bigbluebutton:
|
||||
|
||||
@@ -18,6 +18,7 @@ ports:
|
||||
web-app-fusiondirectory: 4187
|
||||
web-app-gitea: 4188
|
||||
web-app-snipe-it: 4189
|
||||
web-app-suitecrm: 4190
|
||||
ldap:
|
||||
svc-db-openldap: 389
|
||||
http:
|
||||
@@ -81,6 +82,10 @@ ports:
|
||||
web-app-minio_api: 8057
|
||||
web-app-minio_console: 8058
|
||||
web-app-mini-qr: 8059
|
||||
web-app-shopware: 8060
|
||||
web-svc-onlyoffice: 8061
|
||||
web-app-suitecrm: 8062
|
||||
web-app-littlejs: 8063
|
||||
web-app-bigbluebutton: 48087 # This port is predefined by bbb. @todo Try to change this to a 8XXX port
|
||||
public:
|
||||
# The following ports should be changed to 22 on the subdomain via stream mapping
|
||||
|
||||
@@ -87,7 +87,7 @@ LDAP:
|
||||
ID: "{{ _ldap_user_id }}"
|
||||
MAIL: "mail"
|
||||
FULLNAME: "cn"
|
||||
FIRSTNAME: "givenname"
|
||||
FIRSTNAME: "givenName"
|
||||
SURNAME: "sn"
|
||||
SSH_PUBLIC_KEY: "sshPublicKey"
|
||||
NEXTCLOUD_QUOTA: "nextcloudQuota"
|
||||
|
||||
@@ -12,7 +12,7 @@ defaults_service_provider:
|
||||
logo: "{{ applications['web-svc-asset'].url ~ '/img/logo.png' }}"
|
||||
platform:
|
||||
titel: "{{ SOFTWARE_NAME }}"
|
||||
subtitel: "One login. Infinite applications."
|
||||
subtitel: "One login. Infinite Solutions."
|
||||
logo: "{{ applications['web-svc-asset'].url ~ '/img/logo.png' }}"
|
||||
favicon: "{{ applications['web-svc-asset'].url ~ '/img/favicon.ico' }}"
|
||||
contact:
|
||||
@@ -26,6 +26,7 @@ defaults_service_provider:
|
||||
pixelfed: "{{ '@' ~ users.contact.username ~ '@' ~ domains | get_domain('web-app-pixelfed') if 'web-app-pixelfed' in group_names else '' }}"
|
||||
phone: "+0 000 000 404"
|
||||
wordpress: "{{ '@' ~ users.contact.username ~ '@' ~ domains | get_domain('web-app-wordpress') if 'web-app-wordpress' in group_names else '' }}"
|
||||
newsletter: "{{ [ domains | get_url('web-app-listmonk', WEB_PROTOCOL), '/subscription/form' ] | url_join if 'web-app-listmonk' in group_names else '' }}"
|
||||
|
||||
legal:
|
||||
editorial_responsible: "Johannes Gutenberg"
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Host resources
|
||||
RESOURCE_HOST_CPUS: "{{ ansible_processor_vcpus | int }}"
|
||||
RESOURCE_HOST_MEM: "{{ (ansible_memtotal_mb | int) // 1024 }}"
|
||||
RESOURCE_HOST_CPUS: "{{ ansible_facts['processor_vcpus'] | int }}"
|
||||
RESOURCE_HOST_MEM: "{{ (ansible_facts['memtotal_mb'] | int) // 1024 }}"
|
||||
|
||||
# Reserve for OS
|
||||
RESOURCE_HOST_RESERVE_CPU: 2
|
||||
|
||||
main.py (222 lines changed)
@@ -16,10 +16,31 @@ try:
|
||||
from colorama import init as colorama_init, Fore, Back, Style
|
||||
colorama_init(autoreset=True)
|
||||
except ImportError:
|
||||
class Dummy:
|
||||
def __getattr__(self, name): return ''
|
||||
Fore = Back = Style = Dummy()
|
||||
# Minimal ANSI fallback if colorama is not available
|
||||
class Style:
|
||||
RESET_ALL = "\033[0m"
|
||||
BRIGHT = "\033[1m"
|
||||
DIM = "\033[2m"
|
||||
|
||||
class Fore:
|
||||
BLACK = "\033[30m"
|
||||
RED = "\033[31m"
|
||||
GREEN = "\033[32m"
|
||||
YELLOW = "\033[33m"
|
||||
BLUE = "\033[34m"
|
||||
MAGENTA = "\033[35m"
|
||||
CYAN = "\033[36m"
|
||||
WHITE = "\033[37m"
|
||||
|
||||
class Back:
|
||||
BLACK = "\033[40m"
|
||||
RED = "\033[41m"
|
||||
GREEN = "\033[42m"
|
||||
YELLOW = "\033[43m"
|
||||
BLUE = "\033[44m"
|
||||
MAGENTA = "\033[45m"
|
||||
CYAN = "\033[46m"
|
||||
WHITE = "\033[47m"
|
||||
|
||||
def color_text(text, color):
|
||||
return f"{color}{text}{Style.RESET_ALL}"
|
||||
@@ -86,10 +107,128 @@ def extract_description_via_help(cli_script_path):
|
||||
except Exception:
|
||||
return "-"
|
||||
|
||||
def show_full_help_for_all(cli_dir, available):
|
||||
"""
|
||||
Print the full --help output for all discovered CLI commands.
|
||||
"""
|
||||
print(color_text("Infinito.Nexus CLI – Full Help Overview", Fore.CYAN + Style.BRIGHT))
|
||||
print()
|
||||
|
||||
for folder, cmd in available:
|
||||
# Build file path (e.g. "meta/j2/compiler.py")
|
||||
file_path = f"{folder + '/' if folder else ''}{cmd}.py"
|
||||
|
||||
# Build subcommand (spaces instead of slashes, no .py)
|
||||
if folder:
|
||||
subcommand = f"{folder.replace('/', ' ')} {cmd}"
|
||||
else:
|
||||
subcommand = cmd
|
||||
|
||||
# Colorful header
|
||||
print(color_text("=" * 80, Fore.BLUE + Style.BRIGHT))
|
||||
print(color_text(f"Subcommand: {subcommand}", Fore.YELLOW + Style.BRIGHT))
|
||||
print(color_text(f"File: {file_path}", Fore.CYAN))
|
||||
print(color_text("-" * 80, Fore.BLUE))
|
||||
|
||||
try:
|
||||
module = "cli." + file_path[:-3].replace(os.sep, ".")
|
||||
result = subprocess.run(
|
||||
[sys.executable, "-m", module, "--help"],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
check=False
|
||||
)
|
||||
if result.stdout:
|
||||
print(result.stdout.rstrip())
|
||||
if result.stderr:
|
||||
print(color_text(result.stderr.rstrip(), Fore.RED))
|
||||
except Exception as e:
|
||||
print(color_text(f"Failed to get help for {file_path}: {e}", Fore.RED))
|
||||
|
||||
print() # extra spacer between commands
|
||||
|
||||
def git_clean_repo():
|
||||
subprocess.run(['git', 'clean', '-Xfd'], check=True)
|
||||
|
||||
def print_global_help(available, cli_dir):
|
||||
"""
|
||||
Print the standard global help screen for the Infinito.Nexus CLI.
|
||||
This is used by both --help and --help-all.
|
||||
"""
|
||||
print(color_text("Infinito.Nexus CLI 🦫🌐🖥️", Fore.CYAN + Style.BRIGHT))
|
||||
print()
|
||||
print(color_text("Your Gateway to Automated IT Infrastructure Setup", Style.DIM))
|
||||
print()
|
||||
print(color_text(
|
||||
"Usage: infinito "
|
||||
"[--sound] "
|
||||
"[--no-signal] "
|
||||
"[--log] "
|
||||
"[--git-clean] "
|
||||
"[--infinite] "
|
||||
"[--help-all] "
|
||||
"[--alarm-timeout <seconds>] "
|
||||
"[-h|--help] "
|
||||
"<command> [options]",
|
||||
Fore.GREEN
|
||||
))
|
||||
print()
|
||||
# Use bright style for headings
|
||||
print(color_text("Options:", Style.BRIGHT))
|
||||
print(color_text(" --sound Play startup melody and warning sounds", Fore.YELLOW))
|
||||
print(color_text(" --no-signal Suppress success/failure signals", Fore.YELLOW))
|
||||
print(color_text(" --log Log all proxied command output to logfile.log", Fore.YELLOW))
|
||||
print(color_text(" --git-clean Remove all Git-ignored files before running", Fore.YELLOW))
|
||||
print(color_text(" --infinite Run the proxied command in an infinite loop", Fore.YELLOW))
|
||||
print(color_text(" --help-all Show full --help for all CLI commands", Fore.YELLOW))
|
||||
print(color_text(" --alarm-timeout Stop warnings and exit after N seconds (default: 60)", Fore.YELLOW))
|
||||
print(color_text(" -h, --help Show this help message and exit", Fore.YELLOW))
|
||||
print()
|
||||
print(color_text("Available commands:", Style.BRIGHT))
|
||||
print()
|
||||
|
||||
current_folder = None
|
||||
for folder, cmd in available:
|
||||
if folder != current_folder:
|
||||
if folder:
|
||||
print(color_text(f"{folder}/", Fore.MAGENTA))
|
||||
print()
|
||||
current_folder = folder
|
||||
desc = extract_description_via_help(
|
||||
os.path.join(cli_dir, *(folder.split('/') if folder else []), f"{cmd}.py")
|
||||
)
|
||||
print(color_text(format_command_help(cmd, desc, indent=2), ''), "\n")
|
||||
|
||||
print()
|
||||
print(color_text(
|
||||
"🔗 You can chain subcommands by specifying nested directories,",
|
||||
Fore.CYAN
|
||||
))
|
||||
print(color_text(
|
||||
" e.g. `infinito build defaults users` →",
|
||||
Fore.CYAN
|
||||
))
|
||||
print(color_text(
|
||||
" corresponds to `cli/build/defaults/users.py`.",
|
||||
Fore.CYAN
|
||||
))
|
||||
print()
|
||||
print(color_text(
|
||||
"Infinito.Nexus is a product of Kevin Veen-Birkenbach, https://cybermaster.space .\n",
|
||||
Style.DIM
|
||||
))
|
||||
print(color_text(
|
||||
"Test and use productively on https://infinito.nexus .\n",
|
||||
Style.DIM
|
||||
))
|
||||
print(color_text(
|
||||
"For commercial use, a license agreement with Kevin Veen-Birkenbach is required. \n",
|
||||
Style.DIM
|
||||
))
|
||||
print(color_text("License: https://s.infinito.nexus/license", Style.DIM))
|
||||
print()
|
||||
print(color_text("🎉🌈 Happy IT Infrastructuring! 🚀🔧✨", Fore.MAGENTA + Style.BRIGHT))
|
||||
print()
|
||||
|
||||
def play_start_intro():
|
||||
Sound.play_start_sound()
|
||||
@@ -163,6 +302,7 @@ if __name__ == "__main__":
|
||||
sys.argv.remove('--log')
|
||||
git_clean = '--git-clean' in sys.argv and (sys.argv.remove('--git-clean') or True)
|
||||
infinite = '--infinite' in sys.argv and (sys.argv.remove('--infinite') or True)
|
||||
help_all = '--help-all' in sys.argv and (sys.argv.remove('--help-all') or True)
|
||||
alarm_timeout = 60
|
||||
if '--alarm-timeout' in sys.argv:
|
||||
i = sys.argv.index('--alarm-timeout')
|
||||
@@ -188,72 +328,20 @@ if __name__ == "__main__":
|
||||
available = list_cli_commands(cli_dir)
|
||||
args = sys.argv[1:]
|
||||
|
||||
# Global "show help for all commands" mode
|
||||
if help_all:
|
||||
# 1) Print the normal global help (same as --help)
|
||||
print_global_help(available, cli_dir)
|
||||
|
||||
# 2) Then print detailed --help for all subcommands
|
||||
print(color_text("Full detailed help for all subcommands:", Style.BRIGHT))
|
||||
print()
|
||||
show_full_help_for_all(cli_dir, available)
|
||||
sys.exit(0)
|
||||
|
||||
# Global help
|
||||
if not args or args[0] in ('-h', '--help'):
|
||||
print(color_text("Infinito.Nexus CLI 🦫🌐🖥️", Fore.CYAN + Style.BRIGHT))
|
||||
print()
|
||||
print(color_text("Your Gateway to Automated IT Infrastructure Setup", Style.DIM))
|
||||
print()
|
||||
print(color_text(
|
||||
"Usage: infinito [--sound] [--no-signal] [--log] [--git-clean] [--infinite] <command> [options]",
|
||||
Fore.GREEN
|
||||
))
|
||||
print()
|
||||
# Use bright style for headings
|
||||
print(color_text("Options:", Style.BRIGHT))
|
||||
print(color_text(" --sound Play startup melody and warning sounds", Fore.YELLOW))
|
||||
print(color_text(" --no-signal Suppress success/failure signals", Fore.YELLOW))
|
||||
print(color_text(" --log Log all proxied command output to logfile.log", Fore.YELLOW))
|
||||
print(color_text(" --git-clean Remove all Git-ignored files before running", Fore.YELLOW))
|
||||
print(color_text(" --infinite Run the proxied command in an infinite loop", Fore.YELLOW))
|
||||
print(color_text(" --alarm-timeout Stop warnings and exit after N seconds (default: 60)", Fore.YELLOW))
|
||||
print(color_text(" -h, --help Show this help message and exit", Fore.YELLOW))
|
||||
print()
|
||||
print(color_text("Available commands:", Style.BRIGHT))
|
||||
print()
|
||||
|
||||
current_folder = None
|
||||
for folder, cmd in available:
|
||||
if folder != current_folder:
|
||||
if folder:
|
||||
print(color_text(f"{folder}/", Fore.MAGENTA))
|
||||
print()
|
||||
current_folder = folder
|
||||
desc = extract_description_via_help(
|
||||
os.path.join(cli_dir, *(folder.split('/') if folder else []), f"{cmd}.py")
|
||||
)
|
||||
print(color_text(format_command_help(cmd, desc, indent=2), ''), "\n")
|
||||
|
||||
print()
|
||||
print(color_text(
|
||||
"🔗 You can chain subcommands by specifying nested directories,",
|
||||
Fore.CYAN
|
||||
))
|
||||
print(color_text(
|
||||
" e.g. `infinito build defaults users` →",
|
||||
Fore.CYAN
|
||||
))
|
||||
print(color_text(
|
||||
" corresponds to `cli/build/defaults/users.py`.",
|
||||
Fore.CYAN
|
||||
))
|
||||
print()
|
||||
print(color_text(
|
||||
"Infinito.Nexus is a product of Kevin Veen-Birkenbach, https://cybermaster.space .\n",
|
||||
Style.DIM
|
||||
))
|
||||
print(color_text(
|
||||
"Test and use productively on https://infinito.nexus .\n",
|
||||
Style.DIM
|
||||
))
|
||||
print(color_text(
|
||||
"For commercial use, a license agreement with Kevin Veen-Birkenbach is required. \n",
|
||||
Style.DIM
|
||||
))
|
||||
print(color_text("License: https://s.infinito.nexus/license", Style.DIM))
|
||||
print()
|
||||
print(color_text("🎉🌈 Happy IT Infrastructuring! 🚀🔧✨", Fore.MAGENTA + Style.BRIGHT))
|
||||
print()
|
||||
print_global_help(available, cli_dir)
|
||||
sys.exit(0)
|
||||
|
||||
# Directory-specific help
|
||||
|
||||
@@ -10,12 +10,20 @@ import sys
|
||||
import base64
|
||||
|
||||
class InventoryManager:
|
||||
def __init__(self, role_path: Path, inventory_path: Path, vault_pw: str, overrides: Dict[str, str]):
|
||||
def __init__(
|
||||
self,
|
||||
role_path: Path,
|
||||
inventory_path: Path,
|
||||
vault_pw: str,
|
||||
overrides: Dict[str, str],
|
||||
allow_empty_plain: bool = False,
|
||||
):
|
||||
"""Initialize the Inventory Manager."""
|
||||
self.role_path = role_path
|
||||
self.inventory_path = inventory_path
|
||||
self.vault_pw = vault_pw
|
||||
self.overrides = overrides
|
||||
self.allow_empty_plain = allow_empty_plain
|
||||
self.inventory = YamlHandler.load_yaml(inventory_path)
|
||||
self.schema = YamlHandler.load_yaml(role_path / "schema" / "main.yml")
|
||||
self.app_id = self.load_application_id(role_path)
|
||||
@@ -43,12 +51,10 @@ class InventoryManager:
|
||||
|
||||
# Check if 'central-database' is enabled in the features section of data
|
||||
if "features" in data:
|
||||
if "central_database" in data["features"] and \
|
||||
data["features"]["central_database"]:
|
||||
if "central_database" in data["features"] and data["features"]["central_database"]:
|
||||
# Add 'central_database' value (password) to credentials
|
||||
target.setdefault("credentials", {})["database_password"] = self.generate_value("alphanumeric")
|
||||
if "oauth2" in data["features"] and \
|
||||
data["features"]["oauth2"]:
|
||||
if "oauth2" in data["features"] and data["features"]["oauth2"]:
|
||||
target.setdefault("credentials", {})["oauth2_proxy_cookie_secret"] = self.generate_value("random_hex_16")
|
||||
|
||||
# Apply recursion only for the `credentials` section
|
||||
@@ -59,46 +65,59 @@ class InventoryManager:
|
||||
"""Recursively process only the 'credentials' section and generate values."""
|
||||
for key, meta in branch.items():
|
||||
full_key = f"{prefix}.{key}" if prefix else key
|
||||
|
||||
|
||||
# Only process 'credentials' section for encryption
|
||||
if prefix == "credentials" and isinstance(meta, dict) and all(k in meta for k in ("description", "algorithm", "validation")):
|
||||
if prefix == "credentials" and isinstance(meta, dict) and all(
|
||||
k in meta for k in ("description", "algorithm", "validation")
|
||||
):
|
||||
alg = meta["algorithm"]
|
||||
if alg == "plain":
|
||||
# Must be supplied via --set
|
||||
# Must be supplied via --set, unless allow_empty_plain=True
|
||||
if full_key not in self.overrides:
|
||||
print(f"ERROR: Plain algorithm for '{full_key}' requires override via --set {full_key}=<value>", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
plain = self.overrides[full_key]
|
||||
if self.allow_empty_plain:
|
||||
plain = ""
|
||||
else:
|
||||
print(
|
||||
f"ERROR: Plain algorithm for '{full_key}' requires override via --set {full_key}=<value>",
|
||||
file=sys.stderr,
|
||||
)
|
||||
sys.exit(1)
|
||||
else:
|
||||
plain = self.overrides[full_key]
|
||||
else:
|
||||
plain = self.overrides.get(full_key, self.generate_value(alg))
|
||||
|
||||
|
||||
# Check if the value is already vaulted or if it's a dictionary
|
||||
existing_value = dest.get(key)
|
||||
|
||||
|
||||
# If existing_value is a dictionary, print a warning and skip encryption
|
||||
if isinstance(existing_value, dict):
|
||||
print(f"Skipping encryption for '{key}', as it is a dictionary.")
|
||||
print(f"Skipping encryption for '{key}', as it is a dictionary.", file=sys.stderr)
|
||||
continue
|
||||
|
||||
|
||||
# Check if the value is a VaultScalar and already vaulted
|
||||
if existing_value and isinstance(existing_value, VaultScalar):
|
||||
print(f"Skipping encryption for '{key}', as it is already vaulted.")
|
||||
print(f"Skipping encryption for '{key}', as it is already vaulted.", file=sys.stderr)
|
||||
continue
|
||||
|
||||
|
||||
# Empty strings should *not* be encrypted
|
||||
if plain == "":
|
||||
dest[key] = ""
|
||||
continue
|
||||
|
||||
# Encrypt only if it's not already vaulted
|
||||
snippet = self.vault_handler.encrypt_string(plain, key)
|
||||
lines = snippet.splitlines()
|
||||
indent = len(lines[1]) - len(lines[1].lstrip())
|
||||
body = "\n".join(line[indent:] for line in lines[1:])
|
||||
dest[key] = VaultScalar(body)
|
||||
|
||||
|
||||
elif isinstance(meta, dict):
|
||||
sub = dest.setdefault(key, {})
|
||||
self.recurse_credentials(meta, sub, full_key)
|
||||
else:
|
||||
dest[key] = meta
|
||||
|
||||
|
||||
def generate_secure_alphanumeric(self, length: int) -> str:
|
||||
"""Generate a cryptographically secure random alphanumeric string of the given length."""
|
||||
characters = string.ascii_letters + string.digits # a-zA-Z0-9
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
import os
|
||||
import warnings
|
||||
|
||||
class DummySound:
|
||||
@staticmethod
|
||||
@@ -16,7 +15,6 @@ class DummySound:
|
||||
_IN_DOCKER = os.path.exists('/.dockerenv')
|
||||
|
||||
if _IN_DOCKER:
|
||||
warnings.warn("Sound support disabled: running inside Docker.", RuntimeWarning)
|
||||
Sound = DummySound
|
||||
else:
|
||||
try:
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
- name: "Execute {{ SOFTWARE_NAME }} Play"
|
||||
- name: "Execute Infinito.Nexus Play"
|
||||
hosts: all
|
||||
tasks:
|
||||
- name: "Load 'constructor' tasks"
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
- name: Update package cache (Debian/Ubuntu)
|
||||
apt:
|
||||
update_cache: yes
|
||||
when: ansible_os_family == "Debian"
|
||||
when: ansible_facts['os_family'] == "Debian"
|
||||
|
||||
- name: Install Chromium browser
|
||||
package:
|
||||
|
||||
@@ -2,4 +2,4 @@
|
||||
|
||||
application_id: "desk-chromium"
|
||||
|
||||
chromium_package: "{{ 'chromium-browser' if ansible_os_family == 'Debian' else 'chromium' }}"
|
||||
chromium_package: "{{ 'chromium-browser' if ansible_facts['os_family'] == 'Debian' else 'chromium' }}"
|
||||
@@ -5,14 +5,14 @@
|
||||
|
||||
- name: Ensure autostart directory exists
|
||||
file:
|
||||
path: "{{ ansible_env.HOME }}/.config/autostart"
|
||||
path: "{{ ansible_facts['env']['HOME'] }}/.config/autostart"
|
||||
state: directory
|
||||
mode: '0755'
|
||||
become: false
|
||||
|
||||
- name: Add CopyQ to user autostart
|
||||
copy:
|
||||
dest: "{{ ansible_env.HOME }}/.config/autostart/copyq.desktop"
|
||||
dest: "{{ ansible_facts['env']['HOME'] }}/.config/autostart/copyq.desktop"
|
||||
content: |
|
||||
[Desktop Entry]
|
||||
Type=Application
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
- name: Ensure systemd user unit directory exists
|
||||
file:
|
||||
path: "{{ ansible_env.HOME }}/.config/systemd/user"
|
||||
path: "{{ ansible_facts['env']['HOME'] }}/.config/systemd/user"
|
||||
state: directory
|
||||
mode: '0755'
|
||||
become: false
|
||||
|
||||
- name: Install CopyQ user service unit
|
||||
copy:
|
||||
dest: "{{ ansible_env.HOME }}/.config/systemd/user/copyq.service"
|
||||
dest: "{{ ansible_facts['env']['HOME'] }}/.config/systemd/user/copyq.service"
|
||||
content: |
|
||||
[Unit]
|
||||
Description=CopyQ Clipboard Manager Server
|
||||
|
||||
@@ -10,12 +10,10 @@
|
||||
vars:
|
||||
package_name: gitconfig
|
||||
|
||||
- name: setup git
|
||||
command: gitconfig --merge-option rebase --name "{{users.client.full_name}}" --email "{{users.client.email}}" --website "{{users.client.website}}" --signing gpg --gpg-key "{{users.client.gpg}}"
|
||||
when: run_once_desk_git is not defined
|
||||
become: false
|
||||
- when: run_once_desk_git is not defined
|
||||
block:
|
||||
- name: setup git
|
||||
command: gitconfig --merge-option rebase --name "{{users.client.full_name}}" --email "{{users.client.email}}" --website "{{users.client.website}}" --signing gpg --gpg-key "{{users.client.gpg}}"
|
||||
become: false
|
||||
|
||||
- name: run the gitconfig tasks once
|
||||
set_fact:
|
||||
run_once_desk_git: true
|
||||
when: run_once_desk_git is not defined
|
||||
- include_tasks: utils/once/flag.yml
|
||||
@@ -20,4 +20,4 @@
|
||||
src: caffeine.desktop.j2
|
||||
dest: "{{auto_start_directory}}caffeine.desktop"
|
||||
|
||||
- include_tasks: utils/run_once.yml
|
||||
- include_tasks: utils/once/flag.yml
|
||||
|
||||
@@ -1,3 +1,2 @@
|
||||
- block:
|
||||
- include_tasks: 01_core.yml
|
||||
- include_tasks: 01_core.yml
|
||||
when: run_once_desk_gnome_caffeine is not defined
|
||||
|
||||
roles/desk-nextcloud/config/main.yml (new file, 1 line)
@@ -0,0 +1 @@
|
||||
{}
|
||||
@@ -1 +1,5 @@
|
||||
cloud_fqdn: # @todo Add detailed scheme for this entry
|
||||
credentials:
|
||||
cloud_fqdn:
|
||||
description: "Cloud fqdn"
|
||||
algorithm: "plain"
|
||||
validation: "^.*$"
|
||||
@@ -1,4 +1,4 @@
|
||||
application_id: desk-nextcloud
|
||||
nextcloud_user_home_directory: "/home/{{ users[desktop_username].username }}/"
|
||||
nextcloud_cloud_fqdn: "{{ applications | get_app_conf(application_id, 'cloud_fqdn') }}"
|
||||
nextcloud_cloud_directory: '{{nextcloud_user_home_directory}}Clouds/{{nextcloud_cloud_fqdn}}/{{ users[desktop_username].username }}/'
|
||||
nextcloud_cloud_fqdn: "{{ applications | get_app_conf(application_id, 'credentials.cloud_fqdn') }}"
|
||||
nextcloud_cloud_directory: '{{ nextcloud_user_home_directory }}Clouds/{{nextcloud_cloud_fqdn}}/{{ users[desktop_username].username }}/'
|
||||
|
||||
@@ -1,14 +1,13 @@
|
||||
- block:
|
||||
- name: Include dependency 'dev-yay'
|
||||
include_role:
|
||||
name: dev-yay
|
||||
when: run_once_dev_yay is not defined
|
||||
|
||||
- name: Include dependency 'dev-yay'
|
||||
include_role:
|
||||
name: dev-yay
|
||||
when: run_once_dev_yay is not defined
|
||||
|
||||
- name: install torrent software
|
||||
kewlfft.aur.aur:
|
||||
use: yay
|
||||
name:
|
||||
- qbittorrent
|
||||
- include_tasks: utils/run_once.yml
|
||||
- name: install torrent software
|
||||
kewlfft.aur.aur:
|
||||
use: yay
|
||||
name:
|
||||
- qbittorrent
|
||||
- include_tasks: utils/once/flag.yml
|
||||
when: run_once_desk_qbittorrent is not defined
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
- block:
|
||||
- name: Include dependency 'dev-yay'
|
||||
include_role:
|
||||
name: dev-yay
|
||||
when: run_once_dev_yay is not defined
|
||||
- name: Include dependency 'dev-yay'
|
||||
include_role:
|
||||
name: dev-yay
|
||||
when: run_once_dev_yay is not defined
|
||||
|
||||
- name: install spotify
|
||||
kewlfft.aur.aur:
|
||||
use: yay
|
||||
name:
|
||||
- spotify
|
||||
- include_tasks: utils/run_once.yml
|
||||
- name: install spotify
|
||||
kewlfft.aur.aur:
|
||||
use: yay
|
||||
name:
|
||||
- spotify
|
||||
- include_tasks: utils/once/flag.yml
|
||||
when: run_once_desk_spotify is not defined
|
||||
|
||||
@@ -5,9 +5,10 @@
|
||||
|
||||
- name: pull ssh repository from {{desk_ssh_repository}}
|
||||
git:
|
||||
repo: "{{desk_ssh_repository}}"
|
||||
dest: "$HOME/.ssh"
|
||||
update: yes
|
||||
repo: "{{desk_ssh_repository}}"
|
||||
dest: "$HOME/.ssh"
|
||||
update: yes
|
||||
depth: 1
|
||||
register: git_result
|
||||
ignore_errors: true
|
||||
become: false
|
||||
@@ -50,4 +51,4 @@
|
||||
mode: "0644"
|
||||
become: false
|
||||
|
||||
- include_tasks: utils/run_once.yml
|
||||
- include_tasks: utils/once/flag.yml
|
||||
@@ -1,3 +1,2 @@
|
||||
- block:
|
||||
- include_tasks: 01_core.yml
|
||||
- include_tasks: 01_core.yml
|
||||
when: run_once_desk_ssh is not defined
|
||||
@@ -1,13 +1,13 @@
|
||||
- block:
|
||||
- name: Include dependency 'dev-yay'
|
||||
include_role:
|
||||
name: dev-yay
|
||||
- name: Include dependency 'dev-yay'
|
||||
include_role:
|
||||
name: dev-yay
|
||||
|
||||
- name: install video conference software
|
||||
kewlfft.aur.aur:
|
||||
use: yay
|
||||
name:
|
||||
- zoom
|
||||
become: false
|
||||
- include_tasks: utils/run_once.yml
|
||||
- name: install video conference software
|
||||
kewlfft.aur.aur:
|
||||
use: yay
|
||||
name:
|
||||
- zoom
|
||||
become: false
|
||||
- include_tasks: utils/once/flag.yml
|
||||
when: run_once_desk_zoom is not defined
|
||||
|
||||
@@ -1,11 +1,9 @@
|
||||
---
|
||||
- name: Install fakeroot
|
||||
community.general.pacman:
|
||||
name: fakeroot
|
||||
state: present
|
||||
when: run_once_dev_fakeroot is not defined
|
||||
- when: run_once_dev_fakeroot is not defined
|
||||
block:
|
||||
- name: Install fakeroot
|
||||
community.general.pacman:
|
||||
name: fakeroot
|
||||
state: present
|
||||
|
||||
- name: run the fakeroot tasks once
|
||||
set_fact:
|
||||
run_once_dev_fakeroot: true
|
||||
when: run_once_dev_fakeroot is not defined
|
||||
- include_tasks: utils/once/flag.yml
|
||||
@@ -1,10 +1,8 @@
|
||||
- name: install git
|
||||
community.general.pacman:
|
||||
name: git
|
||||
state: present
|
||||
when: run_once_dev_git is not defined
|
||||
- block:
|
||||
- name: install git
|
||||
community.general.pacman:
|
||||
name: git
|
||||
state: present
|
||||
|
||||
- name: run the git tasks once
|
||||
set_fact:
|
||||
run_once_dev_git: true
|
||||
- include_tasks: utils/once/flag.yml
|
||||
when: run_once_dev_git is not defined
|
||||
roles/dev-nix/README.md (new file, 32 lines)
@@ -0,0 +1,32 @@
|
||||
# dev-nix
|
||||
|
||||
This role installs the Nix package manager in a secure and reproducible way.
|
||||
|
||||
## Description
|
||||
|
||||
The role provides an offline-friendly and deterministic installation of Nix by
|
||||
using a locally stored installer script that is verified via SHA256 before
|
||||
execution. This avoids remote code downloads during Ansible runs and ensures a
|
||||
stable installation across different systems.
|
||||
|
||||
## Overview
|
||||
|
||||
The installer script is shipped with the role and copied to the target host.
|
||||
Its checksum is validated against a predefined SHA256 value. Only if the
|
||||
checksum matches, the installer is executed in multi-user (daemon) mode.
|
||||
Optionally, the role can install a small shell snippet to automatically load
|
||||
the Nix environment.
|
||||
|
||||
## Features
|
||||
|
||||
- Local, pinned Nix installer (no network download at runtime)
|
||||
- SHA256 checksum verification
|
||||
- Multi-user (daemon) installation mode
|
||||
- Optional shell integration via `/etc/profile.d`
|
||||
- Fully idempotent and distro-agnostic
|
||||
|
||||
## Further Resources
|
||||
|
||||
- Nix project: https://nixos.org
|
||||
- Nix releases: https://releases.nixos.org
|
||||
- Infinito.Nexus License: https://s.infinito.nexus/license
|
||||
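A minimal usage sketch for the role (not part of the README; the play wiring is assumed, the variable names are taken from the role defaults further down in this changeset):

```yaml
# Hypothetical play snippet
- hosts: workstations
  become: true
  roles:
    - role: dev-nix
      vars:
        dev_nix_installer_version: "2.32.4"
        dev_nix_enable_shell_snippet: true
        dev_nix_enable_experimental_features: true
```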
roles/dev-nix/TODO.md (new file, 2 lines)
@@ -0,0 +1,2 @@
|
||||
# to-dos
|
||||
- Implement better hash validation for security
|
||||
roles/dev-nix/defaults/main.yml (new file, 34 lines)
@@ -0,0 +1,34 @@
|
||||
---
|
||||
# Nix version to install via official installer
|
||||
dev_nix_installer_version: "2.32.4"
|
||||
|
||||
# Base URL for Nix releases
|
||||
dev_nix_installer_base_url: "https://releases.nixos.org/nix"
|
||||
|
||||
# Full URL to the installer script (can be overridden if needed)
|
||||
dev_nix_installer_url: >-
|
||||
{{ dev_nix_installer_base_url }}/nix-{{ dev_nix_installer_version }}/install
|
||||
|
||||
# Full URL to the SHA256 checksum file
|
||||
dev_nix_installer_sha256_url: "{{ dev_nix_installer_url }}.sha256"
|
||||
|
||||
# Path where the installer will be downloaded on the target host
|
||||
dev_nix_installer_dest: "/usr/local/share/nix-install.sh"
|
||||
|
||||
# Will be filled at runtime from dev_nix_installer_sha256_url
|
||||
dev_nix_installer_sha256: ""
|
||||
|
||||
# Whether to drop a small shell snippet into /etc/profile.d to ensure
|
||||
# Nix environment is available for login shells.
|
||||
dev_nix_enable_shell_snippet: false
|
||||
|
||||
# Path of the profile.d snippet
|
||||
dev_nix_shell_snippet_path: "/etc/profile.d/nix.sh"
|
||||
|
||||
# Enable experimental features such as nix-command and flakes
|
||||
dev_nix_enable_experimental_features: true
|
||||
|
||||
# List of experimental features to enable when dev_nix_enable_experimental_features is true
|
||||
dev_nix_experimental_features:
|
||||
- nix-command
|
||||
- flakes
|
||||
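With the pinned version above, the derived URLs resolve as follows (straight substitution of the defaults, not re-checked against the release server):

```yaml
# Illustrative resolution of the templated defaults:
# dev_nix_installer_url        -> https://releases.nixos.org/nix/nix-2.32.4/install
# dev_nix_installer_sha256_url -> https://releases.nixos.org/nix/nix-2.32.4/install.sha256
```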
roles/dev-nix/files/nix-install.sh (new file, 1 line)
@@ -0,0 +1 @@
|
||||
chmod +x roles/dev-nix/files/nix-install.sh
|
||||
roles/dev-nix/meta/main.yml (new file, 37 lines)
@@ -0,0 +1,37 @@
|
||||
galaxy_info:
|
||||
author: "Kevin Veen-Birkenbach"
|
||||
description: "Installs the Nix package manager using a locally stored installer script with SHA256 verification."
|
||||
license: "Infinito.Nexus NonCommercial License"
|
||||
license_url: "https://s.infinito.nexus/license"
|
||||
company: |
|
||||
Kevin Veen-Birkenbach
|
||||
Consulting & Coaching Solutions
|
||||
https://www.veen.world
|
||||
min_ansible_version: "2.9"
|
||||
platforms:
|
||||
- name: Archlinux
|
||||
versions:
|
||||
- rolling
|
||||
- name: Debian
|
||||
versions:
|
||||
- all
|
||||
- name: Ubuntu
|
||||
versions:
|
||||
- all
|
||||
- name: EL
|
||||
versions:
|
||||
- all
|
||||
- name: Fedora
|
||||
versions:
|
||||
- all
|
||||
- name: GenericLinux
|
||||
versions:
|
||||
- all
|
||||
galaxy_tags:
|
||||
- nix
|
||||
- devtools
|
||||
- development
|
||||
- build
|
||||
- automation
|
||||
- infinito-nexus
|
||||
dependencies: []
|
||||
roles/dev-nix/tasks/01_core.yml (new file, 49 lines)
@@ -0,0 +1,49 @@
|
||||
---
|
||||
# Install Nix differently depending on the target platform:
|
||||
# - Arch-based systems: install via package manager
|
||||
# - Non-Arch systems: use the official installer with SHA256 verification
|
||||
|
||||
# 1) Arch-based systems: just install the distro package
|
||||
- name: Install Nix via package manager on Arch-based systems
|
||||
community.general.pacman:
|
||||
name: nix
|
||||
state: present
|
||||
become: true
|
||||
when: ansible_facts.os_family == "Archlinux"
|
||||
|
||||
# 2) Non-Arch systems: delegate installer logic to a separate task file
|
||||
- name: Include non-Arch installer logic
|
||||
ansible.builtin.include_tasks: 02_non_arch_installer.yml
|
||||
when: ansible_facts.os_family != "Archlinux"
|
||||
|
||||
# 3) Configure Nix experimental features (common for all platforms)
|
||||
- name: Ensure Nix config directory exists
|
||||
ansible.builtin.file:
|
||||
path: /etc/nix
|
||||
state: directory
|
||||
mode: "0755"
|
||||
when: dev_nix_enable_experimental_features | bool
|
||||
become: true
|
||||
|
||||
- name: Deploy Nix configuration (nix.conf)
|
||||
ansible.builtin.template:
|
||||
src: "nix.conf.j2"
|
||||
dest: "/etc/nix/nix.conf"
|
||||
mode: "0644"
|
||||
become: true
|
||||
|
||||
# 4) Optionally drop shell snippet for Nix
|
||||
- name: Optionally drop shell snippet for Nix
|
||||
ansible.builtin.copy:
|
||||
dest: "{{ dev_nix_shell_snippet_path }}"
|
||||
mode: "0644"
|
||||
content: |
|
||||
# Added by dev-nix Ansible role
|
||||
if [ -e /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh ]; then
|
||||
. /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh
|
||||
fi
|
||||
when: dev_nix_enable_shell_snippet | bool
|
||||
become: true
|
||||
|
||||
# 5) Mark this role as "run once" in your global once-flag system
|
||||
- include_tasks: utils/once/flag.yml
|
||||
roles/dev-nix/tasks/02_non_arch_installer.yml (new file, 37 lines)
@@ -0,0 +1,37 @@
|
||||
---
|
||||
# Non-Arch installer logic:
|
||||
# Download the official Nix installer and its SHA256 from releases.nixos.org
|
||||
# and run the daemon (multi-user) installer.
|
||||
|
||||
# 1) Fetch the official SHA256 from releases.nixos.org on the control node
|
||||
- name: Fetch official Nix installer SHA256
|
||||
ansible.builtin.uri:
|
||||
url: "{{ dev_nix_installer_sha256_url }}"
|
||||
return_content: true
|
||||
register: dev_nix_official_sha_response
|
||||
delegate_to: localhost
|
||||
run_once: true
|
||||
|
||||
- name: Set expected installer checksum from official SHA256
|
||||
ansible.builtin.set_fact:
|
||||
dev_nix_installer_sha256: >-
|
||||
{{ dev_nix_official_sha_response.content.split()[0] | trim }}
|
||||
run_once: true
|
||||
|
||||
# 2) Download installer script on the target and verify via checksum
|
||||
- name: Download Nix installer script from official releases
|
||||
ansible.builtin.get_url:
|
||||
url: "{{ dev_nix_installer_url }}"
|
||||
dest: "{{ dev_nix_installer_dest }}"
|
||||
mode: "0755"
|
||||
# get_url will verify the checksum and fail if it does not match
|
||||
checksum: "sha256:{{ dev_nix_installer_sha256 }}"
|
||||
become: true
|
||||
|
||||
# 3) Run Nix installer in daemon (multi-user) mode if Nix is not installed
|
||||
- name: Run Nix installer in daemon (multi-user) mode if Nix is not installed
|
||||
ansible.builtin.shell: >
|
||||
"{{ dev_nix_installer_dest }}" --daemon
|
||||
args:
|
||||
creates: "/nix/store"
|
||||
become: true
|
||||
roles/dev-nix/tasks/main.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
|
||||
---
|
||||
- include_tasks: 01_core.yml
|
||||
when: run_once_dev_nix is not defined
|
||||
roles/dev-nix/templates/nix.conf.j2 (new file, 12 lines)
@@ -0,0 +1,12 @@
|
||||
# Nix configuration file
|
||||
# Managed by the {{ SOFTWARE_NAME }} dev-nix Ansible role
|
||||
|
||||
# Unix group containing the Nix build user accounts
|
||||
build-users-group = nixbld
|
||||
|
||||
# Enable experimental features if configured
|
||||
{% if dev_nix_enable_experimental_features %}
|
||||
experimental-features = {{ dev_nix_experimental_features | join(" ") }}
|
||||
{% endif %}
|
||||
|
||||
# (Optional) Add more global nix.conf options below
|
||||
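Rendered with the role defaults (experimental features enabled), the deployed `/etc/nix/nix.conf` would come out roughly as follows (illustrative rendering only):

```
# Nix configuration file
# Managed by the <SOFTWARE_NAME> dev-nix Ansible role

# Unix group containing the Nix build user accounts
build-users-group = nixbld

# Enable experimental features if configured
experimental-features = nix-command flakes

# (Optional) Add more global nix.conf options below
```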
roles/dev-nix/vars/main.yml (new file, 1 line)
@@ -0,0 +1 @@
|
||||
application_id: dev-nix
|
||||
@@ -1,15 +1,12 @@
|
||||
- block:
|
||||
- include_role:
|
||||
name: dev-gcc
|
||||
when: run_once_dev_gcc is not defined
|
||||
|
||||
- include_role:
|
||||
name: dev-gcc
|
||||
when: run_once_dev_gcc is not defined
|
||||
- name: python pip install
|
||||
community.general.pacman:
|
||||
name: python-pip
|
||||
state: present
|
||||
|
||||
- name: python pip install
|
||||
community.general.pacman:
|
||||
name: python-pip
|
||||
state: present
|
||||
|
||||
- include_tasks: utils/run_once.yml
|
||||
vars:
|
||||
flush_handlers: false
|
||||
- include_tasks: utils/once/flag.yml
|
||||
when: run_once_dev_python_pip is not defined
|
||||
|
||||
@@ -1,11 +1,9 @@
|
||||
---
|
||||
- name: python-yaml install
|
||||
community.general.pacman:
|
||||
name: python-yaml
|
||||
state: present
|
||||
when: run_once_dev_python_yaml is not defined
|
||||
|
||||
- name: run the python_yaml tasks once
|
||||
set_fact:
|
||||
run_once_dev_python_yaml: true
|
||||
when: run_once_dev_python_yaml is not defined
|
||||
- when: run_once_dev_python_yaml is not defined
|
||||
block:
|
||||
- name: python-yaml install
|
||||
community.general.pacman:
|
||||
name: python-yaml
|
||||
state: present
|
||||
|
||||
- include_tasks: utils/once/flag.yml
|
||||
|
||||
roles/dev-shell/tasks/01_core.yml (new file, 21 lines)
@@ -0,0 +1,21 @@
|
||||
- name: Ensure ~/.bash_profile sources ~/.profile
|
||||
lineinfile:
|
||||
path: "$HOME/.bash_profile"
|
||||
line: '[ -f ~/.profile ] && . ~/.profile'
|
||||
insertafter: EOF
|
||||
state: present
|
||||
create: yes
|
||||
mode: "0644"
|
||||
become: false
|
||||
|
||||
- name: Ensure ~/.zprofile sources ~/.profile
|
||||
lineinfile:
|
||||
path: "$HOME/.zprofile"
|
||||
line: '[ -f ~/.profile ] && . ~/.profile'
|
||||
insertafter: EOF
|
||||
state: present
|
||||
create: yes
|
||||
mode: "0644"
|
||||
become: false
|
||||
|
||||
- include_tasks: utils/once/flag.yml
|
||||
@@ -1,25 +1,2 @@
|
||||
---
|
||||
- block:
|
||||
- name: Ensure ~/.bash_profile sources ~/.profile
|
||||
lineinfile:
|
||||
path: "$HOME/.bash_profile"
|
||||
line: '[ -f ~/.profile ] && . ~/.profile'
|
||||
insertafter: EOF
|
||||
state: present
|
||||
create: yes
|
||||
mode: "0644"
|
||||
become: false
|
||||
|
||||
- name: Ensure ~/.zprofile sources ~/.profile
|
||||
lineinfile:
|
||||
path: "$HOME/.zprofile"
|
||||
line: '[ -f ~/.profile ] && . ~/.profile'
|
||||
insertafter: EOF
|
||||
state: present
|
||||
create: yes
|
||||
mode: "0644"
|
||||
become: false
|
||||
|
||||
- set_fact:
|
||||
run_once_dev_shell: true
|
||||
- include_tasks: 01_core.yml
|
||||
when: run_once_dev_shell is not defined
|
||||
|
||||
@@ -32,10 +32,11 @@
|
||||
become: true
|
||||
become_user: "{{ AUR_BUILDER_USER }}"
|
||||
git:
|
||||
repo: https://aur.archlinux.org/yay.git
|
||||
dest: "/home/{{ AUR_BUILDER_USER }}/yay"
|
||||
clone: yes
|
||||
repo: https://aur.archlinux.org/yay.git
|
||||
dest: "/home/{{ AUR_BUILDER_USER }}/yay"
|
||||
clone: yes
|
||||
update: yes
|
||||
depth: 1
|
||||
|
||||
- name: Build and install yay
|
||||
become: true
|
||||
@@ -55,4 +56,4 @@
|
||||
aur_only: yes
|
||||
when: MODE_UPDATE | bool
|
||||
|
||||
- include_tasks: utils/run_once.yml
|
||||
- include_tasks: utils/once/flag.yml
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
- block:
|
||||
- include_tasks: 01_core.yml
|
||||
- include_tasks: 01_core.yml
|
||||
vars:
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
when: run_once_dev_yay is not defined
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
docker_compose_file_creation_enabled: true # If set to false the file creation will be skipped
|
||||
docker_pull_git_repository: false # Activates docker repository download and routine
|
||||
docker_compose_flush_handlers: false # Set to true in the vars/main.yml of the including role to autoflush after docker compose routine
|
||||
docker_compose_flush_handlers: false # Set to true in the vars/main.yml of the including role to autoflush after docker compose routine
|
||||
docker_git_repository_pull: "{{ docker_git_repository_address is defined and docker_git_repository_address is string and docker_git_repository_address | length > 0 }}"
|
||||
|
||||
@@ -34,8 +34,8 @@
|
||||
register: compose_pull
|
||||
changed_when: "'pulled' in compose_pull.stdout"
|
||||
environment:
|
||||
COMPOSE_HTTP_TIMEOUT: 600
|
||||
DOCKER_CLIENT_TIMEOUT: 600
|
||||
COMPOSE_HTTP_TIMEOUT: "600"
|
||||
DOCKER_CLIENT_TIMEOUT: "600"
|
||||
when: MODE_UPDATE | bool
|
||||
listen:
|
||||
- docker compose up
|
||||
@@ -52,8 +52,8 @@
|
||||
chdir: "{{ docker_compose.directories.instance }}"
|
||||
executable: /bin/bash
|
||||
environment:
|
||||
COMPOSE_HTTP_TIMEOUT: 600
|
||||
DOCKER_CLIENT_TIMEOUT: 600
|
||||
COMPOSE_HTTP_TIMEOUT: "600"
|
||||
DOCKER_CLIENT_TIMEOUT: "600"
|
||||
# Faster build
|
||||
DOCKER_BUILDKIT: "1"
|
||||
COMPOSE_DOCKER_CLI_BUILD: "1"
|
||||
@@ -71,8 +71,8 @@
|
||||
chdir: "{{ docker_compose.directories.instance }}"
|
||||
executable: /bin/bash
|
||||
environment:
|
||||
COMPOSE_HTTP_TIMEOUT: 600
|
||||
DOCKER_CLIENT_TIMEOUT: 600
|
||||
COMPOSE_HTTP_TIMEOUT: "600"
|
||||
DOCKER_CLIENT_TIMEOUT: "600"
|
||||
listen:
|
||||
- docker compose up
|
||||
|
||||
|
||||
@@ -1,21 +1,23 @@
|
||||
- include_tasks: utils/once/flag.yml
|
||||
|
||||
- name: Remove all docker compose pull locks
|
||||
file:
|
||||
path: "{{ PATH_DOCKER_COMPOSE_PULL_LOCK_DIR }}"
|
||||
state: absent
|
||||
|
||||
- name: "Load docker container role"
|
||||
include_role:
|
||||
name: docker-container
|
||||
|
||||
when: run_once_docker_container is not defined
|
||||
- name: "reset (if enabled)"
|
||||
- name: "reset (if enabled) for {{ role_name}}"
|
||||
include_tasks: 02_reset.yml
|
||||
when: MODE_RESET | bool
|
||||
|
||||
- name: "create {{ PATH_DOCKER_COMPOSE_INSTANCES }}"
|
||||
file:
|
||||
path: "{{ PATH_DOCKER_COMPOSE_INSTANCES }}"
|
||||
state: directory
|
||||
mode: 0700
|
||||
owner: root
|
||||
group: root
|
||||
path: "{{ PATH_DOCKER_COMPOSE_INSTANCES }}"
|
||||
state: directory
|
||||
mode: 0700
|
||||
owner: root
|
||||
group: root
|
||||
|
||||
- name: "Load docker container role"
|
||||
include_role:
|
||||
name: docker-container
|
||||
when: run_once_docker_container is not defined
|
||||
|
||||
@@ -4,9 +4,9 @@
|
||||
|
||||
- name: pull docker repository
|
||||
git:
|
||||
repo: "{{ docker_repository_address }}"
|
||||
repo: "{{ docker_git_repository_address }}"
|
||||
dest: "{{ docker_repository_path }}"
|
||||
version: "{{ docker_repository_branch | default('main') }}"
|
||||
version: "{{ docker_git_repository_branch | default('main') }}"
|
||||
single_branch: yes
|
||||
depth: 1
|
||||
update: yes
|
||||
|
||||
@@ -1,51 +1,57 @@
|
||||
- name: "Find optional Dockerfile for {{ application_id }}"
|
||||
set_fact:
|
||||
dockerfile_src: >-
|
||||
{{ lookup(
|
||||
'first_found',
|
||||
{
|
||||
'files': [
|
||||
application_id | abs_role_path_by_application_id ~ '/templates/Dockerfile.j2',
|
||||
application_id | abs_role_path_by_application_id ~ '/files/Dockerfile'
|
||||
]
|
||||
},
|
||||
errors='ignore'
|
||||
) | default('', true)
|
||||
}}
|
||||
|
||||
- name: "Create (optional) Dockerfile for {{ application_id }}"
|
||||
template:
|
||||
src: "{{ item }}"
|
||||
src: "{{ dockerfile_src }}"
|
||||
dest: "{{ docker_compose.files.dockerfile }}"
|
||||
loop:
|
||||
- "{{ application_id | abs_role_path_by_application_id }}/templates/Dockerfile.j2"
|
||||
- "{{ application_id | abs_role_path_by_application_id }}/files/Dockerfile"
|
||||
notify:
|
||||
notify:
|
||||
- docker compose build
|
||||
- docker compose up
|
||||
register: create_dockerfile_result
|
||||
failed_when:
|
||||
- create_dockerfile_result is failed
|
||||
- "'Could not find or access' not in create_dockerfile_result.msg"
|
||||
when:
|
||||
- dockerfile_src | default('') | length > 0
|
||||
|
||||
- name: "Create (optional) '{{ docker_compose.files.env }}'"
|
||||
- name: "Create (optional) '{{ docker_compose.files.env }}'"
|
||||
template:
|
||||
src: "{{ item }}"
|
||||
dest: "{{ docker_compose.files.env }}"
|
||||
mode: '770'
|
||||
force: yes
|
||||
notify: docker compose up
|
||||
register: env_template
|
||||
loop:
|
||||
- "{{ application_id | abs_role_path_by_application_id }}/templates/env.j2"
|
||||
- "{{ application_id | abs_role_path_by_application_id }}/files/env"
|
||||
failed_when:
|
||||
- env_template is failed
|
||||
- "'Could not find or access' not in env_template.msg"
|
||||
src: "{{ item }}"
|
||||
dest: "{{ docker_compose.files.env }}"
|
||||
mode: '770'
|
||||
force: yes
|
||||
notify: docker compose up
|
||||
with_first_found:
|
||||
- files:
|
||||
- "{{ application_id | abs_role_path_by_application_id }}/templates/env.j2"
|
||||
- "{{ application_id | abs_role_path_by_application_id }}/files/env"
|
||||
skip: true
|
||||
|
||||
- name: "Create (optional) '{{ docker_compose.files.docker_compose_override }}'"
|
||||
- name: "Create (optional) '{{ docker_compose.files.docker_compose_override }}'"
|
||||
template:
|
||||
src: "{{ item }}"
|
||||
dest: "{{ docker_compose.files.docker_compose_override }}"
|
||||
mode: '770'
|
||||
force: yes
|
||||
notify: docker compose up
|
||||
register: docker_compose_override_template
|
||||
loop:
|
||||
- "{{ application_id | abs_role_path_by_application_id }}/templates/docker-compose.override.yml.j2"
|
||||
- "{{ application_id | abs_role_path_by_application_id }}/files/docker-compose.override.yml"
|
||||
failed_when:
|
||||
- docker_compose_override_template is failed
|
||||
- "'Could not find or access' not in docker_compose_override_template.msg"
|
||||
src: "{{ item }}"
|
||||
dest: "{{ docker_compose.files.docker_compose_override }}"
|
||||
mode: '770'
|
||||
force: yes
|
||||
notify: docker compose up
|
||||
with_first_found:
|
||||
- files:
|
||||
- "{{ application_id | abs_role_path_by_application_id }}/templates/docker-compose.override.yml.j2"
|
||||
- "{{ application_id | abs_role_path_by_application_id }}/files/docker-compose.override.yml"
|
||||
skip: true
|
||||
|
||||
- name: "Create (obligatoric) '{{ docker_compose.files.docker_compose }}'"
|
||||
- name: "Create (obligatoric) '{{ docker_compose.files.docker_compose }}'"
|
||||
template:
|
||||
src: "docker-compose.yml.j2"
|
||||
dest: "{{ docker_compose.files.docker_compose }}"
|
||||
notify: docker compose up
|
||||
register: docker_compose_template
|
||||
src: "docker-compose.yml.j2"
|
||||
dest: "{{ docker_compose.files.docker_compose }}"
|
||||
notify: docker compose up
|
||||
register: docker_compose_template
|
||||
@@ -1,6 +1,4 @@
|
||||
- block:
|
||||
- include_tasks: 01_core.yml
|
||||
- include_tasks: utils/run_once.yml
|
||||
- include_tasks: 01_core.yml
|
||||
when: run_once_docker_compose is not defined
|
||||
|
||||
- name: "Load variables from {{ DOCKER_COMPOSE_VARIABLE_FILE }} for whole play"
|
||||
@@ -17,15 +15,12 @@
|
||||
|
||||
- name: "Include routines to set up a git repository based installation for '{{ application_id }}'."
|
||||
include_tasks: "03_repository.yml"
|
||||
when: docker_pull_git_repository | bool
|
||||
when: docker_git_repository_pull | bool
|
||||
|
||||
- block:
|
||||
- name: "Include file management routines for '{{ application_id }}'."
|
||||
include_tasks: "04_files.yml"
|
||||
- name: "Ensure that {{ docker_compose.directories.instance }} is up"
|
||||
include_tasks: "05_ensure_up.yml"
|
||||
include_tasks: "utils/up.yml"
|
||||
when: docker_compose_file_creation_enabled | bool
|
||||
|
||||
- name: "flush docker compose for '{{ application_id }}'"
|
||||
meta: flush_handlers
|
||||
when: docker_compose_flush_handlers | bool
|
||||
|
||||
roles/docker-compose/tasks/utils/network.yml (new file, 17 lines)
@@ -0,0 +1,17 @@
|
||||
- name: Load Docker
|
||||
include_role:
|
||||
name: sys-svc-docker
|
||||
when: run_once_sys_svc_docker is not defined
|
||||
|
||||
- name: Create docker network
|
||||
community.docker.docker_network:
|
||||
name: "{{ docker_network_name }}"
|
||||
state: present
|
||||
ipam_config:
|
||||
- subnet: "{{ docker_network_subnet }}"
|
||||
|
||||
- name: "include docker-compose role"
|
||||
include_role:
|
||||
name: docker-compose
|
||||
vars:
|
||||
docker_compose_flush_handlers: true
|
||||
@@ -13,10 +13,15 @@
|
||||
(docker_ps.stderr | default(''))
|
||||
| regex_search('(no configuration file provided|no such file or directory|env file .* not found)') is none
|
||||
)
|
||||
when: >
|
||||
not (
|
||||
docker_compose_template.changed | default(false)
|
||||
or
|
||||
env_template.changed | default(false)
|
||||
)
|
||||
notify: docker compose up
|
||||
when:
|
||||
- >
|
||||
not (
|
||||
docker_compose_template.changed | default(false)
|
||||
or
|
||||
env_template.changed | default(false)
|
||||
)
|
||||
notify: docker compose up
|
||||
|
||||
- name: "Flush Docker Compose Up Handlers for {{ docker_compose.directories.instance }}"
|
||||
meta: flush_handlers
|
||||
when: docker_compose_flush_handlers | bool
|
||||
@@ -1,2 +1,2 @@
|
||||
DOCKER_COMPOSE_VARIABLE_FILE: "{{ role_path }}/vars/docker-compose.yml"
|
||||
DOCKER_COMPOSE_VARIABLE_FILE: "{{ [ role_path, 'vars/docker-compose.yml' ] | path_join }}"
|
||||
DOCKER_COMPOSE_DOWN_ALL_PACKAGE: "docodol"
|
||||
roles/docker-container/tasks/01_core.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
- include_role:
|
||||
name: sys-svc-docker
|
||||
when: run_once_sys_svc_docker is not defined
|
||||
|
||||
- include_tasks: utils/once/flag.yml
|
||||
@@ -1,6 +1,2 @@
|
||||
- block:
|
||||
- include_role:
|
||||
name: sys-svc-docker
|
||||
when: run_once_sys_svc_docker is not defined
|
||||
- include_tasks: utils/run_once.yml
|
||||
when: run_once_docker_container is not defined
|
||||
- include_tasks: 01_core.yml
|
||||
when: run_once_docker_container is not defined
|
||||
@@ -6,7 +6,7 @@
|
||||
- "{{ docker_compose.files.env }}"
|
||||
{% endif %}
|
||||
logging:
|
||||
driver: journald
|
||||
driver: {{ "json-file" if IS_CONTAINER | bool else 'journald' }}
|
||||
{% filter indent(4) %}
|
||||
{% include 'roles/docker-container/templates/resource.yml.j2' %}
|
||||
{% endfilter %}
|
||||
|
||||
roles/docker-container/templates/healthcheck/http.yml.j2 (new file, 31 lines)
@@ -0,0 +1,31 @@
|
||||
{# ------------------------------------------------------------------------------
|
||||
Healthcheck: HTTP Local
|
||||
------------------------------------------------------------------------------
|
||||
This template defines a generic HTTP healthcheck for containers exposing
|
||||
a web service on a local port (e.g., Nginx, Apache, PHP-FPM, Shopware, etc.).
|
||||
|
||||
It uses `wget` or `curl` (as fallback) to test if the container responds on
|
||||
http://127.0.0.1:{{ container_port }}/. If the request succeeds, Docker marks
|
||||
the container as "healthy"; otherwise, as "unhealthy".
|
||||
|
||||
Parameters:
|
||||
- container_port: The internal port the service listens on.
|
||||
|
||||
Timing:
|
||||
- interval: 30s → Check every 30 seconds
|
||||
- timeout: 5s → Each check must complete within 5 seconds
|
||||
- retries: 5 → Mark unhealthy after 5 consecutive failures
|
||||
- start_period: 20s → Grace period before health checks begin
|
||||
|
||||
Usage:
|
||||
{% filter indent(4) %}
|
||||
{% include 'roles/docker-container/templates/healthcheck/http.yml.j2' %}
|
||||
{% endfilter %}
|
||||
------------------------------------------------------------------------------
|
||||
#}
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "wget -qO- http://127.0.0.1:{{ container_port }}/ >/dev/null || curl -fsS http://127.0.0.1:{{ container_port }}/ >/dev/null"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
start_period: 20s
|
||||
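Included with `container_port: 8080`, the template renders to a service-level block along these lines (illustrative substitution, not taken from an actual compose file in this changeset):

```yaml
healthcheck:
  test: ["CMD-SHELL", "wget -qO- http://127.0.0.1:8080/ >/dev/null || curl -fsS http://127.0.0.1:8080/ >/dev/null"]
  interval: 30s
  timeout: 5s
  retries: 5
  start_period: 20s
```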
@@ -11,9 +11,7 @@
|
||||
- imagescan-plugin-networkscan
|
||||
- epson-inkjet-printer-escpr
|
||||
- epson-inkjet-printer-escpr2
|
||||
- imagescan
|
||||
become: false
|
||||
|
||||
- name: install imagescan
|
||||
community.general.pacman:
|
||||
name: imagescan
|
||||
state: present
|
||||
- include_tasks: utils/once/flag.yml
|
||||
@@ -1,5 +1,2 @@
|
||||
- block:
|
||||
- include_tasks: 01_core.yml
|
||||
- set_fact:
|
||||
run_once_drv_epson_multiprinter: true
|
||||
- include_tasks: 01_core.yml
|
||||
when: run_once_drv_epson_multiprinter is not defined
|
||||
|
||||
@@ -17,10 +17,8 @@
|
||||
line: 'HandleLidSwitch=hibernate'
|
||||
backup: yes
|
||||
notify: Restart systemd-logind
|
||||
become: true
|
||||
|
||||
- name: Configure systemd to lock session when lid is closed on external power
|
||||
become: true
|
||||
lineinfile:
|
||||
path: /etc/systemd/logind.conf
|
||||
regexp: '^#?HandleLidSwitchExternalPower='
|
||||
@@ -30,7 +28,6 @@
|
||||
become: true
|
||||
|
||||
- name: Configure systemd to lock session when lid is closed while docked
|
||||
become: true
|
||||
lineinfile:
|
||||
path: /etc/systemd/logind.conf
|
||||
regexp: '^#?HandleLidSwitchDocked='
|
||||
|
||||
@@ -1,2 +1,32 @@
|
||||
- name: Install nonfree drivers
|
||||
ansible.builtin.shell: mhwd -a pci nonfree 0300
|
||||
- name: Gather OS facts (ensure we know distribution)
|
||||
ansible.builtin.setup:
|
||||
when: ansible_facts is not defined
|
||||
|
||||
- name: Ensure mhwd is installed on Manjaro
|
||||
community.general.pacman:
|
||||
name: mhwd
|
||||
state: present
|
||||
become: true
|
||||
when:
|
||||
- ansible_facts['distribution'] is defined
|
||||
- ansible_facts['distribution'] in ['ManjaroLinux', 'Manjaro']
|
||||
register: mhwd_install
|
||||
|
||||
- name: Detect mhwd command
|
||||
ansible.builtin.stat:
|
||||
path: /usr/bin/mhwd
|
||||
register: mhwd_binary
|
||||
|
||||
- name: Install nonfree drivers via mhwd (Manjaro only)
|
||||
ansible.builtin.shell: mhwd -a pci nonfree 0300
|
||||
become: true
|
||||
when:
|
||||
- mhwd_binary.stat.exists
|
||||
|
||||
- name: Warn when mhwd is not available
|
||||
ansible.builtin.debug:
|
||||
msg: >
|
||||
Skipping proprietary GPU driver installation: `mhwd` not found.
|
||||
This role currently only supports Manjaro (mhwd); on other distros it does nothing.
|
||||
when:
|
||||
- not mhwd_binary.stat.exists
|
||||
|
||||
@@ -6,6 +6,8 @@
|
||||
- name: update pkgmgr
|
||||
shell: |
|
||||
source ~/.venvs/pkgmgr/bin/activate
|
||||
pkgmgr update pkgmgr
|
||||
pkgmgr update pkgmgr --clone-mode shallow
|
||||
register: pkgmgr_update
|
||||
changed_when: "'already up to date' not in (pkgmgr_update.stdout | lower)"
|
||||
|
||||
- include_tasks: utils/once/flag.yml
|
||||
|
||||
@@ -1,13 +1,10 @@
|
||||
- block:
|
||||
- include_tasks: 01_core.yml
|
||||
- set_fact:
|
||||
run_once_pkgmgr_install: true
|
||||
- include_tasks: 01_core.yml
|
||||
when: run_once_pkgmgr_install is not defined
|
||||
|
||||
- name: "update {{ package_name }}"
|
||||
- name: "Install '{{ package_name }}' via Kevin's Package Manager (pkgmgr)"
|
||||
ansible.builtin.shell: |
|
||||
source ~/.venvs/pkgmgr/bin/activate
|
||||
pkgmgr update {{ package_name }} --dependencies --clone-mode https
|
||||
pkgmgr update {{ package_name }} --dependencies --clone-mode shallow
|
||||
args:
|
||||
executable: /bin/bash
|
||||
notify: "{{ package_notify | default(omit,true) }}"
|
||||
|
||||
@@ -2,9 +2,15 @@
|
||||
include_role:
|
||||
name: '{{ item }}'
|
||||
loop:
|
||||
- dev-git
|
||||
- dev-make
|
||||
- dev-python-yaml
|
||||
- dev-git
|
||||
- dev-make
|
||||
- dev-nix
|
||||
|
||||
- name: Ensure OpenSSH client is installed
|
||||
community.general.pacman:
|
||||
name: openssh
|
||||
state: present
|
||||
become: true
|
||||
|
||||
- name: Ensure GitHub host key is in known_hosts
|
||||
known_hosts:
|
||||
@@ -21,12 +27,27 @@
|
||||
mode: '0755'
|
||||
become: true
|
||||
|
||||
- name: Clone Kevin's Package Manager repository
|
||||
- name: Check if pkgmgr git repo already exists
|
||||
stat:
|
||||
path: "{{ PKGMGR_INSTALL_PATH }}/.git"
|
||||
register: pkgmgr_git_repo
|
||||
become: true
|
||||
|
||||
- name: Remove legacy 'latest' tag from existing pkgmgr repo (if present)
|
||||
command: git tag -d latest
|
||||
args:
|
||||
chdir: "{{ PKGMGR_INSTALL_PATH }}"
|
||||
when: pkgmgr_git_repo.stat.exists
|
||||
ignore_errors: true
|
||||
become: true
|
||||
|
||||
- name: Clone Kevin's Package Manager repository (always latest HEAD)
|
||||
git:
|
||||
repo: "{{ PKGMGR_REPO_URL }}"
|
||||
dest: "{{ PKGMGR_INSTALL_PATH }}"
|
||||
version: "HEAD"
|
||||
force: yes
|
||||
repo: "{{ PKGMGR_REPO_URL }}"
|
||||
dest: "{{ PKGMGR_INSTALL_PATH }}"
|
||||
version: "HEAD"
|
||||
force: yes
|
||||
depth: 1
|
||||
become: true
|
||||
|
||||
- name: create config.yaml
|
||||
@@ -45,5 +66,7 @@
|
||||
become: true
|
||||
|
||||
- name: "Update all repositories with pkgmgr"
|
||||
command: "pkgmgr pull --all"
|
||||
when: MODE_UPDATE | bool
|
||||
command: "pkgmgr update --all --clone-mode shallow"
|
||||
when: MODE_UPDATE | bool
|
||||
|
||||
- include_tasks: utils/once/flag.yml
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
---
|
||||
- block:
|
||||
- include_tasks: 01_core.yml
|
||||
- include_tasks: utils/run_once.yml
|
||||
- include_tasks: 01_core.yml
|
||||
when: run_once_pkgmgr is not defined
|
||||
@@ -1,20 +1,10 @@
|
||||
- name: create docker network for Ollama, so that other applications can access it
|
||||
community.docker.docker_network:
|
||||
name: "{{ OLLAMA_NETWORK }}"
|
||||
state: present
|
||||
ipam_config:
|
||||
- subnet: "{{ networks.local[application_id].subnet }}"
|
||||
|
||||
- name: Include dependency 'sys-svc-docker'
|
||||
include_role:
|
||||
name: sys-svc-docker
|
||||
when: run_once_sys_svc_docker is not defined
|
||||
|
||||
- name: "include docker-compose role"
|
||||
include_role:
|
||||
name: docker-compose
|
||||
- name: "Setup docker network for {{ application_id }}"
|
||||
include_tasks: "{{ [playbook_dir, 'roles/docker-compose/tasks/utils/network.yml' ] | path_join }}"
|
||||
vars:
|
||||
docker_compose_flush_handlers: true
|
||||
docker_network_name: "{{ OLLAMA_NETWORK }}"
|
||||
docker_network_subnet: "{{ networks.local[application_id].subnet }}"
|
||||
docker_compose_flush_handlers: true
|
||||
|
||||
- name: Pre-pull Ollama models
|
||||
vars:
|
||||
@@ -35,4 +25,4 @@
|
||||
(pull_result.rc | default(0)) != 0 and
|
||||
('up to date' not in (pull_result.stdout | default('')))
|
||||
|
||||
- include_tasks: utils/run_once.yml
|
||||
- include_tasks: utils/once/flag.yml
|
||||
@@ -1,5 +1,2 @@
- block:
    - include_tasks: 01_core.yml
      vars:
        flush_handlers: true
- include_tasks: 01_core.yml
  when: run_once_svc_ai_ollama is not defined
@@ -1,9 +1,6 @@
# General
application_id: "svc-ai-ollama"

# Docker
docker_compose_flush_handlers: true

# Ollama
# https://ollama.com/
OLLAMA_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.ollama.version') }}"

@@ -1,11 +1,11 @@
- block:
    - name: Include dependencies
      include_role:
        name: '{{ item }}'
      loop:
        - sys-ctl-cln-bkps
        - sys-lock
    - include_tasks: utils/run_once.yml
    - name: Include dependencies
      include_role:
        name: '{{ item }}'
      loop:
        - sys-ctl-cln-bkps
        - sys-lock
    - include_tasks: utils/once/flag.yml
  when: run_once_svc_bkp_loc_2_usb is not defined

- name: Fail if any backup_to_usb variable is empty

@@ -7,7 +7,7 @@
        - sys-ctl-alm-compose
        - sys-lock
        - sys-timer-cln-bkps
    - include_tasks: utils/run_once.yml
    - include_tasks: utils/once/flag.yml
  when: run_once_svc_bkp_rmt_2_loc is not defined

- name: "Create Directory '{{ DOCKER_BACKUP_REMOTE_2_LOCAL_DIR }}'"

@@ -11,6 +11,6 @@ docker:
    mem_reservation: "2g"
    mem_limit: "4g"
    pids_limit: 1024
  network: "mariadb"
  network: "mariadb"
  volumes:
    data: "mariadb_data"
    data: "mariadb_data"
@@ -1,3 +1,4 @@
# See the README.md file for more information on why these encodings and collations are used
database_encoding: "utf8mb4"
database_collation: "utf8mb4_unicode_ci"
MARIADB_ENCODING: "utf8mb4"
MARIADB_COLLATION: "utf8mb4_unicode_ci"
database_init: false # When true, a database is initialized
@@ -1,48 +1,58 @@

- name: Create Docker network for MariaDB
  community.docker.docker_network:
    name: "{{ mariadb_network_name }}"
    state: present
    ipam_config:
      - subnet: "{{ mariadb_subnet }}"

- name: install MariaDB
  community.docker.docker_container:
    name: "{{ mariadb_name }}"
    image: "{{ mariadb_image }}:{{ mariadb_version }}"
    detach: yes
    env:
      MARIADB_ROOT_PASSWORD: "{{ mariadb_root_pwd }}"
      MARIADB_AUTO_UPGRADE: "1"
    networks:
      - name: "{{ mariadb_network_name }}"
    volumes:
      - "{{ mariadb_volume }}:/var/lib/mysql"
    published_ports:
      - "127.0.0.1:{{ mariadb_port }}:3306" # may be removed once all applications use sockets
    command: "--transaction-isolation=READ-COMMITTED --binlog-format=ROW" # for Nextcloud
    restart_policy: "{{ DOCKER_RESTART_POLICY }}"
    healthcheck:
      test: "/usr/bin/mariadb --user=root --password={{ mariadb_root_pwd }} --execute \"SHOW DATABASES;\""
      interval: 10s
      timeout: 5s
      retries: 18
  register: setup_mariadb_container_result
- name: "Setup docker network for {{ application_id }}"
  include_tasks: "{{ [ playbook_dir, 'roles/docker-compose/tasks/utils/network.yml' ] | path_join }}"
  vars:
    docker_network_name: "{{ MARIADB_NETWORK }}"
    docker_network_subnet: "{{ MARIADB_SUBNET }}"
    docker_compose_flush_handlers: true

- name: install python-mysqlclient
  community.general.pacman:
    name: python-mysqlclient
    state: present
  when: not IS_CONTAINER | bool

- name: "Wait until the MariaDB container with hostname '{{ mariadb_name }}' is healthy"
- name: Ensure PyMySQL is installed for Ansible interpreter
  ansible.builtin.pip:
    name: PyMySQL
  when: IS_CONTAINER | bool

- name: "Wait until the MariaDB container with hostname '{{ MARIADB_NAME }}' is healthy"
  community.docker.docker_container_info:
    name: "{{ mariadb_name }}"
    name: "{{ MARIADB_NAME }}"
  register: db_info
  until:
    - db_info.container is defined
    - db_info.container.State.Health.Status == "healthy"
    - db_info.container is defined
    - db_info.container.State.Health.Status == "healthy"
  retries: 30
  delay: 5
  when:
    - setup_mariadb_container_result is defined
    - setup_mariadb_container_result.changed

- name: "Wait until MariaDB accepts root credentials (inside container)"
  community.docker.docker_container_exec:
    container: "{{ MARIADB_NAME }}"
    command: >
      mariadb
      {{ '-h127.0.0.1 -P3306' if MARIADB_EXPOSE_LOCAL else '' }}
      -uroot -p'{{ MARIADB_ROOT_PWD }}'
      -e 'SELECT 1;'
  register: mariadb_cli
  changed_when: false
  retries: 30
  delay: 5
  until: mariadb_cli.rc == 0

- name: "Wait until MariaDB accepts root credentials (via mysql_db)"
  community.mysql.mysql_db:
    name: "{{ MARIADB_HEALTHCHECK_DB }}"
    state: present
    login_user: root
    login_password: "{{ MARIADB_ROOT_PWD }}"
    login_host: "{{ MARIADB_HOST }}"
    login_port: "{{ MARIADB_PORT }}"
    config_file: ""
  register: mariadb_ready
  retries: 30
  delay: 5
  until: mariadb_ready is succeeded
  changed_when: false

- include_tasks: utils/once/flag.yml

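Both readiness tasks above rely on Ansible's task-level retry loop rather than a fixed pause. As a reminder of the mechanics, a minimal generic sketch (the probe command is a placeholder, not taken from the repository):

- name: Wait until a probe command succeeds
  ansible.builtin.command: /usr/bin/true   # placeholder probe
  register: probe
  changed_when: false
  retries: 30
  delay: 5
  until: probe.rc == 0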
@@ -3,28 +3,22 @@
    name: "{{ database_name }}"
    state: present
    login_user: root
    login_password: "{{ mariadb_root_pwd }}"
    login_host: 127.0.0.1
    login_port: "{{ database_port }}"
    encoding: "{{ database_encoding }}"
    collation: "{{ database_collation }}"
    login_password: "{{ MARIADB_ROOT_PWD }}"
    login_host: "{{ MARIADB_HOST }}"
    login_port: "{{ MARIADB_PORT }}"
    encoding: "{{ MARIADB_ENCODING }}"
    collation: "{{ MARIADB_COLLATION }}"
    config_file: ""

- name: "Create database user: {{ database_username }}"
  community.mysql.mysql_user:
    name: "{{ database_username }}"
    password: "{{ database_password }}"
    host: "%"
    priv: '{{ database_name }}.*:ALL'
    state: present
    login_user: root
    login_password: "{{ mariadb_root_pwd }}"
    login_host: 127.0.0.1
    login_port: "{{ database_port }}"

# Deactivated due to https://chatgpt.com/share/683ba14b-0e74-800f-9ad1-a8979bc77093
# @todo Remove if this works fine in the future.
#- name: Grant database privileges
#  ansible.builtin.shell:
#    cmd: "docker exec {{ mariadb_name }} mariadb -u root -p{{ mariadb_root_pwd }} -e \"GRANT ALL PRIVILEGES ON `{{ database_name }}`.* TO '{{ database_username }}'@'%';\""
#  args:
#    executable: /bin/bash
    name: "{{ database_username }}"
    password: "{{ database_password }}"
    host: "%"
    priv: '`{{ database_name }}`.*:ALL'
    state: present
    login_user: root
    login_password: "{{ MARIADB_ROOT_PWD }}"
    login_host: "{{ MARIADB_HOST }}"
    login_port: "{{ MARIADB_PORT }}"
    config_file: ""

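For reference, the priv notation used above expresses the same grant that the now-deactivated shell task issued directly. A condensed sketch with illustrative values (exampledb, exampleuser and examplepassword are placeholders):

- name: Grant all privileges on one database to one user (illustrative values)
  community.mysql.mysql_user:
    name: exampleuser
    password: examplepassword
    host: "%"
    priv: '`exampledb`.*:ALL'   # equivalent to: GRANT ALL PRIVILEGES ON `exampledb`.* TO 'exampleuser'@'%';
    state: present
    login_user: root
    login_password: "{{ MARIADB_ROOT_PWD }}"
    login_host: "{{ MARIADB_HOST }}"
    login_port: "{{ MARIADB_PORT }}"
    config_file: ""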
@@ -1,8 +1,10 @@
- block:
    - include_tasks: 01_core.yml
    - include_tasks: utils/run_once.yml
  when: run_once_svc_db_mariadb is not defined

- name: "Initialize database for '{{ database_name }}'"
  include_tasks: 02_init.yml
  when: "{{ mariadb_init }}"
- block:
    - include_tasks: 01_core.yml
  when: run_once_svc_db_mariadb is not defined

- name: "Initialize database for '{{ database_name }}'"
  include_tasks: 02_init.yml
  when: database_init | bool
  vars:
    application_id: svc-db-mariadb
roles/svc-db-mariadb/templates/Dockerfile.j2 (new file, 1 line)
@@ -0,0 +1 @@
FROM {{ MARIADB_IMAGE }}:{{ MARIADB_VERSION }}
roles/svc-db-mariadb/templates/docker-compose.yml.j2 (new file, 34 lines)
@@ -0,0 +1,34 @@
{% include 'roles/docker-compose/templates/base.yml.j2' %}

  mariadb:
    container_name: "{{ MARIADB_NAME }}"
    image: "{{ MARIADB_CUSTOM_IMAGE }}"
{{ lookup('template', 'roles/docker-container/templates/build.yml.j2') | indent(4) }}
    command:
      - "--transaction-isolation=READ-COMMITTED"
      - "--binlog-format=ROW"

{% include 'roles/docker-container/templates/base.yml.j2' %}
{% if MARIADB_EXPOSE_LOCAL %}
    ports:
      - "127.0.0.1:{{ MARIADB_PORT }}:3306"
{% endif %}
    volumes:
      - "data:/var/lib/mysql"
{% include 'roles/docker-container/templates/networks.yml.j2' %}
    healthcheck:
      test:
        - "CMD-SHELL"
        - >
          mariadb
          {% if MARIADB_EXPOSE_LOCAL %}-h127.0.0.1 -P3306{% endif %}
          -u root -p{{ MARIADB_ROOT_PWD }} -e 'SHOW DATABASES;'
      interval: 10s
      timeout: 5s
      retries: 18

{% include 'roles/docker-compose/templates/volumes.yml.j2' %}
  data:
    name: "{{ MARIADB_VOLUME }}"

{% include 'roles/docker-compose/templates/networks.yml.j2' %}
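Assuming MARIADB_EXPOSE_LOCAL is enabled and that the included base template opens the services map, the template above would render roughly like this (all concrete values are placeholders; the included partials are omitted):

services:
  mariadb:
    container_name: "mariadb"
    image: "example-registry/mariadb-custom:latest"   # placeholder for MARIADB_CUSTOM_IMAGE
    command:
      - "--transaction-isolation=READ-COMMITTED"
      - "--binlog-format=ROW"
    ports:
      - "127.0.0.1:3306:3306"
    volumes:
      - "data:/var/lib/mysql"
    healthcheck:
      test:
        - "CMD-SHELL"
        - >
          mariadb -h127.0.0.1 -P3306 -u root -pexamplepassword -e 'SHOW DATABASES;'
      interval: 10s
      timeout: 5s
      retries: 18

volumes:
  data:
    name: "mariadb_data"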
roles/svc-db-mariadb/templates/env.j2 (new file, 2 lines)
@@ -0,0 +1,2 @@
MARIADB_ROOT_PASSWORD: "{{ MARIADB_ROOT_PWD }}"
MARIADB_AUTO_UPGRADE: "1"