mirror of https://github.com/kevinveenbirkenbach/computer-playbook.git
synced 2025-09-08 11:17:17 +02:00
Compare commits
428 Commits
dfd7be9d72...master
Commits in this range (428 total): 445c94788e … 6272303b55
@@ -5,7 +5,7 @@ Thank you for your interest in contributing to Infinito.Nexus! We welcome contri
## How to Contribute

There are several ways you can help:
- **Reporting Issues:** Found a bug or have a feature request? Please open an issue on our [GitHub Issues page](https://github.com/kevinveenbirkenbach/infinito-nexus/issues) with a clear description and steps to reproduce the problem.
- **Reporting Issues:** Found a bug or have a feature request? Please open an issue on our [GitHub Issues page](https://s.infinito.nexus/issues) with a clear description and steps to reproduce the problem.
- **Code Contributions:** If you'd like to contribute code, fork the repository, create a new branch for your feature or bug fix, and submit a pull request. Ensure your code adheres to our coding style and includes tests where applicable.
- **Documentation:** Improving the documentation is a great way to contribute. Whether it's clarifying an existing section or adding new guides, your contributions help others understand and use Infinito.Nexus effectively.
- **Financial Contributions:** If you appreciate Infinito.Nexus and want to support its ongoing development, consider making a financial contribution. For more details, please see our [donate options](12_DONATE.md).
@@ -1,9 +1,9 @@
# License Agreement

## Infinito.Nexus NonCommercial License (CNCL)
## Infinito.Nexus NonCommercial License

### Definitions
- **"Software":** Refers to *"[Infinito.Nexus - Cyber Master Infrastructure Solution](https://infinito.nexus/)"* and its associated source code.
- **"Software":** Refers to *"[Infinito.Nexus](https://infinito.nexus/)"* and its associated source code.
- **"Commercial Use":** Any use of the Software intended for direct or indirect financial gain, including but not limited to sales, rentals, or provision of services.

### Provisions
4 Makefile
@@ -21,6 +21,10 @@ EXTRA_USERS := $(shell \

.PHONY: build install test

clean-keep-logs:
    @echo "🧹 Cleaning ignored files but keeping logs/…"
    git clean -fdX -- ':!logs' ':!logs/**'

clean:
    @echo "Removing ignored git files"
    git clean -fdX
@@ -1,9 +1,7 @@
# IT-Infrastructure Automation Framework 🚀
# Infinito.Nexus 🚀

**🔐 One login. ♾️ Infinite application**

*Automate the Provisioning of All Your Servers and Workstations with a Single Open‑Source Script!*

---

@@ -15,7 +13,7 @@
|---|---|
| 🌐 Try It Live | [](https://infinito.nexus) |
| 🔧 Request Your Setup | [](https://cybermaster.space) |
| 📖 About This Project | [](https://github.com/sponsors/kevinveenbirkenbach) [](https://github.com/kevinveenbirkenbach/infinito-nexus/actions/workflows/test-cli.yml?query=branch%3Amaster) [](https://github.com/kevinveenbirkenbach/infinito-nexus) |
| 📖 About This Project | [](https://github.com/sponsors/kevinveenbirkenbach) [](https://github.com/kevinveenbirkenbach/infinito-nexus/actions/workflows/test-cli.yml) [](https://s.infinito.nexus/code) |
| ☕️ Support Us | [](https://www.patreon.com/c/kevinveenbirkenbach) [](https://buymeacoffee.com/kevinveenbirkenbach) [](https://s.veen.world/paypaldonate) [](https://github.com/sponsors/kevinveenbirkenbach) |

---

@@ -93,4 +91,4 @@ Infinito.Nexus is distributed under the **Infinito.Nexus NonCommercial License**

## Professional Setup & Support 💼

For expert installation and configuration visit [cybermaster.space](https://cybermaster.space/) or write to us at **[contact@infinito.nexus](mailto:contact@infinito.nexus)**.
For expert installation and configuration visit [cybermaster.space](https://cybermaster.space/) or write to us at **[contact@cymais.cloud](mailto:contact@cymais.cloud)**.
5 TODO.md (new file)
@@ -0,0 +1,5 @@
# Todos
- Implement multi language
- Implement rbac administration interface
- Implement ``MASK_CREDENTIALS_IN_LOGS`` for all sensible tasks
- [Enable IP6 for docker](https://chatgpt.com/share/68a0acb8-db20-800f-9d2c-b34e38b5cdee).
4 Todo.md (deleted)
@@ -1,4 +0,0 @@
# Todos
- Implement multi language
- Implement rbac administration interface
- Implement [cloudflare dev cache via API](https://chatgpt.com/share/689385e2-7744-800f-aa93-a6e811a245df)
33 ansible.cfg
@@ -1,4 +1,33 @@
[defaults]
lookup_plugins = ./lookup_plugins
# --- Performance & Behavior ---
forks = 25
strategy = linear
gathering = smart
timeout = 120
retry_files_enabled = False
host_key_checking = True
deprecation_warnings = True
interpreter_python = auto_silent

# --- Output & Profiling ---
stdout_callback = yaml
callbacks_enabled = profile_tasks,timer

# --- Plugin paths ---
filter_plugins = ./filter_plugins
module_utils = ./module_utils
lookup_plugins = ./lookup_plugins
module_utils = ./module_utils

[ssh_connection]
# Multiplexing: safer socket path in HOME instead of /tmp
ssh_args = -o ControlMaster=auto -o ControlPersist=20s -o ControlPath=~/.ssh/ansible-%h-%p-%r \
    -o ServerAliveInterval=15 -o ServerAliveCountMax=3 -o StrictHostKeyChecking=accept-new \
    -o PreferredAuthentications=publickey,password,keyboard-interactive

# Pipelining boosts speed; works fine if sudoers does not enforce "requiretty"
pipelining = True
scp_if_ssh = smart

[persistent_connection]
connect_timeout = 30
command_timeout = 60
@@ -189,7 +189,7 @@ def parse_args():

def main():
    args = parse_args()
    primary_domain = '{{ primary_domain }}'
    primary_domain = '{{ SYSTEM_EMAIL.DOMAIN }}'
    become_pwd = '{{ lookup("password", "/dev/null length=42 chars=ascii_letters,digits") }}'

    try:
@@ -72,7 +72,7 @@ def build_single_graph(
    node = {'id': role}
    node.update(meta['galaxy_info'])
    node['doc_url'] = f"https://docs.infinito.nexus/roles/{role}/README.html"
    node['source_url'] = f"https://github.com/kevinveenbirkenbach/infinito-nexus/tree/master/roles/{role}"
    node['source_url'] = f"https://s.infinito.nexus/code/tree/master/roles/{role}"
    nodes[role] = node

    if max_depth > 0 and depth >= max_depth:
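As a quick illustration of what this hunk changes, here is a sketch of the node dict after the source_url switch, using a hypothetical role name; the galaxy_info fields merged via node.update(...) are omitted.

# Illustrative only; "web-app-nextcloud" is a made-up role name.
role = "web-app-nextcloud"
node = {
    "id": role,
    "doc_url": f"https://docs.infinito.nexus/roles/{role}/README.html",
    "source_url": f"https://s.infinito.nexus/code/tree/master/roles/{role}",
}
print(node["source_url"])  # https://s.infinito.nexus/code/tree/master/roles/web-app-nextcloud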
@@ -102,8 +102,10 @@ def find_cycle(roles):
def topological_sort(graph, in_degree, roles=None):
"""
Perform topological sort on the dependency graph.
If `roles` is provided, on error it will include detailed debug info.
If a cycle is detected, raise an Exception with detailed debug info.
"""
from collections import deque

queue = deque([r for r, d in in_degree.items() if d == 0])
sorted_roles = []
local_in = dict(in_degree)
@@ -117,28 +119,26 @@ def topological_sort(graph, in_degree, roles=None):
queue.append(nbr)

if len(sorted_roles) != len(in_degree):
# Something went wrong: likely a cycle
cycle = find_cycle(roles or {})
if roles is not None:
if cycle:
header = f"Circular dependency detected: {' -> '.join(cycle)}"
else:
header = "Circular dependency detected among the roles!"
unsorted = [r for r in in_degree if r not in sorted_roles]

unsorted = [r for r in in_degree if r not in sorted_roles]
detail_lines = ["Unsorted roles and their dependencies:"]
header = "❌ Dependency resolution failed"
if cycle:
reason = f"Circular dependency detected: {' -> '.join(cycle)}"
else:
reason = "Unresolved dependencies among roles (possible cycle or missing role)."

details = []
if unsorted:
details.append("Unsorted roles and their declared run_after dependencies:")
for r in unsorted:
deps = roles.get(r, {}).get('run_after', [])
detail_lines.append(f" - {r} depends on {deps!r}")
details.append(f" - {r} depends on {deps!r}")

detail_lines.append("Full dependency graph:")
detail_lines.append(f" {dict(graph)!r}")
graph_repr = f"Full dependency graph: {dict(graph)!r}"

raise Exception("\n".join([header] + detail_lines))
else:
if cycle:
raise Exception(f"Circular dependency detected: {' -> '.join(cycle)}")
else:
raise Exception("Circular dependency detected among the roles!")
raise Exception("\n".join([header, reason] + details + [graph_repr]))

return sorted_roles
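For readers following the refactor above, the following is a minimal, self-contained sketch of the same Kahn-style topological sort on a toy graph. The loop body is reconstructed for illustration (only its tail appears in the hunk) and the role names are invented.

from collections import deque

# Toy data; in the real script graph and in_degree are built from role metadata.
graph = {"common": ["web", "db"], "db": ["web"], "web": []}
in_degree = {"common": 0, "db": 1, "web": 2}

queue = deque([r for r, d in in_degree.items() if d == 0])
local_in = dict(in_degree)
sorted_roles = []
while queue:
    role = queue.popleft()
    sorted_roles.append(role)
    for nbr in graph.get(role, []):
        local_in[nbr] -= 1
        if local_in[nbr] == 0:
            queue.append(nbr)

print(sorted_roles)  # ['common', 'db', 'web']; a shorter list would indicate a cycle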
@@ -5,10 +5,10 @@ import json
|
||||
from typing import Dict, Any
|
||||
|
||||
from cli.build.graph import build_mappings, output_graph
|
||||
from module_utils.role_dependency_resolver import RoleDependencyResolver
|
||||
|
||||
|
||||
def find_roles(roles_dir: str):
|
||||
"""Yield (role_name, role_path) for every subfolder in roles_dir."""
|
||||
for entry in os.listdir(roles_dir):
|
||||
path = os.path.join(roles_dir, entry)
|
||||
if os.path.isdir(path):
|
||||
@@ -16,46 +16,31 @@ def find_roles(roles_dir: str):
|
||||
|
||||
|
||||
def main():
|
||||
# default roles dir is ../../roles relative to this script
|
||||
script_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
default_roles_dir = os.path.abspath(os.path.join(script_dir, '..', '..', 'roles'))
|
||||
default_roles_dir = os.path.abspath(os.path.join(script_dir, "..", "..", "roles"))
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Generate all graphs for each role and write meta/tree.json"
|
||||
)
|
||||
parser.add_argument(
|
||||
'-d', '--role_dir',
|
||||
default=default_roles_dir,
|
||||
help=f"Path to roles directory (default: {default_roles_dir})"
|
||||
)
|
||||
parser.add_argument(
|
||||
'-D', '--depth',
|
||||
type=int,
|
||||
default=0,
|
||||
help="Max recursion depth (>0) or <=0 to stop on cycle"
|
||||
)
|
||||
parser.add_argument(
|
||||
'-o', '--output',
|
||||
choices=['yaml', 'json', 'console'],
|
||||
default='json',
|
||||
help="Output format"
|
||||
)
|
||||
parser.add_argument(
|
||||
'-p', '--preview',
|
||||
action='store_true',
|
||||
help="Preview graphs to console instead of writing files"
|
||||
)
|
||||
parser.add_argument(
|
||||
'-s', '--shadow-folder',
|
||||
type=str,
|
||||
default=None,
|
||||
help="If set, writes tree.json to this shadow folder instead of the role's actual meta/ folder"
|
||||
)
|
||||
parser.add_argument(
|
||||
'-v', '--verbose',
|
||||
action='store_true',
|
||||
help="Enable verbose logging"
|
||||
)
|
||||
parser.add_argument("-d", "--role_dir", default=default_roles_dir,
|
||||
help=f"Path to roles directory (default: {default_roles_dir})")
|
||||
parser.add_argument("-D", "--depth", type=int, default=0,
|
||||
help="Max recursion depth (>0) or <=0 to stop on cycle")
|
||||
parser.add_argument("-o", "--output", choices=["yaml", "json", "console"],
|
||||
default="json", help="Output format")
|
||||
parser.add_argument("-p", "--preview", action="store_true",
|
||||
help="Preview graphs to console instead of writing files")
|
||||
parser.add_argument("-s", "--shadow-folder", type=str, default=None,
|
||||
help="If set, writes tree.json to this shadow folder instead of the role's actual meta/ folder")
|
||||
parser.add_argument("-v", "--verbose", action="store_true", help="Enable verbose logging")
|
||||
|
||||
# Toggles
|
||||
parser.add_argument("--no-include-role", action="store_true", help="Do not scan include_role")
|
||||
parser.add_argument("--no-import-role", action="store_true", help="Do not scan import_role")
|
||||
parser.add_argument("--no-dependencies", action="store_true", help="Do not read meta/main.yml dependencies")
|
||||
parser.add_argument("--no-run-after", action="store_true",
|
||||
help="Do not read galaxy_info.run_after from meta/main.yml")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.verbose:
|
||||
@@ -65,6 +50,8 @@ def main():
|
||||
print(f"Preview mode: {args.preview}")
|
||||
print(f"Shadow folder: {args.shadow_folder}")
|
||||
|
||||
resolver = RoleDependencyResolver(args.role_dir)
|
||||
|
||||
for role_name, role_path in find_roles(args.role_dir):
|
||||
if args.verbose:
|
||||
print(f"Processing role: {role_name}")
|
||||
@@ -75,24 +62,43 @@ def main():
|
||||
max_depth=args.depth
|
||||
)
|
||||
|
||||
# Direct deps (depth=1) – getrennt erfasst für buckets
|
||||
inc_roles, imp_roles = resolver._scan_tasks(role_path)
|
||||
meta_deps = resolver._extract_meta_dependencies(role_path)
|
||||
run_after = set()
|
||||
if not args.no_run_after:
|
||||
run_after = resolver._extract_meta_run_after(role_path)
|
||||
|
||||
if any([not args.no_include_role and inc_roles,
|
||||
not args.no_import_role and imp_roles,
|
||||
not args.no_dependencies and meta_deps,
|
||||
not args.no_run_after and run_after]):
|
||||
deps_root = graphs.setdefault("dependencies", {})
|
||||
if not args.no_include_role and inc_roles:
|
||||
deps_root["include_role"] = sorted(inc_roles)
|
||||
if not args.no_import_role and imp_roles:
|
||||
deps_root["import_role"] = sorted(imp_roles)
|
||||
if not args.no_dependencies and meta_deps:
|
||||
deps_root["dependencies"] = sorted(meta_deps)
|
||||
if not args.no_run_after and run_after:
|
||||
deps_root["run_after"] = sorted(run_after)
|
||||
graphs["dependencies"] = deps_root
|
||||
|
||||
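For orientation, a hedged sketch of the "dependencies" bucket this code writes into meta/tree.json; the role names below are placeholders, not taken from a real tree.json, and each key only appears when it is non-empty and not disabled via the corresponding --no-* flag.

graphs = {
    "dependencies": {
        "include_role": ["sys-front-inj-logout"],
        "import_role": ["docker-core"],
        "dependencies": ["web-app-akaunting"],
        "run_after": ["sys-ctl-hlth-webserver"],
    }
}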
if args.preview:
|
||||
for key, data in graphs.items():
|
||||
if args.verbose:
|
||||
print(f"Previewing graph '{key}' for role '{role_name}'")
|
||||
output_graph(data, 'console', role_name, key)
|
||||
output_graph(data, "console", role_name, key)
|
||||
else:
|
||||
# Decide on output folder
|
||||
if args.shadow_folder:
|
||||
tree_file = os.path.join(
|
||||
args.shadow_folder, role_name, 'meta', 'tree.json'
|
||||
)
|
||||
tree_file = os.path.join(args.shadow_folder, role_name, "meta", "tree.json")
|
||||
else:
|
||||
tree_file = os.path.join(role_path, 'meta', 'tree.json')
|
||||
tree_file = os.path.join(role_path, "meta", "tree.json")
|
||||
os.makedirs(os.path.dirname(tree_file), exist_ok=True)
|
||||
with open(tree_file, 'w') as f:
|
||||
with open(tree_file, "w", encoding="utf-8") as f:
|
||||
json.dump(graphs, f, indent=2)
|
||||
print(f"Wrote {tree_file}")
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
@@ -1,14 +1,29 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Selectively add & vault NEW credentials in your inventory, preserving comments
|
||||
and formatting. Existing values are left untouched unless --force is used.
|
||||
|
||||
Usage example:
|
||||
infinito create credentials \
|
||||
--role-path roles/web-app-akaunting \
|
||||
--inventory-file host_vars/echoserver.yml \
|
||||
--vault-password-file .pass/echoserver.txt \
|
||||
--set credentials.database_password=mysecret
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import subprocess
|
||||
import sys
|
||||
from pathlib import Path
|
||||
import yaml
|
||||
from typing import Dict, Any
|
||||
from module_utils.manager.inventory import InventoryManager
|
||||
from module_utils.handler.vault import VaultHandler, VaultScalar
|
||||
from module_utils.handler.yaml import YamlHandler
|
||||
from yaml.dumper import SafeDumper
|
||||
from typing import Dict, Any, Union
|
||||
|
||||
from ruamel.yaml import YAML
|
||||
from ruamel.yaml.comments import CommentedMap
|
||||
|
||||
from module_utils.manager.inventory import InventoryManager
|
||||
from module_utils.handler.vault import VaultHandler # uses your existing handler
|
||||
|
||||
|
||||
# ---------- helpers ----------
|
||||
|
||||
def ask_for_confirmation(key: str) -> bool:
|
||||
"""Prompt the user for confirmation to overwrite an existing value."""
|
||||
@@ -18,35 +33,117 @@ def ask_for_confirmation(key: str) -> bool:
|
||||
return confirmation == 'y'
|
||||
|
||||
|
||||
def main():
|
||||
def ensure_map(node: CommentedMap, key: str) -> CommentedMap:
|
||||
"""
|
||||
Ensure node[key] exists and is a mapping (CommentedMap) for round-trip safety.
|
||||
"""
|
||||
if key not in node or not isinstance(node.get(key), CommentedMap):
|
||||
node[key] = CommentedMap()
|
||||
return node[key]
|
||||
|
||||
|
||||
def _is_ruamel_vault(val: Any) -> bool:
|
||||
"""Detect if a ruamel scalar already carries the !vault tag."""
|
||||
try:
|
||||
return getattr(val, 'tag', None) == '!vault'
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
|
||||
def _is_vault_encrypted(val: Any) -> bool:
|
||||
"""
|
||||
Detect if value is already a vault string or a ruamel !vault scalar.
|
||||
Accept both '$ANSIBLE_VAULT' and '!vault' markers.
|
||||
"""
|
||||
if _is_ruamel_vault(val):
|
||||
return True
|
||||
if isinstance(val, str) and ("$ANSIBLE_VAULT" in val or "!vault" in val):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _vault_body(text: str) -> str:
|
||||
"""
|
||||
Return only the vault body starting from the first line that contains
|
||||
'$ANSIBLE_VAULT'. If not found, return the original text.
|
||||
Also strips any leading '!vault |' header if present.
|
||||
"""
|
||||
lines = text.splitlines()
|
||||
for i, ln in enumerate(lines):
|
||||
if "$ANSIBLE_VAULT" in ln:
|
||||
return "\n".join(lines[i:])
|
||||
return text
|
||||
|
||||
|
||||
def _make_vault_scalar_from_text(text: str) -> Any:
|
||||
"""
|
||||
Build a ruamel object representing a literal block scalar tagged with !vault
|
||||
by parsing a tiny YAML snippet. This avoids depending on yaml_set_tag().
|
||||
"""
|
||||
body = _vault_body(text)
|
||||
indented = " " + body.replace("\n", "\n ") # proper block scalar indentation
|
||||
snippet = f"v: !vault |\n{indented}\n"
|
||||
y = YAML(typ="rt")
|
||||
return y.load(snippet)["v"]
|
||||
|
||||
|
||||
def to_vault_block(vault_handler: VaultHandler, value: Union[str, Any], label: str) -> Any:
|
||||
"""
|
||||
Return a ruamel scalar tagged as !vault. If the input value is already
|
||||
vault-encrypted (string contains $ANSIBLE_VAULT or is a !vault scalar), reuse/wrap.
|
||||
Otherwise, encrypt plaintext via ansible-vault.
|
||||
"""
|
||||
# Already a ruamel !vault scalar → reuse
|
||||
if _is_ruamel_vault(value):
|
||||
return value
|
||||
|
||||
# Already an encrypted string (may include '!vault |' or just the header)
|
||||
if isinstance(value, str) and ("$ANSIBLE_VAULT" in value or "!vault" in value):
|
||||
return _make_vault_scalar_from_text(value)
|
||||
|
||||
# Plaintext → encrypt now
|
||||
snippet = vault_handler.encrypt_string(str(value), label)
|
||||
return _make_vault_scalar_from_text(snippet)
|
||||
|
||||
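A rough usage sketch for the vault helpers above; the ciphertext is a shortened placeholder rather than real ansible-vault output, and the rendering described in the comments is standard ruamel round-trip handling of a tagged literal scalar.

# Minimal sketch, assuming the helpers defined above are in scope.
encrypted = (
    "$ANSIBLE_VAULT;1.1;AES256\n"
    "3632386437626266316333363733623133386435623434...\n"  # placeholder, not real output
)
scalar = _make_vault_scalar_from_text(encrypted)
# 'scalar' is a ruamel scalar tagged !vault; assigned into a CommentedMap and dumped
# with YAML(typ="rt"), it renders as a literal block such as:
#   database_password: !vault |
#       $ANSIBLE_VAULT;1.1;AES256
#       3632386437626266316333363733623133386435623434...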
|
||||
def parse_overrides(pairs: list[str]) -> Dict[str, str]:
|
||||
"""
|
||||
Parse --set key=value pairs into a dict.
|
||||
Supports both 'credentials.key=val' and 'key=val' (short) forms.
|
||||
"""
|
||||
out: Dict[str, str] = {}
|
||||
for pair in pairs:
|
||||
k, v = pair.split("=", 1)
|
||||
out[k.strip()] = v.strip()
|
||||
return out
|
||||
|
||||
|
||||
# ---------- main ----------
|
||||
|
||||
def main() -> int:
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Selectively vault credentials + become-password in your inventory."
|
||||
description="Selectively add & vault NEW credentials in your inventory, preserving comments/formatting."
|
||||
)
|
||||
parser.add_argument("--role-path", required=True, help="Path to your role")
|
||||
parser.add_argument("--inventory-file", required=True, help="Host vars file to update")
|
||||
parser.add_argument("--vault-password-file", required=True, help="Vault password file")
|
||||
parser.add_argument(
|
||||
"--role-path", required=True, help="Path to your role"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--inventory-file", required=True, help="Host vars file to update"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--vault-password-file", required=True, help="Vault password file"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--set", nargs="*", default=[], help="Override values key.subkey=VALUE"
|
||||
"--set", nargs="*", default=[],
|
||||
help="Override values key[.subkey]=VALUE (applied to NEW keys; with --force also to existing)"
|
||||
)
|
||||
parser.add_argument(
|
||||
"-f", "--force", action="store_true",
|
||||
help="Force overwrite without confirmation"
|
||||
help="Allow overrides to replace existing values (will ask per key unless combined with --yes)"
|
||||
)
|
||||
parser.add_argument(
|
||||
"-y", "--yes", action="store_true",
|
||||
help="Non-interactive: assume 'yes' for all overwrite confirmations when --force is used"
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
# Parse overrides
|
||||
overrides = {
|
||||
k.strip(): v.strip()
|
||||
for pair in args.set for k, v in [pair.split("=", 1)]
|
||||
}
|
||||
overrides = parse_overrides(args.set)
|
||||
|
||||
# Initialize inventory manager
|
||||
# Initialize inventory manager (provides schema + app_id + vault)
|
||||
manager = InventoryManager(
|
||||
role_path=Path(args.role_path),
|
||||
inventory_path=Path(args.inventory_file),
|
||||
@@ -54,62 +151,90 @@ def main():
|
||||
overrides=overrides
|
||||
)
|
||||
|
||||
# Load existing credentials to preserve
|
||||
existing_apps = manager.inventory.get("applications", {})
|
||||
existing_creds = {}
|
||||
if manager.app_id in existing_apps:
|
||||
existing_creds = existing_apps[manager.app_id].get("credentials", {}).copy()
|
||||
# 1) Load existing inventory with ruamel (round-trip)
|
||||
yaml_rt = YAML(typ="rt")
|
||||
yaml_rt.preserve_quotes = True
|
||||
|
||||
# Apply schema (may generate defaults)
|
||||
updated_inventory = manager.apply_schema()
|
||||
with open(args.inventory_file, "r", encoding="utf-8") as f:
|
||||
data = yaml_rt.load(f) # CommentedMap or None
|
||||
if data is None:
|
||||
data = CommentedMap()
|
||||
|
||||
# Restore existing database_password if present
|
||||
apps = updated_inventory.setdefault("applications", {})
|
||||
app_block = apps.setdefault(manager.app_id, {})
|
||||
creds = app_block.setdefault("credentials", {})
|
||||
if "database_password" in existing_creds:
|
||||
creds["database_password"] = existing_creds["database_password"]
|
||||
# 2) Get schema-applied structure (defaults etc.) for *non-destructive* merge
|
||||
schema_inventory: Dict[str, Any] = manager.apply_schema()
|
||||
|
||||
# Store original plaintext values
|
||||
original_plain = {key: str(val) for key, val in creds.items()}
|
||||
# 3) Ensure structural path exists
|
||||
apps = ensure_map(data, "applications")
|
||||
app_block = ensure_map(apps, manager.app_id)
|
||||
creds = ensure_map(app_block, "credentials")
|
||||
|
||||
for key, raw_val in list(creds.items()):
|
||||
# Skip if already vaulted
|
||||
if isinstance(raw_val, VaultScalar) or str(raw_val).lstrip().startswith("$ANSIBLE_VAULT"):
|
||||
# 4) Determine defaults we could add
|
||||
schema_apps = schema_inventory.get("applications", {})
|
||||
schema_app_block = schema_apps.get(manager.app_id, {})
|
||||
schema_creds = schema_app_block.get("credentials", {}) if isinstance(schema_app_block, dict) else {}
|
||||
|
||||
# 5) Add ONLY missing credential keys
|
||||
newly_added_keys = set()
|
||||
for key, default_val in schema_creds.items():
|
||||
if key in creds:
|
||||
# existing → do not touch (preserve plaintext/vault/formatting/comments)
|
||||
continue
|
||||
|
||||
# Determine plaintext
|
||||
plain = original_plain.get(key, "")
|
||||
if key in overrides and (args.force or ask_for_confirmation(key)):
|
||||
plain = overrides[key]
|
||||
# Value to use for the new key
|
||||
# Priority: --set exact key → default from schema → empty string
|
||||
ov = overrides.get(f"credentials.{key}", None)
|
||||
if ov is None:
|
||||
ov = overrides.get(key, None)
|
||||
|
||||
# Encrypt the plaintext
|
||||
encrypted = manager.vault_handler.encrypt_string(plain, key)
|
||||
lines = encrypted.splitlines()
|
||||
indent = len(lines[1]) - len(lines[1].lstrip())
|
||||
body = "\n".join(line[indent:] for line in lines[1:])
|
||||
creds[key] = VaultScalar(body)
|
||||
|
||||
# Vault top-level become password if present
|
||||
if "ansible_become_password" in updated_inventory:
|
||||
val = str(updated_inventory["ansible_become_password"])
|
||||
if val.lstrip().startswith("$ANSIBLE_VAULT"):
|
||||
updated_inventory["ansible_become_password"] = VaultScalar(val)
|
||||
if ov is not None:
|
||||
value_for_new_key: Union[str, Any] = ov
|
||||
else:
|
||||
snippet = manager.vault_handler.encrypt_string(
|
||||
val, "ansible_become_password"
|
||||
if _is_vault_encrypted(default_val):
|
||||
# Schema already provides a vault value → take it as-is
|
||||
creds[key] = to_vault_block(manager.vault_handler, default_val, key)
|
||||
newly_added_keys.add(key)
|
||||
continue
|
||||
value_for_new_key = "" if default_val is None else str(default_val)
|
||||
|
||||
# Insert as !vault literal (encrypt if needed)
|
||||
creds[key] = to_vault_block(manager.vault_handler, value_for_new_key, key)
|
||||
newly_added_keys.add(key)
|
||||
|
||||
# 6) ansible_become_password: only add if missing;
|
||||
# never rewrite an existing one unless --force (+ confirm/--yes) and override provided.
|
||||
if "ansible_become_password" not in data:
|
||||
val = overrides.get("ansible_become_password", None)
|
||||
if val is not None:
|
||||
data["ansible_become_password"] = to_vault_block(
|
||||
manager.vault_handler, val, "ansible_become_password"
|
||||
)
|
||||
lines = snippet.splitlines()
|
||||
indent = len(lines[1]) - len(lines[1].lstrip())
|
||||
body = "\n".join(line[indent:] for line in lines[1:])
|
||||
updated_inventory["ansible_become_password"] = VaultScalar(body)
|
||||
else:
|
||||
if args.force and "ansible_become_password" in overrides:
|
||||
do_overwrite = args.yes or ask_for_confirmation("ansible_become_password")
|
||||
if do_overwrite:
|
||||
data["ansible_become_password"] = to_vault_block(
|
||||
manager.vault_handler, overrides["ansible_become_password"], "ansible_become_password"
|
||||
)
|
||||
|
||||
# Write back to file
|
||||
# 7) Overrides for existing credential keys (only with --force)
|
||||
if args.force:
|
||||
for ov_key, ov_val in overrides.items():
|
||||
# Accept both 'credentials.key' and bare 'key'
|
||||
key = ov_key.split(".", 1)[1] if ov_key.startswith("credentials.") else ov_key
|
||||
if key in creds:
|
||||
# If we just added it in this run, don't ask again or rewrap
|
||||
if key in newly_added_keys:
|
||||
continue
|
||||
if args.yes or ask_for_confirmation(key):
|
||||
creds[key] = to_vault_block(manager.vault_handler, ov_val, key)
|
||||
|
||||
# 8) Write back with ruamel (preserve formatting & comments)
|
||||
with open(args.inventory_file, "w", encoding="utf-8") as f:
|
||||
yaml.dump(updated_inventory, f, sort_keys=False, Dumper=SafeDumper)
|
||||
yaml_rt.dump(data, f)
|
||||
|
||||
print(f"✅ Inventory selectively vaulted → {args.inventory_file}")
|
||||
print(f"✅ Added new credentials without touching existing formatting/comments → {args.inventory_file}")
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
sys.exit(main())
|
||||
|
@@ -11,8 +11,8 @@ sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')
from module_utils.entity_name_utils import get_entity_name

# Paths to the group-vars files
PORTS_FILE = './group_vars/all/09_ports.yml'
NETWORKS_FILE = './group_vars/all/10_networks.yml'
PORTS_FILE = './group_vars/all/10_ports.yml'
NETWORKS_FILE = './group_vars/all/09_networks.yml'
ROLE_TEMPLATE_DIR = './templates/roles/web-app'
ROLES_DIR = './roles'
@@ -16,14 +16,16 @@ def run_ansible_playbook(
skip_tests=False,
skip_validation=False,
skip_build=False,
cleanup=False
cleanup=False,
logs=False
):
start_time = datetime.datetime.now()
print(f"\n▶️ Script started at: {start_time.isoformat()}\n")

if cleanup:
print("\n🧹 Cleaning up project (make clean)...\n")
subprocess.run(["make", "clean"], check=True)
cleanup_command = ["make", "clean-keep-logs"] if logs else ["make", "clean"]
print("\n🧹 Cleaning up project (" + " ".join(cleanup_command) +")...\n")
subprocess.run(cleanup_command, check=True)
else:
print("\n⚠️ Skipping build as requested.\n")

@@ -180,17 +182,23 @@ def main():
"-v", "--verbose", action="count", default=0,
help="Increase verbosity level. Multiple -v flags increase detail (e.g., -vvv for maximum log output)."
)
parser.add_argument(
"--logs", action="store_true",
help="Keep the CLI logs during cleanup command"
)

args = parser.parse_args()
validate_application_ids(args.inventory, args.id)

modes = {
"mode_reset": args.reset,
"mode_test": args.test,
"mode_update": args.update,
"mode_backup": args.backup,
"mode_cleanup": args.cleanup,
"enable_debug": args.debug,
"MODE_RESET": args.reset,
"MODE_TEST": args.test,
"MODE_UPDATE": args.update,
"MODE_BACKUP": args.backup,
"MODE_CLEANUP": args.cleanup,
"MODE_LOGS": args.logs,
"MODE_DEBUG": args.debug,
"MODE_ASSERT": not args.skip_validation,
"host_type": args.host_type
}

@@ -204,7 +212,8 @@ def main():
skip_tests=args.skip_tests,
skip_validation=args.skip_validation,
skip_build=args.skip_build,
cleanup=args.cleanup
cleanup=args.cleanup,
logs=args.logs
)
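The renamed MODE_* keys suggest the dict is forwarded to ansible-playbook as extra variables; the sketch below is only a guess at that wiring, since the forwarding code is not part of the hunks shown here.

import json
import subprocess

def run_playbook_with_modes(playbook: str, inventory: str, modes: dict) -> None:
    # Hypothetical helper: JSON keeps the boolean mode flags as true/false rather than strings.
    cmd = ["ansible-playbook", playbook, "-i", inventory, "-e", json.dumps(modes)]
    subprocess.run(cmd, check=True)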
480 cli/fix/move_unnecessary_dependencies.py (new file)
@@ -0,0 +1,480 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Move unnecessary meta dependencies to guarded include_role/import_role
|
||||
for better performance, while preserving YAML comments, quotes, and layout.
|
||||
|
||||
Heuristic (matches tests/integration/test_unnecessary_role_dependencies.py):
|
||||
- A dependency is considered UNNECESSARY if:
|
||||
* The consumer does NOT use provider variables in defaults/vars/handlers
|
||||
(no early-var need), AND
|
||||
* In tasks, any usage of provider vars or provider-handler notifications
|
||||
occurs only AFTER an include/import of the provider in the same file,
|
||||
OR there is no usage at all.
|
||||
|
||||
Action:
|
||||
- Remove such dependencies from roles/<role>/meta/main.yml.
|
||||
- Prepend a guarded include block to roles/<role>/tasks/01_core.yml (preferred)
|
||||
or roles/<role>/tasks/main.yml if 01_core.yml is absent.
|
||||
- If multiple dependencies are moved for a role, use a loop over include_role.
|
||||
|
||||
Notes:
|
||||
- Creates .bak backups for modified YAML files.
|
||||
- Requires ruamel.yaml to preserve comments/quotes everywhere.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import glob
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import sys
|
||||
from typing import Dict, Set, List, Tuple, Optional
|
||||
|
||||
# --- Require ruamel.yaml for full round-trip preservation ---
|
||||
try:
|
||||
from ruamel.yaml import YAML
|
||||
from ruamel.yaml.comments import CommentedMap, CommentedSeq
|
||||
from ruamel.yaml.scalarstring import SingleQuotedScalarString
|
||||
_HAVE_RUAMEL = True
|
||||
except Exception:
|
||||
_HAVE_RUAMEL = False
|
||||
|
||||
if not _HAVE_RUAMEL:
|
||||
print("[ERR] ruamel.yaml is required to preserve comments/quotes. Install with: pip install ruamel.yaml", file=sys.stderr)
|
||||
sys.exit(3)
|
||||
|
||||
yaml_rt = YAML()
|
||||
yaml_rt.preserve_quotes = True
|
||||
yaml_rt.width = 10**9 # prevent line wrapping
|
||||
|
||||
# ---------------- Utilities ----------------
|
||||
|
||||
def _backup(path: str):
|
||||
if os.path.exists(path):
|
||||
shutil.copy2(path, path + ".bak")
|
||||
|
||||
def read_text(path: str) -> str:
|
||||
try:
|
||||
with open(path, "r", encoding="utf-8") as f:
|
||||
return f.read()
|
||||
except Exception:
|
||||
return ""
|
||||
|
||||
def load_yaml_rt(path: str):
|
||||
try:
|
||||
with open(path, "r", encoding="utf-8") as f:
|
||||
data = yaml_rt.load(f)
|
||||
return data if data is not None else CommentedMap()
|
||||
except FileNotFoundError:
|
||||
return CommentedMap()
|
||||
except Exception as e:
|
||||
print(f"[WARN] Failed to parse YAML: {path}: {e}", file=sys.stderr)
|
||||
return CommentedMap()
|
||||
|
||||
def dump_yaml_rt(data, path: str):
|
||||
_backup(path)
|
||||
with open(path, "w", encoding="utf-8") as f:
|
||||
yaml_rt.dump(data, f)
|
||||
|
||||
def roles_root(project_root: str) -> str:
|
||||
return os.path.join(project_root, "roles")
|
||||
|
||||
def iter_role_dirs(project_root: str) -> List[str]:
|
||||
root = roles_root(project_root)
|
||||
return [d for d in glob.glob(os.path.join(root, "*")) if os.path.isdir(d)]
|
||||
|
||||
def role_name_from_dir(role_dir: str) -> str:
|
||||
return os.path.basename(role_dir.rstrip(os.sep))
|
||||
|
||||
def path_if_exists(*parts) -> Optional[str]:
|
||||
p = os.path.join(*parts)
|
||||
return p if os.path.exists(p) else None
|
||||
|
||||
def gather_yaml_files(base: str, patterns: List[str]) -> List[str]:
|
||||
files: List[str] = []
|
||||
for pat in patterns:
|
||||
files.extend(glob.glob(os.path.join(base, pat), recursive=True))
|
||||
return [f for f in files if os.path.isfile(f)]
|
||||
|
||||
def sq(v: str):
|
||||
"""Return a single-quoted scalar (ruamel) for consistent quoting."""
|
||||
return SingleQuotedScalarString(v)
|
||||
|
||||
# ---------------- Providers: vars & handlers ----------------
|
||||
|
||||
def flatten_keys(data) -> Set[str]:
|
||||
out: Set[str] = set()
|
||||
if isinstance(data, dict):
|
||||
for k, v in data.items():
|
||||
if isinstance(k, str):
|
||||
out.add(k)
|
||||
out |= flatten_keys(v)
|
||||
elif isinstance(data, list):
|
||||
for item in data:
|
||||
out |= flatten_keys(item)
|
||||
return out
|
||||
|
||||
def collect_role_defined_vars(role_dir: str) -> Set[str]:
|
||||
"""Vars a role 'provides': defaults/vars keys + set_fact keys in tasks."""
|
||||
provided: Set[str] = set()
|
||||
|
||||
for rel in ("defaults/main.yml", "vars/main.yml"):
|
||||
p = path_if_exists(role_dir, rel)
|
||||
if p:
|
||||
data = load_yaml_rt(p)
|
||||
provided |= flatten_keys(data)
|
||||
|
||||
# set_fact keys
|
||||
task_files = gather_yaml_files(os.path.join(role_dir, "tasks"), ["**/*.yml", "*.yml"])
|
||||
for tf in task_files:
|
||||
data = load_yaml_rt(tf)
|
||||
if isinstance(data, list):
|
||||
for task in data:
|
||||
if isinstance(task, dict) and "set_fact" in task and isinstance(task["set_fact"], dict):
|
||||
provided |= set(task["set_fact"].keys())
|
||||
|
||||
noisy = {"when", "name", "vars", "tags", "register"}
|
||||
return {v for v in provided if isinstance(v, str) and v and v not in noisy}
|
||||
|
||||
def collect_role_handler_names(role_dir: str) -> Set[str]:
|
||||
"""Handler names defined by a role (for notify detection)."""
|
||||
handler_file = path_if_exists(role_dir, "handlers/main.yml")
|
||||
if not handler_file:
|
||||
return set()
|
||||
data = load_yaml_rt(handler_file)
|
||||
names: Set[str] = set()
|
||||
if isinstance(data, list):
|
||||
for task in data:
|
||||
if isinstance(task, dict):
|
||||
nm = task.get("name")
|
||||
if isinstance(nm, str) and nm.strip():
|
||||
names.add(nm.strip())
|
||||
return names
|
||||
|
||||
# ---------------- Consumers: usage scanning ----------------
|
||||
|
||||
def find_var_positions(text: str, varname: str) -> List[int]:
|
||||
"""Return byte offsets for occurrences of varname (word-ish boundary)."""
|
||||
positions: List[int] = []
|
||||
if not varname:
|
||||
return positions
|
||||
pattern = re.compile(rf"(?<!\w){re.escape(varname)}(?!\w)")
|
||||
for m in pattern.finditer(text):
|
||||
positions.append(m.start())
|
||||
return positions
|
||||
|
||||
def first_var_use_offset_in_text(text: str, provided_vars: Set[str]) -> Optional[int]:
|
||||
first: Optional[int] = None
|
||||
for v in provided_vars:
|
||||
for off in find_var_positions(text, v):
|
||||
if first is None or off < first:
|
||||
first = off
|
||||
return first
|
||||
|
||||
def first_include_offset_for_role(text: str, producer_role: str) -> Optional[int]:
|
||||
"""
|
||||
Find earliest include/import of a given role in this YAML text.
|
||||
Handles compact dict and block styles.
|
||||
"""
|
||||
pattern = re.compile(
|
||||
r"(include_role|import_role)\s*:\s*\{[^}]*\bname\s*:\s*['\"]?"
|
||||
+ re.escape(producer_role) + r"['\"]?[^}]*\}"
|
||||
r"|"
|
||||
r"(include_role|import_role)\s*:\s*\n(?:\s+[a-z_]+\s*:\s*.*\n)*\s*name\s*:\s*['\"]?"
|
||||
+ re.escape(producer_role) + r"['\"]?",
|
||||
re.IGNORECASE,
|
||||
)
|
||||
m = pattern.search(text)
|
||||
return m.start() if m else None
|
||||
|
||||
def find_notify_offsets_for_handlers(text: str, handler_names: Set[str]) -> List[int]:
|
||||
"""
|
||||
Heuristic: for each handler name, find occurrences where 'notify' appears within
|
||||
the preceding ~200 chars. Works for single string or list-style notify blocks.
|
||||
"""
|
||||
if not handler_names:
|
||||
return []
|
||||
offsets: List[int] = []
|
||||
for h in handler_names:
|
||||
for m in re.finditer(re.escape(h), text):
|
||||
start = m.start()
|
||||
back = max(0, start - 200)
|
||||
context = text[back:start]
|
||||
if re.search(r"notify\s*:", context):
|
||||
offsets.append(start)
|
||||
return sorted(offsets)
|
||||
|
||||
def parse_meta_dependencies(role_dir: str) -> List[str]:
|
||||
meta = path_if_exists(role_dir, "meta/main.yml")
|
||||
if not meta:
|
||||
return []
|
||||
data = load_yaml_rt(meta)
|
||||
dd = data.get("dependencies")
|
||||
deps: List[str] = []
|
||||
if isinstance(dd, list):
|
||||
for item in dd:
|
||||
if isinstance(item, str):
|
||||
deps.append(item)
|
||||
elif isinstance(item, dict) and "role" in item:
|
||||
deps.append(str(item["role"]))
|
||||
elif isinstance(item, dict) and "name" in item:
|
||||
deps.append(str(item["name"]))
|
||||
return deps
|
||||
|
||||
# ---------------- Fix application ----------------
|
||||
|
||||
def sanitize_run_once_var(role_name: str) -> str:
|
||||
"""
|
||||
Generate run_once variable name from role name.
|
||||
Example: 'sys-front-inj-logout' -> 'run_once_sys_front_inj_logout'
|
||||
"""
|
||||
return "run_once_" + role_name.replace("-", "_")
|
||||
|
||||
def build_include_block_yaml(consumer_role: str, moved_deps: List[str]) -> List[dict]:
|
||||
"""
|
||||
Build a guarded block that includes one or many roles.
|
||||
This block will be prepended to tasks/01_core.yml or tasks/main.yml.
|
||||
"""
|
||||
guard_var = sanitize_run_once_var(consumer_role)
|
||||
|
||||
if len(moved_deps) == 1:
|
||||
inner_tasks = [
|
||||
{
|
||||
"name": f"Include dependency '{moved_deps[0]}'",
|
||||
"include_role": {"name": moved_deps[0]},
|
||||
}
|
||||
]
|
||||
else:
|
||||
inner_tasks = [
|
||||
{
|
||||
"name": "Include dependencies",
|
||||
"include_role": {"name": "{{ item }}"},
|
||||
"loop": moved_deps,
|
||||
}
|
||||
]
|
||||
|
||||
# Always set the run_once fact at the end
|
||||
inner_tasks.append({"set_fact": {guard_var: True}})
|
||||
|
||||
# Correct Ansible block structure
|
||||
block_task = {
|
||||
"name": "Load former meta dependencies once",
|
||||
"block": inner_tasks,
|
||||
"when": f"{guard_var} is not defined",
|
||||
}
|
||||
|
||||
return [block_task]
|
||||
|
||||
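An illustrative call of the builder above, reusing the role name from the sanitize_run_once_var docstring; the expected return value is shown as a comment.

build_include_block_yaml("sys-front-inj-logout", ["docker-core"])
# -> [{"name": "Load former meta dependencies once",
#      "block": [
#          {"name": "Include dependency 'docker-core'", "include_role": {"name": "docker-core"}},
#          {"set_fact": {"run_once_sys_front_inj_logout": True}},
#      ],
#      "when": "run_once_sys_front_inj_logout is not defined"}]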
def prepend_tasks(tasks_path: str, new_tasks, dry_run: bool):
|
||||
"""
|
||||
Prepend new_tasks (CommentedSeq) to an existing tasks YAML list while preserving comments.
|
||||
If the file does not exist, create it with new_tasks.
|
||||
"""
|
||||
if os.path.exists(tasks_path):
|
||||
existing = load_yaml_rt(tasks_path)
|
||||
if isinstance(existing, list):
|
||||
combined = CommentedSeq()
|
||||
for item in new_tasks:
|
||||
combined.append(item)
|
||||
for item in existing:
|
||||
combined.append(item)
|
||||
elif isinstance(existing, dict):
|
||||
# Rare case: tasks file with a single mapping; coerce to list
|
||||
combined = CommentedSeq()
|
||||
for item in new_tasks:
|
||||
combined.append(item)
|
||||
combined.append(existing)
|
||||
else:
|
||||
combined = new_tasks
|
||||
else:
|
||||
os.makedirs(os.path.dirname(tasks_path), exist_ok=True)
|
||||
combined = new_tasks
|
||||
|
||||
if dry_run:
|
||||
print(f"[DRY-RUN] Would write {tasks_path} with {len(new_tasks)} prepended task(s).")
|
||||
return
|
||||
|
||||
dump_yaml_rt(combined, tasks_path)
|
||||
print(f"[OK] Updated {tasks_path} (prepended {len(new_tasks)} task(s)).")
|
||||
|
||||
def update_meta_remove_deps(meta_path: str, remove: List[str], dry_run: bool):
|
||||
"""
|
||||
Remove entries from meta.dependencies while leaving the rest of the file intact.
|
||||
Quotes, comments, key order, and line breaks are preserved.
|
||||
Returns True if a change would be made (or was made when not in dry-run).
|
||||
"""
|
||||
if not os.path.exists(meta_path):
|
||||
return False
|
||||
|
||||
doc = load_yaml_rt(meta_path)
|
||||
deps = doc.get("dependencies")
|
||||
if not isinstance(deps, list):
|
||||
return False
|
||||
|
||||
def dep_name(item):
|
||||
if isinstance(item, dict):
|
||||
return item.get("role") or item.get("name")
|
||||
return item
|
||||
|
||||
keep = CommentedSeq()
|
||||
removed = []
|
||||
for item in deps:
|
||||
name = dep_name(item)
|
||||
if name in remove:
|
||||
removed.append(name)
|
||||
else:
|
||||
keep.append(item)
|
||||
|
||||
if not removed:
|
||||
return False
|
||||
|
||||
if keep:
|
||||
doc["dependencies"] = keep
|
||||
else:
|
||||
if "dependencies" in doc:
|
||||
del doc["dependencies"]
|
||||
|
||||
if dry_run:
|
||||
print(f"[DRY-RUN] Would rewrite {meta_path}; removed: {', '.join(removed)}")
|
||||
return True
|
||||
|
||||
dump_yaml_rt(doc, meta_path)
|
||||
print(f"[OK] Rewrote {meta_path}; removed: {', '.join(removed)}")
|
||||
return True
|
||||
|
||||
def dependency_is_unnecessary(consumer_dir: str,
|
||||
consumer_name: str,
|
||||
producer_name: str,
|
||||
provider_vars: Set[str],
|
||||
provider_handlers: Set[str]) -> bool:
|
||||
"""Apply heuristic to decide if we can move this dependency."""
|
||||
# 1) Early usage in defaults/vars/handlers? If yes -> necessary
|
||||
defaults_files = [p for p in [
|
||||
path_if_exists(consumer_dir, "defaults/main.yml"),
|
||||
path_if_exists(consumer_dir, "vars/main.yml"),
|
||||
path_if_exists(consumer_dir, "handlers/main.yml"),
|
||||
] if p]
|
||||
for p in defaults_files:
|
||||
text = read_text(p)
|
||||
if first_var_use_offset_in_text(text, provider_vars) is not None:
|
||||
return False # needs meta dep
|
||||
|
||||
# 2) Tasks: any usage before include/import? If yes -> keep meta dep
|
||||
task_files = gather_yaml_files(os.path.join(consumer_dir, "tasks"), ["**/*.yml", "*.yml"])
|
||||
for p in task_files:
|
||||
text = read_text(p)
|
||||
if not text:
|
||||
continue
|
||||
include_off = first_include_offset_for_role(text, producer_name)
|
||||
var_use_off = first_var_use_offset_in_text(text, provider_vars)
|
||||
notify_offs = find_notify_offsets_for_handlers(text, provider_handlers)
|
||||
|
||||
if var_use_off is not None:
|
||||
if include_off is None or include_off > var_use_off:
|
||||
return False # used before include
|
||||
|
||||
for noff in notify_offs:
|
||||
if include_off is None or include_off > noff:
|
||||
return False # notify before include
|
||||
|
||||
# If we get here: no early use, and either no usage at all or usage after include
|
||||
return True
|
||||
|
||||
def process_role(role_dir: str,
|
||||
providers_index: Dict[str, Tuple[Set[str], Set[str]]],
|
||||
only_role: Optional[str],
|
||||
dry_run: bool) -> bool:
|
||||
"""
|
||||
Returns True if any change suggested/made for this role.
|
||||
"""
|
||||
consumer_name = role_name_from_dir(role_dir)
|
||||
if only_role and only_role != consumer_name:
|
||||
return False
|
||||
|
||||
meta_deps = parse_meta_dependencies(role_dir)
|
||||
if not meta_deps:
|
||||
return False
|
||||
|
||||
# Build provider vars/handlers accessors
|
||||
moved: List[str] = []
|
||||
for producer in meta_deps:
|
||||
# Only consider local roles we can analyze
|
||||
producer_dir = path_if_exists(os.path.dirname(role_dir), producer) or path_if_exists(os.path.dirname(roles_root(os.path.dirname(role_dir))), "roles", producer)
|
||||
if producer not in providers_index:
|
||||
# Unknown/external role → skip (we cannot verify safety)
|
||||
continue
|
||||
pvars, phandlers = providers_index[producer]
|
||||
if dependency_is_unnecessary(role_dir, consumer_name, producer, pvars, phandlers):
|
||||
moved.append(producer)
|
||||
|
||||
if not moved:
|
||||
return False
|
||||
|
||||
# 1) Remove from meta
|
||||
meta_path = os.path.join(role_dir, "meta", "main.yml")
|
||||
update_meta_remove_deps(meta_path, moved, dry_run=dry_run)
|
||||
|
||||
# 2) Prepend include block to tasks/01_core.yml or tasks/main.yml
|
||||
target_tasks = path_if_exists(role_dir, "tasks/01_core.yml")
|
||||
if not target_tasks:
|
||||
target_tasks = os.path.join(role_dir, "tasks", "main.yml")
|
||||
include_block = build_include_block_yaml(consumer_name, moved)
|
||||
prepend_tasks(target_tasks, include_block, dry_run=dry_run)
|
||||
return True
|
||||
|
||||
def build_providers_index(all_roles: List[str]) -> Dict[str, Tuple[Set[str], Set[str]]]:
|
||||
"""
|
||||
Map role_name -> (provided_vars, handler_names)
|
||||
"""
|
||||
index: Dict[str, Tuple[Set[str], Set[str]]] = {}
|
||||
for rd in all_roles:
|
||||
rn = role_name_from_dir(rd)
|
||||
index[rn] = (collect_role_defined_vars(rd), collect_role_handler_names(rd))
|
||||
return index
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Move unnecessary meta dependencies to guarded include_role for performance (preserve comments/quotes)."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--project-root",
|
||||
default=os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")),
|
||||
help="Path to project root (default: two levels up from this script).",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--role",
|
||||
dest="only_role",
|
||||
default=None,
|
||||
help="Only process a specific role name (e.g., 'docker-core').",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--dry-run",
|
||||
action="store_true",
|
||||
help="Analyze and print planned changes without modifying files.",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
roles = iter_role_dirs(args.project_root)
|
||||
if not roles:
|
||||
print(f"[ERR] No roles found under {roles_root(args.project_root)}", file=sys.stderr)
|
||||
sys.exit(2)
|
||||
|
||||
providers_index = build_providers_index(roles)
|
||||
|
||||
changed_any = False
|
||||
for role_dir in roles:
|
||||
changed = process_role(role_dir, providers_index, args.only_role, args.dry_run)
|
||||
changed_any = changed_any or changed
|
||||
|
||||
if not changed_any:
|
||||
print("[OK] No unnecessary meta dependencies to move (per heuristic).")
|
||||
else:
|
||||
if args.dry_run:
|
||||
print("[DRY-RUN] Completed analysis. No files were changed.")
|
||||
else:
|
||||
print("[OK] Finished moving unnecessary dependencies.")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
@@ -1,8 +1,8 @@
|
||||
# Infinito.Nexus Architecture Overview
|
||||
# Infinito.Nexus Architecture
|
||||
|
||||
## Introduction
|
||||
|
||||
Infinito.Nexus (Cyber Master Infrastructure Solution) is a modular, open-source IT infrastructure automation platform designed to simplify the deployment, management, and security of self-hosted environments.
|
||||
[Infinito.Nexus](https://infinito.nexus) is a modular, open-source IT infrastructure automation platform designed to simplify the deployment, management, and security of self-hosted environments.
|
||||
|
||||
It provides a flexible, scalable, and secure architecture based on modern [DevOps](https://en.wikipedia.org/wiki/DevOps) principles, leveraging technologies like [Ansible](https://en.wikipedia.org/wiki/Ansible_(software)), [Docker](https://en.wikipedia.org/wiki/Docker_(software)), and [Infrastructure as Code (IaC)](https://en.wikipedia.org/wiki/Infrastructure_as_code).
|
||||
|
||||
|
@@ -1,6 +1,6 @@
|
||||
# 🚀 Deployment Guide
|
||||
|
||||
This section explains how to deploy and manage the **Cyber Master Infrastructure Solution (Infinito.Nexus)** using Ansible. Infinito.Nexus uses a collection of Ansible tasks, which are controlled via different **"modes"** — such as **updates**, **backups**, **resets**, and **cleanup** operations.
|
||||
This section explains how to deploy and manage **[Infinito.Nexus](https://infinito.nexus)** using Ansible. Infinito.Nexus uses a collection of Ansible tasks, which are controlled via different **"modes"** — such as **updates**, **backups**, **resets**, and **cleanup** operations.
|
||||
|
||||
---
|
||||
|
||||
|
@@ -15,8 +15,8 @@ Follow these guides to install and configure Infinito.Nexus:
|
||||
- **Networking & VPN** - Configure `WireGuard`, `OpenVPN`, and `Nginx Reverse Proxy`.
|
||||
|
||||
## Managing & Updating Infinito.Nexus 🔄
|
||||
- Regularly update services using `update-docker`, `update-pacman`, or `update-apt`.
|
||||
- Monitor system health with `sys-hlth-btrfs`, `sys-hlth-webserver`, and `sys-hlth-docker-container`.
|
||||
- Automate system maintenance with `sys-lock`, `sys-cln-bkps-service`, and `sys-rpr-docker-hard`.
|
||||
- Regularly update services using `update-pacman` or `update-apt`.
|
||||
- Monitor system health with `sys-ctl-hlth-btrfs`, `sys-ctl-hlth-webserver`, and `sys-ctl-hlth-docker-container`.
|
||||
- Automate system maintenance with `sys-lock`, `sys-ctl-cln-bkps`, and `sys-ctl-rpr-docker-hard`.
|
||||
|
||||
For more details, refer to the specific guides above.
|
@@ -25,7 +25,7 @@ Contributing to Infinito.Nexus
|
||||
|
||||
Want to contribute to the project or explore the source code? Check out our **GitHub repository**:
|
||||
|
||||
- `Infinito.Nexus GitHub Repository <https://github.com/kevinveenbirkenbach/infinito-nexus/tree/master/roles>`_
|
||||
- `Infinito.Nexus GitHub Repository <https://s.infinito.nexus/code/tree/master/roles>`_
|
||||
|
||||
Contribution Guidelines
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
@@ -1,86 +0,0 @@
|
||||
from ansible.errors import AnsibleFilterError
|
||||
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {'alias_domains_map': self.alias_domains_map}
|
||||
|
||||
def alias_domains_map(self, apps, primary_domain):
|
||||
"""
|
||||
Build a map of application IDs to their alias domains.
|
||||
|
||||
- If no `domains` key → []
|
||||
- If `domains` exists but is an empty dict → return the original cfg
|
||||
- Explicit `aliases` are used (default appended if missing)
|
||||
- If only `canonical` defined and it doesn't include default, default is added
|
||||
- Invalid types raise AnsibleFilterError
|
||||
"""
|
||||
def parse_entry(domains_cfg, key, app_id):
|
||||
if key not in domains_cfg:
|
||||
return None
|
||||
entry = domains_cfg[key]
|
||||
if isinstance(entry, dict):
|
||||
values = list(entry.values())
|
||||
elif isinstance(entry, list):
|
||||
values = entry
|
||||
else:
|
||||
raise AnsibleFilterError(
|
||||
f"Unexpected type for 'domains.{key}' in application '{app_id}': {type(entry).__name__}"
|
||||
)
|
||||
for d in values:
|
||||
if not isinstance(d, str) or not d.strip():
|
||||
raise AnsibleFilterError(
|
||||
f"Invalid domain entry in '{key}' for application '{app_id}': {d!r}"
|
||||
)
|
||||
return values
|
||||
|
||||
def default_domain(app_id, primary):
|
||||
return f"{app_id}.{primary}"
|
||||
|
||||
# 1) Precompute canonical domains per app (fallback to default)
|
||||
canonical_map = {}
|
||||
for app_id, cfg in apps.items():
|
||||
domains_cfg = cfg.get('server',{}).get('domains',{})
|
||||
entry = domains_cfg.get('canonical')
|
||||
if entry is None:
|
||||
canonical_map[app_id] = [default_domain(app_id, primary_domain)]
|
||||
elif isinstance(entry, dict):
|
||||
canonical_map[app_id] = list(entry.values())
|
||||
elif isinstance(entry, list):
|
||||
canonical_map[app_id] = list(entry)
|
||||
else:
|
||||
raise AnsibleFilterError(
|
||||
f"Unexpected type for 'server.domains.canonical' in application '{app_id}': {type(entry).__name__}"
|
||||
)
|
||||
|
||||
# 2) Build alias list per app
|
||||
result = {}
|
||||
for app_id, cfg in apps.items():
|
||||
domains_cfg = cfg.get('server',{}).get('domains')
|
||||
|
||||
# no domains key → no aliases
|
||||
if domains_cfg is None:
|
||||
result[app_id] = []
|
||||
continue
|
||||
|
||||
# empty domains dict → return the original cfg
|
||||
if isinstance(domains_cfg, dict) and not domains_cfg:
|
||||
result[app_id] = cfg
|
||||
continue
|
||||
|
||||
# otherwise, compute aliases
|
||||
aliases = parse_entry(domains_cfg, 'aliases', app_id) or []
|
||||
default = default_domain(app_id, primary_domain)
|
||||
has_aliases = 'aliases' in domains_cfg
|
||||
has_canon = 'canonical' in domains_cfg
|
||||
|
||||
if has_aliases:
|
||||
if default not in aliases:
|
||||
aliases.append(default)
|
||||
elif has_canon:
|
||||
canon = canonical_map.get(app_id, [])
|
||||
if default not in canon and default not in aliases:
|
||||
aliases.append(default)
|
||||
|
||||
result[app_id] = aliases
|
||||
|
||||
return result
|
@@ -4,47 +4,83 @@ import os
|
||||
|
||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
||||
from module_utils.entity_name_utils import get_entity_name
|
||||
from module_utils.role_dependency_resolver import RoleDependencyResolver
|
||||
from typing import Iterable
|
||||
|
||||
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {'canonical_domains_map': self.canonical_domains_map}
|
||||
|
||||
def canonical_domains_map(self, apps, primary_domain):
|
||||
def canonical_domains_map(
|
||||
self,
|
||||
apps,
|
||||
PRIMARY_DOMAIN,
|
||||
*,
|
||||
recursive: bool = False,
|
||||
roles_base_dir: str | None = None,
|
||||
seed: Iterable[str] | None = None,
|
||||
):
|
||||
"""
|
||||
Maps applications to their canonical domains, checking for conflicts
|
||||
and ensuring all domains are valid and unique across applications.
|
||||
Build { app_id: [canonical domains...] }.
|
||||
|
||||
Recursively, only include_role, import_role and meta/main.yml:dependencies are followed.
|
||||
'run_after' is deliberately ignored here.
|
||||
"""
|
||||
if not isinstance(apps, dict):
|
||||
raise AnsibleFilterError(f"'apps' must be a dict, got {type(apps).__name__}")
|
||||
|
||||
app_keys = set(apps.keys())
|
||||
seed_keys = set(seed) if seed is not None else app_keys
|
||||
|
||||
if recursive:
|
||||
roles_base_dir = roles_base_dir or os.path.join(os.getcwd(), "roles")
|
||||
if not os.path.isdir(roles_base_dir):
|
||||
raise AnsibleFilterError(
|
||||
f"roles_base_dir '{roles_base_dir}' not found or not a directory."
|
||||
)
|
||||
|
||||
resolver = RoleDependencyResolver(roles_base_dir)
|
||||
discovered_roles = resolver.resolve_transitively(
|
||||
start_roles=seed_keys,
|
||||
resolve_include_role=True,
|
||||
resolve_import_role=True,
|
||||
resolve_dependencies=True,
|
||||
resolve_run_after=False,
|
||||
max_depth=None,
|
||||
)
|
||||
# all discovered roles that actually have config entries in `apps`
|
||||
target_apps = discovered_roles & app_keys
|
||||
else:
|
||||
target_apps = seed_keys
|
||||
|
||||
result = {}
|
||||
seen_domains = {}
|
||||
|
||||
for app_id, cfg in apps.items():
|
||||
if app_id.startswith((
|
||||
"web-",
|
||||
"svc-db-" # Database services can also be exposed to the internet. It is just listening to the port, but the domain is used for port mapping
|
||||
)):
|
||||
if not isinstance(cfg, dict):
|
||||
raise AnsibleFilterError(
|
||||
f"Invalid configuration for application '{app_id}': "
|
||||
f"expected a dict, got {cfg!r}"
|
||||
for app_id in sorted(target_apps):
|
||||
cfg = apps.get(app_id)
|
||||
if cfg is None:
|
||||
continue
|
||||
if not str(app_id).startswith(("web-", "svc-db-")):
|
||||
continue
|
||||
if not isinstance(cfg, dict):
|
||||
raise AnsibleFilterError(
|
||||
f"Invalid configuration for application '{app_id}': expected dict, got {cfg!r}"
|
||||
)
|
||||
|
||||
domains_cfg = cfg.get('server',{}).get('domains',{})
|
||||
if not domains_cfg or 'canonical' not in domains_cfg:
|
||||
self._add_default_domain(app_id, primary_domain, seen_domains, result)
|
||||
continue
|
||||
|
||||
canonical_domains = domains_cfg['canonical']
|
||||
self._process_canonical_domains(app_id, canonical_domains, seen_domains, result)
|
||||
domains_cfg = cfg.get('server', {}).get('domains', {})
|
||||
if not domains_cfg or 'canonical' not in domains_cfg:
|
||||
self._add_default_domain(app_id, PRIMARY_DOMAIN, seen_domains, result)
|
||||
continue
|
||||
|
||||
canonical_domains = domains_cfg['canonical']
|
||||
self._process_canonical_domains(app_id, canonical_domains, seen_domains, result)
|
||||
|
||||
return result
|
||||
|
||||
def _add_default_domain(self, app_id, primary_domain, seen_domains, result):
|
||||
"""
|
||||
Add the default domain for an application if no canonical domains are defined.
|
||||
Ensures the domain is unique across applications.
|
||||
"""
|
||||
def _add_default_domain(self, app_id, PRIMARY_DOMAIN, seen_domains, result):
|
||||
entity_name = get_entity_name(app_id)
|
||||
default_domain = f"{entity_name}.{primary_domain}"
|
||||
default_domain = f"{entity_name}.{PRIMARY_DOMAIN}"
|
||||
if default_domain in seen_domains:
|
||||
raise AnsibleFilterError(
|
||||
f"Domain '{default_domain}' is already configured for "
|
||||
@@ -54,40 +90,21 @@ class FilterModule(object):
|
||||
result[app_id] = [default_domain]
|
||||
|
||||
def _process_canonical_domains(self, app_id, canonical_domains, seen_domains, result):
|
||||
"""
|
||||
Process the canonical domains for an application, handling both lists and dicts,
|
||||
and ensuring each domain is unique.
|
||||
"""
|
||||
if isinstance(canonical_domains, dict):
|
||||
self._process_canonical_domains_dict(app_id, canonical_domains, seen_domains, result)
|
||||
for _, domain in canonical_domains.items():
|
||||
self._validate_and_check_domain(app_id, domain, seen_domains)
|
||||
result[app_id] = canonical_domains.copy()
|
||||
elif isinstance(canonical_domains, list):
|
||||
self._process_canonical_domains_list(app_id, canonical_domains, seen_domains, result)
|
||||
for domain in canonical_domains:
|
||||
self._validate_and_check_domain(app_id, domain, seen_domains)
|
||||
result[app_id] = list(canonical_domains)
|
||||
else:
|
||||
raise AnsibleFilterError(
|
||||
f"Unexpected type for 'server.domains.canonical' in application '{app_id}': "
|
||||
f"{type(canonical_domains).__name__}"
|
||||
)
|
||||
|
||||
def _process_canonical_domains_dict(self, app_id, domains_dict, seen_domains, result):
|
||||
"""
|
||||
Process a dictionary of canonical domains for an application.
|
||||
"""
|
||||
for name, domain in domains_dict.items():
|
||||
self._validate_and_check_domain(app_id, domain, seen_domains)
|
||||
result[app_id] = domains_dict.copy()
|
||||
|
||||
def _process_canonical_domains_list(self, app_id, domains_list, seen_domains, result):
|
||||
"""
|
||||
Process a list of canonical domains for an application.
|
||||
"""
|
||||
for domain in domains_list:
|
||||
self._validate_and_check_domain(app_id, domain, seen_domains)
|
||||
result[app_id] = list(domains_list)
|
||||
|
||||
def _validate_and_check_domain(self, app_id, domain, seen_domains):
|
||||
"""
|
||||
Validate the domain and check if it has already been assigned to another application.
|
||||
"""
|
||||
if not isinstance(domain, str) or not domain.strip():
|
||||
raise AnsibleFilterError(
|
||||
f"Invalid domain entry in 'canonical' for application '{app_id}': {domain!r}"
|
||||
|
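A minimal usage sketch for the updated canonical_domains_map filter (data is illustrative and assumes get_entity_name('web-app-matomo') resolves to 'matomo'):
fm = FilterModule()
apps = {
    "web-app-matomo": {},  # no server.domains.canonical, falls back to "<entity>.<PRIMARY_DOMAIN>"
    "svc-db-postgres": {"server": {"domains": {"canonical": ["db.example.com"]}}},
}
fm.canonical_domains_map(apps, "example.com")
# -> {'svc-db-postgres': ['db.example.com'], 'web-app-matomo': ['matomo.example.com']}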
@@ -1,6 +1,14 @@
|
||||
from ansible.errors import AnsibleFilterError
|
||||
import hashlib
|
||||
import base64
|
||||
import sys
|
||||
import os
|
||||
|
||||
# Ensure module_utils is importable when this filter runs from Ansible
|
||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
||||
from module_utils.config_utils import get_app_conf
|
||||
from module_utils.get_url import get_url
|
||||
|
||||
|
||||
class FilterModule(object):
|
||||
"""
|
||||
@@ -12,18 +20,36 @@ class FilterModule(object):
|
||||
'build_csp_header': self.build_csp_header,
|
||||
}
|
||||
|
||||
# -------------------------------
|
||||
# Helpers
|
||||
# -------------------------------
|
||||
|
||||
@staticmethod
|
||||
def is_feature_enabled(applications: dict, feature: str, application_id: str) -> bool:
|
||||
"""
|
||||
Return True if applications[application_id].features[feature] is truthy.
|
||||
Returns True if applications[application_id].features[feature] is truthy.
|
||||
"""
|
||||
app = applications.get(application_id, {})
|
||||
return bool(app.get('features', {}).get(feature, False))
|
||||
return get_app_conf(
|
||||
applications,
|
||||
application_id,
|
||||
'features.' + feature,
|
||||
False,
|
||||
False
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def get_csp_whitelist(applications, application_id, directive):
|
||||
app = applications.get(application_id, {})
|
||||
wl = app.get('server',{}).get('csp', {}).get('whitelist', {}).get(directive, [])
|
||||
"""
|
||||
Returns a list of additional whitelist entries for a given directive.
|
||||
Accepts both scalar and list in config; always returns a list.
|
||||
"""
|
||||
wl = get_app_conf(
|
||||
applications,
|
||||
application_id,
|
||||
'server.csp.whitelist.' + directive,
|
||||
False,
|
||||
[]
|
||||
)
|
||||
if isinstance(wl, list):
|
||||
return wl
|
||||
if wl:
|
||||
@@ -33,26 +59,45 @@ class FilterModule(object):
|
||||
@staticmethod
|
||||
def get_csp_flags(applications, application_id, directive):
|
||||
"""
|
||||
Dynamically extract all CSP flags for a given directive and return them as tokens,
|
||||
e.g., "'unsafe-eval'", "'unsafe-inline'", etc.
|
||||
Returns CSP flag tokens (e.g., "'unsafe-eval'", "'unsafe-inline'") for a directive,
|
||||
merging sane defaults with app config.
|
||||
Default: 'unsafe-inline' is enabled for style-src and style-src-elem.
|
||||
"""
|
||||
app = applications.get(application_id, {})
|
||||
flags = app.get('server',{}).get('csp', {}).get('flags', {}).get(directive, {})
|
||||
tokens = []
|
||||
# Defaults that apply to all apps
|
||||
default_flags = {}
|
||||
if directive in ('style-src', 'style-src-elem'):
|
||||
default_flags = {'unsafe-inline': True}
|
||||
|
||||
for flag_name, enabled in flags.items():
|
||||
configured = get_app_conf(
|
||||
applications,
|
||||
application_id,
|
||||
'server.csp.flags.' + directive,
|
||||
False,
|
||||
{}
|
||||
)
|
||||
|
||||
# Merge defaults with configured flags (configured overrides defaults)
|
||||
merged = {**default_flags, **configured}
|
||||
|
||||
tokens = []
|
||||
for flag_name, enabled in merged.items():
|
||||
if enabled:
|
||||
tokens.append(f"'{flag_name}'")
|
||||
|
||||
return tokens
|
||||
|
||||
@staticmethod
|
||||
def get_csp_inline_content(applications, application_id, directive):
|
||||
"""
|
||||
Return inline script/style snippets to hash for a given CSP directive.
|
||||
Returns inline script/style snippets to hash for a given directive.
|
||||
Accepts both scalar and list in config; always returns a list.
|
||||
"""
|
||||
app = applications.get(application_id, {})
|
||||
snippets = app.get('server',{}).get('csp', {}).get('hashes', {}).get(directive, [])
|
||||
snippets = get_app_conf(
|
||||
applications,
|
||||
application_id,
|
||||
'server.csp.hashes.' + directive,
|
||||
False,
|
||||
[]
|
||||
)
|
||||
if isinstance(snippets, list):
|
||||
return snippets
|
||||
if snippets:
|
||||
@@ -62,7 +107,7 @@ class FilterModule(object):
|
||||
@staticmethod
|
||||
def get_csp_hash(content):
|
||||
"""
|
||||
Compute the SHA256 hash of the given inline content and return
|
||||
Computes the SHA256 hash of the given inline content and returns
|
||||
a CSP token like "'sha256-<base64>'".
|
||||
"""
|
||||
try:
|
||||
@@ -72,6 +117,10 @@ class FilterModule(object):
|
||||
except Exception as exc:
|
||||
raise AnsibleFilterError(f"get_csp_hash failed: {exc}")
|
||||
|
||||
# -------------------------------
|
||||
# Main builder
|
||||
# -------------------------------
|
||||
|
||||
def build_csp_header(
|
||||
self,
|
||||
applications,
|
||||
@@ -81,81 +130,80 @@ class FilterModule(object):
|
||||
matomo_feature_name='matomo'
|
||||
):
|
||||
"""
|
||||
Build the Content-Security-Policy header value dynamically based on application settings.
|
||||
Inline hashes are read from applications[application_id].csp.hashes
|
||||
Builds the Content-Security-Policy header value dynamically based on application settings.
|
||||
- Flags (e.g., 'unsafe-eval', 'unsafe-inline') are read from server.csp.flags.<directive>,
|
||||
with sane defaults applied in get_csp_flags (always 'unsafe-inline' for style-src and style-src-elem).
|
||||
- Inline hashes are read from server.csp.hashes.<directive>.
|
||||
- Whitelists are read from server.csp.whitelist.<directive>.
|
||||
- Inline hashes are added only if the final tokens do NOT include 'unsafe-inline'.
|
||||
"""
|
||||
try:
|
||||
directives = [
|
||||
'default-src',
|
||||
'connect-src',
|
||||
'frame-ancestors',
|
||||
'frame-src',
|
||||
'script-src',
|
||||
'script-src-elem',
|
||||
'style-src',
|
||||
'font-src',
|
||||
'worker-src',
|
||||
'manifest-src',
|
||||
'media-src',
|
||||
'default-src', # Fallback source list for content types not explicitly listed
|
||||
'connect-src', # Allowed URLs for XHR, WebSockets, EventSource, fetch()
|
||||
'frame-ancestors', # Who may embed this page
|
||||
'frame-src', # Sources for nested browsing contexts (e.g., <iframe>)
|
||||
'script-src', # Sources for script execution
|
||||
'script-src-elem', # Sources for <script> elements
|
||||
'style-src', # Sources for inline styles and <style>/<link> elements
|
||||
'style-src-elem', # Sources for <style> and <link rel="stylesheet">
|
||||
'font-src', # Sources for fonts
|
||||
'worker-src', # Sources for workers
|
||||
'manifest-src', # Sources for web app manifests
|
||||
'media-src', # Sources for audio and video
|
||||
]
|
||||
|
||||
parts = []
|
||||
|
||||
for directive in directives:
|
||||
tokens = ["'self'"]
|
||||
|
||||
# unsafe-eval / unsafe-inline flags
|
||||
# 1) Load flags (includes defaults from get_csp_flags)
|
||||
flags = self.get_csp_flags(applications, application_id, directive)
|
||||
tokens += flags
|
||||
|
||||
# Matomo integration
|
||||
if (
|
||||
self.is_feature_enabled(applications, matomo_feature_name, application_id)
|
||||
and directive in ['script-src-elem', 'connect-src']
|
||||
):
|
||||
matomo_domain = domains.get('web-app-matomo')[0]
|
||||
if matomo_domain:
|
||||
tokens.append(f"{web_protocol}://{matomo_domain}")
|
||||
# 2) Allow fetching from internal CDN by default for selected directives
|
||||
if directive in ['script-src-elem', 'connect-src', 'style-src-elem']:
|
||||
tokens.append(get_url(domains, 'web-svc-cdn', web_protocol))
|
||||
|
||||
# ReCaptcha integration: allow loading scripts from Google if feature enabled
|
||||
# 3) Matomo integration if feature is enabled
|
||||
if directive in ['script-src-elem', 'connect-src']:
|
||||
if self.is_feature_enabled(applications, matomo_feature_name, application_id):
|
||||
tokens.append(get_url(domains, 'web-app-matomo', web_protocol))
|
||||
|
||||
# 4) ReCaptcha integration (scripts + frames) if feature is enabled
|
||||
if self.is_feature_enabled(applications, 'recaptcha', application_id):
|
||||
if directive in ['script-src-elem',"frame-src"]:
|
||||
if directive in ['script-src-elem', 'frame-src']:
|
||||
tokens.append('https://www.gstatic.com')
|
||||
tokens.append('https://www.google.com')
|
||||
|
||||
# Allow the loading of js from the cdn
|
||||
if directive == 'script-src-elem' and self.is_feature_enabled(applications, 'logout', application_id):
|
||||
domain = domains.get('web-svc-cdn')[0]
|
||||
tokens.append(f"{domain}")
|
||||
|
||||
# 5) Frame ancestors handling (desktop + logout support)
|
||||
if directive == 'frame-ancestors':
|
||||
# Enable loading via ancestors
|
||||
if self.is_feature_enabled(applications, 'port-ui-desktop', application_id):
|
||||
domain = domains.get('web-app-port-ui')[0]
|
||||
sld_tld = ".".join(domain.split(".")[-2:]) # yields "example.com"
|
||||
tokens.append(f"{sld_tld}") # yields "*.example.com"
|
||||
|
||||
if self.is_feature_enabled(applications, 'desktop', application_id):
|
||||
# Allow being embedded by the desktop app domain (and potentially its parent)
|
||||
domain = domains.get('web-app-desktop')[0]
|
||||
sld_tld = ".".join(domain.split(".")[-2:]) # e.g., example.com
|
||||
tokens.append(f"{sld_tld}")
|
||||
if self.is_feature_enabled(applications, 'logout', application_id):
|
||||
|
||||
# Allow logout via infinito logout proxy
|
||||
domain = domains.get('web-svc-logout')[0]
|
||||
tokens.append(f"{domain}")
|
||||
|
||||
# Allow logout via keycloak app
|
||||
domain = domains.get('web-app-keycloak')[0]
|
||||
tokens.append(f"{domain}")
|
||||
|
||||
# whitelist
|
||||
# Allow embedding via logout proxy and Keycloak app
|
||||
tokens.append(get_url(domains, 'web-svc-logout', web_protocol))
|
||||
tokens.append(get_url(domains, 'web-app-keycloak', web_protocol))
|
||||
|
||||
# 6) Custom whitelist entries
|
||||
tokens += self.get_csp_whitelist(applications, application_id, directive)
|
||||
|
||||
# only add hashes if 'unsafe-inline' is NOT in flags
|
||||
if "'unsafe-inline'" not in flags:
|
||||
# 7) Add inline content hashes ONLY if final tokens do NOT include 'unsafe-inline'
|
||||
# (Check tokens, not flags, to include defaults and later modifications.)
|
||||
if "'unsafe-inline'" not in tokens:
|
||||
for snippet in self.get_csp_inline_content(applications, application_id, directive):
|
||||
tokens.append(self.get_csp_hash(snippet))
|
||||
|
||||
# Append directive
|
||||
parts.append(f"{directive} {' '.join(tokens)};")
|
||||
|
||||
# static img-src
|
||||
# 8) Static img-src directive (kept permissive for data/blob and any host)
|
||||
parts.append("img-src * data: blob:;")
|
||||
|
||||
return ' '.join(parts)
|
||||
|
||||
except Exception as exc:
|
||||
|
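As a standalone sketch of the hashing step that get_csp_hash performs (the inline snippet is made up):
import base64, hashlib

snippet = "console.log('hello');"                         # hypothetical inline script
digest = hashlib.sha256(snippet.encode("utf-8")).digest()
token = f"'sha256-{base64.b64encode(digest).decode()}'"   # CSP token of the form "'sha256-<base64>'", added only when 'unsafe-inline' is absent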
@@ -1,10 +1,13 @@
|
||||
from ansible.errors import AnsibleFilterError
|
||||
import sys, os
|
||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
||||
from module_utils.entity_name_utils import get_entity_name
|
||||
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {'domain_mappings': self.domain_mappings}
|
||||
|
||||
def domain_mappings(self, apps, primary_domain):
|
||||
def domain_mappings(self, apps, PRIMARY_DOMAIN):
|
||||
"""
|
||||
Build a flat list of redirect mappings for all apps:
|
||||
- source: each alias domain
|
||||
@@ -30,8 +33,9 @@ class FilterModule(object):
|
||||
)
|
||||
return values
|
||||
|
||||
def default_domain(app_id, primary):
|
||||
return f"{app_id}.{primary}"
|
||||
def default_domain(app_id:str, primary:str):
|
||||
subdomain = get_entity_name(app_id)
|
||||
return f"{subdomain}.{primary}"
|
||||
|
||||
# 1) Compute canonical domains per app (always as a list)
|
||||
canonical_map = {}
|
||||
@@ -39,7 +43,7 @@ class FilterModule(object):
|
||||
domains_cfg = cfg.get('server',{}).get('domains',{})
|
||||
entry = domains_cfg.get('canonical')
|
||||
if entry is None:
|
||||
canonical_map[app_id] = [default_domain(app_id, primary_domain)]
|
||||
canonical_map[app_id] = [default_domain(app_id, PRIMARY_DOMAIN)]
|
||||
elif isinstance(entry, dict):
|
||||
canonical_map[app_id] = list(entry.values())
|
||||
elif isinstance(entry, list):
|
||||
@@ -57,11 +61,11 @@ class FilterModule(object):
|
||||
alias_map[app_id] = []
|
||||
continue
|
||||
if isinstance(domains_cfg, dict) and not domains_cfg:
|
||||
alias_map[app_id] = [default_domain(app_id, primary_domain)]
|
||||
alias_map[app_id] = [default_domain(app_id, PRIMARY_DOMAIN)]
|
||||
continue
|
||||
|
||||
aliases = parse_entry(domains_cfg, 'aliases', app_id) or []
|
||||
default = default_domain(app_id, primary_domain)
|
||||
default = default_domain(app_id, PRIMARY_DOMAIN)
|
||||
has_aliases = 'aliases' in domains_cfg
|
||||
has_canonical = 'canonical' in domains_cfg
|
||||
|
||||
@@ -80,7 +84,7 @@ class FilterModule(object):
|
||||
mappings = []
|
||||
for app_id, sources in alias_map.items():
|
||||
canon_list = canonical_map.get(app_id, [])
|
||||
target = canon_list[0] if canon_list else default_domain(app_id, primary_domain)
|
||||
target = canon_list[0] if canon_list else default_domain(app_id, PRIMARY_DOMAIN)
|
||||
for src in sources:
|
||||
if src == target:
|
||||
# skip self-redirects
|
||||
|
filter_plugins/domain_tools.py (new file, +19 lines)
@@ -0,0 +1,19 @@
|
||||
# filter_plugins/domain_tools.py
|
||||
# Returns the DNS zone (SLD.TLD) from a hostname.
|
||||
# Pure-Python, no external deps; handles simple cases. For exotic TLDs use tldextract (see note).
|
||||
from ansible.errors import AnsibleFilterError
|
||||
|
||||
def to_zone(hostname: str) -> str:
|
||||
if not isinstance(hostname, str) or not hostname.strip():
|
||||
raise AnsibleFilterError("to_zone: hostname must be a non-empty string")
|
||||
parts = hostname.strip(".").split(".")
|
||||
if len(parts) < 2:
|
||||
raise AnsibleFilterError(f"to_zone: '{hostname}' has no TLD part")
|
||||
# naive default: last two labels -> SLD.TLD
|
||||
return ".".join(parts[-2:])
|
||||
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {
|
||||
"to_zone": to_zone,
|
||||
}
|
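Usage sketch for the naive rule above (multi-label public suffixes are not handled without tldextract):
to_zone("matomo.example.com")   # -> 'example.com'
to_zone("example.com.")         # trailing dot is stripped -> 'example.com'
to_zone("app.example.co.uk")    # -> 'co.uk' (naive last-two-labels result, not 'example.co.uk')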
@@ -1,49 +0,0 @@
|
||||
import os
|
||||
import re
|
||||
import yaml
|
||||
from ansible.errors import AnsibleFilterError
|
||||
|
||||
|
||||
def get_application_id(role_name):
|
||||
"""
|
||||
Jinja2/Ansible filter: given a role name, load its vars/main.yml and return the application_id value.
|
||||
"""
|
||||
# Construct path: assumes current working directory is project root
|
||||
vars_file = os.path.join(os.getcwd(), 'roles', role_name, 'vars', 'main.yml')
|
||||
|
||||
if not os.path.isfile(vars_file):
|
||||
raise AnsibleFilterError(f"Vars file not found for role '{role_name}': {vars_file}")
|
||||
|
||||
try:
|
||||
# Read entire file content to avoid lazy stream issues
|
||||
with open(vars_file, 'r', encoding='utf-8') as f:
|
||||
content = f.read()
|
||||
data = yaml.safe_load(content)
|
||||
except Exception as e:
|
||||
raise AnsibleFilterError(f"Error reading YAML from {vars_file}: {e}")
|
||||
|
||||
# Ensure parsed data is a mapping
|
||||
if not isinstance(data, dict):
|
||||
raise AnsibleFilterError(
|
||||
f"Error reading YAML from {vars_file}: expected mapping, got {type(data).__name__}"
|
||||
)
|
||||
|
||||
# Detect malformed YAML: no valid identifier-like keys
|
||||
valid_key_pattern = re.compile(r'^[A-Za-z_][A-Za-z0-9_]*$')
|
||||
if data and not any(valid_key_pattern.match(k) for k in data.keys()):
|
||||
raise AnsibleFilterError(f"Error reading YAML from {vars_file}: invalid top-level keys")
|
||||
|
||||
if 'application_id' not in data:
|
||||
raise AnsibleFilterError(f"Key 'application_id' not found in {vars_file}")
|
||||
|
||||
return data['application_id']
|
||||
|
||||
|
||||
class FilterModule(object):
|
||||
"""
|
||||
Ansible filter plugin entry point.
|
||||
"""
|
||||
def filters(self):
|
||||
return {
|
||||
'get_application_id': get_application_id,
|
||||
}
|
filter_plugins/get_category_entries.py (new file, +31 lines)
@@ -0,0 +1,31 @@
|
||||
# Custom Ansible filter to get all role names under "roles/" with a given prefix.
|
||||
|
||||
import os
|
||||
|
||||
def get_category_entries(prefix, roles_path="roles"):
|
||||
"""
|
||||
Returns a list of role names under the given roles_path
|
||||
that start with the specified prefix.
|
||||
|
||||
:param prefix: String prefix to match role names.
|
||||
:param roles_path: Path to the roles directory (default: 'roles').
|
||||
:return: List of matching role names.
|
||||
"""
|
||||
if not os.path.isdir(roles_path):
|
||||
return []
|
||||
|
||||
roles = []
|
||||
for entry in os.listdir(roles_path):
|
||||
full_path = os.path.join(roles_path, entry)
|
||||
if os.path.isdir(full_path) and entry.startswith(prefix):
|
||||
roles.append(entry)
|
||||
|
||||
return sorted(roles)
|
||||
|
||||
class FilterModule(object):
|
||||
""" Custom filters for Ansible """
|
||||
|
||||
def filters(self):
|
||||
return {
|
||||
"get_category_entries": get_category_entries
|
||||
}
|
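Illustrative call of get_category_entries (the role names are examples, not a listing of the actual repository):
# With roles/web-app-matomo, roles/web-app-nextcloud and roles/svc-db-postgres present:
get_category_entries("web-app-")           # -> ['web-app-matomo', 'web-app-nextcloud']
get_category_entries("web-app-", "roles")  # same result, with the default roles path made explicit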
filter_plugins/get_service_name.py (new file, +37 lines)
@@ -0,0 +1,37 @@
|
||||
"""
|
||||
Custom Ansible filter to build a systemctl unit name (always lowercase).
|
||||
|
||||
Rules:
|
||||
- If `systemctl_id` ends with '@': drop the '@' and return
|
||||
"{systemctl_id_without_at}.{software_name}@{suffix_handling}".
|
||||
- Else: return "{systemctl_id}.{software_name}{suffix_handling}".
|
||||
|
||||
Suffix handling:
|
||||
- Default "" → automatically pick:
|
||||
- ".service" if no '@' in systemctl_id
|
||||
- ".timer" if '@' in systemctl_id
|
||||
- Explicit False → no suffix at all
|
||||
- Any string → ".{suffix}" (lowercased)
|
||||
"""
|
||||
|
||||
def get_service_name(systemctl_id, software_name, suffix=""):
|
||||
sid = str(systemctl_id).strip().lower()
|
||||
software_name = str(software_name).strip().lower()
|
||||
|
||||
# Determine suffix
|
||||
if suffix is False:
|
||||
sfx = "" # no suffix at all
|
||||
elif suffix == "" or suffix is None:
|
||||
sfx = ".service"
|
||||
else:
|
||||
sfx = str(suffix).strip().lower()
|
||||
|
||||
if sid.endswith("@"):
|
||||
base = sid[:-1] # drop the trailing '@'
|
||||
return f"{base}.{software_name}@{sfx}"
|
||||
else:
|
||||
return f"{sid}.{software_name}{sfx}"
|
||||
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {"get_service_name": get_service_name}
|
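A few sample results of get_service_name (the software name is illustrative; note that, as written, the code defaults to '.service' even for ids ending in '@'):
get_service_name("sys-ctl-hlth-btrfs", "Infinito.Nexus")         # -> 'sys-ctl-hlth-btrfs.infinito.nexus.service'
get_service_name("sys-ctl-hlth-btrfs", "Infinito.Nexus", False)  # -> 'sys-ctl-hlth-btrfs.infinito.nexus' (no suffix)
get_service_name("svc-bkp-rmt-2-loc@", "Infinito.Nexus")         # -> 'svc-bkp-rmt-2-loc.infinito.nexus@.service'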
filter_plugins/get_service_script_path.py (new file, +24 lines)
@@ -0,0 +1,24 @@
|
||||
# filter_plugins/get_service_script_path.py
|
||||
# Custom Ansible filter to generate service script paths.
|
||||
|
||||
def get_service_script_path(systemctl_id, script_type):
|
||||
"""
|
||||
Build the path to a service script based on systemctl_id and type.
|
||||
|
||||
:param systemctl_id: The identifier of the system service.
|
||||
:param script_type: The script type/extension (e.g., sh, py, yml).
|
||||
:return: The full path string.
|
||||
"""
|
||||
if not systemctl_id or not script_type:
|
||||
raise ValueError("Both systemctl_id and script_type are required")
|
||||
|
||||
return f"/opt/scripts/systemctl/{systemctl_id}/script.{script_type}"
|
||||
|
||||
|
||||
class FilterModule(object):
|
||||
""" Custom filters for Ansible """
|
||||
|
||||
def filters(self):
|
||||
return {
|
||||
"get_service_script_path": get_service_script_path
|
||||
}
|
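Example calls (the systemctl ids are taken from elsewhere in this changeset and are only illustrative here):
get_service_script_path("sys-ctl-cln-bkps", "sh")         # -> '/opt/scripts/systemctl/sys-ctl-cln-bkps/script.sh'
get_service_script_path("sys-ctl-rpr-docker-hard", "py")  # -> '/opt/scripts/systemctl/sys-ctl-rpr-docker-hard/script.py'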
@@ -1,27 +1,11 @@
|
||||
#!/usr/bin/python
|
||||
import os
|
||||
import sys
|
||||
from ansible.errors import AnsibleFilterError
|
||||
import sys, os
|
||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
||||
from module_utils.get_url import get_url
|
||||
|
||||
class FilterModule(object):
|
||||
''' Infinito.Nexus application config extraction filters '''
|
||||
def filters(self):
|
||||
return {'get_url': self.get_url}
|
||||
|
||||
def get_url(self, domains, application_id, protocol):
|
||||
# 1) add the module_utils directory to the search path
|
||||
plugin_dir = os.path.dirname(__file__)
|
||||
project_root = os.path.dirname(plugin_dir)
|
||||
module_utils = os.path.join(project_root, 'module_utils')
|
||||
if module_utils not in sys.path:
|
||||
sys.path.append(module_utils)
|
||||
|
||||
# 2) now import domain_utils
|
||||
try:
|
||||
from domain_utils import get_domain
|
||||
except ImportError as e:
|
||||
raise AnsibleFilterError(f"could not import domain_utils: {e}")
|
||||
|
||||
# 3) validation and call
|
||||
if not isinstance(protocol, str):
|
||||
raise AnsibleFilterError("Protocol must be a string")
|
||||
return f"{protocol}://{ get_domain(domains, application_id) }"
|
||||
return {
|
||||
'get_url': get_url,
|
||||
}
|
||||
|
@@ -1,122 +0,0 @@
|
||||
import os
|
||||
import yaml
|
||||
import re
|
||||
from ansible.errors import AnsibleFilterError
|
||||
|
||||
# in-memory cache: application_id → (parsed_yaml, is_nested)
|
||||
_cfg_cache = {}
|
||||
|
||||
def load_configuration(application_id, key):
|
||||
if not isinstance(key, str):
|
||||
raise AnsibleFilterError("Key must be a dotted-string, e.g. 'features.matomo'")
|
||||
|
||||
# locate roles/
|
||||
here = os.path.dirname(__file__)
|
||||
root = os.path.abspath(os.path.join(here, '..'))
|
||||
roles_dir = os.path.join(root, 'roles')
|
||||
if not os.path.isdir(roles_dir):
|
||||
raise AnsibleFilterError(f"Roles directory not found at {roles_dir}")
|
||||
|
||||
# first time? load & cache
|
||||
if application_id not in _cfg_cache:
|
||||
config_path = None
|
||||
|
||||
# 1) primary: vars/main.yml declares it
|
||||
for role in os.listdir(roles_dir):
|
||||
mv = os.path.join(roles_dir, role, 'vars', 'main.yml')
|
||||
if os.path.exists(mv):
|
||||
try:
|
||||
md = yaml.safe_load(open(mv)) or {}
|
||||
except Exception:
|
||||
md = {}
|
||||
if md.get('application_id') == application_id:
|
||||
cf = os.path.join(roles_dir, role, "config" , "main.yml")
|
||||
if not os.path.exists(cf):
|
||||
raise AnsibleFilterError(
|
||||
f"Role '{role}' declares '{application_id}' but missing config/main.yml"
|
||||
)
|
||||
config_path = cf
|
||||
break
|
||||
|
||||
# 2) fallback nested
|
||||
if config_path is None:
|
||||
for role in os.listdir(roles_dir):
|
||||
cf = os.path.join(roles_dir, role, "config" , "main.yml")
|
||||
if not os.path.exists(cf):
|
||||
continue
|
||||
try:
|
||||
dd = yaml.safe_load(open(cf)) or {}
|
||||
except Exception:
|
||||
dd = {}
|
||||
if isinstance(dd, dict) and application_id in dd:
|
||||
config_path = cf
|
||||
break
|
||||
|
||||
# 3) fallback flat
|
||||
if config_path is None:
|
||||
for role in os.listdir(roles_dir):
|
||||
cf = os.path.join(roles_dir, role, "config" , "main.yml")
|
||||
if not os.path.exists(cf):
|
||||
continue
|
||||
try:
|
||||
dd = yaml.safe_load(open(cf)) or {}
|
||||
except Exception:
|
||||
dd = {}
|
||||
# flat style: dict with all non-dict values
|
||||
if isinstance(dd, dict) and not any(isinstance(v, dict) for v in dd.values()):
|
||||
config_path = cf
|
||||
break
|
||||
|
||||
if config_path is None:
|
||||
return None
|
||||
|
||||
# parse once
|
||||
try:
|
||||
parsed = yaml.safe_load(open(config_path)) or {}
|
||||
except Exception as e:
|
||||
raise AnsibleFilterError(f"Error loading config/main.yml at {config_path}: {e}")
|
||||
|
||||
# detect nested vs flat
|
||||
is_nested = isinstance(parsed, dict) and (application_id in parsed)
|
||||
_cfg_cache[application_id] = (parsed, is_nested)
|
||||
|
||||
parsed, is_nested = _cfg_cache[application_id]
|
||||
|
||||
# pick base entry
|
||||
entry = parsed[application_id] if is_nested else parsed
|
||||
|
||||
# resolve dotted key
|
||||
key_parts = key.split('.')
|
||||
for part in key_parts:
|
||||
# Check if part has an index (e.g., domains.canonical[0])
|
||||
match = re.match(r'([^\[]+)\[([0-9]+)\]', part)
|
||||
if match:
|
||||
part, index = match.groups()
|
||||
index = int(index)
|
||||
if isinstance(entry, dict) and part in entry:
|
||||
entry = entry[part]
|
||||
# Check if entry is a list and access the index
|
||||
if isinstance(entry, list) and 0 <= index < len(entry):
|
||||
entry = entry[index]
|
||||
else:
|
||||
raise AnsibleFilterError(
|
||||
f"Index '{index}' out of range for key '{part}' in application '{application_id}'"
|
||||
)
|
||||
else:
|
||||
raise AnsibleFilterError(
|
||||
f"Key '{part}' not found under application '{application_id}'"
|
||||
)
|
||||
else:
|
||||
if isinstance(entry, dict) and part in entry:
|
||||
entry = entry[part]
|
||||
else:
|
||||
raise AnsibleFilterError(
|
||||
f"Key '{part}' not found under application '{application_id}'"
|
||||
)
|
||||
|
||||
return entry
|
||||
|
||||
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {'load_configuration': load_configuration}
|
@@ -19,7 +19,7 @@ class FilterModule(object):
|
||||
Usage in Jinja:
|
||||
{{ redirect_list
|
||||
| add_redirect_if_group('lam',
|
||||
'ldap.' ~ primary_domain,
|
||||
'ldap.' ~ PRIMARY_DOMAIN,
|
||||
domains | get_domain('web-app-lam'),
|
||||
group_names) }}
|
||||
"""
|
||||
|
@@ -1,55 +0,0 @@
|
||||
from jinja2 import Undefined
|
||||
|
||||
|
||||
def safe_placeholders(template: str, mapping: dict = None) -> str:
|
||||
"""
|
||||
Format a template like "{url}/logo.png".
|
||||
If mapping is provided (not None) and ANY placeholder is missing or maps to None/empty string, the function will raise KeyError.
|
||||
If mapping is None, missing placeholders or invalid templates return empty string.
|
||||
Numerical zero or False are considered valid values.
|
||||
Any other formatting errors return an empty string.
|
||||
"""
|
||||
# Non-string templates yield empty
|
||||
if not isinstance(template, str):
|
||||
return ''
|
||||
|
||||
class SafeDict(dict):
|
||||
def __getitem__(self, key):
|
||||
val = super().get(key, None)
|
||||
# Treat None or empty string as missing
|
||||
if val is None or (isinstance(val, str) and val == ''):
|
||||
raise KeyError(key)
|
||||
return val
|
||||
def __missing__(self, key):
|
||||
raise KeyError(key)
|
||||
|
||||
silent = mapping is None
|
||||
data = mapping or {}
|
||||
try:
|
||||
return template.format_map(SafeDict(data))
|
||||
except KeyError:
|
||||
if silent:
|
||||
return ''
|
||||
raise
|
||||
except Exception:
|
||||
return ''
|
||||
|
||||
def safe_var(value):
|
||||
"""
|
||||
Ansible filter: returns the value unchanged unless it's Undefined or None,
|
||||
in which case returns an empty string.
|
||||
Catches all exceptions and yields ''.
|
||||
"""
|
||||
try:
|
||||
if isinstance(value, Undefined) or value is None:
|
||||
return ''
|
||||
return value
|
||||
except Exception:
|
||||
return ''
|
||||
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {
|
||||
'safe_var': safe_var,
|
||||
'safe_placeholders': safe_placeholders,
|
||||
}
|
@@ -1,28 +0,0 @@
|
||||
"""
|
||||
Ansible filter plugin that joins a base string and a tail path safely.
|
||||
If the base is falsy (None, empty, etc.), returns an empty string.
|
||||
"""
|
||||
|
||||
def safe_join(base, tail):
|
||||
"""
|
||||
Safely join base and tail into a path or URL.
|
||||
|
||||
- base: the base string. If falsy, returns ''.
|
||||
- tail: the string to append. Leading/trailing slashes are handled.
|
||||
- On any exception, returns ''.
|
||||
"""
|
||||
try:
|
||||
if not base:
|
||||
return ''
|
||||
base_str = str(base).rstrip('/')
|
||||
tail_str = str(tail).lstrip('/')
|
||||
return f"{base_str}/{tail_str}"
|
||||
except Exception:
|
||||
return ''
|
||||
|
||||
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {
|
||||
'safe_join': safe_join,
|
||||
}
|
filter_plugins/timeout_start_sec_for_domains.py (new file, +67 lines)
@@ -0,0 +1,67 @@
|
||||
# filter_plugins/timeout_start_sec_for_domains.py (only the core logic changed)
|
||||
from ansible.errors import AnsibleFilterError
|
||||
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {
|
||||
"timeout_start_sec_for_domains": self.timeout_start_sec_for_domains,
|
||||
}
|
||||
|
||||
def timeout_start_sec_for_domains(
|
||||
self,
|
||||
domains_dict,
|
||||
include_www=True,
|
||||
per_domain_seconds=25,
|
||||
overhead_seconds=30,
|
||||
min_seconds=120,
|
||||
max_seconds=3600,
|
||||
):
|
||||
"""
|
||||
Args:
|
||||
domains_dict (dict | list[str] | str): Either the domain mapping dict
|
||||
(values can be str | list[str] | dict[str,str]) or an already
|
||||
flattened list of domains, or a single domain string.
|
||||
include_www (bool): If true, add 'www.<domain>' for non-www entries.
|
||||
...
|
||||
"""
|
||||
try:
|
||||
# Local flattener for dict inputs (like your generate_all_domains source)
|
||||
def _flatten_from_dict(domains_map):
|
||||
flat = []
|
||||
for v in (domains_map or {}).values():
|
||||
if isinstance(v, str):
|
||||
flat.append(v)
|
||||
elif isinstance(v, list):
|
||||
flat.extend(v)
|
||||
elif isinstance(v, dict):
|
||||
flat.extend(v.values())
|
||||
return flat
|
||||
|
||||
# Accept dict | list | str
|
||||
if isinstance(domains_dict, dict):
|
||||
flat = _flatten_from_dict(domains_dict)
|
||||
elif isinstance(domains_dict, list):
|
||||
flat = list(domains_dict)
|
||||
elif isinstance(domains_dict, str):
|
||||
flat = [domains_dict]
|
||||
else:
|
||||
raise AnsibleFilterError(
|
||||
"Expected 'domains_dict' to be dict | list | str."
|
||||
)
|
||||
|
||||
if include_www:
|
||||
base_unique = sorted(set(flat))
|
||||
www_variants = [f"www.{d}" for d in base_unique if not str(d).lower().startswith("www.")]
|
||||
flat.extend(www_variants)
|
||||
|
||||
unique_domains = sorted(set(flat))
|
||||
count = len(unique_domains)
|
||||
|
||||
raw = overhead_seconds + per_domain_seconds * count
|
||||
clamped = max(min_seconds, min(max_seconds, int(raw)))
|
||||
return clamped
|
||||
|
||||
except AnsibleFilterError:
|
||||
raise
|
||||
except Exception as exc:
|
||||
raise AnsibleFilterError(f"timeout_start_sec_for_domains failed: {exc}")
|
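A quick worked example with the default parameters (domains are illustrative):
fm = FilterModule()
domains = {
    "web-app-matomo": "matomo.example.com",
    "web-app-nextcloud": ["cloud.example.com", "nc.example.com"],
}
# 3 unique domains + 3 generated 'www.' variants = 6 -> 30 + 25 * 6 = 180 seconds (inside the 120..3600 clamp)
fm.timeout_start_sec_for_domains(domains)  # -> 180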
filter_plugins/url_join.py (new file, +146 lines)
@@ -0,0 +1,146 @@
|
||||
"""
|
||||
Ansible filter plugin that safely joins URL components from a list.
|
||||
- Requires a valid '<scheme>://' in the first element (any RFC-3986-ish scheme)
|
||||
- Preserves the double slash after the scheme, collapses other duplicate slashes
|
||||
- Supports query parts introduced by elements starting with '?' or '&'
|
||||
* first query element uses '?', subsequent use '&' (regardless of given prefix)
|
||||
* each query element must be exactly one 'key=value' pair
|
||||
* query elements may only appear after path elements; once query starts, no more path parts
|
||||
- Raises specific AnsibleFilterError messages for common misuse
|
||||
"""
|
||||
|
||||
import re
|
||||
from ansible.errors import AnsibleFilterError
|
||||
|
||||
_SCHEME_RE = re.compile(r'^([a-zA-Z][a-zA-Z0-9+.\-]*://)(.*)$')
|
||||
_QUERY_PAIR_RE = re.compile(r'^[^&=?#]+=[^&?#]*$') # key=value (no '&', no extra '?' or '#')
|
||||
|
||||
def _to_str_or_error(obj, index):
|
||||
"""Cast to str, raising a specific AnsibleFilterError with index context."""
|
||||
try:
|
||||
return str(obj)
|
||||
except Exception as e:
|
||||
raise AnsibleFilterError(
|
||||
f"url_join: unable to convert part at index {index} to string: {e}"
|
||||
)
|
||||
|
||||
def url_join(parts):
|
||||
"""
|
||||
Join a list of URL parts, URL-aware (scheme, path, query).
|
||||
|
||||
Args:
|
||||
parts (list|tuple): URL segments. First element MUST include '<scheme>://'.
|
||||
Path elements are plain strings.
|
||||
Query elements must start with '?' or '&' and contain exactly one 'key=value'.
|
||||
|
||||
Returns:
|
||||
str: Joined URL.
|
||||
|
||||
Raises:
|
||||
AnsibleFilterError: with specific, descriptive messages.
|
||||
"""
|
||||
# --- basic input validation ---
|
||||
if parts is None:
|
||||
raise AnsibleFilterError("url_join: parts must be a non-empty list; got None")
|
||||
if not isinstance(parts, (list, tuple)):
|
||||
raise AnsibleFilterError(
|
||||
f"url_join: parts must be a list/tuple; got {type(parts).__name__}"
|
||||
)
|
||||
if len(parts) == 0:
|
||||
raise AnsibleFilterError("url_join: parts must be a non-empty list")
|
||||
|
||||
# --- first element must carry a scheme ---
|
||||
first_raw = parts[0]
|
||||
if first_raw is None:
|
||||
raise AnsibleFilterError(
|
||||
"url_join: first element must include a scheme like 'https://'; got None"
|
||||
)
|
||||
|
||||
first_str = _to_str_or_error(first_raw, 0)
|
||||
m = _SCHEME_RE.match(first_str)
|
||||
if not m:
|
||||
raise AnsibleFilterError(
|
||||
"url_join: first element must start with '<scheme>://', e.g. 'https://example.com'; "
|
||||
f"got '{first_str}'"
|
||||
)
|
||||
|
||||
scheme = m.group(1) # e.g., 'https://', 'ftp://', 'myapp+v1://'
|
||||
after_scheme = m.group(2).lstrip('/') # strip only leading slashes right after scheme
|
||||
|
||||
# --- iterate parts: collect path parts until first query part; then only query parts allowed ---
|
||||
path_parts = []
|
||||
query_pairs = []
|
||||
in_query = False
|
||||
|
||||
for i, p in enumerate(parts):
|
||||
if p is None:
|
||||
# skip None silently (consistent with path_join-ish behavior)
|
||||
continue
|
||||
|
||||
s = _to_str_or_error(p, i)
|
||||
|
||||
# disallow additional scheme in later parts
|
||||
if i > 0 and "://" in s:
|
||||
raise AnsibleFilterError(
|
||||
f"url_join: only the first element may contain a scheme; part at index {i} "
|
||||
f"looks like a URL with scheme ('{s}')."
|
||||
)
|
||||
|
||||
# first element: replace with remainder after scheme and continue
|
||||
if i == 0:
|
||||
s = after_scheme
|
||||
|
||||
# check if this is a query element (starts with ? or &)
|
||||
if s.startswith('?') or s.startswith('&'):
|
||||
in_query = True
|
||||
raw_pair = s[1:] # strip the leading ? or &
|
||||
if raw_pair == '':
|
||||
raise AnsibleFilterError(
|
||||
f"url_join: query element at index {i} is empty; expected '?key=value' or '&key=value'"
|
||||
)
|
||||
# Disallow multiple pairs in a single element; enforce exactly one key=value
|
||||
if '&' in raw_pair:
|
||||
raise AnsibleFilterError(
|
||||
f"url_join: query element at index {i} must contain exactly one 'key=value' pair "
|
||||
f"without '&'; got '{s}'"
|
||||
)
|
||||
if not _QUERY_PAIR_RE.match(raw_pair):
|
||||
raise AnsibleFilterError(
|
||||
f"url_join: query element at index {i} must match 'key=value' (no extra '?', '&', '#'); got '{s}'"
|
||||
)
|
||||
query_pairs.append(raw_pair)
|
||||
else:
|
||||
# non-query element
|
||||
if in_query:
|
||||
# once query started, no more path parts allowed
|
||||
raise AnsibleFilterError(
|
||||
f"url_join: path element found at index {i} after query parameters started; "
|
||||
f"query parts must come last"
|
||||
)
|
||||
# normal path part: strip slashes to avoid duplicate '/'
|
||||
path_parts.append(s.strip('/'))
|
||||
|
||||
# normalize path: remove empty chunks
|
||||
path_parts = [p for p in path_parts if p != '']
|
||||
|
||||
# --- build result ---
|
||||
# path portion
|
||||
if path_parts:
|
||||
joined_path = "/".join(path_parts)
|
||||
base = scheme + joined_path
|
||||
else:
|
||||
# no path beyond scheme
|
||||
base = scheme
|
||||
|
||||
# query portion
|
||||
if query_pairs:
|
||||
base = base + "?" + "&".join(query_pairs)
|
||||
|
||||
return base
|
||||
|
||||
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {
|
||||
'url_join': url_join,
|
||||
}
|
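Example of the path and query handling described above:
url_join(["https://example.com/", "api", "v1/", "?page=1", "&limit=50"])
# -> 'https://example.com/api/v1?page=1&limit=50'

url_join(["ftp://host", "pub", "file.txt"])  # any RFC-3986-style scheme is accepted -> 'ftp://host/pub/file.txt'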
filter_plugins/volume_path.py (new file, +21 lines)
@@ -0,0 +1,21 @@
|
||||
from ansible.errors import AnsibleFilterError
|
||||
|
||||
def docker_volume_path(volume_name: str) -> str:
|
||||
"""
|
||||
Returns the absolute filesystem path of a Docker volume.
|
||||
|
||||
Example:
|
||||
"akaunting_data" -> "/var/lib/docker/volumes/akaunting_data/_data/"
|
||||
"""
|
||||
if not volume_name or not isinstance(volume_name, str):
|
||||
raise AnsibleFilterError(f"Invalid volume name: {volume_name}")
|
||||
|
||||
return f"/var/lib/docker/volumes/{volume_name}/_data/"
|
||||
|
||||
class FilterModule(object):
|
||||
"""Docker volume path filters."""
|
||||
|
||||
def filters(self):
|
||||
return {
|
||||
"docker_volume_path": docker_volume_path,
|
||||
}
|
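Example, e.g. for locating a file inside a named volume (the volume names are illustrative):
docker_volume_path("akaunting_data")
# -> '/var/lib/docker/volumes/akaunting_data/_data/'
docker_volume_path("nextcloud_data") + "config/config.php"
# -> '/var/lib/docker/volumes/nextcloud_data/_data/config/config.php'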
@@ -1,4 +1,13 @@
|
||||
INFINITO_ENVIRONMENT: "production"
|
||||
SOFTWARE_NAME: "Infinito.Nexus" # Name of the software
|
||||
|
||||
# Deployment
|
||||
ENVIRONMENT: "production" # Possible values: production, development
|
||||
DEPLOYMENT_MODE: "single" # Use single if you deploy on one server. Use cluster if you set up in cluster mode.
|
||||
|
||||
# If true, sensitive credentials will be masked or hidden from all Ansible task logs
|
||||
# Recommended to set to true
|
||||
# @todo needs to be implemented everywhere
|
||||
MASK_CREDENTIALS_IN_LOGS: true
|
||||
|
||||
HOST_CURRENCY: "EUR"
|
||||
HOST_TIMEZONE: "UTC"
|
||||
@@ -13,53 +22,41 @@ HOST_TIME_FORMAT: "HH:mm"
|
||||
HOST_THOUSAND_SEPARATOR: "."
|
||||
HOST_DECIMAL_MARK: ","
|
||||
|
||||
# Deployment mode
|
||||
deployment_mode: "single" # Use single, if you deploy on one server. Use cluster if you setup in cluster mode.
|
||||
|
||||
# Web
|
||||
WEB_PROTOCOL: "https" # Web protocol type. Use https or http. If you run locally you need to change it to http
|
||||
WEB_PORT: "{{ 443 if WEB_PROTOCOL == 'https' else 80 }}" # Default port web applications will listen to
|
||||
|
||||
## Domain
|
||||
primary_domain_tld: "localhost" # Top Level Domain of the server
|
||||
primary_domain_sld: "infinito" # Second Level Domain of the server
|
||||
primary_domain: "{{primary_domain_sld}}.{{primary_domain_tld}}" # Primary Domain of the server
|
||||
# Websocket
|
||||
WEBSOCKET_PROTOCOL: "{{ 'wss' if WEB_PROTOCOL == 'https' else 'ws' }}"
|
||||
|
||||
# Server Tact Variables
|
||||
# Domain
|
||||
PRIMARY_DOMAIN: "localhost" # Primary Domain of the server
|
||||
|
||||
## Hours in which the server is "awake" (100% working). The rest of the time is reserved for maintenance
|
||||
hours_server_awake: "0..23"
|
||||
DNS_PROVIDER: cloudflare # The DNS provider/registrar for the domain
|
||||
|
||||
## Random delay for systemd timers to avoid peak loads.
|
||||
randomized_delay_sec: "5min"
|
||||
|
||||
# Runtime Variables for Process Control
|
||||
activate_all_timers: false # Activates all timers, independent of whether the handlers have been triggered
|
||||
|
||||
# This enables debugging in ansible and in the apps
|
||||
# You SHOULD NOT enable this on production servers
|
||||
enable_debug: false
|
||||
|
||||
dns_provider: cloudflare # The DNS Provider\Registrar for the domain
|
||||
HOSTING_PROVIDER: hetzner # Provider which hosts the server
|
||||
|
||||
# Which ACME method to use: webroot, cloudflare, or hetzner
|
||||
certbot_acme_challenge_method: "cloudflare"
|
||||
certbot_credentials_dir: /etc/certbot
|
||||
certbot_credentials_file: "{{ certbot_credentials_dir }}/{{ certbot_acme_challenge_method }}.ini"
|
||||
certbot_dns_api_token: "" # Define in inventory file: More information here: group_vars/all/docs/CLOUDFLARE_API_TOKEN.md
|
||||
certbot_dns_propagation_wait_seconds: 40 # How long should the script wait for DNS propagation before continuing
|
||||
certbot_flavor: san # Possible options: san (recommended, with a dns flavor like cloudflare, or hetzner), wildcard(doesn't function with www redirect), dedicated
|
||||
CERTBOT_ACME_CHALLENGE_METHOD: "cloudflare"
|
||||
CERTBOT_CREDENTIALS_DIR: /etc/certbot
|
||||
CERTBOT_CREDENTIALS_FILE: "{{ CERTBOT_CREDENTIALS_DIR }}/{{ CERTBOT_ACME_CHALLENGE_METHOD }}.ini"
|
||||
CERTBOT_DNS_PROPAGATION_WAIT_SECONDS: 300 # How long should the script wait for DNS propagation before continuing
|
||||
CERTBOT_FLAVOR: san # Possible options: san (recommended, with a DNS flavor like cloudflare or hetzner), wildcard (doesn't work with the www redirect), dedicated
|
||||
|
||||
# Path where Certbot stores challenge webroot files
|
||||
letsencrypt_webroot_path: "/var/lib/letsencrypt/"
|
||||
# Letsencrypt
|
||||
LETSENCRYPT_WEBROOT_PATH: "/var/lib/letsencrypt/" # Path where Certbot stores challenge webroot files
|
||||
LETSENCRYPT_BASE_PATH: "/etc/letsencrypt/" # Base directory containing Certbot configuration, account data, and archives
|
||||
LETSENCRYPT_LIVE_PATH: "{{ LETSENCRYPT_BASE_PATH }}live/" # Symlink directory for the current active certificate and private key
|
||||
|
||||
# Base directory containing Certbot configuration, account data, and archives
|
||||
letsencrypt_base_path: "/etc/letsencrypt/"
|
||||
## Docker
|
||||
DOCKER_RESTART_POLICY: "unless-stopped" # Default restart parameter for docker containers
|
||||
DOCKER_VARS_FILE: "{{ playbook_dir }}/roles/docker-compose/vars/docker-compose.yml" # File containing docker compose variables used by other services
|
||||
DOCKER_WHITELISTET_ANON_VOLUMES: [] # Volumes which should be ignored during docker anonymous health check
|
||||
|
||||
# Symlink directory for the current active certificate and private key
|
||||
letsencrypt_live_path: "{{ letsencrypt_base_path }}live/"
|
||||
|
||||
## Docker Role Specific Parameters
|
||||
docker_restart_policy: "unless-stopped"
|
||||
# Async Configuration
|
||||
ASYNC_ENABLED: "{{ not MODE_DEBUG | bool }}" # Activate async, deactivated for debugging
|
||||
ASYNC_TIME: "{{ 300 if ASYNC_ENABLED | bool else omit }}" # Run for max 5 min
|
||||
ASYNC_POLL: "{{ 0 if ASYNC_ENABLED | bool else 10 }}" # Don't wait for task
|
||||
|
||||
# default value if not set via CLI (-e) or in playbook vars
|
||||
allowed_applications: []
|
||||
@@ -77,4 +74,11 @@ _applications_nextcloud_oidc_flavor: >-
|
||||
| get_app_conf('web-app-nextcloud','features.ldap',False, True)
|
||||
else 'sociallogin'
|
||||
)
|
||||
}}
|
||||
}}
|
||||
|
||||
# Role-based access control
|
||||
# @See https://en.wikipedia.org/wiki/Role-based_access_control
|
||||
RBAC:
|
||||
GROUP:
|
||||
NAME: "/roles" # Name of the group which holds the RBAC roles
|
||||
CLAIM: "groups" # Name of the claim containing the RBAC groups
|
@@ -1,8 +1,10 @@
|
||||
# Mode
|
||||
|
||||
# The following modes can be combined with each other
|
||||
mode_reset: false # Cleans up all Infinito.Nexus files. It's necessary to run the whole playbook and not partial roles when using this function.
|
||||
mode_test: false # Executes test routines instead of productive routines
|
||||
mode_update: true # Executes updates
|
||||
mode_backup: true # Activates the backup before the update procedure
|
||||
mode_cleanup: true # Cleanup unused files and configurations
|
||||
MODE_TEST: false # Executes test routines instead of productive routines
|
||||
MODE_UPDATE: true # Executes updates
|
||||
MODE_DEBUG: false # This enables debugging in ansible and in the apps, You SHOULD NOT enable this on production servers
|
||||
MODE_RESET: false # Cleans up all Infinito.Nexus files. It's necessary to run the whole playbook and not partial roles when using this function.
|
||||
MODE_BACKUP: "{{ MODE_UPDATE }}" # Activates the backup before the update procedure
|
||||
MODE_CLEANUP: "{{ MODE_DEBUG }}" # Cleanup unused files and configurations
|
||||
MODE_ASSERT: "{{ MODE_DEBUG }}" # Executes validation tasks during the run.
|
||||
|
group_vars/all/02_email.yml (new file, +8 lines)
@@ -0,0 +1,8 @@
|
||||
# Email Configuration
|
||||
DEFAULT_SYSTEM_EMAIL:
|
||||
DOMAIN: "{{ PRIMARY_DOMAIN }}"
|
||||
HOST: "mail.{{ PRIMARY_DOMAIN }}"
|
||||
PORT: 465
|
||||
TLS: true # true for TLS and false for SSL
|
||||
START_TLS: false
|
||||
SMTP: true
|
@@ -1,9 +0,0 @@
|
||||
# Email Configuration
|
||||
default_system_email:
|
||||
domain: "{{primary_domain}}"
|
||||
host: "mail.{{primary_domain}}"
|
||||
port: 465
|
||||
tls: true # true for TLS and false for SSL
|
||||
start_tls: false
|
||||
smtp: true
|
||||
# password: # Needs to be defined in inventory file
|
@@ -1,38 +0,0 @@
|
||||
|
||||
# System maintenance Services
|
||||
|
||||
## Timeouts to wait for other services to stop
|
||||
system_maintenance_lock_timeout_cleanup_services: "15min"
|
||||
system_maintenance_lock_timeout_storage_optimizer: "10min"
|
||||
system_maintenance_lock_timeout_backup_services: "1h"
|
||||
system_maintenance_lock_timeout_heal_docker: "30min"
|
||||
system_maintenance_lock_timeout_update_docker: "2min"
|
||||
system_maintenance_lock_timeout_restart_docker: "{{system_maintenance_lock_timeout_update_docker}}"
|
||||
|
||||
## Services
|
||||
|
||||
### Defined Services for Backup Tasks
|
||||
system_maintenance_backup_services:
|
||||
- "sys-bkp-docker-2-loc"
|
||||
- "svc-bkp-rmt-2-loc"
|
||||
- "svc-bkp-loc-2-usb"
|
||||
- "sys-bkp-docker-2-loc-everything"
|
||||
|
||||
### Defined Services for System Cleanup
|
||||
system_maintenance_cleanup_services:
|
||||
- "sys-cln-backups"
|
||||
- "sys-cln-disc-space"
|
||||
- "sys-cln-faild-bkps"
|
||||
|
||||
### Services that Manipulate the System
|
||||
system_maintenance_manipulation_services:
|
||||
- "sys-rpr-docker-soft"
|
||||
- "update-docker"
|
||||
- "svc-opt-ssd-hdd"
|
||||
- "sys-rpr-docker-hard"
|
||||
|
||||
## Total System Maintenance Services
|
||||
system_maintenance_services: "{{ system_maintenance_backup_services + system_maintenance_cleanup_services + system_maintenance_manipulation_services }}"
|
||||
|
||||
### Define Variables for Docker Volume Health services
|
||||
whitelisted_anonymous_docker_volumes: []
|
group_vars/all/05_nginx.yml (new file, +32 lines)
@@ -0,0 +1,32 @@
|
||||
# Webserver Configuration
|
||||
|
||||
# Helper
|
||||
_nginx_www_dir: "{{ applications | get_app_conf('svc-prx-openresty','docker.volumes.www') }}"
|
||||
_nginx_dir: "{{ applications | get_app_conf('svc-prx-openresty','docker.volumes.nginx') }}"
|
||||
_nginx_conf_dir: "{{ _nginx_dir }}conf.d/"
|
||||
_nginx_http_dir: "{{ _nginx_conf_dir }}http/"
|
||||
|
||||
## Nginx-Specific Path Configurations
|
||||
NGINX:
|
||||
FILES:
|
||||
CONFIGURATION: "{{ _nginx_dir }}nginx.conf"
|
||||
DIRECTORIES:
|
||||
CONFIGURATION: "{{ _nginx_conf_dir }}" # Configuration directory
|
||||
HTTP:
|
||||
GLOBAL: "{{ _nginx_http_dir }}global/" # Contains global configurations which will be loaded into the http block
|
||||
SERVERS: "{{ _nginx_http_dir }}servers/" # Contains one configuration per domain
|
||||
MAPS: "{{ _nginx_http_dir }}maps/" # Contains mappings
|
||||
STREAMS: "{{ _nginx_conf_dir }}streams/" # Contains streams configuration e.g. for ldaps
|
||||
DATA:
|
||||
WWW: "{{ _nginx_www_dir }}"
|
||||
WELL_KNOWN: "/usr/share/nginx/well-known/" # Path where well-known files are stored
|
||||
HTML: "{{ _nginx_www_dir }}public_html/" # Path where the static homepage files are stored
|
||||
FILES: "{{ _nginx_www_dir }}public_files/" # Path where the web accessable files are stored
|
||||
CDN: "{{ _nginx_www_dir }}public_cdn/" # Contains files which will be accessable via the content delivery network
|
||||
GLOBAL: "{{ _nginx_www_dir }}global/" # Directory containing files which will be globaly accessable, @Todo remove this when css migrated to CDN
|
||||
CACHE:
|
||||
GENERAL: "/tmp/cache_nginx_general/" # Directory which nginx uses to cache general data
|
||||
IMAGE: "/tmp/cache_nginx_image/" # Directory which nginx uses to cache images
|
||||
USER: "http" # Default nginx user in ArchLinux
|
||||
|
||||
# @todo It propably makes sense to distinguish between target and source mount path, so that the config files can be stored in the openresty volumes folder
|
@@ -1,26 +0,0 @@
# Webserver Configuration

# Helper
_nginx_www_dir: /var/www/

## Nginx-Specific Path Configurations
nginx:
  files:
    configuration: "/etc/nginx/nginx.conf"
  directories:
    configuration: "/etc/nginx/conf.d/"                 # Configuration directory
    http:
      global:      "/etc/nginx/conf.d/http/global/"     # Contains global configurations which will be loaded into the http block
      servers:     "/etc/nginx/conf.d/http/servers/"    # Contains one configuration per domain
      maps:        "/etc/nginx/conf.d/http/maps/"       # Contains mappings
    streams:       "/etc/nginx/conf.d/streams/"         # Contains streams configuration e.g. for ldaps
    data:
      www:         "{{ _nginx_www_dir }}"
      well_known:  "/usr/share/nginx/well-known/"       # Path where well-known files are stored
      html:        "{{ _nginx_www_dir }}public_html/"   # Path where the static homepage files are stored
      files:       "{{ _nginx_www_dir }}public_files/"  # Path where the web accessible files are stored
      cdn:         "{{ _nginx_www_dir }}public_cdn/"    # Contains files which will be accessible via the content delivery network
      global:      "{{ _nginx_www_dir }}global/"        # Directory containing files which will be globally accessible
    cache:
      general:     "/tmp/cache_nginx_general/"          # Directory which nginx uses to cache general data
      image:       "/tmp/cache_nginx_image/"            # Directory which nginx uses to cache images
  user:            "http"                               # Default nginx user in ArchLinux
9
group_vars/all/06_paths.yml
Normal file
@@ -0,0 +1,9 @@

# Path Variables for Key Directories and Scripts
PATH_ADMINISTRATOR_HOME:            "/home/administrator/"
PATH_ADMINISTRATOR_SCRIPTS:         "/opt/scripts/"
PATH_SYSTEMCTL_SCRIPTS:             "{{ [ PATH_ADMINISTRATOR_SCRIPTS, 'systemctl' ] | path_join }}"
PATH_DOCKER_COMPOSE_INSTANCES:      "/opt/docker/"
PATH_SYSTEM_LOCK_SCRIPT:            "/opt/scripts/sys-lock.py"
PATH_SYSTEM_SERVICE_DIR:            "/etc/systemd/system"
PATH_DOCKER_COMPOSE_PULL_LOCK_DIR:  "/run/ansible/compose-pull/"
@@ -1,6 +0,0 @@

# Path Variables for Key Directories and Scripts
path_administrator_home:        "/home/administrator/"
path_administrator_scripts:     "/opt/scripts/"
path_docker_compose_instances:  "/opt/docker/"
path_system_lock_script:        "/opt/scripts/sys-lock.py"
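PATH_SYSTEMCTL_SCRIPTS above is built with Ansible's path_join filter. The Python sketch below shows the equivalent join for the defaults in this file; it is only an illustration, not project code.

import os

PATH_ADMINISTRATOR_SCRIPTS = "/opt/scripts/"
PATH_SYSTEMCTL_SCRIPTS = os.path.join(PATH_ADMINISTRATOR_SCRIPTS, "systemctl")

print(PATH_SYSTEMCTL_SCRIPTS)  # -> /opt/scripts/systemctl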
51
group_vars/all/07_services.yml
Normal file
@@ -0,0 +1,51 @@

# Services

## Meta
SYS_SERVICE_SUFFIX: ".{{ SOFTWARE_NAME | lower }}.service"

## Names
SYS_SERVICE_CLEANUP_BACKUPS_FAILED:    "{{ 'sys-ctl-cln-faild-bkps' | get_service_name(SOFTWARE_NAME) }}"
SYS_SERVICE_CLEANUP_ANONYMOUS_VOLUMES: "{{ 'sys-ctl-cln-anon-volumes' | get_service_name(SOFTWARE_NAME) }}"
SYS_SERVICE_OPTIMIZE_DRIVE:            "{{ 'svc-opt-ssd-hdd' | get_service_name(SOFTWARE_NAME) }}"
SYS_SERVICE_BACKUP_RMT_2_LOC:          "{{ 'svc-bkp-rmt-2-loc' | get_service_name(SOFTWARE_NAME) }}"
SYS_SERVICE_BACKUP_DOCKER_2_LOC:       "{{ 'sys-ctl-bkp-docker-2-loc' | get_service_name(SOFTWARE_NAME) }}"
SYS_SERVICE_REPAIR_DOCKER_SOFT:        "{{ 'sys-ctl-rpr-docker-soft' | get_service_name(SOFTWARE_NAME) }}"
SYS_SERVICE_REPAIR_DOCKER_HARD:        "{{ 'sys-ctl-rpr-docker-hard' | get_service_name(SOFTWARE_NAME) }}"

## On Failure
SYS_SERVICE_ON_FAILURE_COMPOSE: "{{ ('sys-ctl-alm-compose@') | get_service_name(SOFTWARE_NAME, False) }}%n.service"

## Groups
SYS_SERVICE_GROUP_BACKUPS: >
  {{ (('sys-ctl-bkp-' | get_category_entries) + ('svc-bkp-' | get_category_entries))
     | map('regex_replace', '$', SYS_SERVICE_SUFFIX) | list | sort }}

SYS_SERVICE_GROUP_CLEANUP: >
  {{ ('sys-ctl-cln-' | get_category_entries)
     | map('regex_replace', '$', SYS_SERVICE_SUFFIX) | list | sort }}

SYS_SERVICE_GROUP_REPAIR: >
  {{ ('sys-ctl-rpr-' | get_category_entries)
     | map('regex_replace', '$', SYS_SERVICE_SUFFIX) | list | sort }}

SYS_SERVICE_GROUP_OPTIMIZATION: >
  {{ ('svc-opt-' | get_category_entries)
     | map('regex_replace', '$', SYS_SERVICE_SUFFIX) | list | sort }}

SYS_SERVICE_GROUP_MAINTANANCE: >
  {{ ('svc-mtn-' | get_category_entries)
     | map('regex_replace', '$', SYS_SERVICE_SUFFIX) | list | sort }}

## Collection of services to manipulate the system
SYS_SERVICE_GROUP_MANIPULATION: >
  {{
    (
      SYS_SERVICE_GROUP_BACKUPS +
      SYS_SERVICE_GROUP_CLEANUP +
      SYS_SERVICE_GROUP_REPAIR +
      SYS_SERVICE_GROUP_OPTIMIZATION +
      SYS_SERVICE_GROUP_MAINTANANCE
    ) | sort
  }}
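The SYS_SERVICE_GROUP_* definitions above rely on the project-specific get_category_entries filter (not shown here) and then append SYS_SERVICE_SUFFIX to every entry before sorting. A small Python sketch of that append-and-sort step, with a made-up entry list, looks like this:

import re

SYS_SERVICE_SUFFIX = ".infinito.service"  # ".{{ SOFTWARE_NAME | lower }}.service"
entries = ["sys-ctl-bkp-docker-2-loc", "svc-bkp-rmt-2-loc"]  # stand-in for get_category_entries output

# regex_replace('$', SYS_SERVICE_SUFFIX) appends the suffix at the end of each name
group = sorted(re.sub(r"$", SYS_SERVICE_SUFFIX, e) for e in entries)
print(group)
# -> ['svc-bkp-rmt-2-loc.infinito.service', 'sys-ctl-bkp-docker-2-loc.infinito.service']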
@@ -1,29 +0,0 @@

## Schedule for Health Checks
on_calendar_health_btrfs:                     "*-*-* 00:00:00"                        # Check once per day the btrfs for errors
on_calendar_health_journalctl:                "*-*-* 00:00:00"                        # Check once per day the journalctl for errors
on_calendar_health_disc_space:                "*-*-* 06,12,18,00:00:00"               # Check four times per day if there is sufficient disc space
on_calendar_health_docker_container:          "*-*-* {{ hours_server_awake }}:00:00"  # Check once per hour if the docker containers are healthy
on_calendar_health_docker_volumes:            "*-*-* {{ hours_server_awake }}:15:00"  # Check once per hour if the docker volumes are healthy
on_calendar_health_csp_crawler:               "*-*-* {{ hours_server_awake }}:30:00"  # Check once per hour if all CSP are fulfilled
on_calendar_health_nginx:                     "*-*-* {{ hours_server_awake }}:45:00"  # Check once per hour if all webservices are available
on_calendar_health_msmtp:                     "*-*-* 00:00:00"                        # Check once per day the SMTP server

## Schedule for Cleanup Tasks
on_calendar_cleanup_backups:                  "*-*-* 00,06,12,18:30:00"               # Cleanup backups every 6 hours, MUST be called before disc space cleanup
on_calendar_cleanup_disc_space:               "*-*-* 07,13,19,01:30:00"               # Cleanup disc space every 6 hours
on_calendar_cleanup_certs:                    "*-*-* 12,00:45:00"                     # Deletes and revokes unused certs

## Schedule for Backup Tasks
on_calendar_backup_docker_to_local:           "*-*-* 03:30:00"
on_calendar_backup_remote_to_local:           "*-*-* 21:30:00"

## Schedule for Maintenance Tasks
on_calendar_heal_docker:                      "*-*-* {{ hours_server_awake }}:30:00"  # Heal unhealthy docker instances once per hour
on_calendar_renew_lets_encrypt_certificates:  "*-*-* 12,00:30:00"                     # Renew Mailu certificates twice per day
on_calendar_deploy_certificates:              "*-*-* 13,01:30:00"                     # Deploy letsencrypt certificates twice per day to docker containers
on_calendar_msi_keyboard_color:               "*-*-* *:*:00"                          # Change the keyboard color every minute
on_calendar_cleanup_failed_docker:            "*-*-* 12:00:00"                        # Clean up failed docker backups every noon
on_calendar_btrfs_auto_balancer:              "Sat *-*-01..07 00:00:00"               # Execute btrfs auto balancer every first Saturday of a month
on_calendar_restart_docker:                   "Sun *-*-* 08:00:00"                    # Restart docker instances every Sunday at 8:00 AM
on_calendar_nextcloud:                        "22"                                    # Do nextcloud maintenance between 22:00 and 02:00
52
group_vars/all/08_schedule.yml
Normal file
@@ -0,0 +1,52 @@

# Service Timers

## Meta
SYS_TIMER_ALL_ENABLED: "{{ MODE_DEBUG }}"   # Runtime Variables for Process Control - Activates all timers, independent of whether the handlers have been triggered

## Server Tact Variables

HOURS_SERVER_AWAKE:    "0..23"   # Hours in which the server is "awake" (100% working). The rest of the time is reserved for maintenance
RANDOMIZED_DELAY_SEC:  "5min"    # Random delay for systemd timers to avoid peak loads.

## Timeouts for all services
SYS_TIMEOUT_DOCKER_RPR_HARD:    "10min"
SYS_TIMEOUT_DOCKER_RPR_SOFT:    "{{ SYS_TIMEOUT_DOCKER_RPR_HARD }}"
SYS_TIMEOUT_CLEANUP_SERVICES:   "15min"
SYS_TIMEOUT_DOCKER_UPDATE:      "20min"
SYS_TIMEOUT_STORAGE_OPTIMIZER:  "{{ SYS_TIMEOUT_DOCKER_UPDATE }}"
SYS_TIMEOUT_BACKUP_SERVICES:    "60min"

## On Calendar

### Schedule for health checks
SYS_SCHEDULE_HEALTH_BTRFS:               "*-*-* 00:00:00"                        # Check once per day the btrfs for errors
SYS_SCHEDULE_HEALTH_JOURNALCTL:          "*-*-* 00:00:00"                        # Check once per day the journalctl for errors
SYS_SCHEDULE_HEALTH_DISC_SPACE:          "*-*-* 06,12,18,00:00:00"               # Check four times per day if there is sufficient disc space
SYS_SCHEDULE_HEALTH_DOCKER_CONTAINER:    "*-*-* {{ HOURS_SERVER_AWAKE }}:00:00"  # Check once per hour if the docker containers are healthy
SYS_SCHEDULE_HEALTH_DOCKER_VOLUMES:      "*-*-* {{ HOURS_SERVER_AWAKE }}:15:00"  # Check once per hour if the docker volumes are healthy
SYS_SCHEDULE_HEALTH_CSP_CRAWLER:         "*-*-* {{ HOURS_SERVER_AWAKE }}:30:00"  # Check once per hour if all CSP are fulfilled
SYS_SCHEDULE_HEALTH_NGINX:               "*-*-* {{ HOURS_SERVER_AWAKE }}:45:00"  # Check once per hour if all webservices are available
SYS_SCHEDULE_HEALTH_MSMTP:               "*-*-* 00:00:00"                        # Check once per day the SMTP server

### Schedule for cleanup tasks
SYS_SCHEDULE_CLEANUP_BACKUPS:            "*-*-* 00,06,12,18:30:00"               # Cleanup backups every 6 hours, MUST be called before disc space cleanup
SYS_SCHEDULE_CLEANUP_DISC_SPACE:         "*-*-* 07,13,19,01:30:00"               # Cleanup disc space every 6 hours
SYS_SCHEDULE_CLEANUP_CERTS:              "*-*-* 12,00:45:00"                     # Deletes and revokes unused certs
SYS_SCHEDULE_CLEANUP_FAILED_BACKUPS:     "*-*-* 12:00:00"                        # Clean up failed docker backups every noon

### Schedule for repair services
SYS_SCHEDULE_REPAIR_BTRFS_AUTO_BALANCER: "Sat *-*-01..07 00:00:00"               # Execute btrfs auto balancer every first Saturday of a month
SYS_SCHEDULE_REPAIR_DOCKER_HARD:         "Sun *-*-* 08:00:00"                    # Restart docker instances every Sunday at 8:00 AM

### Schedule for backup tasks
SYS_SCHEDULE_BACKUP_DOCKER_TO_LOCAL:     "*-*-* 03:30:00"
SYS_SCHEDULE_BACKUP_REMOTE_TO_LOCAL:     "*-*-* 21:30:00"

### Schedule for Maintenance Tasks
SYS_SCHEDULE_MAINTANANCE_LETSENCRYPT_RENEW:   "*-*-* 12,00:30:00"                # Renew Mailu certificates twice per day
SYS_SCHEDULE_MAINTANANCE_LETSENCRYPT_DEPLOY:  "*-*-* 13,01:30:00"                # Deploy letsencrypt certificates twice per day to docker containers
SYS_SCHEDULE_MAINTANANCE_NEXTCLOUD:           "22"                               # Do nextcloud maintenance between 22:00 and 02:00

### Animation
SYS_SCHEDULE_ANIMATION_KEYBOARD_COLOR:   "*-*-* *:*:00"                          # Change the keyboard color every minute
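The SYS_SCHEDULE_* values are systemd OnCalendar expressions. One way to sanity-check an expression locally is `systemd-analyze calendar`; the small Python wrapper below is only a convenience sketch and assumes a host with systemd installed, it is not part of the playbook.

import subprocess

expr = "Sat *-*-01..07 00:00:00"  # SYS_SCHEDULE_REPAIR_BTRFS_AUTO_BALANCER
result = subprocess.run(
    ["systemd-analyze", "calendar", expr],  # prints normalized form and next elapse time
    capture_output=True, text=True, check=True,
)
print(result.stdout)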
@@ -10,7 +10,7 @@ defaults_networks:
    # /28 Networks, 14 Usable IP Addresses
    web-app-akaunting:
      subnet: 192.168.101.0/28
    web-app-attendize:
    web-app-confluence:
      subnet: 192.168.101.16/28
    web-app-baserow:
      subnet: 192.168.101.32/28
@@ -34,8 +34,8 @@ defaults_networks:
      subnet: 192.168.101.176/28
    web-app-listmonk:
      subnet: 192.168.101.192/28
    # Free:
    #   subnet: 192.168.101.208/28
    web-app-jira:
      subnet: 192.168.101.208/28
    web-app-matomo:
      subnet: 192.168.101.224/28
    web-app-mastodon:
@@ -44,11 +44,11 @@ defaults_networks:
      subnet: 192.168.102.0/28
    web-app-mailu:
      # Use one of the last container IPs for DNS resolving so that it isn't used
      dns: 192.168.102.29
      subnet: 192.168.102.16/28
      dns_resolver: 192.168.102.29
      subnet: 192.168.102.16/28
    web-app-moodle:
      subnet: 192.168.102.32/28
    web-app-mybb:
    web-app-bookwyrm:
      subnet: 192.168.102.48/28
    web-app-nextcloud:
      subnet: 192.168.102.64/28
@@ -84,11 +84,11 @@ defaults_networks:
      subnet: 192.168.103.64/28
    web-app-syncope:
      subnet: 192.168.103.80/28
    web-app-collabora:
    web-svc-collabora:
      subnet: 192.168.103.96/28
    web-svc-simpleicons:
      subnet: 192.168.103.112/28
    web-app-libretranslate:
    web-svc-libretranslate:
      subnet: 192.168.103.128/28
    web-app-pretix:
      subnet: 192.168.103.144/28
@@ -96,6 +96,12 @@ defaults_networks:
      subnet: 192.168.103.160/28
    web-svc-logout:
      subnet: 192.168.103.176/28
    web-app-chess:
      subnet: 192.168.103.192/28
    web-app-magento:
      subnet: 192.168.103.208/28
    web-app-bridgy-fed:
      subnet: 192.168.103.224/28

    # /24 Networks / 254 Usable Clients
    web-app-bigbluebutton:
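The "14 usable IP addresses" comment for the /28 networks above can be verified with Python's ipaddress module:

import ipaddress

net = ipaddress.ip_network("192.168.101.0/28")
print(net.num_addresses)        # 16 addresses in total
print(len(list(net.hosts())))   # 14 usable hosts (network and broadcast excluded)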
@@ -2,12 +2,12 @@ ports:
  # Ports which are exposed to localhost
  localhost:
    database:
      svc-db-postgres:         5432
      svc-db-mariadb:          3306
      svc-db-postgres:         5432
      svc-db-mariadb:          3306
    # https://developer.mozilla.org/de/docs/Web/API/WebSockets_API
    websocket:
      web-app-mastodon:        4001
      web-app-espocrm:         4002
      web-app-mastodon:        4001
      web-app-espocrm:         4002
    oauth2_proxy:
      web-app-phpmyadmin:      4181
      web-app-lam:             4182
@@ -26,7 +26,7 @@ ports:
      web-app-gitea:           8002
      web-app-wordpress:       8003
      web-app-mediawiki:       8004
      web-app-mybb:            8005
      web-app-confluence:      8005
      web-app-yourls:          8006
      web-app-mailu:           8007
      web-app-elk:             8008
@@ -36,7 +36,7 @@ ports:
      web-app-funkwhale:       8012
      web-app-roulette-wheel:  8013
      web-app-joomla:          8014
      web-app-attendize:       8015
      web-app-jira:            8015
      web-app-pgadmin:         8016
      web-app-baserow:         8017
      web-app-matomo:          8018
@@ -50,7 +50,7 @@ ports:
      web-app-moodle:          8026
      web-app-taiga:           8027
      web-app-friendica:       8028
      web-app-port-ui:         8029
      web-app-desktop:         8029
      web-app-bluesky_api:     8030
      web-app-bluesky_web:     8031
      web-app-keycloak:        8032
@@ -63,13 +63,18 @@ ports:
      web-app-navigator:       8039
      web-app-espocrm:         8040
      web-app-syncope:         8041
      web-app-collabora:       8042
      web-svc-collabora:       8042
      web-app-mobilizon:       8043
      web-svc-simpleicons:     8044
      web-app-libretranslate:  8045
      web-svc-libretranslate:  8045
      web-app-pretix:          8046
      web-app-mig:             8047
      web-svc-logout:          8048
      web-app-bookwyrm:        8049
      web-app-chess:           8050
      web-app-bluesky_view:    8051
      web-app-magento:         8052
      web-app-bridgy-fed:      8053
      web-app-bigbluebutton:   48087   # This port is predefined by bbb. @todo Try to change this to a 8XXX port
  public:
    # The following ports should be changed to 22 on the subdomain via stream mapping
@@ -80,9 +85,10 @@ ports:
      svc-db-openldap:         636
    stun:
      web-app-bigbluebutton:   3478    # Not sure if it belongs here or should be moved to the localhost section
      web-app-nextcloud:       3479
      # Occupied by BBB: 3479
      web-app-nextcloud:       3480
    turn:
      web-app-bigbluebutton:   5349    # Not sure if it belongs here or should be moved to the localhost section
      web-app-nextcloud:       5350    # Not used yet
      web-app-nextcloud:       5350    # Not used yet
    federation:
      web-app-matrix_synapse:  8448
@@ -7,38 +7,43 @@
#############################################
# @see https://en.wikipedia.org/wiki/OpenID_Connect

## Helper Variables:
_oidc_client_realm:      "{{ oidc.client.realm if oidc.client is defined and oidc.client.realm is defined else primary_domain }}"
_oidc_url:               "{{
                            (oidc.url
                             if (oidc is defined and oidc.url is defined)
                             else WEB_PROTOCOL ~ '://' ~ (domains | get_domain('web-app-keycloak'))
                            )
                          }}"
_oidc_client_issuer_url: "{{ _oidc_url }}/realms/{{_oidc_client_realm}}"
_oidc_client_id:         "{{ oidc.client.id if oidc.client is defined and oidc.client.id is defined else primary_domain }}"
# Helper Variables:
_oidc_client_realm:      "{{ OIDC.CLIENT.REALM if OIDC.CLIENT is defined and OIDC.CLIENT.REALM is defined else SOFTWARE_NAME | lower }}"
_oidc_url:               "{{
                            ( OIDC.URL
                              if (OIDC is defined and OIDC.URL is defined)
                              else domains | get_url('web-app-keycloak', WEB_PROTOCOL)
                            ).rstrip('/')
                          }}"
_oidc_client_issuer_url: "{{ _oidc_url ~ '/realms/' ~ _oidc_client_realm }}"
_oidc_client_id:         "{{ OIDC.CLIENT.ID if OIDC.CLIENT is defined and OIDC.CLIENT.ID is defined else SOFTWARE_NAME | lower }}"
_oidc_account_url:       "{{ _oidc_client_issuer_url ~ '/account' }}"
_oidc_protocol_oidc:     "{{ _oidc_client_issuer_url ~ '/protocol/openid-connect' }}"

# Definition
defaults_oidc:
  url: "{{ _oidc_url }}"
  client:
    id:                 "{{ _oidc_client_id }}"                                                  # Client identifier, typically matching your primary domain
    # secret:                                                                                    # Client secret for authenticating with the OIDC provider (set in the inventory file). Recommended: greater than 32 characters
    realm:              "{{_oidc_client_realm}}"                                                 # The realm to which the client belongs in the OIDC provider
    issuer_url:         "{{_oidc_client_issuer_url}}"                                            # Base URL of the OIDC provider (issuer)
    discovery_document: "{{_oidc_client_issuer_url}}/.well-known/openid-configuration"           # URL for fetching the provider's configuration details
    authorize_url:      "{{_oidc_client_issuer_url}}/protocol/openid-connect/auth"               # Endpoint to start the authorization process
    token_url:          "{{_oidc_client_issuer_url}}/protocol/openid-connect/token"              # Endpoint to exchange authorization codes for tokens
    user_info_url:      "{{_oidc_client_issuer_url}}/protocol/openid-connect/userinfo"           # Endpoint to retrieve user information
    logout_url:         "{{_oidc_client_issuer_url}}/protocol/openid-connect/logout"             # Endpoint to log out the user
    change_credentials: "{{_oidc_client_issuer_url}}account/account-security/signing-in"         # URL for managing or changing user credentials
    certs:              "{{_oidc_client_issuer_url}}/protocol/openid-connect/certs"              # JSON Web Key Set (JWKS)
    reset_credentials:  "{{_oidc_client_issuer_url}}/login-actions/reset-credentials?client_id={{ _oidc_client_id }}"  # Password reset url
  button_text: "SSO Login ({{primary_domain | upper}})"                                          # Default button text
  attributes:
  URL: "{{ _oidc_url }}"
  CLIENT:
    ID:                 "{{ _oidc_client_id }}"                                                  # Client identifier, typically matching your primary domain
    # SECRET:                                                                                    # Client secret for authenticating with the OIDC provider (set in the inventory file). Recommended: greater than 32 characters
    REALM:              "{{ _oidc_client_realm }}"                                               # The realm to which the client belongs in the OIDC provider
    ISSUER_URL:         "{{ _oidc_client_issuer_url }}"                                          # Base URL of the OIDC provider (issuer)
    DISCOVERY_DOCUMENT: "{{ _oidc_client_issuer_url ~ '/.well-known/openid-configuration' }}"    # URL for fetching the provider's configuration details
    AUTHORIZE_URL:      "{{ _oidc_protocol_oidc ~ '/auth' }}"                                    # Endpoint to start the authorization process
    TOKEN_URL:          "{{ _oidc_protocol_oidc ~ '/token' }}"                                   # Endpoint to exchange authorization codes for tokens
    USER_INFO_URL:      "{{ _oidc_protocol_oidc ~ '/userinfo' }}"                                # Endpoint to retrieve user information
    LOGOUT_URL:         "{{ _oidc_protocol_oidc ~ '/logout' }}"                                  # Endpoint to log out the user
    CERTS:              "{{ _oidc_protocol_oidc ~ '/certs' }}"                                   # JSON Web Key Set (JWKS)
  ACCOUNT:
    URL:                "{{ _oidc_account_url }}"                                                # Entry point for the user settings console
    PROFILE_URL:        "{{ _oidc_account_url ~ '/#/personal-info' }}"                           # Section for managing personal information
    SECURITY_URL:       "{{ _oidc_account_url ~ '/#/security/signingin' }}"                      # Section for managing login and security settings
    CHANGE_CREDENTIALS: "{{ _oidc_account_url ~ '/account-security/signing-in' }}"               # URL for managing or changing user credentials
    RESET_CREDENTIALS:  "{{ _oidc_client_issuer_url ~ '/login-actions/reset-credentials?client_id=' ~ _oidc_client_id }}"  # Password reset url
  BUTTON_TEXT: "SSO Login ({{ PRIMARY_DOMAIN | upper }})"                                        # Default button text
  ATTRIBUTES:
    # Attribute to identify the user
    username:    "preferred_username"
    given_name:  "givenName"
    family_name: "surname"
    email:       "email"
    claims:
      groups: "groups"
    USERNAME:    "preferred_username"
    GIVEN_NAME:  "givenName"
    FAMILY_NAME: "surname"
    EMAIL:       "email"
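All Keycloak-style endpoints above are derived from a single issuer URL. The Python sketch below reproduces the same string composition; the domain and realm values are placeholders.

base_url = "https://auth.example.org"          # stands in for _oidc_url
realm = "infinito"                             # stands in for _oidc_client_realm
issuer = f"{base_url}/realms/{realm}"          # _oidc_client_issuer_url
protocol = f"{issuer}/protocol/openid-connect" # _oidc_protocol_oidc

endpoints = {
    "DISCOVERY_DOCUMENT": f"{issuer}/.well-known/openid-configuration",
    "AUTHORIZE_URL": f"{protocol}/auth",
    "TOKEN_URL": f"{protocol}/token",
    "USER_INFO_URL": f"{protocol}/userinfo",
    "LOGOUT_URL": f"{protocol}/logout",
    "CERTS": f"{protocol}/certs",
}
for name, url in endpoints.items():
    print(f"{name}: {url}")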
@@ -5,31 +5,31 @@

# Helper Variables:
# Keep in mind to map these variables if there is ever the possibility for the user to define them in the inventory
_ldap_dn_base:                "dc={{primary_domain_sld}},dc={{primary_domain_tld}}"
_ldap_docker_network_enabled: "{{ applications | get_app_conf('svc-db-openldap', 'network.docker') }}"
_ldap_protocol:               "{{ 'ldap' if _ldap_docker_network_enabled else 'ldaps' }}"
_ldap_server_port:            "{{ ports.localhost[_ldap_protocol]['svc-db-openldap'] }}"
_ldap_name:                   "{{ applications | get_app_conf('svc-db-openldap', 'docker.services.openldap.name') }}"
_ldap_domain:                 "{{ primary_domain }}"   # LDAP is just listening on a port, not on a dedicated domain, so the primary domain should be sufficient
_ldap_user_id:                "uid"
_ldap_filters_users_all:      "(|(objectclass=inetOrgPerson))"
LDAP_DN_BASE:                 "{{ PRIMARY_DOMAIN.split('.') | map('regex_replace', '^(.*)$', 'dc=\\1') | join(',') }}"
_ldap_docker_network_enabled: "{{ applications | get_app_conf('svc-db-openldap', 'network.docker') }}"
_ldap_protocol:               "{{ 'ldap' if _ldap_docker_network_enabled else 'ldaps' }}"
_ldap_server_port:            "{{ ports.localhost[_ldap_protocol]['svc-db-openldap'] }}"
_ldap_name:                   "{{ applications | get_app_conf('svc-db-openldap', 'docker.services.openldap.name') }}"
_ldap_domain:                 "{{ PRIMARY_DOMAIN }}"   # LDAP is just listening on a port, not on a dedicated domain, so the primary domain should be sufficient
_ldap_user_id:                "uid"
_ldap_filters_users_all:      "(|(objectclass=inetOrgPerson))"

ldap:
LDAP:
  # Distinguished Names (DN)
  dn:
  DN:
    # -------------------------------------------------------------------------
    # Base DN / Suffix
    # This is the top-level naming context for your directory, used as the
    # default search base for most operations (e.g. adding users, groups).
    # Example: “dc=example,dc=com”
    root: "{{_ldap_dn_base}}"
    administrator:
    ROOT: "{{ LDAP_DN_BASE }}"
    ADMINISTRATOR:
      # -------------------------------------------------------------------------
      # Data-Tree Administrator Bind DN
      # The DN used to authenticate for regular directory operations under
      # the data tree (adding users, modifying attributes, creating OUs, etc.).
      # Typically: “cn=admin,dc=example,dc=com”
      data: "cn={{ applications['svc-db-openldap'].users.administrator.username }},{{ _ldap_dn_base }}"
      DATA: "cn={{ applications['svc-db-openldap'].users.administrator.username }},{{ LDAP_DN_BASE }}"

      # -------------------------------------------------------------------------
      # Config-Tree Administrator Bind DN
@@ -37,9 +37,9 @@ ldap:
      # need to load or modify schema, overlays, modules, or other server-
      # level settings.
      # Typically: “cn=admin,cn=config”
      configuration: "cn={{ applications['svc-db-openldap'].users.administrator.username }},cn=config"
      CONFIGURATION: "cn={{ applications['svc-db-openldap'].users.administrator.username }},cn=config"

  ou:
  OU:
    # -------------------------------------------------------------------------
    # Organizational Units (OUs)
    # Pre-created containers in the directory tree to logically separate entries:
@@ -47,9 +47,9 @@ ldap:
    # – groups: Contains organizational or business groups (e.g., departments, teams).
    # – roles:  Contains application-specific RBAC roles
    #           (e.g., "cn=app1-user", "cn=yourls-admin").
    users:  "ou=users,{{ _ldap_dn_base }}"
    groups: "ou=groups,{{ _ldap_dn_base }}"
    roles:  "ou=roles,{{ _ldap_dn_base }}"
    USERS:  "ou=users,{{ LDAP_DN_BASE }}"
    GROUPS: "ou=groups,{{ LDAP_DN_BASE }}"
    ROLES:  "ou=roles,{{ LDAP_DN_BASE }}"

  # -------------------------------------------------------------------------
  # Additional Notes
@@ -59,17 +59,17 @@ ldap:
  # for ordinary user/group operations, and vice versa.

  # Password to access dn.bind
  bind_credential: "{{ applications | get_app_conf('svc-db-openldap', 'credentials.administrator_database_password') }}"
  server:
    domain:   "{{ _ldap_name if _ldap_docker_network_enabled else _ldap_domain }}"   # Mapping for public or local access
    port:     "{{ _ldap_server_port }}"
    uri:      "{{ _ldap_protocol }}://{{ _ldap_name if _ldap_docker_network_enabled else _ldap_domain }}:{{ _ldap_server_port }}"
    security: ""   # TLS, SSL - Leave empty for none
  network:
    local: "{{ _ldap_docker_network_enabled }}"   # Uses the application configuration to define if local network should be available or not
  user:
    objects:
      structural:
  BIND_CREDENTIAL: "{{ applications | get_app_conf('svc-db-openldap', 'credentials.administrator_database_password') }}"
  SERVER:
    DOMAIN:   "{{ _ldap_name if _ldap_docker_network_enabled else _ldap_domain }}"   # Mapping for public or local access
    PORT:     "{{ _ldap_server_port }}"
    URI:      "{{ _ldap_protocol }}://{{ _ldap_name if _ldap_docker_network_enabled else _ldap_domain }}:{{ _ldap_server_port }}"
    SECURITY: ""   # TLS, SSL - Leave empty for none
  NETWORK:
    LOCAL: "{{ _ldap_docker_network_enabled }}"   # Uses the application configuration to define if local network should be available or not
  USER:
    OBJECTS:
      STRUCTURAL:
        - person         # Structural Classes define the core identity of an entry:
                         #   • Specify mandatory attributes (e.g. sn, cn)
                         #   • Each entry must have exactly one structural class
@@ -77,26 +77,26 @@ ldap:
                         #     (e.g. mail, employeeNumber)
        - posixAccount   # Provides UNIX account attributes (uidNumber, gidNumber,
                         #   homeDirectory)
      auxiliary:
        nextloud_user:   "nextcloudUser"   # Auxiliary Classes attach optional attributes without
      AUXILIARY:
        NEXTCLOUD_USER:  "nextcloudUser"   # Auxiliary Classes attach optional attributes without
                                           #   changing the entry’s structural role. Here they add
                                           #   nextcloudQuota and nextcloudEnabled for Nextcloud.
        ssh_public_key:  "ldapPublicKey"   # Allows storing SSH public keys for services like Gitea.
    attributes:
        SSH_PUBLIC_KEY:  "ldapPublicKey"   # Allows storing SSH public keys for services like Gitea.
    ATTRIBUTES:
      # Attribute to identify the user
      id:              "{{ _ldap_user_id }}"
      mail:            "mail"
      fullname:        "cn"
      firstname:       "givenname"
      surname:         "sn"
      ssh_public_key:  "sshPublicKey"
      nextcloud_quota: "nextcloudQuota"
  filters:
    users:
      login: "(&{{ _ldap_filters_users_all }}({{_ldap_user_id}}=%{{_ldap_user_id}}))"
      all:   "{{ _ldap_filters_users_all }}"
  rbac:
    flavors:
      ID:              "{{ _ldap_user_id }}"
      MAIL:            "mail"
      FULLNAME:        "cn"
      FIRSTNAME:       "givenname"
      SURNAME:         "sn"
      SSH_PUBLIC_KEY:  "sshPublicKey"
      NEXTCLOUD_QUOTA: "nextcloudQuota"
  FILTERS:
    USERS:
      LOGIN: "(&{{ _ldap_filters_users_all }}({{_ldap_user_id}}=%{{_ldap_user_id}}))"
      ALL:   "{{ _ldap_filters_users_all }}"
  RBAC:
    FLAVORS:
      # Valid values posixGroup, groupOfNames
      - groupOfNames
      # - posixGroup
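The new LDAP_DN_BASE expression derives the base DN from every label of PRIMARY_DOMAIN, which also covers multi-label domains that the old sld/tld version could not. A plain-Python sketch of the same transformation, with a placeholder domain:

PRIMARY_DOMAIN = "example.co.uk"

# Equivalent of: split('.') | map('regex_replace', '^(.*)$', 'dc=\1') | join(',')
LDAP_DN_BASE = ",".join(f"dc={part}" for part in PRIMARY_DOMAIN.split("."))
print(LDAP_DN_BASE)  # -> dc=example,dc=co,dc=uk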
@@ -2,8 +2,8 @@
defaults_service_provider:
  type: "legal"   # Accepted Values: natural, legal
  company:
    titel:  "Infinito.Nexus by Kevin Veen-Birkenbach"
    slogan: "Infinito.Nexus — Empowering a Sovereign Digital Future."
    titel:  "{{ SOFTWARE_NAME }} by Kevin Veen-Birkenbach"
    slogan: "{{ SOFTWARE_NAME }} — Empowering a Sovereign Digital Future."
    address:
      street:  "Binary Avenue 01"
      city:    "Cybertown"
@@ -11,7 +11,7 @@ defaults_service_provider:
      country: "Nexusland"
    logo: "{{ applications['web-svc-asset'].url ~ '/img/logo.png' }}"
  platform:
    titel:    "Infinito.Nexus"
    titel:    "{{ SOFTWARE_NAME }}"
    subtitel: "One login. Infinite applications."
    logo:     "{{ applications['web-svc-asset'].url ~ '/img/logo.png' }}"
    favicon:  "{{ applications['web-svc-asset'].url ~ '/img/favicon.ico' }}"
@@ -19,9 +19,9 @@ defaults_service_provider:
    web-app-bluesky: >-
      {{ ('@' ~ users.contact.username ~ '.' ~ domains['web-app-bluesky'].api)
         if 'web-app-bluesky' in group_names else '' }}
    email:    "{{ users.contact.username ~ '@' ~ primary_domain if 'web-app-mailu' in group_names else '' }}"
    email:    "{{ users.contact.username ~ '@' ~ PRIMARY_DOMAIN if 'web-app-mailu' in group_names else '' }}"
    mastodon: "{{ '@' ~ users.contact.username ~ '@' ~ domains | get_domain('web-app-mastodon') if 'web-app-mastodon' in group_names else '' }}"
    matrix:   "{{ '@' ~ users.contact.username ~ ':' ~ domains['web-app-matrix'].synapse if 'web-app-matrix' in group_names else '' }}"
    matrix:   "{{ '@' ~ users.contact.username ~ ':' ~ applications | get_app_conf('web-app-matrix', 'server_name') if 'web-app-matrix' in group_names else '' }}"
    peertube: "{{ '@' ~ users.contact.username ~ '@' ~ domains | get_domain('web-app-peertube') if 'web-app-peertube' in group_names else '' }}"
    pixelfed: "{{ '@' ~ users.contact.username ~ '@' ~ domains | get_domain('web-app-pixelfed') if 'web-app-pixelfed' in group_names else '' }}"
    phone:    "+0 000 000 404"
@@ -29,5 +29,5 @@ defaults_service_provider:

  legal:
    editorial_responsible: "Johannes Gutenberg"
    source_code:           "https://github.com/kevinveenbirkenbach/infinito-nexus"
    imprint:               "{{WEB_PROTOCOL}}://{{ domains | get_domain('web-svc-html') }}/imprint.html"
    source_code:           "https://s.{{ SOFTWARE_NAME | lower }}/code"
    imprint:               "{{ domains | get_url('web-svc-html', WEB_PROTOCOL) }}/imprint.html"
@@ -1,6 +1,6 @@
backups_folder_path: "/Backups/"   # Path to the backups folder
BACKUPS_FOLDER_PATH: "/Backups/"   # Path to the backups folder

# Storage Space-Related Configurations
size_percent_maximum_backup:     75   # Maximum storage space in percent for backups
size_percent_cleanup_disc_space: 85   # Threshold for triggering cleanup actions
size_percent_disc_space_warning: 90   # Warning threshold in percent for free disk space
SIZE_PERCENT_MAXIMUM_BACKUP:     75   # Maximum storage space in percent for backups
SIZE_PERCENT_CLEANUP_DISC_SPACE: 85   # Threshold for triggering cleanup actions
SIZE_PERCENT_DISC_SPACE_WARNING: 90   # Warning threshold in percent for free disk space
2
logs/README.md
Normal file
@@ -0,0 +1,2 @@
# Logs
This folder contains the log files.
53
lookup_plugins/local_mtime_qs.py
Normal file
@@ -0,0 +1,53 @@
from __future__ import annotations
from ansible.plugins.lookup import LookupBase
from ansible.errors import AnsibleError
import os


class LookupModule(LookupBase):
    """
    Return a cache-busting string based on the LOCAL file's mtime.

    Usage (single path → string via Jinja):
        {{ lookup('local_mtime_qs', '/path/to/file.css') }}
        -> "?version=1712323456"

    Options:
        param (str): query parameter name (default: "version")
        mode  (str): "qs" (default) → returns "?<param>=<mtime>"
                     "epoch"        → returns "<mtime>"

    Multiple paths (returns list, one result per term):
        {{ lookup('local_mtime_qs', '/a.js', '/b.js', param='v') }}
    """

    def run(self, terms, variables=None, **kwargs):
        if not terms:
            return []

        param = kwargs.get('param', 'version')
        mode = kwargs.get('mode', 'qs')

        if mode not in ('qs', 'epoch'):
            raise AnsibleError("local_mtime_qs: 'mode' must be 'qs' or 'epoch'")

        results = []
        for term in terms:
            path = os.path.abspath(os.path.expanduser(str(term)))

            # Fail fast if path is missing or not a regular file
            if not os.path.exists(path):
                raise AnsibleError(f"local_mtime_qs: file does not exist: {path}")
            if not os.path.isfile(path):
                raise AnsibleError(f"local_mtime_qs: not a regular file: {path}")

            try:
                mtime = int(os.stat(path).st_mtime)
            except OSError as e:
                raise AnsibleError(f"local_mtime_qs: cannot stat '{path}': {e}")

            if mode == 'qs':
                results.append(f"?{param}={mtime}")
            else:  # mode == 'epoch'
                results.append(str(mtime))

        return results
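Besides the Jinja usage shown in the docstring, the lookup can be exercised directly for a quick local check. A minimal sketch, assuming ansible-core is installed, the command is run from the repository root, and the referenced file exists:

import sys

sys.path.insert(0, "lookup_plugins")   # assumes execution from the repository root
from local_mtime_qs import LookupModule

lookup = LookupModule()
print(lookup.run(["/etc/hostname"], variables=None, param="v"))
# e.g. ['?v=1712323456']
print(lookup.run(["/etc/hostname"], variables=None, mode="epoch"))
# e.g. ['1712323456']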
77
main.py
@@ -96,34 +96,71 @@ def play_start_intro():
    Sound.play_infinito_intro_sound()


from multiprocessing import Process, get_start_method, set_start_method
import time

def _call_sound(method_name: str):
    # Re-import inside child to (re)init audio backend cleanly under 'spawn'
    from module_utils.sounds import Sound as _Sound
    getattr(_Sound, method_name)()

def _play_in_child(method_name: str) -> bool:
    p = Process(target=_call_sound, args=(method_name,))
    p.start(); p.join()
    if p.exitcode != 0:
        try:
            # Visible diagnostics when the child crashes or fails
            print(color_text(f"[sound] child '{method_name}' exitcode={p.exitcode}", Fore.YELLOW))
        except Exception:
            pass
    return p.exitcode == 0

def failure_with_warning_loop(no_signal, sound_enabled, alarm_timeout=60):
    """
    On failure: Plays warning sound in a loop.
    Aborts after alarm_timeout seconds and exits with code 1.
    Plays a warning sound in a loop until timeout; Ctrl+C stops earlier.
    Sound playback is isolated in a child process to avoid segfaulting the main process.
    """
    if not no_signal:
        Sound.play_finished_failed_sound()
        # Try the failure jingle once; ignore failures
        _play_in_child("play_finished_failed_sound")

    print(color_text("Warning: command failed. Press Ctrl+C to stop warnings.", Fore.RED))
    start = time.monotonic()
    try:
        while True:
            if not no_signal:
                Sound.play_warning_sound()
            if time.monotonic() - start > alarm_timeout:
                print(color_text(f"Alarm aborted after {alarm_timeout} seconds.", Fore.RED))
                sys.exit(1)
        while time.monotonic() - start <= alarm_timeout:
            if no_signal:
                time.sleep(0.5)
                continue

            ok = _play_in_child("play_warning_sound")
            # If audio stack is broken, stay silent but avoid busy loop
            if not ok:
                time.sleep(0.8)
        print(color_text(f"Alarm aborted after {alarm_timeout} seconds.", Fore.RED))
        sys.exit(1)
    except KeyboardInterrupt:
        print(color_text("Warnings stopped by user.", Fore.YELLOW))


        sys.exit(1)

if __name__ == "__main__":
    # IMPORTANT: use 'spawn' so the child re-initializes audio cleanly
    try:
        if get_start_method(allow_none=True) != "spawn":
            set_start_method("spawn", force=True)
    except RuntimeError:
        pass

    # Prefer system audio backend by default (prevents simpleaudio segfaults in child processes)
    os.environ.setdefault("INFINITO_AUDIO_BACKEND", "system")


    # Parse flags
    sound_enabled = '--sound' in sys.argv and (sys.argv.remove('--sound') or True)
    no_signal     = '--no-signal' in sys.argv and (sys.argv.remove('--no-signal') or True)
    log_enabled   = '--log' in sys.argv and (sys.argv.remove('--log') or True)
    # Guarantee that --log is passed to the deploy command
    log_enabled   = '--log' in sys.argv
    if log_enabled and (len(sys.argv) < 2 or sys.argv[1] != 'deploy'):
        sys.argv.remove('--log')
    git_clean     = '--git-clean' in sys.argv and (sys.argv.remove('--git-clean') or True)
    infinite      = '--infinite' in sys.argv and (sys.argv.remove('--infinite') or True)
    alarm_timeout = 60
@@ -135,19 +172,6 @@ if __name__ == "__main__":
    except Exception:
        print(color_text("Invalid --alarm-timeout value!", Fore.RED))
        sys.exit(1)

    # Segfault handler
    def segv_handler(signum, frame):
        if not no_signal:
            Sound.play_finished_failed_sound()
            try:
                while True:
                    Sound.play_warning_sound()
            except KeyboardInterrupt:
                pass
        print(color_text("Segmentation fault detected. Exiting.", Fore.RED))
        sys.exit(1)
    signal.signal(signal.SIGSEGV, segv_handler)

    # Play intro melody if requested
    if sound_enabled:
@@ -182,6 +206,7 @@ if __name__ == "__main__":
        print(color_text("  --log              Log all proxied command output to logfile.log", Fore.YELLOW))
        print(color_text("  --git-clean        Remove all Git-ignored files before running", Fore.YELLOW))
        print(color_text("  --infinite         Run the proxied command in an infinite loop", Fore.YELLOW))
        print(color_text("  --alarm-timeout    Stop warnings and exit after N seconds (default: 60)", Fore.YELLOW))
        print(color_text("  -h, --help         Show this help message and exit", Fore.YELLOW))
        print()
        print(color_text("Available commands:", Style.BRIGHT))
@@ -225,7 +250,7 @@ if __name__ == "__main__":
        "For commercial use, a license agreement with Kevin Veen-Birkenbach is required. \n",
        Style.DIM
    ))
    print(color_text("License: https://s.veen.world/cncl", Style.DIM))
    print(color_text("License: https://s.infinito.nexus/license", Style.DIM))
    print()
    print(color_text("🎉🌈 Happy IT Infrastructuring! 🚀🔧✨", Fore.MAGENTA + Style.BRIGHT))
    print()
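The child-process isolation introduced above (one spawned process per sound call, non-zero exit code treated as failure) can be reduced to a small standalone pattern. A sketch with illustrative names, independent of the project's Sound class:

from multiprocessing import Process, set_start_method

def _noisy_task():
    # Stand-in for a sound call that might crash the interpreter
    print("pretending to play a sound")

def run_isolated(target) -> bool:
    p = Process(target=target)
    p.start()
    p.join()
    return p.exitcode == 0   # False means the child crashed or failed

if __name__ == "__main__":
    set_start_method("spawn", force=True)  # child re-initializes its state cleanly
    ok = run_isolated(_noisy_task)
    print("child succeeded:", ok)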
18
module_utils/get_url.py
Normal file
@@ -0,0 +1,18 @@
from ansible.errors import AnsibleFilterError
import sys, os

def get_url(domains, application_id, protocol):
    plugin_dir = os.path.dirname(__file__)
    project_root = os.path.dirname(plugin_dir)
    module_utils = os.path.join(project_root, 'module_utils')
    if module_utils not in sys.path:
        sys.path.append(module_utils)

    try:
        from domain_utils import get_domain
    except ImportError as e:
        raise AnsibleFilterError(f"could not import domain_utils: {e}")

    if not isinstance(protocol, str):
        raise AnsibleFilterError("Protocol must be a string")
    return f"{protocol}://{ get_domain(domains, application_id) }"
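Assuming `domains` is a flat mapping of application IDs to hostnames (the exact shape is resolved by the project's `domain_utils.get_domain`, which is not shown here), the filter simply composes protocol and domain. A hedged sketch of the expected result, without calling the project code:

domains = {"web-app-keycloak": "auth.example.org"}   # placeholder mapping

protocol = "https"
application_id = "web-app-keycloak"
url = f"{protocol}://{domains[application_id]}"
print(url)  # -> https://auth.example.org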
296
module_utils/role_dependency_resolver.py
Normal file
@@ -0,0 +1,296 @@
import os
import fnmatch
import re
from typing import Dict, Set, Iterable, Tuple, Optional

import yaml


class RoleDependencyResolver:
    _RE_PURE_JINJA = re.compile(r"\s*\{\{\s*[^}]+\s*\}\}\s*$")

    def __init__(self, roles_dir: str):
        self.roles_dir = roles_dir

    # -------------------------- public API --------------------------

    def resolve_transitively(
        self,
        start_roles: Iterable[str],
        *,
        resolve_include_role: bool = True,
        resolve_import_role: bool = True,
        resolve_dependencies: bool = True,
        resolve_run_after: bool = False,
        max_depth: Optional[int] = None,
    ) -> Set[str]:
        to_visit = list(dict.fromkeys(start_roles))
        visited: Set[str] = set()
        depth: Dict[str, int] = {}

        for r in to_visit:
            depth[r] = 0

        while to_visit:
            role = to_visit.pop()
            cur_d = depth.get(role, 0)
            if role in visited:
                continue
            visited.add(role)

            if max_depth is not None and cur_d >= max_depth:
                continue

            for dep in self.get_role_dependencies(
                role,
                resolve_include_role=resolve_include_role,
                resolve_import_role=resolve_import_role,
                resolve_dependencies=resolve_dependencies,
                resolve_run_after=resolve_run_after,
            ):
                if dep not in visited:
                    to_visit.append(dep)
                    depth[dep] = cur_d + 1

        return visited

    def get_role_dependencies(
        self,
        role_name: str,
        *,
        resolve_include_role: bool = True,
        resolve_import_role: bool = True,
        resolve_dependencies: bool = True,
        resolve_run_after: bool = False,
    ) -> Set[str]:
        role_path = os.path.join(self.roles_dir, role_name)
        if not os.path.isdir(role_path):
            return set()

        deps: Set[str] = set()

        if resolve_include_role or resolve_import_role:
            includes, imports = self._scan_tasks(role_path)
            if resolve_include_role:
                deps |= includes
            if resolve_import_role:
                deps |= imports

        if resolve_dependencies:
            deps |= self._extract_meta_dependencies(role_path)

        if resolve_run_after:
            deps |= self._extract_meta_run_after(role_path)

        return deps

    # -------------------------- scanning helpers --------------------------

    def _scan_tasks(self, role_path: str) -> Tuple[Set[str], Set[str]]:
        tasks_dir = os.path.join(role_path, "tasks")
        include_roles: Set[str] = set()
        import_roles: Set[str] = set()

        if not os.path.isdir(tasks_dir):
            return include_roles, import_roles

        all_roles = self._list_role_dirs(self.roles_dir)

        candidates = []
        for root, _, files in os.walk(tasks_dir):
            for f in files:
                if f.endswith(".yml") or f.endswith(".yaml"):
                    candidates.append(os.path.join(root, f))

        for file_path in candidates:
            try:
                with open(file_path, "r", encoding="utf-8") as f:
                    docs = list(yaml.safe_load_all(f))
            except Exception:
                inc, imp = self._tolerant_scan_file(file_path, all_roles)
                include_roles |= inc
                import_roles |= imp
                continue

            for doc in docs or []:
                if not isinstance(doc, list):
                    continue
                for task in doc:
                    if not isinstance(task, dict):
                        continue
                    if "include_role" in task:
                        include_roles |= self._extract_from_task(task, "include_role", all_roles)
                    if "import_role" in task:
                        import_roles |= self._extract_from_task(task, "import_role", all_roles)

        return include_roles, import_roles

    def _extract_from_task(self, task: dict, key: str, all_roles: Iterable[str]) -> Set[str]:
        roles: Set[str] = set()
        spec = task.get(key)
        if not isinstance(spec, dict):
            return roles

        name = spec.get("name")
        loop_val = self._collect_loop_values(task)

        if loop_val is not None:
            for item in self._iter_flat(loop_val):
                cand = self._role_from_loop_item(item, name_template=name)
                if cand:
                    roles.add(cand)

            if isinstance(name, str) and name.strip() and not self._is_pure_jinja_var(name):
                pattern = self._jinja_to_glob(name) if ("{{" in name and "}}" in name) else name
                self._match_glob_into(pattern, all_roles, roles)
            return roles

        if isinstance(name, str) and name.strip():
            if "{{" in name and "}}" in name:
                if self._is_pure_jinja_var(name):
                    return roles
                pattern = self._jinja_to_glob(name)
                self._match_glob_into(pattern, all_roles, roles)
            else:
                roles.add(name.strip())

        return roles

    def _collect_loop_values(self, task: dict):
        for k in ("loop", "with_items", "with_list", "with_flattened"):
            if k in task:
                return task[k]
        return None

    def _iter_flat(self, value):
        if isinstance(value, list):
            for v in value:
                if isinstance(v, list):
                    for x in v:
                        yield x
                else:
                    yield v

    def _role_from_loop_item(self, item, name_template=None) -> Optional[str]:
        tmpl = (name_template or "").strip() if isinstance(name_template, str) else ""

        if isinstance(item, str):
            if tmpl in ("{{ item }}", "{{item}}") or not tmpl or "item" in tmpl:
                return item.strip()
            return None

        if isinstance(item, dict):
            for k in ("role", "name"):
                v = item.get(k)
                if isinstance(v, str) and v.strip():
                    if tmpl in (f"{{{{ item.{k} }}}}", f"{{{{item.{k}}}}}") or not tmpl or "item" in tmpl:
                        return v.strip()
            return None

    def _match_glob_into(self, pattern: str, all_roles: Iterable[str], out: Set[str]):
        if "*" in pattern or "?" in pattern or "[" in pattern:
            for r in all_roles:
                if fnmatch.fnmatch(r, pattern):
                    out.add(r)
        else:
            out.add(pattern)

    def test_jinja_mixed_name_glob_matching(self):
        """
        include_role:
          name: "prefix-{{ item }}-suffix"
        loop: [x, y]
        Existing roles: prefix-x-suffix, prefix-y-suffix, prefix-z-suffix

        Expectation:
          - NO raw loop items ('x', 'y') end up as roles
          - Glob matching resolves to all three concrete roles
        """
        make_role(self.roles_dir, "A")
        for rn in ["prefix-x-suffix", "prefix-y-suffix", "prefix-z-suffix"]:
            make_role(self.roles_dir, rn)

        write(
            os.path.join(self.roles_dir, "A", "tasks", "main.yml"),
            """
            - name: jinja-mixed glob
              include_role:
                name: "prefix-{{ item }}-suffix"
              loop:
                - x
                - y
            """
        )

        r = RoleDependencyResolver(self.roles_dir)
        deps = r.get_role_dependencies("A")

        # ensure no raw loop items leak into the results
        self.assertNotIn("x", deps)
        self.assertNotIn("y", deps)

        # only the resolved role names should be present
        self.assertEqual(
            deps,
            {"prefix-x-suffix", "prefix-y-suffix", "prefix-z-suffix"},
        )

    # -------------------------- meta helpers --------------------------

    def _extract_meta_dependencies(self, role_path: str) -> Set[str]:
        deps: Set[str] = set()
        meta_main = os.path.join(role_path, "meta", "main.yml")
        if not os.path.isfile(meta_main):
            return deps
        try:
            with open(meta_main, "r", encoding="utf-8") as f:
                meta = yaml.safe_load(f) or {}
            raw_deps = meta.get("dependencies", [])
            if isinstance(raw_deps, list):
                for item in raw_deps:
                    if isinstance(item, str):
                        deps.add(item.strip())
                    elif isinstance(item, dict):
                        r = item.get("role")
                        if isinstance(r, str) and r.strip():
                            deps.add(r.strip())
        except Exception:
            pass
        return deps

    def _extract_meta_run_after(self, role_path: str) -> Set[str]:
        deps: Set[str] = set()
        meta_main = os.path.join(role_path, "meta", "main.yml")
        if not os.path.isfile(meta_main):
            return deps
        try:
            with open(meta_main, "r", encoding="utf-8") as f:
                meta = yaml.safe_load(f) or {}
            galaxy_info = meta.get("galaxy_info", {})
            run_after = galaxy_info.get("run_after", [])
            if isinstance(run_after, list):
                for item in run_after:
                    if isinstance(item, str) and item.strip():
                        deps.add(item.strip())
        except Exception:
            pass
        return deps

    # -------------------------- small utils --------------------------

    def _list_role_dirs(self, roles_dir: str) -> list[str]:
        return [
            d for d in os.listdir(roles_dir)
            if os.path.isdir(os.path.join(roles_dir, d))
        ]

    @classmethod
    def _is_pure_jinja_var(cls, s: str) -> bool:
        return bool(cls._RE_PURE_JINJA.fullmatch(s or ""))

    @staticmethod
    def _jinja_to_glob(s: str) -> str:
        pattern = re.sub(r"\{\{[^}]+\}\}", "*", s or "")
        pattern = re.sub(r"\*{2,}", "*", pattern)
        return pattern.strip()
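A minimal usage sketch for the resolver, with a placeholder roles directory and start role, run from the repository root:

import sys

sys.path.insert(0, "module_utils")     # assumes execution from the repository root
from role_dependency_resolver import RoleDependencyResolver

resolver = RoleDependencyResolver("roles")
deps = resolver.resolve_transitively(
    ["web-app-nextcloud"],          # start set (placeholder role name)
    resolve_run_after=False,        # ignore galaxy_info.run_after ordering hints
    max_depth=None,                 # follow include_role/import_role/meta deps fully
)
print(sorted(deps))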
@@ -22,6 +22,7 @@ else:
    try:
        import numpy as np
        import simpleaudio as sa
        import shutil, subprocess, tempfile, wave as wavmod
        class Sound:
            """
            Sound effects for the application with enhanced complexity.
@@ -63,10 +64,49 @@ else:
                middle = (w1_end + w2_start).astype(np.int16)
                return np.concatenate([w1[:-fade_len], middle, w2[fade_len:]])

            @staticmethod
            def _play_via_system(wave: np.ndarray):
                # Write a temp WAV and play it via available system player
                with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as f:
                    fname = f.name
                try:
                    with wavmod.open(fname, "wb") as w:
                        w.setnchannels(1)
                        w.setsampwidth(2)
                        w.setframerate(Sound.fs)
                        w.writeframes(wave.tobytes())
                    def run(cmd):
                        return subprocess.run(
                            cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
                        ).returncode == 0
                    # Preferred order: PipeWire → PulseAudio → ALSA → ffplay
                    if shutil.which("pw-play") and run(["pw-play", fname]): return
                    if shutil.which("paplay") and run(["paplay", fname]): return
                    if shutil.which("aplay") and run(["aplay", "-q", fname]): return
                    if shutil.which("ffplay") and run(["ffplay", "-autoexit", "-nodisp", fname]): return
                    # Last resort if no system player exists: simpleaudio
                    play_obj = sa.play_buffer(wave, 1, 2, Sound.fs)
                    play_obj.wait_done()
                finally:
                    try: os.unlink(fname)
                    except Exception: pass

            @staticmethod
            def _play(wave: np.ndarray):
                play_obj = sa.play_buffer(wave, 1, 2, Sound.fs)
                play_obj.wait_done()
                # Switch via env: system | simpleaudio | auto (default)
                backend = os.getenv("INFINITO_AUDIO_BACKEND", "auto").lower()
                if backend == "system":
                    return Sound._play_via_system(wave)
                if backend == "simpleaudio":
                    play_obj = sa.play_buffer(wave, 1, 2, Sound.fs)
                    play_obj.wait_done()
                    return
                # auto: try simpleaudio first; if it fails, fall back to system
                try:
                    play_obj = sa.play_buffer(wave, 1, 2, Sound.fs)
                    play_obj.wait_done()
                except Exception:
                    Sound._play_via_system(wave)

            @classmethod
            def play_infinito_intro_sound(cls):
@@ -1,4 +1,4 @@
- name: Execute Infinito.Nexus Play
- name: Execute {{ SOFTWARE_NAME }} Play
  hosts: all
  tasks:
    - name: "Load 'constructor' tasks"
@@ -1,5 +1,6 @@
collections:
  - name: kewlfft.aur
  - name: community.general
  - name: hetzner.hcloud
yay:
  - python-simpleaudio
@@ -1,4 +1,9 @@
roles:
  docker:
    title: "Docker Toolkit"
    description: "Generic Docker helpers and utilities (compose wrappers, container tooling)."
    icon: "fas fa-docker"
    invokable: false
  dev:
    title: "Software Development Utilities"
    invokable: false
@@ -6,41 +11,76 @@ roles:
    title: "System"
    description: "System-near components. Will be automatically called if necessary from other roles."
    invokable: false
  alm:
    title: "Alerting"
    description: "Notification handlers for system events"
    icon: "fas fa-bell"
  ctl:
    title: "Control"
    description: "Control layer for system lifecycle management—handling cleanup, monitoring, backups, alerting, maintenance, and repair tasks."
    icon: "fas fa-cogs"
    invokable: false
  cln:
    title: "Cleanup"
    description: "Roles for cleaning up various system resources—old backups, unused certificates, temporary files, Docker volumes, disk caches, deprecated domains, and more."
    icon: "fas fa-trash-alt"
  cln:
    title: "Cleanup"
    description: "Roles for cleaning up various system resources—old backups, unused certificates, temporary files, Docker volumes, disk caches, deprecated domains, and more."
    icon: "fas fa-trash-alt"
    invokable: false
  hlth:
    title: "Monitoring"
    description: "Roles for system monitoring and health checks—encompassing bot-style automated checks and core low-level monitors for logs, containers, disk usage, and more."
    icon: "fas fa-chart-area"
    invokable: false
  bkp:
    title: "Backup & Restore"
    description: "Backup strategies & restore procedures"
    icon: "fas fa-hdd"
    invokable: false
  alm:
    title: "Alerting"
    description: "Notification handlers for system events"
    icon: "fas fa-bell"
    invokable: false
  mtn:
    title: "Maintenance"
    description: "Maintenance roles for certificates, system upkeep, and recurring operational tasks."
    icon: "fas fa-tools"
    invokable: false
  rpr:
    title: "Repair"
    description: "Repair and recovery roles—handling hard/soft recovery of Docker, Btrfs balancers, and other low-level system fixes."
    icon: "fas fa-wrench"
    invokable: false
  dns:
    title: "DNS Automation"
    description: "DNS providers, records, and rDNS management (Cloudflare, Hetzner, etc.)."
    icon: "fas fa-network-wired"
    invokable: false
  hlth:
    title: "Monitoring"
    description: "Roles for system monitoring and health checks—encompassing bot-style automated checks and core low-level monitors for logs, containers, disk usage, and more."
    icon: "fas fa-chart-area"
  stk:
    title: "Stack"
    description: "Stack levels to set up the server"
    icon: "fas fa-bars-staggered"
    invokable: false
  bkp:
    title: "Backup & Restore"
    description: "Backup strategies & restore procedures"
    icon: "fas fa-hdd"
  front:
    title: "System Frontend Helpers"
    description: "Frontend helpers for reverse-proxied apps (injection, shared assets, CDN plumbing)."
    icon: "fas fa-wand-magic-sparkles"
    invokable: false
  inj:
    title: "Injection"
    description: "Composable HTML injection roles (CSS, JS, logout interceptor, analytics, desktop iframe) for Nginx/OpenResty via sub_filter/Lua with CDN-backed assets."
    icon: "fas fa-filter"
    invokable: false
  update:
    title: "Updates & Package Management"
    description: "OS & package updates"
    icon: "fas fa-sync"
    invokable: true
  pkgmgr:
    title: "Package Manager Helpers"
    description: "Helpers for package managers and unified install flows."
    icon: "fas fa-box-open"
    invokable: false
  drv:
    title: "Drivers"
    description: "Roles for installing and configuring hardware drivers—covering printers, graphics, input devices, and other peripheral support."
    icon: "fas fa-microchip"
    invokable: true
  # core:
  #   title: "Core & System"
  #   description: "Fundamental system configuration"
  #   icon: "fas fa-cogs"
  #   invokable: true
  gen:
    title: "Generic"
    description: "Helper roles & installers (git, locales, timer, etc.)"
@@ -66,20 +106,10 @@ roles:
    description: "Utility roles for server-side configuration and management—covering corporate identity provisioning, network helpers, and other service-oriented toolkits."
    icon: "fas fa-cogs"
    invokable: true
  srv:
    title: "Server"
    description: "General server roles for provisioning and managing server infrastructure—covering web servers, proxy servers, network services, and other backend components."
    icon: "fas fa-server"
    invokable: false
  web:
    title: "Webserver"
    description: "Web-server roles for installing and configuring Nginx (core, TLS, injection filters, composer modules)."
    icon: "fas fa-server"
    invokable: false
  proxy:
    title: "Proxy Server"
    description: "Proxy-server roles for virtual-host orchestration and reverse-proxy setups."
    icon: "fas fa-project-diagram"
  dev:
    title: "Developer Utilities"
    description: "Developer-centric server utilities and admin toolkits."
    icon: "fas fa-code"
    invokable: false
  web:
    title: "Web Infrastructure"
@@ -99,11 +129,6 @@ roles:
    title: "Webserver Optimization"
    description: "Tools which help to optimize webservers"
    invokable: true
  net:
    title: "Network"
    description: "Network setup (DNS, Let's Encrypt HTTP, WireGuard, etc.)"
    icon: "fas fa-globe"
    invokable: true
  svc:
    title: "Services"
    description: "Infrastructure services like databases"
@@ -123,7 +148,11 @@ roles:
    description: "Reverse‑proxy roles for routing and load‑balancing traffic to backend services"
    icon: "fas fa-project-diagram"
    invokable: true

  net:
    title: "Network"
    description: "Network setup (DNS, Let's Encrypt HTTP, WireGuard, etc.)"
    icon: "fas fa-globe"
    invokable: true
  user:
    title: "Users & Access"
    description: "User accounts & access control"
@@ -1,11 +0,0 @@
# Database Docker with Web Proxy

This role builds on `cmp-db-docker` by adding a reverse-proxy frontend for HTTP access to your database service.

## Features

- **Database Composition**
  Leverages the `cmp-db-docker` role to stand up your containerized database (PostgreSQL, MariaDB, etc.) with backups and user management.

- **Reverse Proxy**
  Includes the `srv-proxy-6-6-domain` role to configure a proxy (e.g. nginx) for routing HTTP(S) traffic to your database UI or management endpoint.
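For orientation, a minimal sketch of how such a composed role might be pulled into a play; the host group and the `application_id` value are invented for illustration, while the role name `cmp-db-docker-proxy` follows the run-once marker in the task hunk below:

```yaml
# Hypothetical usage sketch: the group name and application_id are assumptions;
# only the role name and the application_id variable follow the removed files.
- hosts: database_servers
  become: true
  tasks:
    - name: Compose a containerized database plus an HTTP reverse proxy
      include_role:
        name: cmp-db-docker-proxy
      vars:
        application_id: "web-app-example"  # placeholder id, not from this diff
```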
@@ -1,12 +0,0 @@
# run_once_cmp_db_docker_proxy: deactivated

- name: "For '{{ application_id }}': load docker and db"
  include_role:
    name: cmp-db-docker

- name: "For '{{ application_id }}': include role srv-proxy-6-6-domain"
  include_role:
    name: srv-proxy-6-6-domain
  vars:
    domain: "{{ domains | get_domain(application_id) }}"
    http_port: "{{ ports.localhost.http[application_id] }}"
@@ -1,19 +0,0 @@
# run_once_cmp_db_docker: disabled

- name: "For '{{ application_id }}': Set database_application_id (needed due to lazy-loading issue)"
  set_fact:
    database_application_id: "{{ application_id }}"

- name: "For '{{ application_id }}': Load database variables"
  include_vars: "{{ item }}"
  loop:
    - "{{ cmp_db_docker_vars_file_docker }}" # Important to load the docker variables first so that the database can use them
    - "{{ cmp_db_docker_vars_file_db }}"     # Important to load them before the docker role so that the backup can use them

- name: "For '{{ application_id }}': Load cmp-docker-oauth2"
  include_role:
    name: cmp-docker-oauth2

- name: "For '{{ application_id }}': Load central RDBMS"
  include_role:
    name: cmp-rdbms
@@ -1,2 +0,0 @@
cmp_db_docker_vars_file_db: "{{ playbook_dir }}/roles/cmp-rdbms/vars/database.yml"
cmp_db_docker_vars_file_docker: "{{ playbook_dir }}/roles/docker-compose/vars/docker-compose.yml"
@@ -1,14 +0,0 @@
# run_once_cmp_docker_oauth2: disabled

- name: "For '{{ application_id }}': Load docker-compose"
  include_role:
    name: docker-compose

- name: "set oauth2_proxy_application_id (needed due to lazy-loading issue)"
  set_fact:
    oauth2_proxy_application_id: "{{ application_id }}"
  when: applications | get_app_conf(application_id, 'features.oauth2', False)

- name: "include the web-app-oauth2-proxy role {{domain}}"
  include_tasks: "{{ playbook_dir }}/roles/web-app-oauth2-proxy/tasks/main.yml"
  when: applications | get_app_conf(application_id, 'features.oauth2', False)
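As a reading aid, a sketch of the kind of `applications` entry that would satisfy the `features.oauth2` guard used twice above; only the `features.oauth2` flag queried through `get_app_conf` is taken from the tasks, while the application key and surrounding layout are assumptions:

```yaml
# Hypothetical applications entry; only 'features.oauth2' mirrors the guard above.
applications:
  web-app-example:        # placeholder application id
    features:
      oauth2: true        # with this flag set, both guarded tasks above would run
```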
@@ -1,14 +0,0 @@
# run_once_cmp_docker_proxy: deactivated

# Loading the proxy first is only implemented due to some issues with BBB
- name: "For '{{ application_id }}': include role srv-proxy-6-6-domain"
  include_role:
    name: srv-proxy-6-6-domain
  vars:
    domain: "{{ domains | get_domain(application_id) }}"
    http_port: "{{ ports.localhost.http[application_id] }}"

- name: "For '{{ application_id }}': Load cmp-docker-oauth2"
  include_role:
    name: cmp-docker-oauth2
roles/cmp-rdbms/templates/env/mariadb.env.j2 (vendored, 5 lines)
@@ -1,5 +0,0 @@
MYSQL_DATABASE="{{database_name}}"
MYSQL_USER="{{database_username}}"
MYSQL_PASSWORD="{{database_password}}"
MYSQL_ROOT_PASSWORD="{{database_password}}"
MARIADB_AUTO_UPGRADE="1"
@@ -1,4 +0,0 @@
POSTGRES_PASSWORD={{database_password}}
POSTGRES_USER={{database_username}}
POSTGRES_DB={{database_name}}
POSTGRES_INITDB_ARGS=--encoding=UTF8 --locale=C
@@ -1 +0,0 @@
{% include 'roles/cmp-rdbms/templates/services/' + database_type + '.yml.j2' %}
@@ -1,17 +0,0 @@
# Helper variables
_database_id: "svc-db-{{ database_type }}"
_database_central_name: "{{ applications | get_app_conf( _database_id, 'docker.services.' ~ database_type ~ '.name') }}"
_database_consumer_entity_name: "{{ database_application_id | get_entity_name }}"
_database_central_enabled: "{{ applications | get_app_conf(database_application_id, 'features.central_database', False) }}"

# Definition
database_name: "{{ _database_consumer_entity_name }}"
database_instance: "{{ _database_central_name if _database_central_enabled else database_name }}" # This could lead to bugs at dedicated database @todo cleanup
database_host: "{{ _database_central_name if _database_central_enabled else 'database' }}" # This could lead to bugs at dedicated database @todo cleanup
database_username: "{{ _database_consumer_entity_name }}"
database_password: "{{ applications | get_app_conf(database_application_id, 'credentials.database_password', true) }}"
database_port: "{{ ports.localhost.database[ _database_id ] }}"
database_env: "{{docker_compose.directories.env}}{{database_type}}.env"
database_url_jdbc: "jdbc:{{ database_type if database_type == 'mariadb' else 'postgresql' }}://{{ database_host }}:{{ database_port }}/{{ database_name }}"
database_url_full: "{{database_type}}://{{database_username}}:{{database_password}}@{{database_host}}:{{database_port}}/{{ database_name }}"
database_volume: "{{ _database_consumer_entity_name ~ '_' if not _database_central_enabled }}{{ database_host }}"
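To make the URL assembly above easier to follow, a worked example with assumed inputs (consumer entity `exampleapp`, `database_type: postgres`, central database enabled with service name `postgres`, port `5432`, password `secret`); none of these values come from the diff:

```yaml
# Roughly how the removed vars would resolve under the assumed inputs above.
database_name:     "exampleapp"
database_host:     "postgres"                                    # central name, since central_database is enabled
database_url_jdbc: "jdbc:postgresql://postgres:5432/exampleapp"  # non-mariadb types map to 'postgresql'
database_url_full: "postgres://exampleapp:secret@postgres:5432/exampleapp"
```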
@@ -2,8 +2,8 @@
galaxy_info:
  author: "Kevin Veen-Birkenbach"
  description: "Installs VLC, libaacs and libbluray for Blu-ray playback on Arch Linux–based systems."
  license: "Infinito.Nexus NonCommercial License (CNCL)"
  license_url: "https://s.veen.world/cncl"
  license: "Infinito.Nexus NonCommercial License"
  license_url: "https://s.infinito.nexus/license"
  company: |
    Kevin Veen-Birkenbach
    Consulting & Coaching Solutions
@@ -12,9 +12,9 @@ galaxy_info:
    - vlc
    - bluray
    - media
  repository: "https://github.com/kevinveenbirkenbach/infinito-nexus"
  issue_tracker_url: "https://github.com/kevinveenbirkenbach/infinito-nexus/issues"
  documentation: "https://github.com/kevinveenbirkenbach/infinito-nexus/tree/main/roles/desk-bluray-player"
  repository: "https://s.infinito.nexus/code"
  issue_tracker_url: "https://s.infinito.nexus/issues"
  documentation: "https://s.infinito.nexus/code/tree/main/roles/desk-bluray-player"
  min_ansible_version: "2.9"
  platforms:
    - name: Archlinux
@@ -25,5 +25,5 @@ The purpose of this role is to automate the provisioning of a secure Chromium en
Developed and maintained by **Kevin Veen-Birkenbach**.
Learn more at [www.veen.world](https://www.veen.world)

Part of the [Infinito.Nexus Project](https://github.com/kevinveenbirkenbach/infinito-nexus)
License: [Infinito.Nexus NonCommercial License (CNCL)](https://s.veen.world/cncl)
Part of the [Infinito.Nexus Project](https://s.infinito.nexus/code)
License: [Infinito.Nexus NonCommercial License](https://s.infinito.nexus/license)
@@ -2,8 +2,8 @@
galaxy_info:
  author: "Kevin Veen-Birkenbach"
  description: "Automates the installation and configuration of the Chromium browser with enforced security extensions."
  license: "Infinito.Nexus NonCommercial License (CNCL)"
  license_url: "https://s.veen.world/cncl"
  license: "Infinito.Nexus NonCommercial License"
  license_url: "https://s.infinito.nexus/license"
  company: |
    Kevin Veen-Birkenbach
    Consulting & Coaching Solutions
@@ -29,7 +29,7 @@ galaxy_info:
    - enterprise-policy
    - security
    - automation
  repository: https://github.com/kevinveenbirkenbach/infinito-nexus
  issue_tracker_url: https://github.com/kevinveenbirkenbach/infinito-nexus/issues
  repository: https://s.infinito.nexus/code
  issue_tracker_url: https://s.infinito.nexus/issues
  documentation: "https://docs.infinito.nexus/"
  dependencies: []
@@ -2,8 +2,8 @@
galaxy_info:
  author: "Kevin Veen-Birkenbach"
  description: "Installs CopyQ clipboard manager on Pacman-based systems and configures autostart for the current user."
  license: "Infinito.Nexus NonCommercial License (CNCL)"
  license_url: "https://s.veen.world/cncl"
  license: "Infinito.Nexus NonCommercial License"
  license_url: "https://s.infinito.nexus/license"
  company: |
    Kevin Veen-Birkenbach
    Consulting & Coaching Solutions
@@ -16,9 +16,9 @@ galaxy_info:
    - cli
  logo:
    class: fa fa-clipboard
  repository: "https://github.com/kevinveenbirkenbach/infinito-nexus"
  issue_tracker_url: "https://github.com/kevinveenbirkenbach/infinito-nexus/issues"
  documentation: "https://github.com/kevinveenbirkenbach/infinito-nexus/tree/main/roles/desk-copyq"
  repository: "https://s.infinito.nexus/code"
  issue_tracker_url: "https://s.infinito.nexus/issues"
  documentation: "https://s.infinito.nexus/code/tree/main/roles/desk-copyq"
  min_ansible_version: "2.9"
  platforms:
    - name: Archlinux
@@ -2,8 +2,8 @@
galaxy_info:
  author: "Kevin Veen-Birkenbach"
  description: "Installs Docker and Docker Compose, and adds a user to the Docker group for non-root usage on development machines."
  license: "Infinito.Nexus NonCommercial License (CNCL)"
  license_url: "https://s.veen.world/cncl"
  license: "Infinito.Nexus NonCommercial License"
  license_url: "https://s.infinito.nexus/license"
  company: |
    Kevin Veen-Birkenbach
    Consulting & Coaching Solutions
@@ -11,9 +11,9 @@ galaxy_info:
  galaxy_tags:
    - docker
    - development
  repository: "https://github.com/kevinveenbirkenbach/infinito-nexus"
  issue_tracker_url: "https://github.com/kevinveenbirkenbach/infinito-nexus/issues"
  documentation: "https://github.com/kevinveenbirkenbach/infinito-nexus/tree/main/roles/desk-docker"
  repository: "https://s.infinito.nexus/code"
  issue_tracker_url: "https://s.infinito.nexus/issues"
  documentation: "https://s.infinito.nexus/code/tree/main/roles/desk-docker"
  min_ansible_version: "2.9"
  platforms:
    - name: Archlinux
@@ -24,5 +24,5 @@ The role automates the provisioning of a secure Firefox environment, reducing ma
Developed and maintained by **Kevin Veen-Birkenbach**.
Learn more at [www.veen.world](https://www.veen.world)

Part of the [Infinito.Nexus Project](https://github.com/kevinveenbirkenbach/infinito-nexus)
License: [Infinito.Nexus NonCommercial License (CNCL)](https://s.veen.world/cncl)
Part of the [Infinito.Nexus Project](https://s.infinito.nexus/code)
License: [Infinito.Nexus NonCommercial License](https://s.infinito.nexus/license)
@@ -2,8 +2,8 @@
galaxy_info:
  author: "Kevin Veen-Birkenbach"
  description: "Automates Firefox installation and enforces Enterprise Policies (auto-install extensions) on Arch Linux."
  license: "Infinito.Nexus NonCommercial License (CNCL)"
  license_url: "https://s.veen.world/cncl"
  license: "Infinito.Nexus NonCommercial License"
  license_url: "https://s.infinito.nexus/license"
  company: |
    Kevin Veen-Birkenbach
    Consulting & Coaching Solutions
@@ -12,9 +12,9 @@ galaxy_info:
    - firefox
    - enterprise-policy
    - browser
  repository: "https://github.com/kevinveenbirkenbach/infinito-nexus"
  issue_tracker_url: "https://github.com/kevinveenbirkenbach/infinito-nexus/issues"
  documentation: "https://github.com/kevinveenbirkenbach/infinito-nexus/tree/main/roles/desk-firefox"
  repository: "https://s.infinito.nexus/code"
  issue_tracker_url: "https://s.infinito.nexus/issues"
  documentation: "https://s.infinito.nexus/code/tree/main/roles/desk-firefox"
  min_ansible_version: "2.9"
  platforms:
    - name: Archlinux
@@ -21,4 +21,4 @@ Developed and maintained by **Kevin Veen-Birkenbach**.
Learn more at [www.veen.world](https://www.veen.world)
For Git configuration details, see [git-configurator on GitHub](https://github.com/kevinveenbirkenbach/git-configurator).

License: [Infinito.Nexus NonCommercial License (CNCL)](https://s.veen.world/cncl)
License: [Infinito.Nexus NonCommercial License](https://s.infinito.nexus/license)
@@ -2,8 +2,8 @@
galaxy_info:
  author: "Kevin Veen-Birkenbach"
  description: "Installs Git and configures it using a custom git-configurator for personal computers."
  license: "Infinito.Nexus NonCommercial License (CNCL)"
  license_url: "https://s.veen.world/cncl"
  license: "Infinito.Nexus NonCommercial License"
  license_url: "https://s.infinito.nexus/license"
  company: |
    Kevin Veen-Birkenbach
    Consulting & Coaching Solutions
@@ -12,10 +12,10 @@

- name: setup git
  command: gitconfig --merge-option rebase --name "{{users.client.full_name}}" --email "{{users.client.email}}" --website "{{users.client.website}}" --signing gpg --gpg-key "{{users.client.gpg}}"
  when: run_once_gitconfig is not defined
  when: run_once_desk_git is not defined
  become: false

- name: run the gitconfig tasks once
  set_fact:
    run_once_gitconfig: true
  when: run_once_gitconfig is not defined
    run_once_desk_git: true
  when: run_once_desk_git is not defined
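Assembled from the lines above, the post-change version of this hunk presumably reads as follows; nothing here is new beyond putting the renamed `run_once_desk_git` guard lines in place:

```yaml
# Post-change state of the hunk, assembled from the diff lines above.
- name: setup git
  command: gitconfig --merge-option rebase --name "{{users.client.full_name}}" --email "{{users.client.email}}" --website "{{users.client.website}}" --signing gpg --gpg-key "{{users.client.gpg}}"
  when: run_once_desk_git is not defined
  become: false

- name: run the gitconfig tasks once
  set_fact:
    run_once_desk_git: true
  when: run_once_desk_git is not defined
```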
@@ -20,4 +20,4 @@ The purpose of this role is to ensure uninterrupted workflow by keeping the desk
Developed and maintained by **Kevin Veen-Birkenbach**.
Learn more at [www.veen.world](https://www.veen.world)

License: [Infinito.Nexus NonCommercial License (CNCL)](https://s.veen.world/cncl)
License: [Infinito.Nexus NonCommercial License](https://s.infinito.nexus/license)
@@ -1,17 +1,14 @@
---
galaxy_info:
  author: "Kevin Veen-Birkenbach"
  description: "Installs caffeine-ng and configures it to autostart for preventing screen sleep on GNOME."
  license: "Infinito.Nexus NonCommercial License (CNCL)"
  license_url: "https://s.veen.world/cncl"
  license: "Infinito.Nexus NonCommercial License"
  license_url: "https://s.infinito.nexus/license"
  min_ansible_version: "2.4"
  platforms:
    - name: Archlinux
      versions:
        - all
    - name: Archlinux
      versions:
        - all
  galaxy_tags:
    - caffeine
    - autostart
    - archlinux
  dependencies:
    - dev-yay
    - caffeine
    - autostart
    - archlinux
roles/desk-gnome-caffeine/tasks/01_core.yml (new file, 23 lines)
@@ -0,0 +1,23 @@
- name: Include dependency 'dev-yay'
  include_role:
    name: dev-yay
  when: run_once_dev_yay is not defined

- name: Install caffeine
  kewlfft.aur.aur:
    use: yay
    name:
      - caffeine-ng
  become: false

- name: Create autostart directory if it doesn't exist
  file:
    path: "{{auto_start_directory}}"
    state: directory

- name: Copy caffeine.desktop file to autostart directory
  template:
    src: caffeine.desktop.j2
    dest: "{{auto_start_directory}}caffeine.desktop"

- include_tasks: utils/run_once.yml
@@ -1,17 +1,3 @@
---
- name: Install caffeine
  kewlfft.aur.aur:
    use: yay
    name:
      - caffeine-ng
  become: false

- name: Create autostart directory if it doesn't exist
  file:
    path: "{{auto_start_directory}}"
    state: directory

- name: Copy caffeine.desktop file to autostart directory
  template:
    src: caffeine.desktop.j2
    dest: "{{auto_start_directory}}caffeine.desktop"
- block:
    - include_tasks: 01_core.yml
  when: run_once_desk_gnome_caffeine is not defined
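The new `main.yml` above guards a single include of `01_core.yml` with `run_once_desk_gnome_caffeine`, and `01_core.yml` finishes by including `utils/run_once.yml`. That helper is not shown in this diff; a plausible minimal body, mirroring the `set_fact` run-once pattern visible in the desk-git hunk earlier, might look like this (an assumption, not taken from the diff):

```yaml
# Hypothetical sketch of utils/run_once.yml; the real helper is not part of this diff.
# It is assumed to record that the including role already ran, so the
# "when: run_once_... is not defined" guard skips repeated executions.
- name: Mark role as executed
  set_fact:
    run_once_desk_gnome_caffeine: true
```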
@@ -22,4 +22,4 @@ The purpose of this role is to enhance and customize the GNOME desktop environme
Developed and maintained by **Kevin Veen-Birkenbach**.
Learn more at [www.veen.world](https://www.veen.world)

License: [Infinito.Nexus NonCommercial License (CNCL)](https://s.veen.world/cncl)
License: [Infinito.Nexus NonCommercial License](https://s.infinito.nexus/license)
@@ -2,8 +2,8 @@
galaxy_info:
  author: "Kevin Veen-Birkenbach"
  description: "Configures GNOME Shell extensions and installs the CLI GNOME Extension Manager for managing extensions."
  license: "Infinito.Nexus NonCommercial License (CNCL)"
  license_url: "https://s.veen.world/cncl"
  license: "Infinito.Nexus NonCommercial License"
  license_url: "https://s.infinito.nexus/license"
  company: |
    Kevin Veen-Birkenbach
    Consulting & Coaching Solutions
@@ -22,4 +22,4 @@ The purpose of this role is to ensure that GNOME Terminal is installed and prope
Developed and maintained by **Kevin Veen-Birkenbach**.
Learn more at [www.veen.world](https://www.veen.world)

License: [Infinito.Nexus NonCommercial License (CNCL)](https://s.veen.world/cncl)
License: [Infinito.Nexus NonCommercial License](https://s.infinito.nexus/license)
@@ -2,8 +2,8 @@
galaxy_info:
  author: "Kevin Veen-Birkenbach"
  description: "Installs GNOME Terminal on Arch Linux, providing a modern terminal emulator for the GNOME desktop environment."
  license: "Infinito.Nexus NonCommercial License (CNCL)"
  license_url: "https://s.veen.world/cncl"
  license: "Infinito.Nexus NonCommercial License"
  license_url: "https://s.infinito.nexus/license"
  company: |
    Kevin Veen-Birkenbach
    Consulting & Coaching Solutions
@@ -24,4 +24,4 @@ The purpose of this role is to provide a complete GNOME desktop experience by or
Developed and maintained by **Kevin Veen-Birkenbach**.
Learn more at [www.veen.world](https://www.veen.world)

License: [Infinito.Nexus NonCommercial License (CNCL)](https://s.veen.world/cncl)
License: [Infinito.Nexus NonCommercial License](https://s.infinito.nexus/license)
@@ -2,8 +2,8 @@
galaxy_info:
  author: "Kevin Veen-Birkenbach"
  description: "Aggregates essential GNOME desktop roles—including caffeine, extensions, and terminal—for a complete GNOME environment on Linux."
  license: "Infinito.Nexus NonCommercial License (CNCL)"
  license_url: "https://s.veen.world/cncl"
  license: "Infinito.Nexus NonCommercial License"
  license_url: "https://s.infinito.nexus/license"
  company: |
    Kevin Veen-Birkenbach
    Consulting & Coaching Solutions
@@ -2,8 +2,8 @@
galaxy_info:
  author: "Kevin Veen-Birkenbach"
  description: "Installs GnuCash finance management software on Pacman-based systems, ensuring the latest version is present."
  license: "Infinito.Nexus NonCommercial License (CNCL)"
  license_url: "https://s.veen.world/cncl"
  license: "Infinito.Nexus NonCommercial License"
  license_url: "https://s.infinito.nexus/license"
  company: |
    Kevin Veen-Birkenbach
    Consulting & Coaching Solutions
@@ -12,9 +12,9 @@ galaxy_info:
    - gnucash
    - finance
    - accounting
  repository: "https://github.com/kevinveenbirkenbach/infinito-nexus"
  issue_tracker_url: "https://github.com/kevinveenbirkenbach/infinito-nexus/issues"
  documentation: "https://github.com/kevinveenbirkenbach/infinito-nexus/tree/main/roles/desk-gnucash"
  repository: "https://s.infinito.nexus/code"
  issue_tracker_url: "https://s.infinito.nexus/issues"
  documentation: "https://s.infinito.nexus/code/tree/main/roles/desk-gnucash"
  min_ansible_version: "2.9"
  platforms:
    - name: Archlinux