mirror of
https://github.com/kevinveenbirkenbach/computer-playbook.git
synced 2025-09-08 11:17:17 +02:00
Compare commits
684 Commits
4e3c124f55
...
master
Author | SHA1 | Date | |
---|---|---|---|
445c94788e | |||
aac9704e8b | |||
a57a5f8828 | |||
90843726de | |||
d25da76117 | |||
d48a1b3c0a | |||
2839d2e1a4 | |||
00c99e58e9 | |||
904040589e | |||
9f3d300bca | |||
9e253a2d09 | |||
49120b0dcf | |||
b6f91ab9d3 | |||
77e8e7ed7e | |||
32bc17e0c3 | |||
e294637cb6 | |||
577767bed6 | |||
e77f8da510 | |||
4738b263ec | |||
0a588023a7 | |||
d2fa90774b | |||
0e72dcbe36 | |||
4f8ce598a9 | |||
3769e66d8d | |||
33a5fadf67 | |||
699a6b6f1e | |||
61c29eee60 | |||
d5204fb5c2 | |||
751615b1a4 | |||
e2993d2912 | |||
24b6647bfb | |||
d2dc2eab5f | |||
a1130e33d7 | |||
df122905eb | |||
d093a22d61 | |||
5e550ce3a3 | |||
0ada12e3ca | |||
1a5ce4a7fa | |||
a9abb3ce5d | |||
71ceb339fc | |||
61bba3d2ef | |||
0bde4295c7 | |||
8059f272d5 | |||
7c814e6e83 | |||
d760c042c2 | |||
6cac8085a8 | |||
3a83f3d14e | |||
61d852c508 | |||
188b098503 | |||
bc56940e55 | |||
5dfc2efb5a | |||
7f9dc65b37 | |||
163a925096 | |||
a8c88634b5 | |||
ce3fe1cd51 | |||
7ca8b7c71d | |||
110381e80c | |||
b02d88adc0 | |||
b7065837df | |||
c98a2378c4 | |||
4ae3cee36c | |||
b834f0c95c | |||
9f734dff17 | |||
6fa4d00547 | |||
7254667186 | |||
aaedaab3da | |||
7791bd8c04 | |||
34b3f3b0ad | |||
94fe58b5da | |||
9feb766e6f | |||
231fd567b3 | |||
3f8e7c1733 | |||
3bfab9ef8e | |||
f1870c07be | |||
d0cec9a7d4 | |||
1dbd714a56 | |||
3a17b2979e | |||
bb0530c2ac | |||
aa2eb53776 | |||
5f66c1a622 | |||
b3dfb8bf22 | |||
db642c1c39 | |||
2fccebbd1f | |||
c23fbd8ec4 | |||
2999d9af77 | |||
2809ffb9f0 | |||
cb12114ce8 | |||
ba99e558f7 | |||
2aed0f97d2 | |||
f36c7831b1 | |||
009bee531b | |||
4c7bb6d9db | |||
092869b29a | |||
f4ea6c6c0f | |||
3ed84717a7 | |||
1cfc2b7e23 | |||
01b9648650 | |||
65d3b3040d | |||
28f7ac5aba | |||
19926b0c57 | |||
3a79d9d630 | |||
983287a84a | |||
dd9a9b6d84 | |||
23a2e081bf | |||
4cbd848026 | |||
d67f660152 | |||
5c6349321b | |||
af1ee64246 | |||
d96bfc64a6 | |||
6ea8301364 | |||
92f5bf6481 | |||
58c17bf043 | |||
6c2d5c52c8 | |||
b919f39e35 | |||
9f2cfe65af | |||
fe399c3967 | |||
ef801aa498 | |||
18f3b1042f | |||
dece6228a4 | |||
cb66fb2978 | |||
b9da6908ec | |||
8baec17562 | |||
1401779a9d | |||
707a3fc1d0 | |||
d595d46e2e | |||
73d5651eea | |||
12a267827d | |||
c6cd6430bb | |||
67b2ebf001 | |||
ebb6660473 | |||
f62d09d8f1 | |||
de159db918 | |||
e2c2cf4bcf | |||
6e1e1ad5c5 | |||
06baa4b03a | |||
73e7fbdc8a | |||
bae2bc21ec | |||
a8f4dea9d2 | |||
5aaf2d28dc | |||
5287bb4d74 | |||
5446a1497e | |||
19889a8cfc | |||
d9980c0d8f | |||
35206aaafd | |||
942e8c9c12 | |||
97f4045c68 | |||
c182ecf516 | |||
ce033c370a | |||
a0477ad54c | |||
35c3681f55 | |||
af97e71976 | |||
19a51fd718 | |||
b916173422 | |||
9756a0f75f | |||
e417bc19bd | |||
7ad14673e1 | |||
eb781dbf8b | |||
6016da6f1f | |||
8b2f0ac47b | |||
9d6d64e11d | |||
f1a2967a37 | |||
95a2172fff | |||
dc3f4e05a8 | |||
e33944cda2 | |||
efa68cc1e0 | |||
79e702a3ab | |||
9180182d5b | |||
535094d15d | |||
658003f5b9 | |||
3ff783df17 | |||
3df511aee9 | |||
c27d16322b | |||
7a6e273ea4 | |||
384beae7c1 | |||
ad7e61e8b1 | |||
fa46523433 | |||
f4a380d802 | |||
42d6c1799b | |||
8608d89653 | |||
a4f39ac732 | |||
9cfb8f3a60 | |||
3e5344a46c | |||
ec07d1a20b | |||
594d9417d1 | |||
dc125e4843 | |||
39a54294dd | |||
a57fe718de | |||
b6aec5fe33 | |||
de07d890dc | |||
e27f355697 | |||
790762d397 | |||
4ce681e643 | |||
55cf3d0d8e | |||
2708b67751 | |||
f477ee3731 | |||
6d70f78989 | |||
b867a52471 | |||
78ee3e3c64 | |||
d7ece2a8c3 | |||
3794aa87b0 | |||
4cf996b1bb | |||
79517b2fe9 | |||
a84ee1240a | |||
7019b307c5 | |||
838a8fc7a1 | |||
95aba805c0 | |||
0856c340c7 | |||
b90a2f6c87 | |||
98e045196b | |||
a10dd402b8 | |||
6e538eabc8 | |||
82cc24a7f5 | |||
26b392ea76 | |||
b49fdc509e | |||
b1e8339283 | |||
f5db786878 | |||
7ef20474a0 | |||
83b9f697ab | |||
dd7b5e844c | |||
da01305cac | |||
1082caddae | |||
242347878d | |||
f46aabe884 | |||
d3cc187c3b | |||
0a4b9bc8e4 | |||
2887e54cca | |||
630fd43382 | |||
3114a7b586 | |||
34d771266a | |||
73b7d2728e | |||
fc4df980c5 | |||
763b43b44c | |||
db860e6ae3 | |||
2ba486902f | |||
7848226f83 | |||
185f37af52 | |||
b9461026a6 | |||
bf63e01b98 | |||
4a600ac531 | |||
dc0bb555c1 | |||
5adce08aea | |||
2569abc0be | |||
3a839cfe37 | |||
29f50da226 | |||
a5941763ff | |||
3d7bbabd7b | |||
e4b8c97e03 | |||
29df95ed82 | |||
6443771d93 | |||
d1cd87c843 | |||
5f0762e4f6 | |||
5642793f4a | |||
7d0502ebc5 | |||
20c8d46f54 | |||
a524c52f89 | |||
5c9ca20e04 | |||
bfe18dd83c | |||
0a83f3159a | |||
fb7b3a3c8e | |||
42f9ebad34 | |||
33b2d3f582 | |||
14e868a644 | |||
2a1a956739 | |||
bd2dde3af6 | |||
1126765da2 | |||
2620ee088e | |||
838a55ea94 | |||
1b26f1da8d | |||
43362e1694 | |||
14d3f65a70 | |||
b8ccd50ab2 | |||
4a39cc90c0 | |||
0de26fa6c7 | |||
1bed83078e | |||
7ffd79ebd9 | |||
2b7950920c | |||
f0b323afee | |||
eadcb62f2a | |||
cc2c1dc730 | |||
3b4821f7e7 | |||
5b64b47754 | |||
cb2b9462e1 | |||
03564b34bb | |||
e3b09e7f1a | |||
3adb08fc68 | |||
e9a41bd40c | |||
cb539b038c | |||
3ac9bd9f90 | |||
85a2f4b3d2 | |||
012426cf3b | |||
6c966bce2e | |||
3587531bda | |||
411a1f8931 | |||
cc51629337 | |||
022800425d | |||
0228014d34 | |||
1b638c366e | |||
5c90c252d0 | |||
4a65a254ae | |||
5e00deea19 | |||
bf7b24c3ee | |||
85924ab3c5 | |||
ac293c90f4 | |||
e0f35c4bbd | |||
989bee9522 | |||
2f12d8ea83 | |||
58620f6695 | |||
abc064fa56 | |||
7f42462514 | |||
41cd6b7702 | |||
a40d48bb03 | |||
2fba32d384 | |||
f2a765d69a | |||
c729edb525 | |||
597e9d5222 | |||
db0e030900 | |||
004507e233 | |||
e2014b9b59 | |||
567b1365c0 | |||
e99fa77b91 | |||
80dad1a5ed | |||
03290eafe1 | |||
58c64bd7c6 | |||
e497c001d6 | |||
4fa1c6cfbd | |||
53770f5308 | |||
13d8663796 | |||
f31565e4c5 | |||
a4d8de2152 | |||
c744ebe3f9 | |||
ce029881d0 | |||
94da112736 | |||
b62df5599d | |||
c9a7830953 | |||
53e5c563ae | |||
0b3b3a810a | |||
6d14f16dfd | |||
632d922977 | |||
26b29debc0 | |||
0c4cd283c4 | |||
5d36a806ff | |||
84de85d905 | |||
457f3659fa | |||
4c7ee0441e | |||
140572a0a4 | |||
a30cd4e8b5 | |||
2067804e9f | |||
1a42e8bd14 | |||
8634b5e1b3 | |||
1595a7c4a6 | |||
82aaf7ad74 | |||
7e4a1062af | |||
d5e5f57f92 | |||
f671678720 | |||
2219696c3f | |||
fbaee683fd | |||
b301e58ee6 | |||
de15c42de8 | |||
918355743f | |||
f6e62525d1 | |||
f72ac30884 | |||
1496f1de95 | |||
38de10ba65 | |||
e8c19b4b84 | |||
b0737b1cdb | |||
e4cc928eea | |||
c9b2136578 | |||
5709935c92 | |||
c7badc608a | |||
0e59d35129 | |||
1ba50397db | |||
6318611931 | |||
6e04ac58d2 | |||
b6e571a496 | |||
21b6362bc1 | |||
1fcf072257 | |||
ea0149b5d4 | |||
fe76fe1e62 | |||
3431796283 | |||
b5d8ac5462 | |||
5426014096 | |||
a9d77de2a4 | |||
766ef8619f | |||
66013a4da3 | |||
1cb5a12d85 | |||
6e8ae793e3 | |||
0746acedfd | |||
f5659a44f8 | |||
77816ac4e7 | |||
8779afd1f7 | |||
0074bcbd69 | |||
149c563831 | |||
e9ef62b95d | |||
aeaf84de6f | |||
fdceb0f792 | |||
2fd83eaf55 | |||
|
21eb614912 | ||
b880b98ac3 | |||
acfb1a2ee7 | |||
4885ad7eb4 | |||
d9669fc6dd | |||
8e0341c120 | |||
22c8c395f0 | |||
aae69ea15b | |||
c7b25ed093 | |||
e675aa5886 | |||
14f07adc9d | |||
dba12b89d8 | |||
0607974dac | |||
e8fa22cb43 | |||
eedfe83ece | |||
9f865dd215 | |||
220e3e1c60 | |||
2996c7cbb6 | |||
59bd4ca8eb | |||
da58691d25 | |||
c96f278ac3 | |||
2715479c95 | |||
926640371f | |||
cdc97c8ba5 | |||
4124e97aeb | |||
7f0d40bdc3 | |||
8dc2238ba2 | |||
b9b08feadd | |||
dc437c7621 | |||
7d63d92166 | |||
3eb51a32ce | |||
6272303b55 | |||
dfd7be9d72 | |||
90ad688ca9 | |||
2f02ad6c15 | |||
1257bef61d | |||
3eca5dabdf | |||
5a0684fa2d | |||
051e4accd6 | |||
7f53cc3a12 | |||
9228d51e86 | |||
99c6c9ec92 | |||
34f9d773bd | |||
5edb9d19cf | |||
7a09f223af | |||
f88e57ca52 | |||
7bc11f9b31 | |||
0b25161af6 | |||
14c3ff1253 | |||
234cfea02f | |||
|
69e29029af | ||
bc5374cf52 | |||
|
1660bcd384 | ||
|
41d924af1c | ||
80278f2bb0 | |||
44e0fea0b2 | |||
a9e7ed3605 | |||
f9f76892af | |||
996244b672 | |||
9f61b4e50b | |||
3549f4de32 | |||
552bb1bbae | |||
1b385c5215 | |||
1240d3bfdf | |||
27973c2773 | |||
f62355e490 | |||
f5213fd59c | |||
0472fecd64 | |||
d1fcbedef6 | |||
c8be88e3b1 | |||
5e315f9603 | |||
bab1035a24 | |||
30930c4136 | |||
bba663f95d | |||
c2f83abb60 | |||
3bc64023af | |||
d94254effb | |||
ff18c7cd73 | |||
a84abbdade | |||
5dc8ec2344 | |||
4b9e7dd3b7 | |||
22ff2dc1f3 | |||
16c1a5d834 | |||
b25f7f52b3 | |||
4826de621e | |||
4501c31756 | |||
c185c537cb | |||
809ac1adf4 | |||
1a2451af4e | |||
e78974b469 | |||
b1bf7aaba5 | |||
a1643870db | |||
aeeae776c7 | |||
356c214718 | |||
4717e33649 | |||
ee4ee9a1b7 | |||
57211c2076 | |||
2ffaadfaca | |||
bc5059fe62 | |||
e6db73c02a | |||
4ad6f1f8ea | |||
7e58b825ea | |||
f3aa7625fe | |||
d9c4493e0d | |||
14dde77134 | |||
fd422a14ce | |||
5343536d27 | |||
6e2e3e45a7 | |||
ed866bf177 | |||
a580f41edd | |||
dcb57af6f7 | |||
2699edd197 | |||
257d0c4673 | |||
4cbd29735f | |||
8ea86d2bd7 | |||
3951376a29 | |||
e1d36045da | |||
c572d535e2 | |||
c79dbeec68 | |||
5501e40b7b | |||
e84c7e5612 | |||
be675d5f9e | |||
bf16a44e87 | |||
98cc3d5070 | |||
2db5f75888 | |||
867b377115 | |||
1882fcfef5 | |||
15dc99a221 | |||
6b35454f35 | |||
d86ca6cc0e | |||
1b9775ccb5 | |||
45d9da3125 | |||
8ccfb1dfbe | |||
6a1a83432f | |||
85195e01f9 | |||
45624037b1 | |||
d4fbdb409f | |||
a738199868 | |||
c1da74de3f | |||
c23624e30c | |||
0f1f40f2e0 | |||
d1982af63d | |||
409e659143 | |||
562603a8cd | |||
6d4b7227ce | |||
9a8ef5e047 | |||
ad449c3b6a | |||
9469452275 | |||
fd8ef26b53 | |||
8cda54c46e | |||
90bc52632e | |||
0b8d2e0b40 | |||
40491dbc2e | |||
fac8971982 | |||
c791e86b8b | |||
d222b55f30 | |||
a04a1710d3 | |||
4f06f94023 | |||
2529c7cdb3 | |||
ab12a933f6 | |||
529efc0bd7 | |||
725fea1169 | |||
84322f81ef | |||
fd637c58e3 | |||
bfc42ce2ac | |||
1bdfb71f2f | |||
807fab42c3 | |||
2f45038bef | |||
f263992393 | |||
f4d1f2a303 | |||
3b2190f7ab | |||
7145213f45 | |||
70f7953027 | |||
c155e82f8c | |||
169493179e | |||
dea2669de2 | |||
e4ce3848fc | |||
8113e412dd | |||
94796efae8 | |||
7aed3dd8c2 | |||
1a649568ce | |||
f9f7d9b299 | |||
9d8e48d303 | |||
f9426cfb74 | |||
e56c960900 | |||
41934ab285 | |||
932ce7c8ca | |||
0730c1efd5 | |||
fd370624c7 | |||
4b8b04f29c | |||
2d276cfa5e | |||
241c5c6da8 | |||
af3ea9039c | |||
c8054ffbc3 | |||
54490faca7 | |||
b6eb73dee4 | |||
3fed9eb75a | |||
45c18b69ba | |||
ac3bc5742d | |||
f6c767f122 | |||
5e83f306b4 | |||
2e2501980c | |||
cb9a7b2ade | |||
a6afbaff38 | |||
111d6ac50d | |||
766fe39c4c | |||
8254bc9f07 | |||
a8139c2e72 | |||
f8264b88d5 | |||
779823eb09 | |||
0d5f369755 | |||
4627d9031c | |||
8ac88475d5 | |||
da88871108 | |||
b61f695aac | |||
a6000d7666 | |||
b5b65c4f67 | |||
ea79b9456a | |||
7c9b895dbe | |||
3c759cbb4c | |||
733356b4f7 | |||
21b4fdee47 | |||
294a43bd97 | |||
dd73a87e19 | |||
bb7859ab44 | |||
bbabc58cf9 | |||
959c48c1a1 | |||
253b088cdb | |||
c99def5724 | |||
75a5ab455e | |||
d5c14ad53c | |||
e90c9a18b0 | |||
fff06d52b8 | |||
f02ca50f88 | |||
4acf2137e8 | |||
6a447a1426 | |||
d1c8036fa4 | |||
30d583f0c9 | |||
f7aab39167 | |||
e4028fccf4 | |||
b6ee7b9f98 | |||
67122800f3 | |||
bfd1a2ee70 | |||
076a2058cc | |||
9dc55c5893 | |||
81ef808191 | |||
8161dd1b6d | |||
ac72544b72 | |||
732607bbb6 | |||
c6f49dc6e2 | |||
ce68391b4e | |||
c42d7cdf19 | |||
f012b4fc78 | |||
56f6a2dc3b | |||
632ad14bd8 | |||
fb0ca533ae | |||
6fbe550afe | |||
294d402990 | |||
95cbce93f0 | |||
77b3ca5fa2 | |||
33d14741e2 | |||
ed67ca0501 | |||
8f31b2fbfe | |||
325695777a | |||
4c9ae52fd7 | |||
3c22fb8d36 | |||
ae8a0d608b | |||
f9aa1ed2a4 | |||
8e4e497d2c | |||
24d2c0edb5 | |||
e1d090ce04 | |||
56caecc5d8 | |||
63bf7f7640 | |||
ad60f5fb37 | |||
991ed7d614 | |||
840836702d | |||
9142eeba3c | |||
882cf47c20 | |||
e8992f254c | |||
92245b5935 | |||
a98332bfb9 | |||
422e4c136d | |||
756597668c | |||
4cc4195fab | |||
78031855b9 | |||
5340d580ce | |||
c8669e19cf | |||
a18e888044 |
13
.dockerignore
Normal file
13
.dockerignore
Normal file
@@ -0,0 +1,13 @@
|
||||
# The .gitignore is the single point of truth for files which should be ignored.
|
||||
# Add patterns, files and folders to the .gitignore and execute 'make build'
|
||||
# NEVER TOUCH THE .dockerignore, BECAUSE IT ANYHOW WILL BE OVERWRITTEN
|
||||
|
||||
site.retry
|
||||
*__pycache__
|
||||
venv
|
||||
*.log
|
||||
*.bak
|
||||
*tree.json
|
||||
roles/list.json
|
||||
*.pyc
|
||||
.git
|
1
.gitattributes
vendored
Normal file
1
.gitattributes
vendored
Normal file
@@ -0,0 +1 @@
|
||||
* text=auto eol=lf
|
4
.github/workflows/TODO.md
vendored
Normal file
4
.github/workflows/TODO.md
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
# Todo
|
||||
- Create workflow test-server, which tests all server roles
|
||||
- Create workflow test-desktop, which tests all desktop roles
|
||||
- For the backup services keep in mind to setup a tandem, which pulls the backups from each other to verify that this also works
|
32
.github/workflows/test-cli.yml
vendored
Normal file
32
.github/workflows/test-cli.yml
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
name: Build & Test Infinito.Nexus CLI in Docker Container
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
build-and-test:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 15
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Build Docker image
|
||||
run: |
|
||||
docker build -t infinito:latest .
|
||||
|
||||
- name: Clean build artifacts
|
||||
run: |
|
||||
docker run --rm infinito:latest make clean
|
||||
|
||||
- name: Generate project outputs
|
||||
run: |
|
||||
docker run --rm infinito:latest make build
|
||||
|
||||
- name: Run tests
|
||||
run: |
|
||||
docker run --rm infinito:latest make test
|
22
.github/workflows/test-on-arch.yml
vendored
22
.github/workflows/test-on-arch.yml
vendored
@@ -1,22 +0,0 @@
|
||||
name: Build & Test on Arch Linux
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
build-and-test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Build & Test in Arch Linux Container
|
||||
uses: addnab/docker-run-action@v3
|
||||
with:
|
||||
image: archlinux:latest
|
||||
options: -v ${{ github.workspace }}:/workspace -w /workspace
|
||||
run: |
|
||||
pacman -Sy --noconfirm base-devel git python python-pip docker make
|
||||
make build
|
||||
make test
|
5
.gitignore
vendored
5
.gitignore
vendored
@@ -1,3 +1,7 @@
|
||||
# The .gitignore is the single point of truth for files which should be ignored.
|
||||
# Add patterns, files and folders to the .gitignore and execute 'make build'
|
||||
# NEVER TOUCH THE .dockerignore, BECAUSE IT ANYHOW WILL BE OVERWRITTEN
|
||||
|
||||
site.retry
|
||||
*__pycache__
|
||||
venv
|
||||
@@ -5,3 +9,4 @@ venv
|
||||
*.bak
|
||||
*tree.json
|
||||
roles/list.json
|
||||
*.pyc
|
||||
|
@@ -1,6 +1,6 @@
|
||||
# Code of Conduct
|
||||
|
||||
In order to foster a welcoming, open, and respectful community for everyone, we expect all contributors and participants in the CyMaIS project to abide by the following Code of Conduct.
|
||||
In order to foster a welcoming, open, and respectful community for everyone, we expect all contributors and participants in the Infinito.Nexus project to abide by the following Code of Conduct.
|
||||
|
||||
## Our Pledge
|
||||
|
||||
@@ -29,10 +29,10 @@ Our project maintainers and community leaders will review all reports and take a
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies to all spaces managed by the CyMaIS project, including GitHub repositories, mailing lists, chat rooms, and other communication channels.
|
||||
This Code of Conduct applies to all spaces managed by the Infinito.Nexus project, including GitHub repositories, mailing lists, chat rooms, and other communication channels.
|
||||
|
||||
## Acknowledgment
|
||||
|
||||
By participating in the CyMaIS project, you agree to adhere to this Code of Conduct. We appreciate your cooperation in helping us build a positive and productive community.
|
||||
By participating in the Infinito.Nexus project, you agree to adhere to this Code of Conduct. We appreciate your cooperation in helping us build a positive and productive community.
|
||||
|
||||
Thank you for contributing to a safe and inclusive CyMaIS community!
|
||||
Thank you for contributing to a safe and inclusive Infinito.Nexus community!
|
@@ -2,13 +2,13 @@
|
||||
|
||||
<img src="https://cybermaster.space/wp-content/uploads/sites/7/2023/11/FVG_8364BW-scaled.jpg" width="300" style="float: right; margin-left: 30px;">
|
||||
|
||||
My name is Kevin Veen-Birkenbach and I'm the author and founder of CyMaIS.
|
||||
My name is Kevin Veen-Birkenbach and I'm the author and founder of Infinito.Nexus.
|
||||
|
||||
I'm glad to assist you in the implementation of your secure and scalable IT infrastrucutre solution with CyMaIS.
|
||||
I'm glad to assist you in the implementation of your secure and scalable IT infrastrucutre solution with Infinito.Nexus.
|
||||
|
||||
My expertise in server administration, digital corporate infrastructure, custom software, and information security, all underpinned by a commitment to Open Source solutions, guarantees that your IT setup meets the highest industry standards.
|
||||
|
||||
Discover how CyMaIS can transform your IT landscape.
|
||||
Discover how Infinito.Nexus can transform your IT landscape.
|
||||
|
||||
Contact me for more details:
|
||||
|
||||
|
@@ -1,14 +1,14 @@
|
||||
# Contributing
|
||||
|
||||
Thank you for your interest in contributing to CyMaIS! We welcome contributions from the community to help improve and enhance this project. Your input makes the project stronger and more adaptable to a wide range of IT infrastructure needs.
|
||||
Thank you for your interest in contributing to Infinito.Nexus! We welcome contributions from the community to help improve and enhance this project. Your input makes the project stronger and more adaptable to a wide range of IT infrastructure needs.
|
||||
|
||||
## How to Contribute
|
||||
|
||||
There are several ways you can help:
|
||||
- **Reporting Issues:** Found a bug or have a feature request? Please open an issue on our [GitHub Issues page](https://github.com/kevinveenbirkenbach/cymais/issues) with a clear description and steps to reproduce the problem.
|
||||
- **Reporting Issues:** Found a bug or have a feature request? Please open an issue on our [GitHub Issues page](https://s.infinito.nexus/issues) with a clear description and steps to reproduce the problem.
|
||||
- **Code Contributions:** If you'd like to contribute code, fork the repository, create a new branch for your feature or bug fix, and submit a pull request. Ensure your code adheres to our coding style and includes tests where applicable.
|
||||
- **Documentation:** Improving the documentation is a great way to contribute. Whether it's clarifying an existing section or adding new guides, your contributions help others understand and use CyMaIS effectively.
|
||||
- **Financial Contributions:** If you appreciate CyMaIS and want to support its ongoing development, consider making a financial contribution. For more details, please see our [donate options](12_DONATE.md).
|
||||
- **Documentation:** Improving the documentation is a great way to contribute. Whether it's clarifying an existing section or adding new guides, your contributions help others understand and use Infinito.Nexus effectively.
|
||||
- **Financial Contributions:** If you appreciate Infinito.Nexus and want to support its ongoing development, consider making a financial contribution. For more details, please see our [donate options](12_DONATE.md).
|
||||
|
||||
## Code of Conduct
|
||||
|
||||
@@ -40,7 +40,7 @@ Please follow these guidelines when contributing code:
|
||||
|
||||
## License and Commercial Use
|
||||
|
||||
CyMaIS is primarily designed for private use. Commercial use of CyMaIS is not permitted without a proper licensing agreement. By contributing to this project, you agree that your contributions will be licensed under the same terms as the rest of the project.
|
||||
Infinito.Nexus is primarily designed for private use. Commercial use of Infinito.Nexus is not permitted without a proper licensing agreement. By contributing to this project, you agree that your contributions will be licensed under the same terms as the rest of the project.
|
||||
|
||||
## Getting Started
|
||||
|
||||
@@ -54,4 +54,4 @@ CyMaIS is primarily designed for private use. Commercial use of CyMaIS is not pe
|
||||
|
||||
If you have any questions or need help, feel free to open an issue or join our community discussions. We appreciate your efforts and are here to support you.
|
||||
|
||||
Thank you for contributing to CyMaIS and helping us build a better, more efficient IT infrastructure solution!
|
||||
Thank you for contributing to Infinito.Nexus and helping us build a better, more efficient IT infrastructure solution!
|
||||
|
@@ -1,8 +1,8 @@
|
||||
# Support Us
|
||||
|
||||
CyMaIS is an Open Source Based transformative tool designed to redefine IT infrastructure setup for organizations and individuals alike. Your contributions directly support the ongoing development and innovation behind CyMaIS, ensuring that it continues to grow and serve its community effectively.
|
||||
Infinito.Nexus is an Open Source Based transformative tool designed to redefine IT infrastructure setup for organizations and individuals alike. Your contributions directly support the ongoing development and innovation behind Infinito.Nexus, ensuring that it continues to grow and serve its community effectively.
|
||||
|
||||
If you enjoy using CyMaIS and would like to contribute to its improvement, please consider donating. Every contribution, no matter the size, helps us maintain and expand this project.
|
||||
If you enjoy using Infinito.Nexus and would like to contribute to its improvement, please consider donating. Every contribution, no matter the size, helps us maintain and expand this project.
|
||||
|
||||
[](https://github.com/sponsors/kevinveenbirkenbach) [](https://www.patreon.com/c/kevinveenbirkenbach) [](https://buymeacoffee.com/kevinveenbirkenbach) [](https://s.veen.world/paypaldonate)
|
||||
|
||||
|
69
Dockerfile
Normal file
69
Dockerfile
Normal file
@@ -0,0 +1,69 @@
|
||||
FROM archlinux:latest
|
||||
|
||||
# 1) Update system and install build/runtime deps
|
||||
RUN pacman -Syu --noconfirm \
|
||||
base-devel \
|
||||
git \
|
||||
python \
|
||||
python-pip \
|
||||
python-setuptools \
|
||||
alsa-lib \
|
||||
go \
|
||||
rsync \
|
||||
&& pacman -Scc --noconfirm
|
||||
|
||||
# 2) Stub out systemctl & yay so post-install hooks and AUR calls never fail
|
||||
RUN printf '#!/bin/sh\nexit 0\n' > /usr/bin/systemctl \
|
||||
&& chmod +x /usr/bin/systemctl \
|
||||
&& printf '#!/bin/sh\nexit 0\n' > /usr/bin/yay \
|
||||
&& chmod +x /usr/bin/yay
|
||||
|
||||
# 3) Build & install python-simpleaudio from AUR manually (as non-root)
|
||||
RUN useradd -m aur_builder \
|
||||
&& su aur_builder -c "git clone https://aur.archlinux.org/python-simpleaudio.git /home/aur_builder/psa && \
|
||||
cd /home/aur_builder/psa && \
|
||||
makepkg --noconfirm --skippgpcheck" \
|
||||
&& pacman -U --noconfirm /home/aur_builder/psa/*.pkg.tar.zst \
|
||||
&& rm -rf /home/aur_builder/psa
|
||||
|
||||
# 4) Clone Kevin’s Package Manager and create its venv
|
||||
ENV PKGMGR_REPO=/opt/package-manager \
|
||||
PKGMGR_VENV=/root/.venvs/pkgmgr
|
||||
|
||||
RUN git clone https://github.com/kevinveenbirkenbach/package-manager.git $PKGMGR_REPO \
|
||||
&& python -m venv $PKGMGR_VENV \
|
||||
&& $PKGMGR_VENV/bin/pip install --upgrade pip \
|
||||
# install pkgmgr’s own deps + the ansible Python library so infinito import yaml & ansible.plugins.lookup work
|
||||
&& $PKGMGR_VENV/bin/pip install --no-cache-dir -r $PKGMGR_REPO/requirements.txt ansible \
|
||||
# drop a thin wrapper so `pkgmgr` always runs inside that venv
|
||||
&& printf '#!/bin/sh\n. %s/bin/activate\nexec python %s/main.py "$@"\n' \
|
||||
"$PKGMGR_VENV" "$PKGMGR_REPO" > /usr/local/bin/pkgmgr \
|
||||
&& chmod +x /usr/local/bin/pkgmgr
|
||||
|
||||
# 5) Ensure pkgmgr venv bin and user-local bin are on PATH
|
||||
ENV PATH="$PKGMGR_VENV/bin:/root/.local/bin:${PATH}"
|
||||
|
||||
# 6) Copy local Infinito.Nexus source into the image for override
|
||||
COPY . /opt/infinito-src
|
||||
|
||||
# 7) Install Infinito.Nexus via pkgmgr (clone-mode https)
|
||||
RUN pkgmgr install infinito --clone-mode https
|
||||
|
||||
# 8) Override installed Infinito.Nexus with local source and clean ignored files
|
||||
RUN INFINITO_PATH=$(pkgmgr path infinito) && \
|
||||
rm -rf "$INFINITO_PATH"/* && \
|
||||
rsync -a --delete --exclude='.git' /opt/infinito-src/ "$INFINITO_PATH"/
|
||||
|
||||
# 9) Symlink the infinito script into /usr/local/bin so ENTRYPOINT works
|
||||
RUN INFINITO_PATH=$(pkgmgr path infinito) && \
|
||||
ln -sf "$INFINITO_PATH"/main.py /usr/local/bin/infinito && \
|
||||
chmod +x /usr/local/bin/infinito
|
||||
|
||||
# 10) Run integration tests
|
||||
# This needed to be deactivated becaus it doesn't work with gitthub workflow
|
||||
#RUN INFINITO_PATH=$(pkgmgr path infinito) && \
|
||||
# cd "$INFINITO_PATH" && \
|
||||
# make test
|
||||
|
||||
ENTRYPOINT ["infinito"]
|
||||
CMD ["--help"]
|
@@ -1,9 +1,9 @@
|
||||
# License Agreement
|
||||
|
||||
## CyMaIS NonCommercial License (CNCL)
|
||||
## Infinito.Nexus NonCommercial License
|
||||
|
||||
### Definitions
|
||||
- **"Software":** Refers to *"[CyMaIS - Cyber Master Infrastructure Solution](https://cymais.cloud/)"* and its associated source code.
|
||||
- **"Software":** Refers to *"[Infinito.Nexus](https://infinito.nexus/)"* and its associated source code.
|
||||
- **"Commercial Use":** Any use of the Software intended for direct or indirect financial gain, including but not limited to sales, rentals, or provision of services.
|
||||
|
||||
### Provisions
|
||||
|
38
Makefile
38
Makefile
@@ -21,15 +21,31 @@ EXTRA_USERS := $(shell \
|
||||
|
||||
.PHONY: build install test
|
||||
|
||||
clean-keep-logs:
|
||||
@echo "🧹 Cleaning ignored files but keeping logs/…"
|
||||
git clean -fdX -- ':!logs' ':!logs/**'
|
||||
|
||||
clean:
|
||||
@echo "Removing not tracked git files"
|
||||
git clean -fdx
|
||||
@echo "Removing ignored git files"
|
||||
git clean -fdX
|
||||
|
||||
list:
|
||||
@echo Generating the roles list
|
||||
python3 main.py build roles_list
|
||||
|
||||
tree:
|
||||
@echo Generating Tree
|
||||
python3 main.py build tree -D 2 --no-signal
|
||||
|
||||
build:
|
||||
mig: list tree
|
||||
@echo Creating meta data for meta infinity graph
|
||||
|
||||
dockerignore:
|
||||
@echo Create dockerignore
|
||||
cat .gitignore > .dockerignore
|
||||
echo ".git" >> .dockerignore
|
||||
|
||||
messy-build: dockerignore
|
||||
@echo "🔧 Generating users defaults → $(USERS_OUT)…"
|
||||
python3 $(USERS_SCRIPT) \
|
||||
--roles-dir $(ROLES_DIR) \
|
||||
@@ -53,11 +69,17 @@ build:
|
||||
echo " ✅ $$out"; \
|
||||
)
|
||||
|
||||
messy-test:
|
||||
@echo "🧪 Running Python tests…"
|
||||
PYTHONPATH=. python -m unittest discover -s tests
|
||||
@echo "📑 Checking Ansible syntax…"
|
||||
ansible-playbook playbook.yml --syntax-check
|
||||
|
||||
install: build
|
||||
@echo "⚙️ Install complete."
|
||||
|
||||
test:
|
||||
@echo "🧪 Running Python tests…"
|
||||
python -m unittest discover -s tests
|
||||
@echo "📑 Checking Ansible syntax…"
|
||||
ansible-playbook playbook.yml --syntax-check
|
||||
build: clean messy-build
|
||||
@echo "Full build with cleanup before was executed."
|
||||
|
||||
test: build messy-test
|
||||
@echo "Full test with build before was executed."
|
||||
|
42
README.md
42
README.md
@@ -1,14 +1,20 @@
|
||||
# IT-Infrastructure Automation Framework 🚀
|
||||
# Infinito.Nexus 🚀
|
||||
|
||||
[](https://github.com/sponsors/kevinveenbirkenbach) [](https://www.patreon.com/c/kevinveenbirkenbach) [](https://buymeacoffee.com/kevinveenbirkenbach) [](https://s.veen.world/paypaldonate)
|
||||
**🔐 One login. ♾️ Infinite application**
|
||||
|
||||

|
||||
---
|
||||
|
||||

|
||||
## What is Infinito.Nexus? 📌
|
||||
|
||||
## What is CyMaIS? 📌
|
||||
**Infinito.Nexus** is an **automated, modular infrastructure framework** built on **Docker**, **Linux**, and **Ansible**, equally suited for cloud services, local server management, and desktop workstations. At its core lies a **web-based desktop with single sign-on**—backed by an **LDAP directory** and **OIDC**—granting **seamless access** to an almost limitless portfolio of self-hosted applications. It fully supports **ActivityPub applications** and is **Fediverse-compatible**, while integrated **monitoring**, **alerting**, **cleanup**, **self-healing**, **automated updates**, and **backup solutions** provide everything an organization needs to run at scale.
|
||||
|
||||
**CyMaIS** is an **automated, modular infrastructure framework** built on **Docker**, **Linux**, and **Ansible**, equally suited for cloud services, local server management, and desktop workstations. At its core lies a **web-based desktop with single sign-on**—backed by an **LDAP directory** and **OIDC**—granting **seamless access** to an almost limitless portfolio of self-hosted applications. It fully supports **ActivityPub applications** and is **Fediverse-compatible**, while integrated **monitoring**, **alerting**, **cleanup**, **self-healing**, **automated updates**, and **backup solutions** provide everything an organization needs to run at scale.
|
||||
| 📚 | 🔗 |
|
||||
|---|---|
|
||||
| 🌐 Try It Live | [](https://infinito.nexus) |
|
||||
| 🔧 Request Your Setup | [](https://cybermaster.space) |
|
||||
| 📖 About This Project | [](https://github.com/sponsors/kevinveenbirkenbach) [](https://github.com/kevinveenbirkenbach/infinito-nexus/actions/workflows/test-cli.yml) [](https://s.infinito.nexus/code) |
|
||||
| ☕️ Support Us | [](https://www.patreon.com/c/kevinveenbirkenbach) [](https://buymeacoffee.com/kevinveenbirkenbach) [](https://s.veen.world/paypaldonate) [](https://github.com/sponsors/kevinveenbirkenbach) |
|
||||
|
||||
---
|
||||
|
||||
@@ -49,23 +55,37 @@ More informations about the features you will find [here](docs/overview/Features
|
||||
|
||||
### Use it online 🌐
|
||||
|
||||
Give CyMaIS a spin at [CyMaIS.cloud](httpy://cymais.cloud) – sign up in seconds, click around, and see how easy infra magic can be! 🚀🔧✨
|
||||
Try [Infinito.Nexus](https://infinito.nexus) – sign up in seconds, explore the platform, and discover what our solution can do for you! 🚀🔧✨
|
||||
|
||||
### Install locally 💻
|
||||
1. **Install CyMaIS** via [Kevin's Package Manager](https://github.com/kevinveenbirkenbach/package-manager)
|
||||
2. **Setup CyMaIS** using:
|
||||
1. **Install Infinito.Nexus** via [Kevin's Package Manager](https://github.com/kevinveenbirkenbach/package-manager)
|
||||
2. **Setup Infinito.Nexus** using:
|
||||
```sh
|
||||
pkgmgr install cymais
|
||||
pkgmgr install infinito
|
||||
```
|
||||
3. **Explore Commands** with:
|
||||
```sh
|
||||
cymais --help
|
||||
infinito --help
|
||||
```
|
||||
---
|
||||
|
||||
### Setup with Docker🚢
|
||||
|
||||
Get Infinito.Nexus up and running inside Docker in just a few steps. For detailed build options and troubleshooting, see the [Docker Guide](docs/Docker.md).
|
||||
|
||||
```bash
|
||||
# 1. Build the Docker image: the Docker image:
|
||||
docker build -t infinito:latest .
|
||||
|
||||
# 2. Run the CLI interactively:
|
||||
docker run --rm -it infinito:latest infinito --help
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## License ⚖️
|
||||
|
||||
CyMaIS is distributed under the **CyMaIS NonCommercial License**. Please see [LICENSE.md](LICENSE.md) for full terms.
|
||||
Infinito.Nexus is distributed under the **Infinito.Nexus NonCommercial License**. Please see [LICENSE.md](LICENSE.md) for full terms.
|
||||
|
||||
---
|
||||
|
||||
|
5
TODO.md
Normal file
5
TODO.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# Todos
|
||||
- Implement multi language
|
||||
- Implement rbac administration interface
|
||||
- Implement ``MASK_CREDENTIALS_IN_LOGS`` for all sensible tasks
|
||||
- [Enable IP6 for docker](https://chatgpt.com/share/68a0acb8-db20-800f-9d2c-b34e38b5cdee).
|
3
Todo.md
3
Todo.md
@@ -1,3 +0,0 @@
|
||||
# Todos
|
||||
- Implement multi language
|
||||
- Implement rbac administration interface
|
33
ansible.cfg
33
ansible.cfg
@@ -1,4 +1,33 @@
|
||||
[defaults]
|
||||
lookup_plugins = ./lookup_plugins
|
||||
# --- Performance & Behavior ---
|
||||
forks = 25
|
||||
strategy = linear
|
||||
gathering = smart
|
||||
timeout = 120
|
||||
retry_files_enabled = False
|
||||
host_key_checking = True
|
||||
deprecation_warnings = True
|
||||
interpreter_python = auto_silent
|
||||
|
||||
# --- Output & Profiling ---
|
||||
stdout_callback = yaml
|
||||
callbacks_enabled = profile_tasks,timer
|
||||
|
||||
# --- Plugin paths ---
|
||||
filter_plugins = ./filter_plugins
|
||||
module_utils = ./module_utils
|
||||
lookup_plugins = ./lookup_plugins
|
||||
module_utils = ./module_utils
|
||||
|
||||
[ssh_connection]
|
||||
# Multiplexing: safer socket path in HOME instead of /tmp
|
||||
ssh_args = -o ControlMaster=auto -o ControlPersist=20s -o ControlPath=~/.ssh/ansible-%h-%p-%r \
|
||||
-o ServerAliveInterval=15 -o ServerAliveCountMax=3 -o StrictHostKeyChecking=accept-new \
|
||||
-o PreferredAuthentications=publickey,password,keyboard-interactive
|
||||
|
||||
# Pipelining boosts speed; works fine if sudoers does not enforce "requiretty"
|
||||
pipelining = True
|
||||
scp_if_ssh = smart
|
||||
|
||||
[persistent_connection]
|
||||
connect_timeout = 30
|
||||
command_timeout = 60
|
||||
|
Binary file not shown.
Before Width: | Height: | Size: 162 KiB After Width: | Height: | Size: 157 KiB |
Binary file not shown.
Before Width: | Height: | Size: 701 KiB After Width: | Height: | Size: 1015 KiB |
@@ -5,7 +5,7 @@ import sys
|
||||
import time
|
||||
from pathlib import Path
|
||||
|
||||
# Ensure project root on PYTHONPATH so utils is importable
|
||||
# Ensure project root on PYTHONPATH so module_utils is importable
|
||||
repo_root = Path(__file__).resolve().parent.parent.parent.parent
|
||||
sys.path.insert(0, str(repo_root))
|
||||
|
||||
@@ -13,7 +13,7 @@ sys.path.insert(0, str(repo_root))
|
||||
plugin_path = repo_root / "lookup_plugins"
|
||||
sys.path.insert(0, str(plugin_path))
|
||||
|
||||
from utils.dict_renderer import DictRenderer
|
||||
from module_utils.dict_renderer import DictRenderer
|
||||
from application_gid import LookupModule
|
||||
|
||||
def load_yaml_file(path: Path) -> dict:
|
||||
@@ -53,7 +53,8 @@ class DefaultsGenerator:
|
||||
continue
|
||||
|
||||
if not config_file.exists():
|
||||
self.log(f"Skipping {role_name}: config/main.yml missing")
|
||||
self.log(f"Config missing for {role_name}, adding empty defaults for '{application_id}'")
|
||||
result["defaults_applications"][application_id] = {}
|
||||
continue
|
||||
|
||||
config_data = load_yaml_file(config_file)
|
||||
@@ -106,4 +107,4 @@ if __name__ == "__main__":
|
||||
roles_dir = (cwd / args.roles_dir).resolve()
|
||||
output_file = (cwd / args.output_file).resolve()
|
||||
|
||||
DefaultsGenerator(roles_dir, output_file, args.verbose, args.timeout).run()
|
||||
DefaultsGenerator(roles_dir, output_file, args.verbose, args.timeout).run()
|
||||
|
@@ -189,7 +189,7 @@ def parse_args():
|
||||
|
||||
def main():
|
||||
args = parse_args()
|
||||
primary_domain = '{{ primary_domain }}'
|
||||
primary_domain = '{{ SYSTEM_EMAIL.DOMAIN }}'
|
||||
become_pwd = '{{ lookup("password", "/dev/null length=42 chars=ascii_letters,digits") }}'
|
||||
|
||||
try:
|
||||
|
@@ -71,8 +71,8 @@ def build_single_graph(
|
||||
meta = load_meta(find_role_meta(roles_dir, role))
|
||||
node = {'id': role}
|
||||
node.update(meta['galaxy_info'])
|
||||
node['doc_url'] = f"https://docs.cymais.cloud/roles/{role}/README.html"
|
||||
node['source_url'] = f"https://github.com/kevinveenbirkenbach/cymais/tree/master/roles/{role}"
|
||||
node['doc_url'] = f"https://docs.infinito.nexus/roles/{role}/README.html"
|
||||
node['source_url'] = f"https://s.infinito.nexus/code/tree/master/roles/{role}"
|
||||
nodes[role] = node
|
||||
|
||||
if max_depth > 0 and depth >= max_depth:
|
||||
|
127
cli/build/inventory/full.py
Normal file
127
cli/build/inventory/full.py
Normal file
@@ -0,0 +1,127 @@
|
||||
#!/usr/bin/env python3
|
||||
# cli/build/inventory/full.py
|
||||
|
||||
import argparse
|
||||
import sys
|
||||
import os
|
||||
|
||||
try:
|
||||
from filter_plugins.get_all_invokable_apps import get_all_invokable_apps
|
||||
except ImportError:
|
||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..')))
|
||||
from filter_plugins.get_all_invokable_apps import get_all_invokable_apps
|
||||
|
||||
import yaml
|
||||
import json
|
||||
|
||||
def build_group_inventory(apps, host):
|
||||
"""
|
||||
Build an Ansible inventory in which each application is a group containing the given host.
|
||||
"""
|
||||
groups = {app: {"hosts": [host]} for app in apps}
|
||||
inventory = {
|
||||
"all": {
|
||||
"hosts": [host],
|
||||
"children": {app: {} for app in apps},
|
||||
},
|
||||
**groups
|
||||
}
|
||||
return inventory
|
||||
|
||||
def build_hostvar_inventory(apps, host):
|
||||
"""
|
||||
Alternative: Build an inventory where all invokable apps are set as a host variable (as a list).
|
||||
"""
|
||||
return {
|
||||
"all": {
|
||||
"hosts": [host],
|
||||
},
|
||||
"_meta": {
|
||||
"hostvars": {
|
||||
host: {
|
||||
"invokable_applications": apps
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(
|
||||
description='Build a dynamic Ansible inventory for a given host with all invokable applications.'
|
||||
)
|
||||
parser.add_argument(
|
||||
'--host',
|
||||
required=True,
|
||||
help='Hostname to assign to all invokable application groups'
|
||||
)
|
||||
parser.add_argument(
|
||||
'-f', '--format',
|
||||
choices=['json', 'yaml'],
|
||||
default='yaml',
|
||||
help='Output format (yaml [default], json)'
|
||||
)
|
||||
parser.add_argument(
|
||||
'--inventory-style',
|
||||
choices=['group', 'hostvars'],
|
||||
default='group',
|
||||
help='Inventory style: group (default, one group per app) or hostvars (list as hostvar)'
|
||||
)
|
||||
parser.add_argument(
|
||||
'-c', '--categories-file',
|
||||
default=os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'roles', 'categories.yml')),
|
||||
help='Path to roles/categories.yml (default: roles/categories.yml at project root)'
|
||||
)
|
||||
parser.add_argument(
|
||||
'-r', '--roles-dir',
|
||||
default=os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'roles')),
|
||||
help='Path to roles/ directory (default: roles/ at project root)'
|
||||
)
|
||||
parser.add_argument(
|
||||
'-o', '--output',
|
||||
help='Write output to file instead of stdout'
|
||||
)
|
||||
parser.add_argument(
|
||||
'-i', '--ignore',
|
||||
action='append',
|
||||
default=[],
|
||||
help='Application ID(s) to ignore (can be specified multiple times or comma-separated)'
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
try:
|
||||
apps = get_all_invokable_apps(
|
||||
categories_file=args.categories_file,
|
||||
roles_dir=args.roles_dir
|
||||
)
|
||||
except Exception as e:
|
||||
sys.stderr.write(f"Error: {e}\n")
|
||||
sys.exit(1)
|
||||
|
||||
# Combine all ignore arguments into a flat set
|
||||
ignore_ids = set()
|
||||
for entry in args.ignore:
|
||||
ignore_ids.update(i.strip() for i in entry.split(',') if i.strip())
|
||||
|
||||
if ignore_ids:
|
||||
apps = [app for app in apps if app not in ignore_ids]
|
||||
|
||||
# Build the requested inventory style
|
||||
if args.inventory_style == 'group':
|
||||
inventory = build_group_inventory(apps, args.host)
|
||||
else:
|
||||
inventory = build_hostvar_inventory(apps, args.host)
|
||||
|
||||
# Output in the chosen format
|
||||
if args.format == 'json':
|
||||
output = json.dumps(inventory, indent=2)
|
||||
else:
|
||||
output = yaml.safe_dump(inventory, default_flow_style=False)
|
||||
|
||||
if args.output:
|
||||
with open(args.output, 'w') as f:
|
||||
f.write(output)
|
||||
else:
|
||||
print(output)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@@ -102,8 +102,10 @@ def find_cycle(roles):
|
||||
def topological_sort(graph, in_degree, roles=None):
|
||||
"""
|
||||
Perform topological sort on the dependency graph.
|
||||
If `roles` is provided, on error it will include detailed debug info.
|
||||
If a cycle is detected, raise an Exception with detailed debug info.
|
||||
"""
|
||||
from collections import deque
|
||||
|
||||
queue = deque([r for r, d in in_degree.items() if d == 0])
|
||||
sorted_roles = []
|
||||
local_in = dict(in_degree)
|
||||
@@ -117,28 +119,26 @@ def topological_sort(graph, in_degree, roles=None):
|
||||
queue.append(nbr)
|
||||
|
||||
if len(sorted_roles) != len(in_degree):
|
||||
# Something went wrong: likely a cycle
|
||||
cycle = find_cycle(roles or {})
|
||||
if roles is not None:
|
||||
if cycle:
|
||||
header = f"Circular dependency detected: {' -> '.join(cycle)}"
|
||||
else:
|
||||
header = "Circular dependency detected among the roles!"
|
||||
unsorted = [r for r in in_degree if r not in sorted_roles]
|
||||
|
||||
unsorted = [r for r in in_degree if r not in sorted_roles]
|
||||
detail_lines = ["Unsorted roles and their dependencies:"]
|
||||
header = "❌ Dependency resolution failed"
|
||||
if cycle:
|
||||
reason = f"Circular dependency detected: {' -> '.join(cycle)}"
|
||||
else:
|
||||
reason = "Unresolved dependencies among roles (possible cycle or missing role)."
|
||||
|
||||
details = []
|
||||
if unsorted:
|
||||
details.append("Unsorted roles and their declared run_after dependencies:")
|
||||
for r in unsorted:
|
||||
deps = roles.get(r, {}).get('run_after', [])
|
||||
detail_lines.append(f" - {r} depends on {deps!r}")
|
||||
details.append(f" - {r} depends on {deps!r}")
|
||||
|
||||
detail_lines.append("Full dependency graph:")
|
||||
detail_lines.append(f" {dict(graph)!r}")
|
||||
graph_repr = f"Full dependency graph: {dict(graph)!r}"
|
||||
|
||||
raise Exception("\n".join([header] + detail_lines))
|
||||
else:
|
||||
if cycle:
|
||||
raise Exception(f"Circular dependency detected: {' -> '.join(cycle)}")
|
||||
else:
|
||||
raise Exception("Circular dependency detected among the roles!")
|
||||
raise Exception("\n".join([header, reason] + details + [graph_repr]))
|
||||
|
||||
return sorted_roles
|
||||
|
||||
|
@@ -5,10 +5,10 @@ import json
|
||||
from typing import Dict, Any
|
||||
|
||||
from cli.build.graph import build_mappings, output_graph
|
||||
from module_utils.role_dependency_resolver import RoleDependencyResolver
|
||||
|
||||
|
||||
def find_roles(roles_dir: str):
|
||||
"""Yield (role_name, role_path) for every subfolder in roles_dir."""
|
||||
for entry in os.listdir(roles_dir):
|
||||
path = os.path.join(roles_dir, entry)
|
||||
if os.path.isdir(path):
|
||||
@@ -16,40 +16,31 @@ def find_roles(roles_dir: str):
|
||||
|
||||
|
||||
def main():
|
||||
# default roles dir is ../../roles relative to this script
|
||||
script_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
default_roles_dir = os.path.abspath(os.path.join(script_dir, '..', '..', 'roles'))
|
||||
default_roles_dir = os.path.abspath(os.path.join(script_dir, "..", "..", "roles"))
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Generate all graphs for each role and write meta/tree.json"
|
||||
)
|
||||
parser.add_argument(
|
||||
'-d', '--role_dir',
|
||||
default=default_roles_dir,
|
||||
help=f"Path to roles directory (default: {default_roles_dir})"
|
||||
)
|
||||
parser.add_argument(
|
||||
'-D', '--depth',
|
||||
type=int,
|
||||
default=0,
|
||||
help="Max recursion depth (>0) or <=0 to stop on cycle"
|
||||
)
|
||||
parser.add_argument(
|
||||
'-o', '--output',
|
||||
choices=['yaml', 'json', 'console'],
|
||||
default='json',
|
||||
help="Output format"
|
||||
)
|
||||
parser.add_argument(
|
||||
'-p', '--preview',
|
||||
action='store_true',
|
||||
help="Preview graphs to console instead of writing files"
|
||||
)
|
||||
parser.add_argument(
|
||||
'-v', '--verbose',
|
||||
action='store_true',
|
||||
help="Enable verbose logging"
|
||||
)
|
||||
parser.add_argument("-d", "--role_dir", default=default_roles_dir,
|
||||
help=f"Path to roles directory (default: {default_roles_dir})")
|
||||
parser.add_argument("-D", "--depth", type=int, default=0,
|
||||
help="Max recursion depth (>0) or <=0 to stop on cycle")
|
||||
parser.add_argument("-o", "--output", choices=["yaml", "json", "console"],
|
||||
default="json", help="Output format")
|
||||
parser.add_argument("-p", "--preview", action="store_true",
|
||||
help="Preview graphs to console instead of writing files")
|
||||
parser.add_argument("-s", "--shadow-folder", type=str, default=None,
|
||||
help="If set, writes tree.json to this shadow folder instead of the role's actual meta/ folder")
|
||||
parser.add_argument("-v", "--verbose", action="store_true", help="Enable verbose logging")
|
||||
|
||||
# Toggles
|
||||
parser.add_argument("--no-include-role", action="store_true", help="Do not scan include_role")
|
||||
parser.add_argument("--no-import-role", action="store_true", help="Do not scan import_role")
|
||||
parser.add_argument("--no-dependencies", action="store_true", help="Do not read meta/main.yml dependencies")
|
||||
parser.add_argument("--no-run-after", action="store_true",
|
||||
help="Do not read galaxy_info.run_after from meta/main.yml")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.verbose:
|
||||
@@ -57,6 +48,9 @@ def main():
|
||||
print(f"Max depth: {args.depth}")
|
||||
print(f"Output format: {args.output}")
|
||||
print(f"Preview mode: {args.preview}")
|
||||
print(f"Shadow folder: {args.shadow_folder}")
|
||||
|
||||
resolver = RoleDependencyResolver(args.role_dir)
|
||||
|
||||
for role_name, role_path in find_roles(args.role_dir):
|
||||
if args.verbose:
|
||||
@@ -68,18 +62,43 @@ def main():
|
||||
max_depth=args.depth
|
||||
)
|
||||
|
||||
# Direct deps (depth=1) – getrennt erfasst für buckets
|
||||
inc_roles, imp_roles = resolver._scan_tasks(role_path)
|
||||
meta_deps = resolver._extract_meta_dependencies(role_path)
|
||||
run_after = set()
|
||||
if not args.no_run_after:
|
||||
run_after = resolver._extract_meta_run_after(role_path)
|
||||
|
||||
if any([not args.no_include_role and inc_roles,
|
||||
not args.no_import_role and imp_roles,
|
||||
not args.no_dependencies and meta_deps,
|
||||
not args.no_run_after and run_after]):
|
||||
deps_root = graphs.setdefault("dependencies", {})
|
||||
if not args.no_include_role and inc_roles:
|
||||
deps_root["include_role"] = sorted(inc_roles)
|
||||
if not args.no_import_role and imp_roles:
|
||||
deps_root["import_role"] = sorted(imp_roles)
|
||||
if not args.no_dependencies and meta_deps:
|
||||
deps_root["dependencies"] = sorted(meta_deps)
|
||||
if not args.no_run_after and run_after:
|
||||
deps_root["run_after"] = sorted(run_after)
|
||||
graphs["dependencies"] = deps_root
|
||||
|
||||
if args.preview:
|
||||
for key, data in graphs.items():
|
||||
if args.verbose:
|
||||
print(f"Previewing graph '{key}' for role '{role_name}'")
|
||||
output_graph(data, 'console', role_name, key)
|
||||
output_graph(data, "console", role_name, key)
|
||||
else:
|
||||
tree_file = os.path.join(role_path, 'meta', 'tree.json')
|
||||
if args.shadow_folder:
|
||||
tree_file = os.path.join(args.shadow_folder, role_name, "meta", "tree.json")
|
||||
else:
|
||||
tree_file = os.path.join(role_path, "meta", "tree.json")
|
||||
os.makedirs(os.path.dirname(tree_file), exist_ok=True)
|
||||
with open(tree_file, 'w') as f:
|
||||
with open(tree_file, "w", encoding="utf-8") as f:
|
||||
json.dump(graphs, f, indent=2)
|
||||
print(f"Wrote {tree_file}")
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
@@ -1,14 +1,29 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Selectively add & vault NEW credentials in your inventory, preserving comments
|
||||
and formatting. Existing values are left untouched unless --force is used.
|
||||
|
||||
Usage example:
|
||||
infinito create credentials \
|
||||
--role-path roles/web-app-akaunting \
|
||||
--inventory-file host_vars/echoserver.yml \
|
||||
--vault-password-file .pass/echoserver.txt \
|
||||
--set credentials.database_password=mysecret
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import subprocess
|
||||
import sys
|
||||
from pathlib import Path
|
||||
import yaml
|
||||
from typing import Dict, Any
|
||||
from utils.manager.inventory import InventoryManager
|
||||
from utils.handler.vault import VaultHandler, VaultScalar
|
||||
from utils.handler.yaml import YamlHandler
|
||||
from yaml.dumper import SafeDumper
|
||||
from typing import Dict, Any, Union
|
||||
|
||||
from ruamel.yaml import YAML
|
||||
from ruamel.yaml.comments import CommentedMap
|
||||
|
||||
from module_utils.manager.inventory import InventoryManager
|
||||
from module_utils.handler.vault import VaultHandler # uses your existing handler
|
||||
|
||||
|
||||
# ---------- helpers ----------
|
||||
|
||||
def ask_for_confirmation(key: str) -> bool:
|
||||
"""Prompt the user for confirmation to overwrite an existing value."""
|
||||
@@ -18,35 +33,117 @@ def ask_for_confirmation(key: str) -> bool:
|
||||
return confirmation == 'y'
|
||||
|
||||
|
||||
def main():
|
||||
def ensure_map(node: CommentedMap, key: str) -> CommentedMap:
|
||||
"""
|
||||
Ensure node[key] exists and is a mapping (CommentedMap) for round-trip safety.
|
||||
"""
|
||||
if key not in node or not isinstance(node.get(key), CommentedMap):
|
||||
node[key] = CommentedMap()
|
||||
return node[key]
|
||||
|
||||
|
||||
def _is_ruamel_vault(val: Any) -> bool:
|
||||
"""Detect if a ruamel scalar already carries the !vault tag."""
|
||||
try:
|
||||
return getattr(val, 'tag', None) == '!vault'
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
|
||||
def _is_vault_encrypted(val: Any) -> bool:
|
||||
"""
|
||||
Detect if value is already a vault string or a ruamel !vault scalar.
|
||||
Accept both '$ANSIBLE_VAULT' and '!vault' markers.
|
||||
"""
|
||||
if _is_ruamel_vault(val):
|
||||
return True
|
||||
if isinstance(val, str) and ("$ANSIBLE_VAULT" in val or "!vault" in val):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _vault_body(text: str) -> str:
|
||||
"""
|
||||
Return only the vault body starting from the first line that contains
|
||||
'$ANSIBLE_VAULT'. If not found, return the original text.
|
||||
Also strips any leading '!vault |' header if present.
|
||||
"""
|
||||
lines = text.splitlines()
|
||||
for i, ln in enumerate(lines):
|
||||
if "$ANSIBLE_VAULT" in ln:
|
||||
return "\n".join(lines[i:])
|
||||
return text
|
||||
|
||||
|
||||
def _make_vault_scalar_from_text(text: str) -> Any:
|
||||
"""
|
||||
Build a ruamel object representing a literal block scalar tagged with !vault
|
||||
by parsing a tiny YAML snippet. This avoids depending on yaml_set_tag().
|
||||
"""
|
||||
body = _vault_body(text)
|
||||
indented = " " + body.replace("\n", "\n ") # proper block scalar indentation
|
||||
snippet = f"v: !vault |\n{indented}\n"
|
||||
y = YAML(typ="rt")
|
||||
return y.load(snippet)["v"]
|
||||
|
||||
|
||||
def to_vault_block(vault_handler: VaultHandler, value: Union[str, Any], label: str) -> Any:
|
||||
"""
|
||||
Return a ruamel scalar tagged as !vault. If the input value is already
|
||||
vault-encrypted (string contains $ANSIBLE_VAULT or is a !vault scalar), reuse/wrap.
|
||||
Otherwise, encrypt plaintext via ansible-vault.
|
||||
"""
|
||||
# Already a ruamel !vault scalar → reuse
|
||||
if _is_ruamel_vault(value):
|
||||
return value
|
||||
|
||||
# Already an encrypted string (may include '!vault |' or just the header)
|
||||
if isinstance(value, str) and ("$ANSIBLE_VAULT" in value or "!vault" in value):
|
||||
return _make_vault_scalar_from_text(value)
|
||||
|
||||
# Plaintext → encrypt now
|
||||
snippet = vault_handler.encrypt_string(str(value), label)
|
||||
return _make_vault_scalar_from_text(snippet)
|
||||
|
||||
|
||||
def parse_overrides(pairs: list[str]) -> Dict[str, str]:
|
||||
"""
|
||||
Parse --set key=value pairs into a dict.
|
||||
Supports both 'credentials.key=val' and 'key=val' (short) forms.
|
||||
"""
|
||||
out: Dict[str, str] = {}
|
||||
for pair in pairs:
|
||||
k, v = pair.split("=", 1)
|
||||
out[k.strip()] = v.strip()
|
||||
return out
|
||||
|
||||
|
||||
# ---------- main ----------
|
||||
|
||||
def main() -> int:
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Selectively vault credentials + become-password in your inventory."
|
||||
description="Selectively add & vault NEW credentials in your inventory, preserving comments/formatting."
|
||||
)
|
||||
parser.add_argument("--role-path", required=True, help="Path to your role")
|
||||
parser.add_argument("--inventory-file", required=True, help="Host vars file to update")
|
||||
parser.add_argument("--vault-password-file", required=True, help="Vault password file")
|
||||
parser.add_argument(
|
||||
"--role-path", required=True, help="Path to your role"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--inventory-file", required=True, help="Host vars file to update"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--vault-password-file", required=True, help="Vault password file"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--set", nargs="*", default=[], help="Override values key.subkey=VALUE"
|
||||
"--set", nargs="*", default=[],
|
||||
help="Override values key[.subkey]=VALUE (applied to NEW keys; with --force also to existing)"
|
||||
)
|
||||
parser.add_argument(
|
||||
"-f", "--force", action="store_true",
|
||||
help="Force overwrite without confirmation"
|
||||
help="Allow overrides to replace existing values (will ask per key unless combined with --yes)"
|
||||
)
|
||||
parser.add_argument(
|
||||
"-y", "--yes", action="store_true",
|
||||
help="Non-interactive: assume 'yes' for all overwrite confirmations when --force is used"
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
# Parse overrides
|
||||
overrides = {
|
||||
k.strip(): v.strip()
|
||||
for pair in args.set for k, v in [pair.split("=", 1)]
|
||||
}
|
||||
overrides = parse_overrides(args.set)
|
||||
|
||||
# Initialize inventory manager
|
||||
# Initialize inventory manager (provides schema + app_id + vault)
|
||||
manager = InventoryManager(
|
||||
role_path=Path(args.role_path),
|
||||
inventory_path=Path(args.inventory_file),
|
||||
@@ -54,62 +151,90 @@ def main():
|
||||
overrides=overrides
|
||||
)
|
||||
|
||||
# Load existing credentials to preserve
|
||||
existing_apps = manager.inventory.get("applications", {})
|
||||
existing_creds = {}
|
||||
if manager.app_id in existing_apps:
|
||||
existing_creds = existing_apps[manager.app_id].get("credentials", {}).copy()
|
||||
# 1) Load existing inventory with ruamel (round-trip)
|
||||
yaml_rt = YAML(typ="rt")
|
||||
yaml_rt.preserve_quotes = True
|
||||
|
||||
# Apply schema (may generate defaults)
|
||||
updated_inventory = manager.apply_schema()
|
||||
with open(args.inventory_file, "r", encoding="utf-8") as f:
|
||||
data = yaml_rt.load(f) # CommentedMap or None
|
||||
if data is None:
|
||||
data = CommentedMap()
|
||||
|
||||
# Restore existing database_password if present
|
||||
apps = updated_inventory.setdefault("applications", {})
|
||||
app_block = apps.setdefault(manager.app_id, {})
|
||||
creds = app_block.setdefault("credentials", {})
|
||||
if "database_password" in existing_creds:
|
||||
creds["database_password"] = existing_creds["database_password"]
|
||||
# 2) Get schema-applied structure (defaults etc.) for *non-destructive* merge
|
||||
schema_inventory: Dict[str, Any] = manager.apply_schema()
|
||||
|
||||
# Store original plaintext values
|
||||
original_plain = {key: str(val) for key, val in creds.items()}
|
||||
# 3) Ensure structural path exists
|
||||
apps = ensure_map(data, "applications")
|
||||
app_block = ensure_map(apps, manager.app_id)
|
||||
creds = ensure_map(app_block, "credentials")
|
||||
|
||||
for key, raw_val in list(creds.items()):
|
||||
# Skip if already vaulted
|
||||
if isinstance(raw_val, VaultScalar) or str(raw_val).lstrip().startswith("$ANSIBLE_VAULT"):
|
||||
# 4) Determine defaults we could add
|
||||
schema_apps = schema_inventory.get("applications", {})
|
||||
schema_app_block = schema_apps.get(manager.app_id, {})
|
||||
schema_creds = schema_app_block.get("credentials", {}) if isinstance(schema_app_block, dict) else {}
|
||||
|
||||
# 5) Add ONLY missing credential keys
|
||||
newly_added_keys = set()
|
||||
for key, default_val in schema_creds.items():
|
||||
if key in creds:
|
||||
# existing → do not touch (preserve plaintext/vault/formatting/comments)
|
||||
continue
|
||||
|
||||
# Determine plaintext
|
||||
plain = original_plain.get(key, "")
|
||||
if key in overrides and (args.force or ask_for_confirmation(key)):
|
||||
plain = overrides[key]
|
||||
# Value to use for the new key
|
||||
# Priority: --set exact key → default from schema → empty string
|
||||
ov = overrides.get(f"credentials.{key}", None)
|
||||
if ov is None:
|
||||
ov = overrides.get(key, None)
|
||||
|
||||
# Encrypt the plaintext
|
||||
encrypted = manager.vault_handler.encrypt_string(plain, key)
|
||||
lines = encrypted.splitlines()
|
||||
indent = len(lines[1]) - len(lines[1].lstrip())
|
||||
body = "\n".join(line[indent:] for line in lines[1:])
|
||||
creds[key] = VaultScalar(body)
|
||||
|
||||
# Vault top-level become password if present
|
||||
if "ansible_become_password" in updated_inventory:
|
||||
val = str(updated_inventory["ansible_become_password"])
|
||||
if val.lstrip().startswith("$ANSIBLE_VAULT"):
|
||||
updated_inventory["ansible_become_password"] = VaultScalar(val)
|
||||
if ov is not None:
|
||||
value_for_new_key: Union[str, Any] = ov
|
||||
else:
|
||||
snippet = manager.vault_handler.encrypt_string(
|
||||
val, "ansible_become_password"
|
||||
if _is_vault_encrypted(default_val):
|
||||
# Schema already provides a vault value → take it as-is
|
||||
creds[key] = to_vault_block(manager.vault_handler, default_val, key)
|
||||
newly_added_keys.add(key)
|
||||
continue
|
||||
value_for_new_key = "" if default_val is None else str(default_val)
|
||||
|
||||
# Insert as !vault literal (encrypt if needed)
|
||||
creds[key] = to_vault_block(manager.vault_handler, value_for_new_key, key)
|
||||
newly_added_keys.add(key)
|
||||
|
||||
# 6) ansible_become_password: only add if missing;
|
||||
# never rewrite an existing one unless --force (+ confirm/--yes) and override provided.
|
||||
if "ansible_become_password" not in data:
|
||||
val = overrides.get("ansible_become_password", None)
|
||||
if val is not None:
|
||||
data["ansible_become_password"] = to_vault_block(
|
||||
manager.vault_handler, val, "ansible_become_password"
|
||||
)
|
||||
lines = snippet.splitlines()
|
||||
indent = len(lines[1]) - len(lines[1].lstrip())
|
||||
body = "\n".join(line[indent:] for line in lines[1:])
|
||||
updated_inventory["ansible_become_password"] = VaultScalar(body)
|
||||
else:
|
||||
if args.force and "ansible_become_password" in overrides:
|
||||
do_overwrite = args.yes or ask_for_confirmation("ansible_become_password")
|
||||
if do_overwrite:
|
||||
data["ansible_become_password"] = to_vault_block(
|
||||
manager.vault_handler, overrides["ansible_become_password"], "ansible_become_password"
|
||||
)
|
||||
|
||||
# Write back to file
|
||||
# 7) Overrides for existing credential keys (only with --force)
|
||||
if args.force:
|
||||
for ov_key, ov_val in overrides.items():
|
||||
# Accept both 'credentials.key' and bare 'key'
|
||||
key = ov_key.split(".", 1)[1] if ov_key.startswith("credentials.") else ov_key
|
||||
if key in creds:
|
||||
# If we just added it in this run, don't ask again or rewrap
|
||||
if key in newly_added_keys:
|
||||
continue
|
||||
if args.yes or ask_for_confirmation(key):
|
||||
creds[key] = to_vault_block(manager.vault_handler, ov_val, key)
|
||||
|
||||
# 8) Write back with ruamel (preserve formatting & comments)
|
||||
with open(args.inventory_file, "w", encoding="utf-8") as f:
|
||||
yaml.dump(updated_inventory, f, sort_keys=False, Dumper=SafeDumper)
|
||||
yaml_rt.dump(data, f)
|
||||
|
||||
print(f"✅ Inventory selectively vaulted → {args.inventory_file}")
|
||||
print(f"✅ Added new credentials without touching existing formatting/comments → {args.inventory_file}")
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
sys.exit(main())
|
||||
|
@@ -1,16 +1,18 @@
|
||||
#!/usr/bin/env python3
|
||||
import argparse
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import ipaddress
|
||||
import difflib
|
||||
from jinja2 import Environment, FileSystemLoader
|
||||
from ruamel.yaml import YAML
|
||||
|
||||
import sys, os
|
||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
||||
from module_utils.entity_name_utils import get_entity_name
|
||||
|
||||
# Paths to the group-vars files
|
||||
PORTS_FILE = './group_vars/all/09_ports.yml'
|
||||
NETWORKS_FILE = './group_vars/all/10_networks.yml'
|
||||
PORTS_FILE = './group_vars/all/10_ports.yml'
|
||||
NETWORKS_FILE = './group_vars/all/09_networks.yml'
|
||||
ROLE_TEMPLATE_DIR = './templates/roles/web-app'
|
||||
ROLES_DIR = './roles'
|
||||
|
||||
@@ -65,6 +67,7 @@ def prompt_conflict(dst_file):
|
||||
def render_templates(src_dir, dst_dir, context):
|
||||
env = Environment(loader=FileSystemLoader(src_dir), keep_trailing_newline=True, autoescape=False)
|
||||
env.filters['bool'] = lambda x: bool(x)
|
||||
env.filters['get_entity_name'] = get_entity_name
|
||||
|
||||
for root, _, files in os.walk(src_dir):
|
||||
rel = os.path.relpath(root, src_dir)
|
||||
|
@@ -6,7 +6,6 @@ import os
|
||||
import datetime
|
||||
import sys
|
||||
|
||||
|
||||
def run_ansible_playbook(
|
||||
inventory,
|
||||
modes,
|
||||
@@ -15,14 +14,27 @@ def run_ansible_playbook(
|
||||
password_file=None,
|
||||
verbose=0,
|
||||
skip_tests=False,
|
||||
skip_validation=False
|
||||
skip_validation=False,
|
||||
skip_build=False,
|
||||
cleanup=False,
|
||||
logs=False
|
||||
):
|
||||
start_time = datetime.datetime.now()
|
||||
print(f"\n▶️ Script started at: {start_time.isoformat()}\n")
|
||||
|
||||
print("\n🛠️ Building project (make build)...\n")
|
||||
subprocess.run(["make", "build"], check=True)
|
||||
|
||||
|
||||
if cleanup:
|
||||
cleanup_command = ["make", "clean-keep-logs"] if logs else ["make", "clean"]
|
||||
print("\n🧹 Cleaning up project (" + " ".join(cleanup_command) +")...\n")
|
||||
subprocess.run(cleanup_command, check=True)
|
||||
else:
|
||||
print("\n⚠️ Skipping build as requested.\n")
|
||||
|
||||
if not skip_build:
|
||||
print("\n🛠️ Building project (make messy-build)...\n")
|
||||
subprocess.run(["make", "messy-build"], check=True)
|
||||
else:
|
||||
print("\n⚠️ Skipping build as requested.\n")
|
||||
|
||||
script_dir = os.path.dirname(os.path.realpath(__file__))
|
||||
playbook = os.path.join(os.path.dirname(script_dir), "playbook.yml")
|
||||
|
||||
@@ -47,8 +59,8 @@ def run_ansible_playbook(
|
||||
print("\n⚠️ Skipping inventory validation as requested.\n")
|
||||
|
||||
if not skip_tests:
|
||||
print("\n🧪 Running tests (make test)...\n")
|
||||
subprocess.run(["make", "test"], check=True)
|
||||
print("\n🧪 Running tests (make messy-test)...\n")
|
||||
subprocess.run(["make", "messy-test"], check=True)
|
||||
|
||||
# Build ansible-playbook command
|
||||
cmd = ["ansible-playbook", "-i", inventory, playbook]
|
||||
@@ -81,6 +93,24 @@ def run_ansible_playbook(
|
||||
duration = end_time - start_time
|
||||
print(f"⏱️ Total execution time: {duration}\n")
|
||||
|
||||
def validate_application_ids(inventory, app_ids):
|
||||
"""
|
||||
Abort the script if any application IDs are invalid, with detailed reasons.
|
||||
"""
|
||||
from module_utils.valid_deploy_id import ValidDeployId
|
||||
validator = ValidDeployId()
|
||||
invalid = validator.validate(inventory, app_ids)
|
||||
if invalid:
|
||||
print("\n❌ Detected invalid application_id(s):\n")
|
||||
for app_id, status in invalid.items():
|
||||
reasons = []
|
||||
if not status['in_roles']:
|
||||
reasons.append("not defined in roles (infinito)")
|
||||
if not status['in_inventory']:
|
||||
reasons.append("not found in inventory file")
|
||||
print(f" - {app_id}: " + ", ".join(reasons))
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(
|
||||
@@ -103,7 +133,7 @@ def main():
|
||||
)
|
||||
parser.add_argument(
|
||||
"-r", "--reset", action="store_true",
|
||||
help="Reset all CyMaIS files and configurations, and run the entire playbook (not just individual roles)."
|
||||
help="Reset all Infinito.Nexus files and configurations, and run the entire playbook (not just individual roles)."
|
||||
)
|
||||
parser.add_argument(
|
||||
"-t", "--test", action="store_true",
|
||||
@@ -119,7 +149,7 @@ def main():
|
||||
)
|
||||
parser.add_argument(
|
||||
"-c", "--cleanup", action="store_true",
|
||||
help="Clean up unused files and outdated configurations after all tasks are complete."
|
||||
help="Clean up unused files and outdated configurations after all tasks are complete. Also cleans up the repository before the deployment procedure."
|
||||
)
|
||||
parser.add_argument(
|
||||
"-d", "--debug", action="store_true",
|
||||
@@ -137,6 +167,10 @@ def main():
|
||||
"-V", "--skip-validation", action="store_true",
|
||||
help="Skip inventory validation before deployment."
|
||||
)
|
||||
parser.add_argument(
|
||||
"-B", "--skip-build", action="store_true",
|
||||
help="Skip running 'make build' before deployment."
|
||||
)
|
||||
parser.add_argument(
|
||||
"-i", "--id",
|
||||
nargs="+",
|
||||
@@ -148,16 +182,23 @@ def main():
|
||||
"-v", "--verbose", action="count", default=0,
|
||||
help="Increase verbosity level. Multiple -v flags increase detail (e.g., -vvv for maximum log output)."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--logs", action="store_true",
|
||||
help="Keep the CLI logs during cleanup command"
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
validate_application_ids(args.inventory, args.id)
|
||||
|
||||
modes = {
|
||||
"mode_reset": args.reset,
|
||||
"mode_test": args.test,
|
||||
"mode_update": args.update,
|
||||
"mode_backup": args.backup,
|
||||
"mode_cleanup": args.cleanup,
|
||||
"enable_debug": args.debug,
|
||||
"MODE_RESET": args.reset,
|
||||
"MODE_TEST": args.test,
|
||||
"MODE_UPDATE": args.update,
|
||||
"MODE_BACKUP": args.backup,
|
||||
"MODE_CLEANUP": args.cleanup,
|
||||
"MODE_LOGS": args.logs,
|
||||
"MODE_DEBUG": args.debug,
|
||||
"MODE_ASSERT": not args.skip_validation,
|
||||
"host_type": args.host_type
|
||||
}
|
||||
|
||||
@@ -169,7 +210,10 @@ def main():
|
||||
password_file=args.password_file,
|
||||
verbose=args.verbose,
|
||||
skip_tests=args.skip_tests,
|
||||
skip_validation=args.skip_validation
|
||||
skip_validation=args.skip_validation,
|
||||
skip_build=args.skip_build,
|
||||
cleanup=args.cleanup,
|
||||
logs=args.logs
|
||||
)
|
||||
|
||||
|
||||
|
@@ -4,8 +4,8 @@ import sys
|
||||
from pathlib import Path
|
||||
import yaml
|
||||
from typing import Dict, Any
|
||||
from utils.handler.vault import VaultHandler, VaultScalar
|
||||
from utils.handler.yaml import YamlHandler
|
||||
from module_utils.handler.vault import VaultHandler, VaultScalar
|
||||
from module_utils.handler.yaml import YamlHandler
|
||||
from yaml.dumper import SafeDumper
|
||||
|
||||
def ask_for_confirmation(key: str) -> bool:
|
||||
|
480
cli/fix/move_unnecessary_dependencies.py
Normal file
480
cli/fix/move_unnecessary_dependencies.py
Normal file
@@ -0,0 +1,480 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Move unnecessary meta dependencies to guarded include_role/import_role
|
||||
for better performance, while preserving YAML comments, quotes, and layout.
|
||||
|
||||
Heuristic (matches tests/integration/test_unnecessary_role_dependencies.py):
|
||||
- A dependency is considered UNNECESSARY if:
|
||||
* The consumer does NOT use provider variables in defaults/vars/handlers
|
||||
(no early-var need), AND
|
||||
* In tasks, any usage of provider vars or provider-handler notifications
|
||||
occurs only AFTER an include/import of the provider in the same file,
|
||||
OR there is no usage at all.
|
||||
|
||||
Action:
|
||||
- Remove such dependencies from roles/<role>/meta/main.yml.
|
||||
- Prepend a guarded include block to roles/<role>/tasks/01_core.yml (preferred)
|
||||
or roles/<role>/tasks/main.yml if 01_core.yml is absent.
|
||||
- If multiple dependencies are moved for a role, use a loop over include_role.
|
||||
|
||||
Notes:
|
||||
- Creates .bak backups for modified YAML files.
|
||||
- Requires ruamel.yaml to preserve comments/quotes everywhere.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import glob
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import sys
|
||||
from typing import Dict, Set, List, Tuple, Optional
|
||||
|
||||
# --- Require ruamel.yaml for full round-trip preservation ---
|
||||
try:
|
||||
from ruamel.yaml import YAML
|
||||
from ruamel.yaml.comments import CommentedMap, CommentedSeq
|
||||
from ruamel.yaml.scalarstring import SingleQuotedScalarString
|
||||
_HAVE_RUAMEL = True
|
||||
except Exception:
|
||||
_HAVE_RUAMEL = False
|
||||
|
||||
if not _HAVE_RUAMEL:
|
||||
print("[ERR] ruamel.yaml is required to preserve comments/quotes. Install with: pip install ruamel.yaml", file=sys.stderr)
|
||||
sys.exit(3)
|
||||
|
||||
yaml_rt = YAML()
|
||||
yaml_rt.preserve_quotes = True
|
||||
yaml_rt.width = 10**9 # prevent line wrapping
|
||||
|
||||
# ---------------- Utilities ----------------
|
||||
|
||||
def _backup(path: str):
|
||||
if os.path.exists(path):
|
||||
shutil.copy2(path, path + ".bak")
|
||||
|
||||
def read_text(path: str) -> str:
|
||||
try:
|
||||
with open(path, "r", encoding="utf-8") as f:
|
||||
return f.read()
|
||||
except Exception:
|
||||
return ""
|
||||
|
||||
def load_yaml_rt(path: str):
|
||||
try:
|
||||
with open(path, "r", encoding="utf-8") as f:
|
||||
data = yaml_rt.load(f)
|
||||
return data if data is not None else CommentedMap()
|
||||
except FileNotFoundError:
|
||||
return CommentedMap()
|
||||
except Exception as e:
|
||||
print(f"[WARN] Failed to parse YAML: {path}: {e}", file=sys.stderr)
|
||||
return CommentedMap()
|
||||
|
||||
def dump_yaml_rt(data, path: str):
|
||||
_backup(path)
|
||||
with open(path, "w", encoding="utf-8") as f:
|
||||
yaml_rt.dump(data, f)
|
||||
|
||||
def roles_root(project_root: str) -> str:
|
||||
return os.path.join(project_root, "roles")
|
||||
|
||||
def iter_role_dirs(project_root: str) -> List[str]:
|
||||
root = roles_root(project_root)
|
||||
return [d for d in glob.glob(os.path.join(root, "*")) if os.path.isdir(d)]
|
||||
|
||||
def role_name_from_dir(role_dir: str) -> str:
|
||||
return os.path.basename(role_dir.rstrip(os.sep))
|
||||
|
||||
def path_if_exists(*parts) -> Optional[str]:
|
||||
p = os.path.join(*parts)
|
||||
return p if os.path.exists(p) else None
|
||||
|
||||
def gather_yaml_files(base: str, patterns: List[str]) -> List[str]:
|
||||
files: List[str] = []
|
||||
for pat in patterns:
|
||||
files.extend(glob.glob(os.path.join(base, pat), recursive=True))
|
||||
return [f for f in files if os.path.isfile(f)]
|
||||
|
||||
def sq(v: str):
|
||||
"""Return a single-quoted scalar (ruamel) for consistent quoting."""
|
||||
return SingleQuotedScalarString(v)
|
||||
|
||||
# ---------------- Providers: vars & handlers ----------------
|
||||
|
||||
def flatten_keys(data) -> Set[str]:
|
||||
out: Set[str] = set()
|
||||
if isinstance(data, dict):
|
||||
for k, v in data.items():
|
||||
if isinstance(k, str):
|
||||
out.add(k)
|
||||
out |= flatten_keys(v)
|
||||
elif isinstance(data, list):
|
||||
for item in data:
|
||||
out |= flatten_keys(item)
|
||||
return out
|
||||
|
||||
def collect_role_defined_vars(role_dir: str) -> Set[str]:
|
||||
"""Vars a role 'provides': defaults/vars keys + set_fact keys in tasks."""
|
||||
provided: Set[str] = set()
|
||||
|
||||
for rel in ("defaults/main.yml", "vars/main.yml"):
|
||||
p = path_if_exists(role_dir, rel)
|
||||
if p:
|
||||
data = load_yaml_rt(p)
|
||||
provided |= flatten_keys(data)
|
||||
|
||||
# set_fact keys
|
||||
task_files = gather_yaml_files(os.path.join(role_dir, "tasks"), ["**/*.yml", "*.yml"])
|
||||
for tf in task_files:
|
||||
data = load_yaml_rt(tf)
|
||||
if isinstance(data, list):
|
||||
for task in data:
|
||||
if isinstance(task, dict) and "set_fact" in task and isinstance(task["set_fact"], dict):
|
||||
provided |= set(task["set_fact"].keys())
|
||||
|
||||
noisy = {"when", "name", "vars", "tags", "register"}
|
||||
return {v for v in provided if isinstance(v, str) and v and v not in noisy}
|
||||
|
||||
def collect_role_handler_names(role_dir: str) -> Set[str]:
|
||||
"""Handler names defined by a role (for notify detection)."""
|
||||
handler_file = path_if_exists(role_dir, "handlers/main.yml")
|
||||
if not handler_file:
|
||||
return set()
|
||||
data = load_yaml_rt(handler_file)
|
||||
names: Set[str] = set()
|
||||
if isinstance(data, list):
|
||||
for task in data:
|
||||
if isinstance(task, dict):
|
||||
nm = task.get("name")
|
||||
if isinstance(nm, str) and nm.strip():
|
||||
names.add(nm.strip())
|
||||
return names
|
||||
|
||||
# ---------------- Consumers: usage scanning ----------------
|
||||
|
||||
def find_var_positions(text: str, varname: str) -> List[int]:
|
||||
"""Return byte offsets for occurrences of varname (word-ish boundary)."""
|
||||
positions: List[int] = []
|
||||
if not varname:
|
||||
return positions
|
||||
pattern = re.compile(rf"(?<!\w){re.escape(varname)}(?!\w)")
|
||||
for m in pattern.finditer(text):
|
||||
positions.append(m.start())
|
||||
return positions
|
||||
|
||||
def first_var_use_offset_in_text(text: str, provided_vars: Set[str]) -> Optional[int]:
|
||||
first: Optional[int] = None
|
||||
for v in provided_vars:
|
||||
for off in find_var_positions(text, v):
|
||||
if first is None or off < first:
|
||||
first = off
|
||||
return first
|
||||
|
||||
def first_include_offset_for_role(text: str, producer_role: str) -> Optional[int]:
|
||||
"""
|
||||
Find earliest include/import of a given role in this YAML text.
|
||||
Handles compact dict and block styles.
|
||||
"""
|
||||
pattern = re.compile(
|
||||
r"(include_role|import_role)\s*:\s*\{[^}]*\bname\s*:\s*['\"]?"
|
||||
+ re.escape(producer_role) + r"['\"]?[^}]*\}"
|
||||
r"|"
|
||||
r"(include_role|import_role)\s*:\s*\n(?:\s+[a-z_]+\s*:\s*.*\n)*\s*name\s*:\s*['\"]?"
|
||||
+ re.escape(producer_role) + r"['\"]?",
|
||||
re.IGNORECASE,
|
||||
)
|
||||
m = pattern.search(text)
|
||||
return m.start() if m else None
|
||||
|
||||
def find_notify_offsets_for_handlers(text: str, handler_names: Set[str]) -> List[int]:
|
||||
"""
|
||||
Heuristic: for each handler name, find occurrences where 'notify' appears within
|
||||
the preceding ~200 chars. Works for single string or list-style notify blocks.
|
||||
"""
|
||||
if not handler_names:
|
||||
return []
|
||||
offsets: List[int] = []
|
||||
for h in handler_names:
|
||||
for m in re.finditer(re.escape(h), text):
|
||||
start = m.start()
|
||||
back = max(0, start - 200)
|
||||
context = text[back:start]
|
||||
if re.search(r"notify\s*:", context):
|
||||
offsets.append(start)
|
||||
return sorted(offsets)
|
||||
|
||||
def parse_meta_dependencies(role_dir: str) -> List[str]:
|
||||
meta = path_if_exists(role_dir, "meta/main.yml")
|
||||
if not meta:
|
||||
return []
|
||||
data = load_yaml_rt(meta)
|
||||
dd = data.get("dependencies")
|
||||
deps: List[str] = []
|
||||
if isinstance(dd, list):
|
||||
for item in dd:
|
||||
if isinstance(item, str):
|
||||
deps.append(item)
|
||||
elif isinstance(item, dict) and "role" in item:
|
||||
deps.append(str(item["role"]))
|
||||
elif isinstance(item, dict) and "name" in item:
|
||||
deps.append(str(item["name"]))
|
||||
return deps
|
||||
|
||||
# ---------------- Fix application ----------------
|
||||
|
||||
def sanitize_run_once_var(role_name: str) -> str:
|
||||
"""
|
||||
Generate run_once variable name from role name.
|
||||
Example: 'sys-front-inj-logout' -> 'run_once_sys_front_inj_logout'
|
||||
"""
|
||||
return "run_once_" + role_name.replace("-", "_")
|
||||
|
||||
def build_include_block_yaml(consumer_role: str, moved_deps: List[str]) -> List[dict]:
|
||||
"""
|
||||
Build a guarded block that includes one or many roles.
|
||||
This block will be prepended to tasks/01_core.yml or tasks/main.yml.
|
||||
"""
|
||||
guard_var = sanitize_run_once_var(consumer_role)
|
||||
|
||||
if len(moved_deps) == 1:
|
||||
inner_tasks = [
|
||||
{
|
||||
"name": f"Include dependency '{moved_deps[0]}'",
|
||||
"include_role": {"name": moved_deps[0]},
|
||||
}
|
||||
]
|
||||
else:
|
||||
inner_tasks = [
|
||||
{
|
||||
"name": "Include dependencies",
|
||||
"include_role": {"name": "{{ item }}"},
|
||||
"loop": moved_deps,
|
||||
}
|
||||
]
|
||||
|
||||
# Always set the run_once fact at the end
|
||||
inner_tasks.append({"set_fact": {guard_var: True}})
|
||||
|
||||
# Correct Ansible block structure
|
||||
block_task = {
|
||||
"name": "Load former meta dependencies once",
|
||||
"block": inner_tasks,
|
||||
"when": f"{guard_var} is not defined",
|
||||
}
|
||||
|
||||
return [block_task]
|
||||
|
||||
def prepend_tasks(tasks_path: str, new_tasks, dry_run: bool):
|
||||
"""
|
||||
Prepend new_tasks (CommentedSeq) to an existing tasks YAML list while preserving comments.
|
||||
If the file does not exist, create it with new_tasks.
|
||||
"""
|
||||
if os.path.exists(tasks_path):
|
||||
existing = load_yaml_rt(tasks_path)
|
||||
if isinstance(existing, list):
|
||||
combined = CommentedSeq()
|
||||
for item in new_tasks:
|
||||
combined.append(item)
|
||||
for item in existing:
|
||||
combined.append(item)
|
||||
elif isinstance(existing, dict):
|
||||
# Rare case: tasks file with a single mapping; coerce to list
|
||||
combined = CommentedSeq()
|
||||
for item in new_tasks:
|
||||
combined.append(item)
|
||||
combined.append(existing)
|
||||
else:
|
||||
combined = new_tasks
|
||||
else:
|
||||
os.makedirs(os.path.dirname(tasks_path), exist_ok=True)
|
||||
combined = new_tasks
|
||||
|
||||
if dry_run:
|
||||
print(f"[DRY-RUN] Would write {tasks_path} with {len(new_tasks)} prepended task(s).")
|
||||
return
|
||||
|
||||
dump_yaml_rt(combined, tasks_path)
|
||||
print(f"[OK] Updated {tasks_path} (prepended {len(new_tasks)} task(s)).")
|
||||
|
||||
def update_meta_remove_deps(meta_path: str, remove: List[str], dry_run: bool):
|
||||
"""
|
||||
Remove entries from meta.dependencies while leaving the rest of the file intact.
|
||||
Quotes, comments, key order, and line breaks are preserved.
|
||||
Returns True if a change would be made (or was made when not in dry-run).
|
||||
"""
|
||||
if not os.path.exists(meta_path):
|
||||
return False
|
||||
|
||||
doc = load_yaml_rt(meta_path)
|
||||
deps = doc.get("dependencies")
|
||||
if not isinstance(deps, list):
|
||||
return False
|
||||
|
||||
def dep_name(item):
|
||||
if isinstance(item, dict):
|
||||
return item.get("role") or item.get("name")
|
||||
return item
|
||||
|
||||
keep = CommentedSeq()
|
||||
removed = []
|
||||
for item in deps:
|
||||
name = dep_name(item)
|
||||
if name in remove:
|
||||
removed.append(name)
|
||||
else:
|
||||
keep.append(item)
|
||||
|
||||
if not removed:
|
||||
return False
|
||||
|
||||
if keep:
|
||||
doc["dependencies"] = keep
|
||||
else:
|
||||
if "dependencies" in doc:
|
||||
del doc["dependencies"]
|
||||
|
||||
if dry_run:
|
||||
print(f"[DRY-RUN] Would rewrite {meta_path}; removed: {', '.join(removed)}")
|
||||
return True
|
||||
|
||||
dump_yaml_rt(doc, meta_path)
|
||||
print(f"[OK] Rewrote {meta_path}; removed: {', '.join(removed)}")
|
||||
return True
|
||||
|
||||
def dependency_is_unnecessary(consumer_dir: str,
|
||||
consumer_name: str,
|
||||
producer_name: str,
|
||||
provider_vars: Set[str],
|
||||
provider_handlers: Set[str]) -> bool:
|
||||
"""Apply heuristic to decide if we can move this dependency."""
|
||||
# 1) Early usage in defaults/vars/handlers? If yes -> necessary
|
||||
defaults_files = [p for p in [
|
||||
path_if_exists(consumer_dir, "defaults/main.yml"),
|
||||
path_if_exists(consumer_dir, "vars/main.yml"),
|
||||
path_if_exists(consumer_dir, "handlers/main.yml"),
|
||||
] if p]
|
||||
for p in defaults_files:
|
||||
text = read_text(p)
|
||||
if first_var_use_offset_in_text(text, provider_vars) is not None:
|
||||
return False # needs meta dep
|
||||
|
||||
# 2) Tasks: any usage before include/import? If yes -> keep meta dep
|
||||
task_files = gather_yaml_files(os.path.join(consumer_dir, "tasks"), ["**/*.yml", "*.yml"])
|
||||
for p in task_files:
|
||||
text = read_text(p)
|
||||
if not text:
|
||||
continue
|
||||
include_off = first_include_offset_for_role(text, producer_name)
|
||||
var_use_off = first_var_use_offset_in_text(text, provider_vars)
|
||||
notify_offs = find_notify_offsets_for_handlers(text, provider_handlers)
|
||||
|
||||
if var_use_off is not None:
|
||||
if include_off is None or include_off > var_use_off:
|
||||
return False # used before include
|
||||
|
||||
for noff in notify_offs:
|
||||
if include_off is None or include_off > noff:
|
||||
return False # notify before include
|
||||
|
||||
# If we get here: no early use, and either no usage at all or usage after include
|
||||
return True
|
||||
|
||||
def process_role(role_dir: str,
|
||||
providers_index: Dict[str, Tuple[Set[str], Set[str]]],
|
||||
only_role: Optional[str],
|
||||
dry_run: bool) -> bool:
|
||||
"""
|
||||
Returns True if any change suggested/made for this role.
|
||||
"""
|
||||
consumer_name = role_name_from_dir(role_dir)
|
||||
if only_role and only_role != consumer_name:
|
||||
return False
|
||||
|
||||
meta_deps = parse_meta_dependencies(role_dir)
|
||||
if not meta_deps:
|
||||
return False
|
||||
|
||||
# Build provider vars/handlers accessors
|
||||
moved: List[str] = []
|
||||
for producer in meta_deps:
|
||||
# Only consider local roles we can analyze
|
||||
producer_dir = path_if_exists(os.path.dirname(role_dir), producer) or path_if_exists(os.path.dirname(roles_root(os.path.dirname(role_dir))), "roles", producer)
|
||||
if producer not in providers_index:
|
||||
# Unknown/external role → skip (we cannot verify safety)
|
||||
continue
|
||||
pvars, phandlers = providers_index[producer]
|
||||
if dependency_is_unnecessary(role_dir, consumer_name, producer, pvars, phandlers):
|
||||
moved.append(producer)
|
||||
|
||||
if not moved:
|
||||
return False
|
||||
|
||||
# 1) Remove from meta
|
||||
meta_path = os.path.join(role_dir, "meta", "main.yml")
|
||||
update_meta_remove_deps(meta_path, moved, dry_run=dry_run)
|
||||
|
||||
# 2) Prepend include block to tasks/01_core.yml or tasks/main.yml
|
||||
target_tasks = path_if_exists(role_dir, "tasks/01_core.yml")
|
||||
if not target_tasks:
|
||||
target_tasks = os.path.join(role_dir, "tasks", "main.yml")
|
||||
include_block = build_include_block_yaml(consumer_name, moved)
|
||||
prepend_tasks(target_tasks, include_block, dry_run=dry_run)
|
||||
return True
|
||||
|
||||
def build_providers_index(all_roles: List[str]) -> Dict[str, Tuple[Set[str], Set[str]]]:
|
||||
"""
|
||||
Map role_name -> (provided_vars, handler_names)
|
||||
"""
|
||||
index: Dict[str, Tuple[Set[str], Set[str]]] = {}
|
||||
for rd in all_roles:
|
||||
rn = role_name_from_dir(rd)
|
||||
index[rn] = (collect_role_defined_vars(rd), collect_role_handler_names(rd))
|
||||
return index
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Move unnecessary meta dependencies to guarded include_role for performance (preserve comments/quotes)."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--project-root",
|
||||
default=os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")),
|
||||
help="Path to project root (default: two levels up from this script).",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--role",
|
||||
dest="only_role",
|
||||
default=None,
|
||||
help="Only process a specific role name (e.g., 'docker-core').",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--dry-run",
|
||||
action="store_true",
|
||||
help="Analyze and print planned changes without modifying files.",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
roles = iter_role_dirs(args.project_root)
|
||||
if not roles:
|
||||
print(f"[ERR] No roles found under {roles_root(args.project_root)}", file=sys.stderr)
|
||||
sys.exit(2)
|
||||
|
||||
providers_index = build_providers_index(roles)
|
||||
|
||||
changed_any = False
|
||||
for role_dir in roles:
|
||||
changed = process_role(role_dir, providers_index, args.only_role, args.dry_run)
|
||||
changed_any = changed_any or changed
|
||||
|
||||
if not changed_any:
|
||||
print("[OK] No unnecessary meta dependencies to move (per heuristic).")
|
||||
else:
|
||||
if args.dry_run:
|
||||
print("[DRY-RUN] Completed analysis. No files were changed.")
|
||||
else:
|
||||
print("[OK] Finished moving unnecessary dependencies.")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
5
cli/fix/replace_by_get_app_config.sh
Executable file
5
cli/fix/replace_by_get_app_config.sh
Executable file
@@ -0,0 +1,5 @@
|
||||
# Just a little refactoring script, you can delete it later
|
||||
ATTR="$1"
|
||||
OLD="applications[application_id].$ATTR"
|
||||
NEW="applications | get_app_conf(application_id, '$ATTR', True)"
|
||||
bsr ./ "$OLD" -rFfc -n "$NEW"
|
126
cli/integration/deploy_localhost.py
Normal file
126
cli/integration/deploy_localhost.py
Normal file
@@ -0,0 +1,126 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Run the full localhost integration flow entirely inside the infinito Docker container,
|
||||
without writing any artifacts to the host filesystem.
|
||||
Catches missing schema/config errors during credential vaulting and skips those apps.
|
||||
"""
|
||||
import subprocess
|
||||
import os
|
||||
import sys
|
||||
|
||||
def main():
|
||||
repo = os.path.abspath(os.getcwd())
|
||||
|
||||
bash_script = '''
|
||||
set -e
|
||||
|
||||
ART=/integration-artifacts
|
||||
mkdir -p "$ART"
|
||||
echo testpassword > "$ART/vaultpw.txt"
|
||||
|
||||
# 1) Generate inventory
|
||||
python3 -m cli.build.inventory.full \
|
||||
--host localhost \
|
||||
--inventory-style hostvars \
|
||||
--format yaml \
|
||||
--output "$ART/inventory.yml"
|
||||
|
||||
# 2) Credentials per-app
|
||||
apps=$(python3 <<EOF
|
||||
import yaml
|
||||
inv = yaml.safe_load(open('/integration-artifacts/inventory.yml'))
|
||||
print(' '.join(inv['_meta']['hostvars']['localhost']['invokable_applications']))
|
||||
EOF
|
||||
)
|
||||
for app in $apps; do
|
||||
echo "⏳ Vaulting credentials for $app..."
|
||||
output=$(python3 -m cli.create.credentials \
|
||||
--role-path "/repo/roles/$app" \
|
||||
--inventory-file "$ART/inventory.yml" \
|
||||
--vault-password-file "$ART/vaultpw.txt" \
|
||||
--force 2>&1) || rc=$?; rc=${rc:-0}
|
||||
|
||||
if [ "$rc" -eq 0 ]; then
|
||||
echo "✅ Credentials generated for $app"
|
||||
elif echo "$output" | grep -q "No such file or directory"; then
|
||||
echo "⚠️ Skipping $app (no schema/config)"
|
||||
elif echo "$output" | grep -q "Plain algorithm for"; then
|
||||
# Collect all plain-algo keys
|
||||
keys=( $(echo "$output" | grep -oP "Plain algorithm for '\K[^']+") )
|
||||
overrides=()
|
||||
for key in "${keys[@]}"; do
|
||||
if [[ "$key" == *api_key ]]; then
|
||||
val=$(python3 - << 'PY'
|
||||
import random, string
|
||||
print(''.join(random.choices(string.ascii_letters+string.digits, k=32)))
|
||||
PY
|
||||
)
|
||||
elif [[ "$key" == *password ]]; then
|
||||
val=$(python3 - << 'PY'
|
||||
import random, string
|
||||
print(''.join(random.choices(string.ascii_letters+string.digits, k=12)))
|
||||
PY
|
||||
)
|
||||
else
|
||||
val=$(python3 - << 'PY'
|
||||
import random, string
|
||||
print(''.join(random.choices(string.ascii_letters+string.digits, k=16)))
|
||||
PY
|
||||
)
|
||||
fi
|
||||
echo " → Overriding $key=$val"
|
||||
overrides+=("--set" "$key=$val")
|
||||
done
|
||||
# Retry with overrides
|
||||
echo "🔄 Retrying with overrides..."
|
||||
retry_out=$(python3 -m cli.create.credentials \
|
||||
--role-path "/repo/roles/$app" \
|
||||
--inventory-file "$ART/inventory.yml" \
|
||||
--vault-password-file "$ART/vaultpw.txt" \
|
||||
"${overrides[@]}" \
|
||||
--force 2>&1) || retry_rc=$?; retry_rc=${retry_rc:-0}
|
||||
if [ "$retry_rc" -eq 0 ]; then
|
||||
echo "✅ Credentials generated for $app (with overrides)"
|
||||
else
|
||||
echo "❌ Override failed for $app:"
|
||||
echo "$retry_out"
|
||||
fi
|
||||
else
|
||||
echo "❌ Credential error for $app:"
|
||||
echo "$output"
|
||||
fi
|
||||
done
|
||||
|
||||
# 3) Show generated files
|
||||
ls -R "$ART" 2>/dev/null
|
||||
|
||||
echo "
|
||||
===== inventory.yml ====="
|
||||
cat "$ART/inventory.yml"
|
||||
|
||||
echo "
|
||||
===== vaultpw.txt ====="
|
||||
cat "$ART/vaultpw.txt"
|
||||
|
||||
# 4) Deploy
|
||||
python3 -m cli.deploy \
|
||||
"$ART/inventory.yml" \
|
||||
--limit localhost \
|
||||
--vault-password-file "$ART/vaultpw.txt" \
|
||||
--verbose
|
||||
'''
|
||||
|
||||
cmd = [
|
||||
"docker", "run", "--rm",
|
||||
"-v", f"{repo}:/repo",
|
||||
"-w", "/repo",
|
||||
"--entrypoint", "bash",
|
||||
"infinito:latest",
|
||||
"-c", bash_script
|
||||
]
|
||||
print(f"\033[96m> {' '.join(cmd)}\033[0m")
|
||||
rc = subprocess.call(cmd)
|
||||
sys.exit(rc)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
50
cli/make.py
Normal file
50
cli/make.py
Normal file
@@ -0,0 +1,50 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
CLI wrapper for Makefile targets within Infinito.Nexus.
|
||||
Invokes `make` commands in the project root directory.
|
||||
"""
|
||||
import argparse
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(
|
||||
prog='infinito make',
|
||||
description='Run Makefile targets for Infinito.Nexus project'
|
||||
)
|
||||
parser.add_argument(
|
||||
'targets',
|
||||
nargs=argparse.REMAINDER,
|
||||
help='Make targets and options to pass to `make`'
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
# Default to 'build' if no target is specified
|
||||
make_args = args.targets or ['build']
|
||||
|
||||
# Determine repository root (one level up from cli/)
|
||||
script_dir = os.path.dirname(os.path.realpath(__file__))
|
||||
repo_root = os.path.abspath(os.path.join(script_dir, os.pardir))
|
||||
|
||||
# Check for Makefile
|
||||
makefile_path = os.path.join(repo_root, 'Makefile')
|
||||
if not os.path.isfile(makefile_path):
|
||||
print(f"Error: Makefile not found in {repo_root}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
# Invoke make in repo root
|
||||
cmd = ['make'] + make_args
|
||||
try:
|
||||
result = subprocess.run(cmd, cwd=repo_root)
|
||||
sys.exit(result.returncode)
|
||||
except FileNotFoundError:
|
||||
print("Error: 'make' command not found. Please install make.", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
except KeyboardInterrupt:
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
49
cli/meta/applications/invokable.py
Normal file
49
cli/meta/applications/invokable.py
Normal file
@@ -0,0 +1,49 @@
|
||||
#!/usr/bin/env python3
|
||||
# cli/meta/applications/invokable.py
|
||||
|
||||
import argparse
|
||||
import sys
|
||||
import os
|
||||
|
||||
# Import filter plugin for get_all_invokable_apps
|
||||
try:
|
||||
from filter_plugins.get_all_invokable_apps import get_all_invokable_apps
|
||||
except ImportError:
|
||||
# Try to adjust sys.path if running outside Ansible
|
||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..')))
|
||||
try:
|
||||
from filter_plugins.get_all_invokable_apps import get_all_invokable_apps
|
||||
except ImportError:
|
||||
sys.stderr.write("Could not import filter_plugins.get_all_invokable_apps. Check your PYTHONPATH.\n")
|
||||
sys.exit(1)
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(
|
||||
description='List all invokable applications (application_ids) based on invokable paths from categories.yml and available roles.'
|
||||
)
|
||||
parser.add_argument(
|
||||
'-c', '--categories-file',
|
||||
default=os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'roles', 'categories.yml')),
|
||||
help='Path to roles/categories.yml (default: roles/categories.yml at project root)'
|
||||
)
|
||||
parser.add_argument(
|
||||
'-r', '--roles-dir',
|
||||
default=os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'roles')),
|
||||
help='Path to roles/ directory (default: roles/ at project root)'
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
try:
|
||||
result = get_all_invokable_apps(
|
||||
categories_file=args.categories_file,
|
||||
roles_dir=args.roles_dir
|
||||
)
|
||||
except Exception as e:
|
||||
sys.stderr.write(f"Error: {e}\n")
|
||||
sys.exit(1)
|
||||
|
||||
for app_id in result:
|
||||
print(app_id)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
124
cli/sounds.py
124
cli/sounds.py
@@ -1,124 +0,0 @@
|
||||
import numpy as np
|
||||
import simpleaudio as sa
|
||||
|
||||
class Sound:
|
||||
"""
|
||||
Sound effects for the application with enhanced complexity.
|
||||
Each sound uses at least 6 distinct tones and lasts no more than max_length seconds,
|
||||
except the intro sound which is a detailed 26-second Berlin techno-style build-up, 12-second celebration with a descending-fifth chord sequence of 7 chords, and breakdown with melodic background.
|
||||
Transitions between phases now crossfade over 3 seconds for smoother flow.
|
||||
"""
|
||||
|
||||
fs = 44100 # Sampling rate (samples per second)
|
||||
complexity_factor = 10 # Number of harmonics to sum for richer timbres
|
||||
max_length = 2.0 # Maximum total duration of any sound in seconds
|
||||
|
||||
@staticmethod
|
||||
def _generate_complex_wave(frequency: float, duration: float, harmonics: int = None) -> np.ndarray:
|
||||
if harmonics is None:
|
||||
harmonics = Sound.complexity_factor
|
||||
t = np.linspace(0, duration, int(Sound.fs * duration), False)
|
||||
wave = np.zeros_like(t)
|
||||
for n in range(1, harmonics + 1):
|
||||
wave += (1 / n) * np.sin(2 * np.pi * frequency * n * t)
|
||||
# ADSR envelope
|
||||
attack = int(0.02 * Sound.fs)
|
||||
release = int(0.05 * Sound.fs)
|
||||
env = np.ones_like(wave)
|
||||
env[:attack] = np.linspace(0, 1, attack)
|
||||
env[-release:] = np.linspace(1, 0, release)
|
||||
wave *= env
|
||||
wave /= np.max(np.abs(wave))
|
||||
return (wave * (2**15 - 1)).astype(np.int16)
|
||||
|
||||
@staticmethod
|
||||
def _crossfade(w1: np.ndarray, w2: np.ndarray, fade_len: int) -> np.ndarray:
|
||||
# Ensure fade_len less than each
|
||||
fade_len = min(fade_len, len(w1), len(w2))
|
||||
fade_out = np.linspace(1, 0, fade_len)
|
||||
fade_in = np.linspace(0, 1, fade_len)
|
||||
w1_end = w1[-fade_len:] * fade_out
|
||||
w2_start = w2[:fade_len] * fade_in
|
||||
middle = (w1_end + w2_start).astype(np.int16)
|
||||
return np.concatenate([w1[:-fade_len], middle, w2[fade_len:]])
|
||||
|
||||
@staticmethod
|
||||
def _play(wave: np.ndarray):
|
||||
play_obj = sa.play_buffer(wave, 1, 2, Sound.fs)
|
||||
play_obj.wait_done()
|
||||
|
||||
@classmethod
|
||||
def play_cymais_intro_sound(cls):
|
||||
# Phase durations
|
||||
build_time = 10.0
|
||||
celebr_time = 12.0
|
||||
breakdown_time = 10.0
|
||||
overlap = 3.0 # seconds of crossfade
|
||||
bass_seg = 0.125 # 1/8s kick
|
||||
melody_seg = 0.25 # 2/8s melody
|
||||
bass_freq = 65.41 # C2 kick
|
||||
melody_freqs = [261.63, 293.66, 329.63, 392.00, 440.00, 523.25]
|
||||
|
||||
# Build-up phase
|
||||
steps = int(build_time / (bass_seg + melody_seg))
|
||||
build_seq = []
|
||||
for i in range(steps):
|
||||
amp = (i + 1) / steps
|
||||
b = cls._generate_complex_wave(bass_freq, bass_seg).astype(np.float32) * amp
|
||||
m = cls._generate_complex_wave(melody_freqs[i % len(melody_freqs)], melody_seg).astype(np.float32) * amp
|
||||
build_seq.append(b.astype(np.int16))
|
||||
build_seq.append(m.astype(np.int16))
|
||||
build_wave = np.concatenate(build_seq)
|
||||
|
||||
# Celebration phase: 7 descending-fifth chords
|
||||
roots = [523.25, 349.23, 233.08, 155.56, 103.83, 69.30, 46.25]
|
||||
chord_time = celebr_time / len(roots)
|
||||
celebr_seq = []
|
||||
for root in roots:
|
||||
t = np.linspace(0, chord_time, int(cls.fs * chord_time), False)
|
||||
chord = sum(np.sin(2 * np.pi * f * t) for f in [root, root * 5/4, root * 3/2])
|
||||
chord /= np.max(np.abs(chord))
|
||||
celebr_seq.append((chord * (2**15 - 1)).astype(np.int16))
|
||||
celebr_wave = np.concatenate(celebr_seq)
|
||||
|
||||
# Breakdown phase (mirror of build-up)
|
||||
breakdown_wave = np.concatenate(list(reversed(build_seq)))
|
||||
|
||||
# Crossfade transitions
|
||||
fade_samples = int(overlap * cls.fs)
|
||||
bc = cls._crossfade(build_wave, celebr_wave, fade_samples)
|
||||
full = cls._crossfade(bc, breakdown_wave, fade_samples)
|
||||
|
||||
cls._play(full)
|
||||
|
||||
@classmethod
|
||||
def play_start_sound(cls):
|
||||
freqs = [523.25, 659.26, 783.99, 880.00, 1046.50, 1174.66]
|
||||
cls._prepare_and_play(freqs)
|
||||
|
||||
@classmethod
|
||||
def play_finished_successfully_sound(cls):
|
||||
freqs = [523.25, 587.33, 659.26, 783.99, 880.00, 987.77]
|
||||
cls._prepare_and_play(freqs)
|
||||
|
||||
@classmethod
|
||||
def play_finished_failed_sound(cls):
|
||||
freqs = [880.00, 830.61, 783.99, 659.26, 622.25, 523.25]
|
||||
durations = [0.4, 0.3, 0.25, 0.25, 0.25, 0.25]
|
||||
cls._prepare_and_play(freqs, durations)
|
||||
|
||||
@classmethod
|
||||
def play_warning_sound(cls):
|
||||
freqs = [700.00, 550.00, 750.00, 500.00, 800.00, 450.00]
|
||||
cls._prepare_and_play(freqs)
|
||||
|
||||
@classmethod
|
||||
def _prepare_and_play(cls, freqs, durations=None):
|
||||
count = len(freqs)
|
||||
if durations is None:
|
||||
durations = [cls.max_length / count] * count
|
||||
else:
|
||||
total = sum(durations)
|
||||
durations = [d * cls.max_length / total for d in durations]
|
||||
waves = [cls._generate_complex_wave(f, d) for f, d in zip(freqs, durations)]
|
||||
cls._play(np.concatenate(waves))
|
@@ -1,8 +1,8 @@
|
||||
# CyMaIS Architecture Overview
|
||||
# Infinito.Nexus Architecture
|
||||
|
||||
## Introduction
|
||||
|
||||
CyMaIS (Cyber Master Infrastructure Solution) is a modular, open-source IT infrastructure automation platform designed to simplify the deployment, management, and security of self-hosted environments.
|
||||
[Infinito.Nexus](https://infinito.nexus) is a modular, open-source IT infrastructure automation platform designed to simplify the deployment, management, and security of self-hosted environments.
|
||||
|
||||
It provides a flexible, scalable, and secure architecture based on modern [DevOps](https://en.wikipedia.org/wiki/DevOps) principles, leveraging technologies like [Ansible](https://en.wikipedia.org/wiki/Ansible_(software)), [Docker](https://en.wikipedia.org/wiki/Docker_(software)), and [Infrastructure as Code (IaC)](https://en.wikipedia.org/wiki/Infrastructure_as_code).
|
||||
|
||||
@@ -55,4 +55,4 @@ https://github.com/kevinveenbirkenbach/hetzner-arch-luks
|
||||
|
||||
---
|
||||
|
||||
> *CyMaIS — Modular. Secure. Automated. Decentralized.*
|
||||
> *Infinito.Nexus — Modular. Secure. Automated. Decentralized.*
|
124
docs/Docker.md
Normal file
124
docs/Docker.md
Normal file
@@ -0,0 +1,124 @@
|
||||
# Docker Build Guide 🚢
|
||||
|
||||
This guide explains how to build the **Infinito.Nexus** Docker image with advanced options to avoid common issues (e.g. mirror timeouts) and control build caching.
|
||||
|
||||
---
|
||||
|
||||
## 1. Enable BuildKit (Optional but Recommended)
|
||||
|
||||
Modern versions of Docker support **BuildKit**, which speeds up build processes and offers better caching.
|
||||
|
||||
```bash
|
||||
# On your host, enable BuildKit for the current shell session:
|
||||
export DOCKER_BUILDKIT=1
|
||||
```
|
||||
|
||||
> **Note:** You only need to set this once per terminal session.
|
||||
|
||||
---
|
||||
|
||||
## 2. Build Arguments Explained
|
||||
|
||||
When you encounter errors like:
|
||||
|
||||
```text
|
||||
:: Synchronizing package databases...
|
||||
error: failed retrieving file 'core.db' from geo.mirror.pkgbuild.com : Connection timed out after 10002 milliseconds
|
||||
error: failed to synchronize all databases (failed to retrieve some files)
|
||||
```
|
||||
|
||||
it usually means the default container network cannot reach certain Arch Linux mirrors. To work around this, use:
|
||||
|
||||
* `--network=host`
|
||||
Routes all build-time network traffic through your host’s network stack.
|
||||
|
||||
* `--no-cache`
|
||||
Forces a fresh build of every layer by ignoring Docker’s layer cache. Useful if you suspect stale cache entries.
|
||||
|
||||
---
|
||||
|
||||
## 3. Recommended Build Command
|
||||
|
||||
```bash
|
||||
# 1. (Optional) Enable BuildKit
|
||||
export DOCKER_BUILDKIT=1
|
||||
|
||||
# 2. Build with host networking and no cache
|
||||
docker build \
|
||||
--network=host \
|
||||
--no-cache \
|
||||
-t infinito:latest \
|
||||
.
|
||||
```
|
||||
|
||||
**Flags:**
|
||||
|
||||
* `--network=host`
|
||||
Ensures all `pacman -Syu` and other network calls hit your host network directly—eliminating mirror connection timeouts.
|
||||
|
||||
* `--no-cache`
|
||||
Guarantees that changes to package lists or dependencies are picked up immediately by rebuilding every layer.
|
||||
|
||||
* `-t infinito:latest`
|
||||
Tags the resulting image as `infinito:latest`.
|
||||
|
||||
---
|
||||
|
||||
## 4. Running the Container
|
||||
|
||||
Once built, you can run Infinito.Nexus as usual:
|
||||
|
||||
```bash
|
||||
docker run --rm -it \
|
||||
-v "$(pwd)":/opt/infinito \
|
||||
-w /opt/infinito \
|
||||
infinito:latest --help
|
||||
```
|
||||
|
||||
Mount any host directory into `/opt/infinito/logs` to persist logs across runs.
|
||||
|
||||
---
|
||||
|
||||
## 5. Further Troubleshooting
|
||||
|
||||
* **Mirror selection:** If you still see slow or unreachable mirrors, consider customizing `/etc/pacman.d/mirrorlist` in a local Docker stage or on your host to prioritize faster mirrors.
|
||||
|
||||
* **Firewall or VPN:** Ensure your host’s firewall or VPN allows outgoing connections on port 443/80 to Arch mirror servers.
|
||||
|
||||
* **Docker daemon config:** On some networks, you may need to configure Docker’s daemon proxy settings under `/etc/docker/daemon.json`.
|
||||
|
||||
## 6. Live Development via Volume Mount
|
||||
|
||||
The Infinito.Nexus installation inside the container always resides at:
|
||||
|
||||
```
|
||||
/root/Repositories/github.com/kevinveenbirkenbach/infinito
|
||||
```
|
||||
|
||||
To apply code changes without rebuilding the image, mount your local installation directory into that static path:
|
||||
|
||||
```bash
|
||||
# 1. Determine the Infinito.Nexus install path on your host
|
||||
INFINITO_PATH=$(pkgmgr path infinito)
|
||||
|
||||
# 2. Launch the container with a bind mount:
|
||||
docker run --rm -it \
|
||||
-v "${INFINITO_PATH}:/root/Repositories/github.com/kevinveenbirkenbach/infinito" \
|
||||
-w "/root/Repositories/github.com/kevinveenbirkenbach/infinito" \
|
||||
infinito:latest make build
|
||||
```
|
||||
|
||||
Or, to test the CLI help interactively:
|
||||
|
||||
```bash
|
||||
docker run --rm -it \
|
||||
-v "${INFINITO_PATH}:/root/Repositories/github.com/kevinveenbirkenbach/infinito" \
|
||||
-w "/root/Repositories/github.com/kevinveenbirkenbach/infinito" \
|
||||
infinito:latest --help
|
||||
```
|
||||
|
||||
Any edits you make in `${INFINITO_PATH}` on your host are immediately reflected inside the container, eliminating the need for repeated `docker build` cycles.
|
||||
|
||||
---
|
||||
|
||||
With these options, your Docker builds should complete reliably, even in restrictive network environments. Happy building! 🚀
|
2
docs/TODO.md
Normal file
2
docs/TODO.md
Normal file
@@ -0,0 +1,2 @@
|
||||
# TODO
|
||||
- Move this files to https://hub.cymais.cloud
|
@@ -1,26 +0,0 @@
|
||||
# Features
|
||||
|
||||
**CyMaIS - Cyber Master Infrastructure Solution** revolutionizes IT infrastructure management, making it simpler, safer, and more adaptable for businesses of all sizes. Here’s how it can benefit your organization:
|
||||
|
||||
## Effortless Setup and Management 🚀
|
||||
Setting up and managing IT systems has never been easier. CyMaIS automates complex tasks, whether on Linux servers or personal computers, reducing manual effort and saving valuable time.
|
||||
|
||||
## Comprehensive IT Solutions 🛠️
|
||||
CyMaIS covers everything from essential system setups to advanced configurations, including VPN, Docker, Ansible-based deployments, security optimizations, and monitoring tools. This makes IT management seamless and efficient.
|
||||
|
||||
## Tailored for Your Needs 🎯
|
||||
Every business is unique, and so is CyMaIS! With a modular architecture, it adapts to specific requirements, whether for startups, growing businesses, NGOs, or large enterprises.
|
||||
|
||||
## Proactive Monitoring & Maintenance 🔍
|
||||
With automated updates, system health checks, and security audits, CyMaIS ensures your infrastructure is always up-to-date and running smoothly. Roles such as `sys-hlth-docker-container`, `sys-hlth-btrfs`, and `sys-hlth-webserver` help monitor system integrity.
|
||||
|
||||
## Uncompromised Security 🔒
|
||||
Security is a top priority! CyMaIS includes robust security features like full-disk encryption recommendations, 2FA enforcement, encrypted server deployments (`web-app-keycloak`, `svc-db-openldap`), and secure backup solutions (`sys-bkp-remote-to-local`, `svc-sys-bkp-data-to-usb`).
|
||||
|
||||
## User-Friendly with Expert Support 👩💻
|
||||
No need to be a Linux or Docker expert! CyMaIS simplifies deployment with intuitive role-based automation. Documentation and community support make IT administration accessible to all experience levels.
|
||||
|
||||
## Open Source Trust & Transparency 🔓
|
||||
As an open-source project, CyMaIS guarantees transparency, security, and community-driven development, ensuring continuous improvements and adherence to industry best practices.
|
||||
|
||||
For further information, check out the [application glosar](roles/application_glosar), [applications ordered by category](roles/application_categories) and the [detailled ansible role descriptions](roles/ansible_role_glosar).
|
@@ -1,34 +0,0 @@
|
||||
# Situation Analysis
|
||||
|
||||
This is the Situation Analysis for [CyMaIS](https://cymais.cloud), highlighting the challenges we aim to address.
|
||||
|
||||
## Short
|
||||
|
||||
The problem stems from businesses and individuals being dependent on monopolistic cloud providers, losing control over their data, facing security risks, and being vulnerable to geopolitical manipulation, while small businesses struggle to set up secure, enterprise-level IT infrastructures due to lack of resources and expertise.
|
||||
|
||||
## Explanation
|
||||
|
||||
In today’s digital landscape, data is predominantly stored in the cloud, controlled by large corporations such as Microsoft, AWS, and other cloud providers. This creates a dependency on these providers, leading to increasingly expensive services and a lack of control over critical business data.
|
||||
|
||||
As organizations rely on these monopolistic players for their cloud services, they surrender ownership of their data, becoming vulnerable to the whims of these companies. This dependency puts them at the mercy of cloud and software giants, who not only dictate pricing and service levels but also influence the very governance of data.
|
||||
|
||||
Moreover, the ease with which governments, intelligence agencies, and private corporations can access sensitive data is a growing concern. With increasing surveillance capabilities, the privacy of users and businesses is constantly at risk, further amplifying the vulnerability of data stored in centralized cloud infrastructures.
|
||||
|
||||
Additionally, the dominance of these companies in sectors like social media further exacerbates the issue, making individuals and organizations susceptible to manipulation and control.
|
||||
|
||||
The problem intensifies in times of political unrest or global conflicts. As data is often centrally stored with monopolistic providers, businesses become highly dependent on these entities for accessing their data and services. This dependency increases the risk of coercion or pressure from governments or private corporations, leading to potential **extortion**. Governments may attempt to gain leverage over businesses by threatening access to critical data or services, while private companies may exploit this dependency for their own interests.
|
||||
|
||||
In essence, the lack of sovereignty over data and the increasing control of a few monopolistic entities undermine the fundamental values of privacy, security, and independence. Organizations, especially small businesses, are left vulnerable to external pressures, making them pawns in a larger game dominated by these cloud and software giants.
|
||||
|
||||
Furthermore, for small businesses, setting up enterprise-level open-source infrastructure with integrated solutions such as **Single Sign-On (SSO)**, **Identity and Access Management (IAM)**, **encryption**, **backup solutions**, and other essential IT services is nearly impossible. These businesses lack the resources, both financial and human, to deploy secure IT infrastructures at an enterprise level.
|
||||
|
||||
System administrators in small companies often don’t have the specialized knowledge or the capacity to build and maintain such complex infrastructures, which further exacerbates the challenge of securing sensitive business data while ensuring compliance with industry standards.
|
||||
|
||||
## Key Points
|
||||
- Dependency on monopolists
|
||||
- Loss of data sovereignty
|
||||
- Geopolitical vulnerabilities
|
||||
- Lack of resources
|
||||
- Limited secure infrastructure expertise
|
||||
- Centralized data storage risks
|
||||
- Manipulation through social media
|
@@ -1,40 +0,0 @@
|
||||
# Market Analysis for CyMaIS in Berlin
|
||||
|
||||
## 1. Introduction
|
||||
Berlin is recognized as one of Europe's leading innovation and technology hubs. The capital is characterized by a dynamic start-up scene, numerous SMEs, and international corporations that drive digital transformation. This creates a promising market for modular IT infrastructure solutions like CyMaIS.
|
||||
|
||||
## 2. Market Overview and Business Landscape
|
||||
- **Diverse Economic Hub:**
|
||||
Berlin is home to an estimated several tens of thousands of companies—from innovative start-ups to established mid-sized businesses and large enterprises.
|
||||
- **Digital Innovation:**
|
||||
The city is known for its high concentration of technology companies, digital service providers, and creative industries constantly seeking efficient IT solutions.
|
||||
- **Support and Infrastructure:**
|
||||
Numerous initiatives, funding programs, and well-developed networks of technology parks and coworking spaces support the city’s digital progress.
|
||||
|
||||
## 3. Level of Digitalization and IT Needs
|
||||
- **Advanced Yet Heterogeneous Digitalization:**
|
||||
Many Berlin companies already use modern IT solutions, but traditional businesses often require significant upgrades in integrating advanced infrastructure and cybersecurity measures.
|
||||
- **Increasing Demands:**
|
||||
Rising business process complexity and stricter requirements for data protection and security are driving the need for individualized, scalable IT solutions.
|
||||
|
||||
## 4. Overall Market Volume (Estimation)
|
||||
- **Estimated Market Volume:**
|
||||
Considering the diverse company sizes and varying investment levels—from start-ups to large enterprises—the annual overall market volume for IT infrastructure modernization solutions in Berlin is roughly estimated at **€1–2 billion**.
|
||||
This figure reflects the aggregate potential of digital transformation initiatives across Berlin’s vibrant business ecosystem.
|
||||
|
||||
## 5. Price Segments and Investment Readiness
|
||||
- **Low-Priced Segment:**
|
||||
Many start-ups and small companies are capable of investing approximately €10,000–30,000 to set up basic infrastructures.
|
||||
- **Mid-Priced Segment:**
|
||||
Established SMEs in Berlin are typically prepared to invest between €40,000 and €70,000 in tailored IT solutions to incorporate additional functionalities and security standards.
|
||||
- **High-Priced Segment:**
|
||||
Large enterprises and specialized industrial businesses invest in complex integration solutions starting at around €100,000 to implement comprehensive digital transformation projects.
|
||||
|
||||
## 6. Competitive Landscape and Positioning
|
||||
- **High Innovation Pressure:**
|
||||
Berlin's vibrant IT and digital services sector is highly competitive. To stand out, solutions must be flexible, scalable, and seamlessly integrable.
|
||||
- **CyMaIS Advantages:**
|
||||
The modular architecture of CyMaIS allows it to meet the individual requirements of Berlin’s diverse businesses—from start-ups to large industrial projects—perfectly. Additionally, its focus on cybersecurity and continuous updates offers a decisive added value.
|
||||
|
||||
## 7. Conclusion
|
||||
Berlin offers an attractive market potential for IT infrastructure solutions. With a vibrant innovation landscape, a considerable overall market volume estimated at €1–2 billion, and numerous companies needing to take the next step in digital transformation, CyMaIS is well positioned as a powerful, modular solution. The combination of a dynamic start-up ecosystem and established businesses promises attractive long-term growth opportunities.
|
@@ -1,37 +0,0 @@
|
||||
# Berlin Market Diagrams
|
||||
|
||||
## 1. Digitalization in Berlin (Pie Chart)
|
||||
```mermaid
|
||||
pie
|
||||
title Berlin: IT Digitalization Status
|
||||
"Fully Modernized (25%)": 25
|
||||
"Partially Digitalized (45%)": 45
|
||||
"Requires Significant Upgrades (30%)": 30
|
||||
```
|
||||
*This pie chart displays the estimated IT digitalization status for Berlin-based companies, with 25% fully modernized, 45% partially digitalized, and 30% requiring major upgrades.*
|
||||
|
||||
## 2. Investment Segments in Berlin (Flowchart)
|
||||
```mermaid
|
||||
flowchart LR
|
||||
A[Investment Segments in Berlin]
|
||||
B[Low-Priced (€10k-30k): 40%]
|
||||
C[Mid-Priced (€40k-70k): 40%]
|
||||
D[High-Priced (€100k+): 20%]
|
||||
|
||||
A --> B
|
||||
A --> C
|
||||
A --> D
|
||||
```
|
||||
*This flowchart shows the distribution of investment segments for IT infrastructure projects in Berlin, categorized into low-, mid-, and high-priced solutions.*
|
||||
|
||||
## 3. Berlin Market Volume & Drivers (Flowchart)
|
||||
```mermaid
|
||||
flowchart TD
|
||||
A[Berlin IT Infrastructure Market]
|
||||
B[Market Volume: €1-2 Billion]
|
||||
C[Drivers: Start-up Ecosystem, Established Firms, Local Initiatives]
|
||||
|
||||
A --> B
|
||||
A --> C
|
||||
```
|
||||
*This diagram outlines Berlin's overall market volume (estimated at €1–2 billion) and identifies the main drivers such as the vibrant start-up ecosystem and support from local initiatives.*
|
@@ -1,77 +0,0 @@
|
||||
# Market Analysis for CyMaIS in Europe
|
||||
|
||||
This analysis provides a detailed overview of the potential for CyMaIS – a modular IT infrastructure solution – in the European market.
|
||||
|
||||
## 1. Introduction
|
||||
CyMaIS addresses the growing need for flexible and scalable IT infrastructure solutions that support companies in their digital transformation. The European market, characterized by diverse economic systems, offers a variety of opportunities and challenges.
|
||||
|
||||
## 2. Market Overview and Digitalization in Europe
|
||||
- **Business Landscape:**
|
||||
- Europe is home to an estimated 20–25 million companies, most of which are small and medium-sized enterprises (SMEs).
|
||||
- Business structures vary significantly between regions: while countries such as the Nordic nations, Estonia, or Germany are highly advanced, other markets lag behind in certain aspects.
|
||||
|
||||
- **Degree of Digitalization:**
|
||||
- Basic digital technologies have been implemented in many European companies; however, recent studies indicate that only about 50–60% have reached a basic level of digitalization.
|
||||
- A large share of companies – approximately 70–80% – faces the challenge of further modernizing their IT infrastructures, particularly in areas like cybersecurity and automation.
|
||||
|
||||
## 3. Analysis of the Demand for IT Infrastructure Solutions
|
||||
- **Target Market:**
|
||||
- There is significant demand across Europe for solutions that modernize outdated IT structures while meeting increased requirements for data protection, security, and efficiency.
|
||||
- SMEs, as well as larger companies in sectors with high security and compliance needs, can particularly benefit from specialized, modular solutions like CyMaIS.
|
||||
|
||||
- **Core Requirements:**
|
||||
- Integration of modern IT components
|
||||
- Enhancement of cybersecurity
|
||||
- Support for automation and data analytics
|
||||
|
||||
## 4. Pricing Segments and Cost Structure
|
||||
CyMaIS offers solutions that can be tailored to different budgets and requirements:
|
||||
|
||||
- **Low-Priced Segment (Basic Setup):**
|
||||
- **Costs:** Approximately €10,000–30,000
|
||||
- **Target Group:** Small companies requiring standardized IT solutions
|
||||
|
||||
- **Mid-Priced Segment:**
|
||||
- **Costs:** Approximately €40,000–70,000
|
||||
- **Target Group:** Medium-sized companies with specific customization needs
|
||||
|
||||
- **High-Priced Segment (Complex, Customized Solutions):**
|
||||
- **Costs:** From €100,000 and upwards
|
||||
- **Target Group:** Large companies and projects with extensive integration requirements
|
||||
|
||||
## 5. Total Market Volume and Revenue Potential
|
||||
- **Total Market Volume:**
|
||||
- The revenue potential for IT infrastructure solutions in Europe is estimated at approximately **€300–500 billion**.
|
||||
- This figure includes investments in hardware, software, consulting and integration services, as well as ongoing IT support services.
|
||||
|
||||
- **Growth Drivers:**
|
||||
- The continuous need for digital transformation
|
||||
- Increasing security requirements (cybersecurity)
|
||||
- Government funding programs and initiatives that support digitalization across many European countries
|
||||
|
||||
## 6. Competitive Environment and Positioning of CyMaIS
|
||||
- **Competition:**
|
||||
- The European market is fragmented: in addition to major global IT service providers, there are numerous local providers.
|
||||
- Cross-border differences create diverse market conditions where specialized, modular solutions can offer a strategic advantage.
|
||||
|
||||
- **Competitive Advantages of CyMaIS:**
|
||||
- **Modularity and Flexibility:** Enables tailor-made adaptation to individual business requirements
|
||||
- **Scalability:** Ranges from basic solutions for SMEs to complex system integrations for large enterprises
|
||||
- **Seamless Integration:** Incorporates modern IT components, including advanced security solutions
|
||||
|
||||
## 7. Opportunities and Challenges
|
||||
- **Opportunities:**
|
||||
- Increasing investments in digital transformation and cybersecurity
|
||||
- High demand in under-served markets and among SMEs needing to modernize their IT infrastructures
|
||||
- Potential for international expansion through adaptable, modular solutions
|
||||
|
||||
- **Challenges:**
|
||||
- Varied levels of digitalization and differing economic conditions across European countries
|
||||
- Intense competition and pricing pressure, particularly in mature markets
|
||||
- Requirements for country-specific regulations and compliance necessitating customized adaptations
|
||||
|
||||
## 8. Conclusion
|
||||
The European market offers significant potential for CyMaIS. With an estimated total market volume of €300–500 billion and a large number of companies needing to modernize their IT infrastructures, CyMaIS is well positioned as a flexible and scalable solution—ideal for meeting the diverse requirements of the European market. In the long term, ongoing digitalization and increasing security needs present attractive growth opportunities.
|
||||
|
||||
## Sources
|
||||
- Analysis based on an interactive discussion with [ChatGPT](https://chatgpt.com/c/67f95f70-865c-800f-bd97-864a36f9b498) on April 11, 2025.
|
@@ -1,38 +0,0 @@
|
||||
# Europe Market Diagrams
|
||||
|
||||
## 1. Digitalization Status (Pie Chart)
|
||||
```mermaid
|
||||
pie
|
||||
title Europe: Digitalization Status
|
||||
"Fully Modernized (20%)": 20
|
||||
"Partially Digitalized (50%)": 50
|
||||
"Needs Advanced Modernization (30%)": 30
|
||||
```
|
||||
*This pie chart illustrates the digitalization status across European companies, with 20% fully modernized, 50% partially digitalized, and 30% needing advanced modernization.*
|
||||
|
||||
## 2. Investment Segments (Flowchart)
|
||||
```mermaid
|
||||
flowchart LR
|
||||
A[European Investment Segments]
|
||||
B[Low-Priced (€10k-30k): 35%]
|
||||
C[Mid-Priced (€40k-70k): 45%]
|
||||
D[High-Priced (€100k+): 20%]
|
||||
|
||||
A --> B
|
||||
A --> C
|
||||
A --> D
|
||||
```
|
||||
*This flowchart depicts the breakdown of IT investment segments in Europe, with approximate percentages for low-, mid-, and high-priced solutions.*
|
||||
|
||||
## 3. Overall Market Volume & Drivers (Flowchart)
|
||||
```mermaid
|
||||
flowchart TD
|
||||
A[European IT Infrastructure Market]
|
||||
B[Market Volume: €300-500 Billion]
|
||||
C[Drivers: Digital Transformation, Cybersecurity, Govt. Initiatives]
|
||||
|
||||
A --> B
|
||||
A --> C
|
||||
```
|
||||
*This diagram presents the European market’s overall volume (estimated at €300–500 billion) and highlights the main growth drivers such as digital transformation initiatives and cybersecurity needs.*
|
||||
```
|
@@ -1,83 +0,0 @@
|
||||
# Market Analysis for CyMaIS in Germany
|
||||
|
||||
This analysis provides a detailed overview of the market potential of CyMaIS – a modular solution for establishing and managing modern IT infrastructures – in the German market.
|
||||
|
||||
## 1. Introduction
|
||||
CyMaIS addresses the increasing need for modern, flexible IT infrastructure solutions in Germany. In particular, small and medium-sized enterprises (SMEs) face the challenge of advancing their digitalization while meeting security requirements. CyMaIS offers modular, customizable solutions ranging from basic setups to complex integration projects.
|
||||
|
||||
## 2. Market Overview and Digitalization in Germany
|
||||
- **Business Landscape:**
|
||||
- There are approximately 3.5 million companies in Germany.
|
||||
- Over 99% of these companies are SMEs.
|
||||
|
||||
- **Degree of Digitalization:**
|
||||
- About 60–70% have already implemented basic digital technologies.
|
||||
- An estimated 75–85% of companies require additional support to build modern IT infrastructures (including cybersecurity, automation, and data management).
|
||||
|
||||
## 3. Analysis of the Demand for IT Infrastructure Solutions
|
||||
- **Target Market:**
|
||||
- Approximately 2.6 to 3 million companies – predominantly SMEs – face the challenge of modernizing outdated or incomplete IT structures.
|
||||
- Industries with high security requirements and a strong need for digital transformation particularly benefit from specialized solutions like CyMaIS.
|
||||
|
||||
- **Core Requirements:**
|
||||
- Integration of modern IT components
|
||||
- Enhancement of cybersecurity
|
||||
- Support for process automation and data analytics
|
||||
|
||||
## 4. Pricing Segments and Cost Structure
|
||||
CyMaIS caters to different pricing segments in order to meet the diverse needs of companies:
|
||||
|
||||
- **Low-Priced Segment (Basic Setup):**
|
||||
- **Costs:** Approximately €10,000–30,000
|
||||
- **Target Group:** Smaller companies and standardized IT requirements
|
||||
- **Market Share:** Estimated 30–40% of potential customers
|
||||
|
||||
- **Mid-Priced Segment:**
|
||||
- **Costs:** Approximately €40,000–70,000
|
||||
- **Target Group:** Medium-sized companies with individual customization needs
|
||||
- **Market Share:** Around 20–25% of companies
|
||||
|
||||
- **High-Priced Segment (Complex, Customized Solutions):**
|
||||
- **Costs:** Starting from €100,000 and above
|
||||
- **Target Group:** Large companies and highly specialized projects
|
||||
- **Market Share:** About 5–10% of potential customers
|
||||
|
||||
## 5. Total Market Volume and Revenue Potential
|
||||
- **Market Volume:**
|
||||
- The total market volume for IT infrastructure solutions in Germany is estimated at approximately **€80–120 billion**.
|
||||
|
||||
- **Influencing Factors:**
|
||||
- The scope of required solutions
|
||||
- Consulting and integration services
|
||||
- Ongoing investments in cybersecurity and digitalization
|
||||
|
||||
- **Growth Drivers:**
|
||||
- Increasing digitalization across all industries
|
||||
- Rising security requirements (cybersecurity)
|
||||
- Government programs and initiatives supporting digital transformation
|
||||
|
||||
## 6. Competitive Environment and Positioning of CyMaIS
|
||||
- **Competition:**
|
||||
- The market for IT infrastructure solutions in Germany is fragmented, with numerous providers offering standardized as well as specialized solutions.
|
||||
|
||||
- **Competitive Advantages of CyMaIS:**
|
||||
- **Modularity:** Flexible adaptation to individual business needs
|
||||
- **Scalability:** From basic setups to complex systems
|
||||
- **Integration:** Seamless incorporation of modern IT components, including security solutions
|
||||
|
||||
## 7. Opportunities and Challenges
|
||||
- **Opportunities:**
|
||||
- Growing demand for digital transformation and security solutions
|
||||
- High market penetration among SMEs that are yet to modernize their IT infrastructures
|
||||
- Government funding and initiatives for digitalization
|
||||
|
||||
- **Challenges:**
|
||||
- Strong competition and pricing pressure
|
||||
- Varied IT and digitalization levels across companies
|
||||
- Technological complexity and the need for customized adaptations
|
||||
|
||||
## 8. Conclusion
|
||||
The German IT market offers significant potential for CyMaIS. With an estimated market volume of €80–120 billion and approximately 2.6 to 3 million companies needing to modernize their IT infrastructures, CyMaIS is well positioned. The modular and scalable nature of its solutions enables it to serve both small and large companies with individual requirements. In the long term, ongoing digitalization and increasing security demands present attractive growth opportunities for CyMaIS.
|
||||
|
||||
## Sources
|
||||
- Analysis based on a conversation conducted with [ChatGPT](https://chatgpt.com/share/67f9608d-3904-800f-a9ca-9b893e252c05) on April 11, 2025.
|
@@ -1,37 +0,0 @@
|
||||
# Germany Market Diagrams
|
||||
|
||||
## 1. Digitalization / IT Modernization Need (Pie Chart)
|
||||
```mermaid
|
||||
pie
|
||||
title Germany: IT Modernization Status
|
||||
"Fully Modernized (20%)": 20
|
||||
"Partially Digitalized (30%)": 30
|
||||
"Requires Major Modernization (50%)": 50
|
||||
```
|
||||
*This diagram shows the estimated distribution of digitalization among German companies: 20% are fully modernized, 30% are partially digitalized, and 50% need major IT upgrades.*
|
||||
|
||||
## 2. Investment/Price Segments (Flowchart)
|
||||
```mermaid
|
||||
flowchart LR
|
||||
A[Investment Segments]
|
||||
B[Low-Priced (€10k-30k): 40%]
|
||||
C[Mid-Priced (€40k-70k): 40%]
|
||||
D[High-Priced (€100k+): 20%]
|
||||
|
||||
A --> B
|
||||
A --> C
|
||||
A --> D
|
||||
```
|
||||
*This flowchart represents the distribution of investment segments in Germany, indicating that approximately 40% of projects fall into the low- and mid-priced categories each, with 20% in the high-priced bracket.*
|
||||
|
||||
## 3. Overall Market Volume & Drivers (Flowchart)
|
||||
```mermaid
|
||||
flowchart TD
|
||||
A[German IT Infrastructure Market]
|
||||
B[Market Volume: €80-120 Billion]
|
||||
C[Drivers: Digital Transformation, Cybersecurity, Integration]
|
||||
|
||||
A --> B
|
||||
A --> C
|
||||
```
|
||||
*This diagram outlines the overall market volume (estimated at €80–120 billion) and the key drivers shaping the demand for IT infrastructure solutions in Germany.*
|
@@ -1,77 +0,0 @@
|
||||
# Global Market Analysis for CyMaIS
|
||||
|
||||
This analysis provides a detailed overview of the global potential for CyMaIS – a modular IT infrastructure solution – addressing the growing worldwide demand for digital transformation and advanced cybersecurity measures.
|
||||
|
||||
## 1. Introduction
|
||||
CyMaIS is designed to support enterprises in modernizing their IT infrastructures. As digital transformation accelerates globally, organizations of all sizes require scalable and flexible solutions to manage cybersecurity, automation, and data management. This analysis evaluates the global market potential for CyMaIS across diverse economic regions.
|
||||
|
||||
## 2. Global Market Overview and Digitalization
|
||||
- **Business Landscape:**
|
||||
- There are estimated to be hundreds of millions of companies worldwide, with tens of millions being small and medium-sized enterprises (SMEs).
|
||||
- Developed markets (North America, Europe, parts of Asia) typically exhibit higher digitalization rates, whereas emerging markets are rapidly catching up.
|
||||
|
||||
- **Degree of Digitalization:**
|
||||
- Many large enterprises have implemented advanced digital technologies, while a significant proportion of SMEs—potentially over 70% globally—still need to progress beyond basic digitalization.
|
||||
- This gap is particularly apparent in regions where legacy systems are prevalent or where investment in IT modernization has been historically low.
|
||||
|
||||
## 3. Analysis of the Demand for IT Infrastructure Solutions
|
||||
- **Target Market:**
|
||||
- Globally, the demand for modern IT infrastructure solutions is strong due to rising cybersecurity threats, the need for automation, and the increasing reliance on data analytics.
|
||||
- Industries across sectors—from finance and manufacturing to healthcare and retail—are actively seeking solutions to overhaul outdated IT systems.
|
||||
|
||||
- **Core Requirements:**
|
||||
- Seamless integration of modern IT components
|
||||
- Robust cybersecurity measures
|
||||
- Tools for process automation and data-driven decision-making
|
||||
|
||||
## 4. Pricing Segments and Cost Structure
|
||||
CyMaIS offers a range of solutions tailored to different budget levels and technical needs, including:
|
||||
|
||||
- **Low-Priced Segment (Basic Setup):**
|
||||
- **Costs:** Approximately €10,000–30,000
|
||||
- **Target Group:** Small companies looking for standardized IT solutions
|
||||
|
||||
- **Mid-Priced Segment:**
|
||||
- **Costs:** Approximately €40,000–70,000
|
||||
- **Target Group:** Medium-sized companies with customization requirements
|
||||
|
||||
- **High-Priced Segment (Complex, Customized Solutions):**
|
||||
- **Costs:** From €100,000 upwards
|
||||
- **Target Group:** Large enterprises and projects with extensive integration and security needs
|
||||
|
||||
## 5. Total Market Volume and Revenue Potential
|
||||
- **Global Market Volume:**
|
||||
- The overall revenue potential for modern IT infrastructure solutions worldwide is substantial, with estimates ranging between **€1–1.5 trillion**.
|
||||
- This figure comprises investments in hardware, software, consulting, integration services, and ongoing IT support.
|
||||
|
||||
- **Growth Drivers:**
|
||||
- The accelerating pace of digital transformation worldwide
|
||||
- Increasing incidence of cybersecurity threats
|
||||
- Government initiatives and private-sector investments that promote digitalization
|
||||
|
||||
## 6. Competitive Environment and Positioning of CyMaIS
|
||||
- **Competition:**
|
||||
- The global market is highly competitive, featuring major multinational IT service providers as well as numerous regional and niche players.
|
||||
- Diverse regulatory environments and economic conditions across regions create both challenges and opportunities for market entrants.
|
||||
|
||||
- **Competitive Advantages of CyMaIS:**
|
||||
- **Modularity and Flexibility:** Allows tailored solutions to meet a wide range of business needs
|
||||
- **Scalability:** Suitable for organizations from startups to multinational corporations
|
||||
- **Integration Capabilities:** Supports seamless incorporation of modern IT components along with advanced cybersecurity features
|
||||
|
||||
## 7. Opportunities and Challenges
|
||||
- **Opportunities:**
|
||||
- Rapid digital transformation across all regions creates a sustained demand for IT modernization
|
||||
- High potential in emerging markets where digital infrastructure is underdeveloped
|
||||
- Opportunities for strategic partnerships and government-driven digital initiatives
|
||||
|
||||
- **Challenges:**
|
||||
- Navigating diverse regulatory landscapes and varying levels of IT maturity
|
||||
- Intense global competition and pricing pressures
|
||||
- Continuously evolving cybersecurity threats and technological changes that necessitate ongoing innovation
|
||||
|
||||
## 8. Conclusion
|
||||
The global market presents significant opportunities for CyMaIS. With an estimated market volume of €1–1.5 trillion and millions of companies worldwide in need of modernized IT infrastructures, CyMaIS is well positioned to capture a diverse range of customers. Its modular and scalable solutions can meet the unique challenges and requirements of different markets, making it a competitive choice in the rapidly evolving field of digital transformation and cybersecurity.
|
||||
|
||||
## Sources
|
||||
- Analysis based on an interactive discussion with [ChatGPT](https://chat.openai.com) on April 11, 2025.
|
@@ -1,37 +0,0 @@
|
||||
# Global Market Diagrams
|
||||
|
||||
## 1. Global Digitalization Status (Pie Chart)
|
||||
```mermaid
|
||||
pie
|
||||
title Global Digitalization Status
|
||||
"Advanced Digitalization (30%)": 30
|
||||
"Moderate Digitalization (40%)": 40
|
||||
"Needs Significant Modernization (30%)": 30
|
||||
```
|
||||
*This pie chart shows an estimated global digitalization distribution: 30% of companies are advanced, 40% have moderate digitalization, and 30% require significant modernization.*
|
||||
|
||||
## 2. Global Investment Segments (Flowchart)
|
||||
```mermaid
|
||||
flowchart LR
|
||||
A[Global Investment Segments]
|
||||
B[Low-Priced (€10k-30k): 40%]
|
||||
C[Mid-Priced (€40k-70k): 40%]
|
||||
D[High-Priced (€100k+): 20%]
|
||||
|
||||
A --> B
|
||||
A --> C
|
||||
A --> D
|
||||
```
|
||||
*This flowchart illustrates the distribution of investment segments globally, indicating that roughly 40% of IT projects fall into both low and mid-price categories, with 20% in the high-price category.*
|
||||
|
||||
## 3. Overall Global Market Volume & Drivers (Flowchart)
|
||||
```mermaid
|
||||
flowchart TD
|
||||
A[Global IT Infrastructure Market]
|
||||
B[Market Volume: €1-1.5 Trillion]
|
||||
C[Drivers: Accelerated Digitalization, Cybersecurity, Global Investments]
|
||||
|
||||
A --> B
|
||||
A --> C
|
||||
```
|
||||
*This diagram outlines the global market volume (estimated between €1–1.5 trillion) and the key factors fueling growth, such as digital transformation and cybersecurity initiatives.*
|
@@ -1,53 +0,0 @@
|
||||
# Migration Feature
|
||||
|
||||
## Seamless Migration of Existing Software Solutions to CyMaIS
|
||||
|
||||
CyMaIS is designed to simplify the migration of existing software solutions and IT infrastructures. The focus is on protecting existing investments while enabling the benefits of a modern and unified platform.
|
||||
|
||||
---
|
||||
|
||||
## Integration of Existing Applications
|
||||
|
||||
Existing applications can be easily integrated into the [CyMaIS](https://example.com) dashboard. There is no need to migrate or modify existing software — CyMaIS provides a central interface to access and manage already deployed systems.
|
||||
|
||||
---
|
||||
|
||||
## Parallel Operation of Existing Infrastructure
|
||||
|
||||
CyMaIS supports a parallel operation model, allowing the existing IT infrastructure to run alongside CyMaIS without disruption. This enables a step-by-step migration strategy where applications and user groups can be transitioned gradually.
|
||||
|
||||
---
|
||||
|
||||
## Flexible User Management and Single Sign-On (SSO)
|
||||
|
||||
CyMaIS offers flexible user management by supporting multiple directory services:
|
||||
|
||||
- [Microsoft Active Directory (AD)](https://en.wikipedia.org/wiki/Active_Directory)
|
||||
- [LDAP (Lightweight Directory Access Protocol)](https://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol)
|
||||
|
||||
In both scenarios, centralized authentication is provided through [Keycloak](https://www.keycloak.org/), enabling modern [Single Sign-On (SSO)](https://en.wikipedia.org/wiki/Single_sign-on) capabilities — not only for applications managed by CyMaIS but also for existing external services.
|
||||
|
||||
---
|
||||
|
||||
## Key Points
|
||||
|
||||
- Simple migration of existing software solutions
|
||||
- Integration of existing apps into dashboard
|
||||
- Parallel operation of CyMaIS and existing infrastructure is fully supported
|
||||
- User management via [Active Directory](https://en.wikipedia.org/wiki/Active_Directory) or [LDAP](https://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol)
|
||||
- Central authentication with [SSO](https://en.wikipedia.org/wiki/Single_sign-on) using [Keycloak](https://www.keycloak.org/)
|
||||
|
||||
---
|
||||
|
||||
## Summary of Migration Benefits
|
||||
|
||||
| Feature | Description |
|
||||
|--------------------------------|-------------------------------------------------------------------|
|
||||
| Easy Application Integration | Integrate existing applications into the CyMaIS dashboard |
|
||||
| Parallel Operation Supported | Continue using your current infrastructure without disruption |
|
||||
| Flexible User Management | Support for AD and LDAP directory services |
|
||||
| Single Sign-On (SSO) | Centralized authentication via Keycloak |
|
||||
|
||||
---
|
||||
|
||||
CyMaIS enables a smooth and controlled migration path — customized to the individual needs of your organization.
|
@@ -2,7 +2,7 @@
|
||||
|
||||
## Ansible Vault Basics
|
||||
|
||||
CyMaIS uses Ansible Vault to protect sensitive data (e.g. passwords). Use these common commands:
|
||||
Infinito.Nexus uses Ansible Vault to protect sensitive data (e.g. passwords). Use these common commands:
|
||||
|
||||
### Edit an Encrypted File
|
||||
```bash
|
||||
|
@@ -1,6 +1,6 @@
|
||||
# 🚀 Deployment Guide
|
||||
|
||||
This section explains how to deploy and manage the **Cyber Master Infrastructure Solution (CyMaIS)** using Ansible. CyMaIS uses a collection of Ansible tasks, which are controlled via different **"modes"** — such as **updates**, **backups**, **resets**, and **cleanup** operations.
|
||||
This section explains how to deploy and manage **[Infinito.Nexus](https://infinito.nexus)** using Ansible. Infinito.Nexus uses a collection of Ansible tasks, which are controlled via different **"modes"** — such as **updates**, **backups**, **resets**, and **cleanup** operations.
|
||||
|
||||
---
|
||||
|
||||
@@ -9,27 +9,27 @@ This section explains how to deploy and manage the **Cyber Master Infrastructure
|
||||
Before deploying, ensure the following are in place:
|
||||
|
||||
- **🧭 Inventory File:** A valid Ansible inventory file that defines your target systems (servers, personal computers, etc.). Adjust example paths to your environment.
|
||||
- **📦 CyMaIS Installed:** Install via [Kevin's Package-Manager](https://github.com/kevinveenbirkenbach/package-manager).
|
||||
- **📦 Infinito.Nexus Installed:** Install via [Kevin's Package-Manager](https://github.com/kevinveenbirkenbach/package-manager).
|
||||
- **🔐 (Optional) Vault Password File:** If you don't want to enter your vault password interactively, create a password file.
|
||||
|
||||
---
|
||||
|
||||
## 📘 Show CyMaIS Help
|
||||
## 📘 Show Infinito.Nexus Help
|
||||
|
||||
To get a full overview of available options and usage instructions, run:
|
||||
|
||||
```bash
|
||||
cymais --help
|
||||
infinito --help
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 💡 Example Deploy Command
|
||||
|
||||
To deploy CyMaIS on a personal computer (e.g., a laptop), you can run:
|
||||
To deploy Infinito.Nexus on a personal computer (e.g., a laptop), you can run:
|
||||
|
||||
```bash
|
||||
cymais playbook \
|
||||
infinito playbook \
|
||||
--limit hp-spectre-x360 \
|
||||
--host-type personal-computer \
|
||||
--update \
|
||||
@@ -41,7 +41,7 @@ cymais playbook \
|
||||
|
||||
| Parameter | Description |
|
||||
|----------|-------------|
|
||||
| `playbook` | Executes the playbook subcommand of CyMaIS. |
|
||||
| `playbook` | Executes the playbook subcommand of Infinito.Nexus. |
|
||||
| `--limit hp-spectre-x360` | Limits execution to a specific host (`hp-spectre-x360`). |
|
||||
| `--host-type personal-computer` | Defines the host type. Default is `server`; here it is set to `personal-computer`. |
|
||||
| `--update` | Enables update mode to apply software or configuration updates. |
|
||||
@@ -64,7 +64,7 @@ To avoid typing your vault password interactively, you can provide a file:
|
||||
|
||||
## 🔍 Full Command-Line Reference
|
||||
|
||||
Here’s a breakdown of all available parameters from `cymais playbook --help`:
|
||||
Here’s a breakdown of all available parameters from `infinito playbook --help`:
|
||||
|
||||
| Argument | Description |
|
||||
|----------|-------------|
|
||||
@@ -87,7 +87,7 @@ Here’s a breakdown of all available parameters from `cymais playbook --help`:
|
||||
You can mix and match modes like this:
|
||||
|
||||
```bash
|
||||
cymais playbook --update --backup --cleanup pcs.yml
|
||||
infinito playbook --update --backup --cleanup pcs.yml
|
||||
```
|
||||
|
||||
This will update the system, create a backup, and clean up unnecessary files in one run.
|
||||
|
22
docs/guides/administrator/README.md
Normal file
22
docs/guides/administrator/README.md
Normal file
@@ -0,0 +1,22 @@
|
||||
# Administrator Guide
|
||||
|
||||
This guide is for **system administrators** who are deploying and managing Infinito.Nexus infrastructure.
|
||||
|
||||
## Setting Up Infinito.Nexus 🏗️
|
||||
Follow these guides to install and configure Infinito.Nexus:
|
||||
- [Setup Guide](SETUP_GUIDE.md)
|
||||
- [Configuration Guide](CONFIGURATION.md)
|
||||
- [Deployment Guide](DEPLOY.md)
|
||||
|
||||
## Key Responsibilities 🔧
|
||||
- **User Management** - Configure LDAP, Keycloak, and user permissions.
|
||||
- **Security & Backups** - Set up `sys-bkp-rmt-2-loc`, `svc-bkp-loc-2-usb`, and `core-security` roles.
|
||||
- **Application Hosting** - Deploy services like `Nextcloud`, `Matrix`, `Gitea`, and more.
|
||||
- **Networking & VPN** - Configure `WireGuard`, `OpenVPN`, and `Nginx Reverse Proxy`.
|
||||
|
||||
## Managing & Updating Infinito.Nexus 🔄
|
||||
- Regularly update services using `update-pacman`, or `update-apt`.
|
||||
- Monitor system health with `sys-ctl-hlth-btrfs`, `sys-ctl-hlth-webserver`, and `sys-ctl-hlth-docker-container`.
|
||||
- Automate system maintenance with `sys-lock`, `sys-ctl-cln-bkps`, and `sys-ctl-rpr-docker-hard`.
|
||||
|
||||
For more details, refer to the specific guides above.
|
@@ -1,22 +0,0 @@
|
||||
# Administrator Guide
|
||||
|
||||
This guide is for **system administrators** who are deploying and managing CyMaIS infrastructure.
|
||||
|
||||
## Setting Up CyMaIS 🏗️
|
||||
Follow these guides to install and configure CyMaIS:
|
||||
- [Setup Guide](SETUP_GUIDE.md)
|
||||
- [Configuration Guide](CONFIGURATION.md)
|
||||
- [Deployment Guide](DEPLOY.md)
|
||||
|
||||
## Key Responsibilities 🔧
|
||||
- **User Management** - Configure LDAP, Keycloak, and user permissions.
|
||||
- **Security & Backups** - Set up `sys-bkp-remote-to-local`, `svc-sys-bkp-data-to-usb`, and `core-security` roles.
|
||||
- **Application Hosting** - Deploy services like `Nextcloud`, `Matrix`, `Gitea`, and more.
|
||||
- **Networking & VPN** - Configure `WireGuard`, `OpenVPN`, and `Nginx Reverse Proxy`.
|
||||
|
||||
## Managing & Updating CyMaIS 🔄
|
||||
- Regularly update services using `update-docker`, `update-pacman`, or `update-apt`.
|
||||
- Monitor system health with `sys-hlth-btrfs`, `sys-hlth-webserver`, and `sys-hlth-docker-container`.
|
||||
- Automate system maintenance with `sys-lock`, `sys-cln-bkps-service`, and `sys-rpr-docker-hard`.
|
||||
|
||||
For more details, refer to the specific guides above.
|
@@ -1,27 +1,27 @@
|
||||
# Security Guidelines
|
||||
|
||||
CyMaIS is designed with security in mind. However, while following our guidelines can greatly improve your system’s security, no IT system can be 100% secure. Please report any vulnerabilities as soon as possible.
|
||||
Infinito.Nexus is designed with security in mind. However, while following our guidelines can greatly improve your system’s security, no IT system can be 100% secure. Please report any vulnerabilities as soon as possible.
|
||||
|
||||
Additional to the user securitry guidelines administrators have additional responsibilities to secure the entire system:
|
||||
|
||||
- **Deploy on an Encrypted Server**
|
||||
It is recommended to install CyMaIS on an encrypted server to prevent hosting providers from accessing end-user data. For a practical guide on setting up an encrypted server, refer to the [Hetzner Arch LUKS repository](https://github.com/kevinveenbirkenbach/hetzner-arch-luks) 🔐. (Learn more about [disk encryption](https://en.wikipedia.org/wiki/Disk_encryption) on Wikipedia.)
|
||||
It is recommended to install Infinito.Nexus on an encrypted server to prevent hosting providers from accessing end-user data. For a practical guide on setting up an encrypted server, refer to the [Hetzner Arch LUKS repository](https://github.com/kevinveenbirkenbach/hetzner-arch-luks) 🔐. (Learn more about [disk encryption](https://en.wikipedia.org/wiki/Disk_encryption) on Wikipedia.)
|
||||
|
||||
- **Centralized User Management & SSO**
|
||||
For robust authentication and central user management, set up CyMaIS using Keycloak and LDAP.
|
||||
For robust authentication and central user management, set up Infinito.Nexus using Keycloak and LDAP.
|
||||
This configuration enables centralized [Single Sign-On (SSO)](https://en.wikipedia.org/wiki/Single_sign-on) (SSO), simplifying user management and boosting security.
|
||||
|
||||
- **Enforce 2FA and Use a Password Manager**
|
||||
Administrators should also enforce [2FA](https://en.wikipedia.org/wiki/Multi-factor_authentication) and use a password manager with auto-generated passwords. We again recommend [KeePass](https://keepass.info/). The KeePass database can be stored securely in your Nextcloud instance and synchronized between devices.
|
||||
|
||||
- **Avoid Root Logins & Plaintext Passwords**
|
||||
CyMaIS forbids logging in via the root user or using simple passwords. Instead, an SSH key must be generated and transferred during system initialization. When executing commands as root, always use `sudo` (or, if necessary, `sudo su`—but only if you understand the risks). (More information on [SSH](https://en.wikipedia.org/wiki/Secure_Shell) and [sudo](https://en.wikipedia.org/wiki/Sudo) is available on Wikipedia.)
|
||||
Infinito.Nexus forbids logging in via the root user or using simple passwords. Instead, an SSH key must be generated and transferred during system initialization. When executing commands as root, always use `sudo` (or, if necessary, `sudo su`—but only if you understand the risks). (More information on [SSH](https://en.wikipedia.org/wiki/Secure_Shell) and [sudo](https://en.wikipedia.org/wiki/Sudo) is available on Wikipedia.)
|
||||
|
||||
- **Manage Inventories Securely**
|
||||
Your inventories for running CyMaIS should be managed in a separate repository and secured with tools such as [Ansible Vault](https://en.wikipedia.org/wiki/Encryption) 🔒. Sensitive credentials must never be stored in plaintext; use a password file to secure these details.
|
||||
Your inventories for running Infinito.Nexus should be managed in a separate repository and secured with tools such as [Ansible Vault](https://en.wikipedia.org/wiki/Encryption) 🔒. Sensitive credentials must never be stored in plaintext; use a password file to secure these details.
|
||||
|
||||
- **Reporting Vulnerabilities**
|
||||
If you discover a security vulnerability in CyMaIS, please report it immediately. We encourage proactive vulnerability reporting so that issues can be addressed as quickly as possible. Contact our security team at [security@cymais.cloud](mailto:security@cymais.cloud)
|
||||
If you discover a security vulnerability in Infinito.Nexus, please report it immediately. We encourage proactive vulnerability reporting so that issues can be addressed as quickly as possible. Contact our security team at [security@infinito.nexus](mailto:security@infinito.nexus)
|
||||
**DO NOT OPEN AN ISSUE.**
|
||||
|
||||
---
|
||||
|
@@ -1,26 +1,26 @@
|
||||
# Setup Guide
|
||||
|
||||
To setup CyMaIS follow this steps:
|
||||
To setup Infinito.Nexus follow this steps:
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before you setup CyMaIS you need to install [Kevin's Package Manager](https://github.com/kevinveenbirkenbach/package-manager).
|
||||
Before you setup Infinito.Nexus you need to install [Kevin's Package Manager](https://github.com/kevinveenbirkenbach/package-manager).
|
||||
Follow the installation instruction descriped [here](https://github.com/kevinveenbirkenbach/package-manager)
|
||||
|
||||
## Setup CyMaIS
|
||||
## Setup Infinito.Nexus
|
||||
|
||||
To setup CyMaIS execute:
|
||||
To setup Infinito.Nexus execute:
|
||||
|
||||
```bash
|
||||
pkgmgr install cymais
|
||||
pkgmgr install infinito
|
||||
```
|
||||
|
||||
This command will setup CyMaIS on your system with the alias **cymais**.
|
||||
This command will setup Infinito.Nexus on your system with the alias **infinito**.
|
||||
|
||||
## Get Help
|
||||
|
||||
After you setuped CyMaIS you can receive more help by executing:
|
||||
After you setuped Infinito.Nexus you can receive more help by executing:
|
||||
|
||||
```bash
|
||||
cymais --help
|
||||
infinito --help
|
||||
```
|
@@ -1,6 +1,6 @@
|
||||
## 📖 CyMaIS.Cloud Ansible & Python Directory Guide
|
||||
## 📖 Infinito.Nexus Ansible & Python Directory Guide
|
||||
|
||||
This document provides a **decision matrix** for when to use each default Ansible plugin and module directory in the context of **CyMaIS.Cloud development** with Ansible and Python. It links to official docs, explains use-cases, and points back to our conversation.
|
||||
This document provides a **decision matrix** for when to use each default Ansible plugin and module directory in the context of **Infinito.Nexus development** with Ansible and Python. It links to official docs, explains use-cases, and points back to our conversation.
|
||||
|
||||
---
|
||||
|
||||
@@ -31,12 +31,12 @@ ansible-repo/
|
||||
|
||||
### 🎯 Decision Matrix: Which Folder for What?
|
||||
|
||||
| Folder | Type | Use-Case | Example (CyMaIS.Cloud) | Emoji |
|
||||
| Folder | Type | Use-Case | Example (Infinito.Nexus) | Emoji |
|
||||
| -------------------- | -------------------- | ---------------------------------------- | ----------------------------------------------------- | ----- |
|
||||
| `library/` | **Module** | Write idempotent actions | `cloud_network.py`: manage VPCs, subnets | 📦 |
|
||||
| `filter_plugins/` | **Filter plugin** | Jinja2 data transforms in templates/vars | `to_camel_case.py`: convert keys for API calls | 🔍 |
|
||||
| `lookup_plugins/` | **Lookup plugin** | Fetch external/secure data at runtime | `vault_lookup.py`: pull secrets from CyMaIS Vault | 👉 |
|
||||
| `module_utils/` | **Utility library** | Shared Python code for modules | `cymais_client.py`: common API client base class | 🛠️ |
|
||||
| `lookup_plugins/` | **Lookup plugin** | Fetch external/secure data at runtime | `vault_lookup.py`: pull secrets from Infinito.Nexus Vault | 👉 |
|
||||
| `module_utils/` | **Utility library** | Shared Python code for modules | `infinito_client.py`: common API client base class | 🛠️ |
|
||||
| `action_plugins/` | **Action plugin** | Complex task orchestration wrappers | `deploy_stack.py`: sequence Terraform + Ansible steps | ⚙️ |
|
||||
| `callback_plugins/` | **Callback plugin** | Customize log/report behavior | `notify_slack.py`: send playbook status to Slack | 📣 |
|
||||
| `inventory_plugins/` | **Inventory plugin** | Dynamic host/group sources | `azure_inventory.py`: list hosts from Azure tags | 🌐 |
|
||||
@@ -96,16 +96,16 @@ ansible-repo/
|
||||
|
||||
---
|
||||
|
||||
### 🚀 CyMaIS.Cloud Best Practices
|
||||
### 🚀 Infinito.Nexus Best Practices
|
||||
|
||||
* **Organize modules** by service under `library/cloud/` (e.g., `vm`, `network`, `storage`).
|
||||
* **Shared client code** in `module_utils/cymais/` for authentication, request handling.
|
||||
* **Secrets lookup** via `lookup_plugins/vault_lookup.py` pointing to CyMaIS Vault.
|
||||
* **Shared client code** in `module_utils/infinito/` for authentication, request handling.
|
||||
* **Secrets lookup** via `lookup_plugins/vault_lookup.py` pointing to Infinito.Nexus Vault.
|
||||
* **Filters** to normalize data formats from cloud APIs (e.g., `snake_to_camel`).
|
||||
* **Callbacks** to stream playbook results into CyMaIS Monitoring.
|
||||
* **Callbacks** to stream playbook results into Infinito.Nexus Monitoring.
|
||||
|
||||
Use this matrix as your **single source of truth** when extending Ansible for CyMaIS.Cloud! 👍
|
||||
Use this matrix as your **single source of truth** when extending Ansible for Infinito.Nexus! 👍
|
||||
|
||||
---
|
||||
---
|
||||
|
||||
This matrix was created with the help of ChatGPT 🤖—see our conversation [here](https://chatgpt.com/canvas/shared/682b1a62d6dc819184ecdc696c51290a).
|
||||
|
@@ -1,11 +1,11 @@
|
||||
Developer Guide
|
||||
===============
|
||||
|
||||
Welcome to the **CyMaIS Developer Guide**! This guide provides essential information for developers who want to contribute to the CyMaIS open-source project.
|
||||
Welcome to the **Infinito.Nexus Developer Guide**! This guide provides essential information for developers who want to contribute to the Infinito.Nexus open-source project.
|
||||
|
||||
Explore CyMaIS Solutions
|
||||
Explore Infinito.Nexus Solutions
|
||||
------------------------
|
||||
CyMaIS offers various solutions for IT infrastructure automation. Learn more about the available applications:
|
||||
Infinito.Nexus offers various solutions for IT infrastructure automation. Learn more about the available applications:
|
||||
|
||||
- :doc:`../../../roles/application_glosar`
|
||||
- :doc:`../../../roles/application_categories`
|
||||
@@ -16,21 +16,21 @@ For Developers
|
||||
Understanding Ansible Roles
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
CyMaIS is powered by **Ansible** roles to automate deployments. Developers can explore the technical details of our roles here:
|
||||
Infinito.Nexus is powered by **Ansible** roles to automate deployments. Developers can explore the technical details of our roles here:
|
||||
|
||||
- :doc:`../../../roles/ansible_role_glosar`
|
||||
|
||||
Contributing to CyMaIS
|
||||
Contributing to Infinito.Nexus
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Want to contribute to the project or explore the source code? Check out our **GitHub repository**:
|
||||
|
||||
- `CyMaIS GitHub Repository <https://github.com/kevinveenbirkenbach/cymais/tree/master/roles>`_
|
||||
- `Infinito.Nexus GitHub Repository <https://s.infinito.nexus/code/tree/master/roles>`_
|
||||
|
||||
Contribution Guidelines
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
1. **Fork the Repository** – Start by forking the CyMaIS repository.
|
||||
1. **Fork the Repository** – Start by forking the Infinito.Nexus repository.
|
||||
2. **Create a New Branch** – Make changes in a dedicated branch.
|
||||
3. **Follow Coding Standards** – Ensure your code is well-documented and follows best practices.
|
||||
4. **Submit a Pull Request** – Once your changes are tested, submit a PR for review.
|
||||
@@ -42,12 +42,12 @@ For detailed guidelines, refer to:
|
||||
|
||||
Community & Support
|
||||
-------------------
|
||||
If you have questions or need help, visit the **CyMaIS Information Hub**:
|
||||
If you have questions or need help, visit the **Infinito.Nexus Information Hub**:
|
||||
|
||||
- `hub.cymais.cloud <https://hub.cymais.cloud>`_
|
||||
- `hub.infinito.nexus <https://hub.infinito.nexus>`_
|
||||
|
||||
This is the best place to ask questions, get support, and collaborate with other contributors.
|
||||
|
||||
Stay connected, collaborate, and help improve CyMaIS together!
|
||||
Stay connected, collaborate, and help improve Infinito.Nexus together!
|
||||
|
||||
Happy coding! 🚀
|
@@ -1,6 +1,6 @@
|
||||
# Enterprise Guide
|
||||
|
||||
Are you looking for a **reliable IT infrastructure** for your business or organization? **CyMaIS** is here to help!
|
||||
Are you looking for a **reliable IT infrastructure** for your business or organization? **Infinito.Nexus** is here to help!
|
||||
|
||||
## Who Can Benefit? 🎯
|
||||
✅ **Small & Medium Businesses** - IT infrastructure with everything included what you need. E.g. data clouds, mailservers, vpn's, homepages, documentation tools, etc.
|
||||
@@ -8,7 +8,7 @@ Are you looking for a **reliable IT infrastructure** for your business or organi
|
||||
✅ **NGOs & Organizations** - Secure, cost-effective infrastructure solutions on Open Source Base
|
||||
✅ **Journalists & Content Creators** - Host your content on your own servers, share it via the Fediverse and avoid cencorship
|
||||
|
||||
## Why Choose CyMaIS? 🚀
|
||||
## Why Choose Infinito.Nexus? 🚀
|
||||
- **Fast Deployment** - Get your IT setup running in minutes
|
||||
- **Security First** - Encrypted backups, 2FA, and secure logins
|
||||
- **Scalable & Customizable** - Adapts to your specific needs
|
@@ -1,15 +0,0 @@
|
||||
# Investor Guide
|
||||
|
||||
🚀 **CyMaIS is seeking investors** to expand its reach and continue development. With an increasing demand for automated IT solutions, **CyMaIS has the potential to revolutionize IT infrastructure management.**
|
||||
|
||||
## Market Potential 📈
|
||||
- **$500B+ Global IT Infrastructure Market**
|
||||
- Growing **open-source adoption** across enterprises
|
||||
- Increasing need for **automation & cybersecurity**
|
||||
|
||||
## Why Invest in CyMaIS? 🔥
|
||||
- **Unique Automation Approach** - Pre-configured roles for quick IT setup
|
||||
- **Security & Compliance Focus** - Built-in security best practices
|
||||
- **Scalability** - Modular framework adaptable to various industries
|
||||
|
||||
Interested in investing? Contact **[Kevin Veen-Birkenbach](mailto:kevin@veen.world)** to discuss partnership opportunities.
|
@@ -1,17 +0,0 @@
|
||||
# Enterprise Solutions
|
||||
|
||||
**CyMaIS** provides powerful **enterprise-grade IT infrastructure solutions**, enabling businesses to scale securely and efficiently.
|
||||
|
||||
## How CyMaIS Helps Enterprises 🔧
|
||||
- **Automated Deployment** - Set up secure servers & workstations effortlessly
|
||||
- **Advanced Security** - Integrated 2FA, LDAP, encrypted storage
|
||||
- **High Availability** - Scalable infrastructure for growing enterprises
|
||||
- **Compliance & Audit Logs** - Maintain regulatory standards
|
||||
|
||||
## Use Cases 💼
|
||||
- ✅ **Cloud-Based Infrastructure** (Docker, Kubernetes, CI/CD pipelines)
|
||||
- ✅ **Enterprise Networking & VPN** (WireGuard, OpenVPN, Firewall rules)
|
||||
- ✅ **Database & Business Apps** (PostgreSQL, Nextcloud, ERP systems)
|
||||
- ✅ **Custom Security Solutions** (Keycloak, LDAP, 2FA enforcement)
|
||||
|
||||
Interested? Contact [Kevin Veen-Birkenbach](mailto:kevin@veen.world) to discuss tailored enterprise solutions.
|
@@ -1,9 +1,9 @@
|
||||
# User Guide
|
||||
|
||||
Welcome to **CyMaIS**! This guide is designed for **end-users** who want to use cloud services, email, and collaboration tools securely and efficiently. Whether you're an **enterprise user** or an **individual**, CyMaIS provides a wide range of services tailored to your needs.
|
||||
Welcome to **Infinito.Nexus**! This guide is designed for **end-users** who want to use cloud services, email, and collaboration tools securely and efficiently. Whether you're an **enterprise user** or an **individual**, Infinito.Nexus provides a wide range of services tailored to your needs.
|
||||
|
||||
## What Can CyMaIS Do for You? 💡
|
||||
CyMaIS enables you to securely and efficiently use a variety of **cloud-based applications**, including:
|
||||
## What Can Infinito.Nexus Do for You? 💡
|
||||
Infinito.Nexus enables you to securely and efficiently use a variety of **cloud-based applications**, including:
|
||||
|
||||
### 📂 Cloud Storage & File Sharing
|
||||
- **Nextcloud** – Securely store, sync, and share files across devices.
|
||||
@@ -44,23 +44,23 @@ CyMaIS enables you to securely and efficiently use a variety of **cloud-based ap
|
||||
|
||||
## 🏢 Enterprise Users
|
||||
### How to Get Started 🏁
|
||||
If your organization provides CyMaIS services, follow these steps:
|
||||
If your organization provides Infinito.Nexus services, follow these steps:
|
||||
- Your **administrator** will provide login credentials.
|
||||
- Access **cloud services** via a web browser or mobile apps.
|
||||
- For support, contact your **system administrator**.
|
||||
|
||||
## 🏠 Private Users
|
||||
### How to Get Started 🏁
|
||||
If you're an **individual user**, you can sign up for CyMaIS services:
|
||||
- **Register an account** at [cymais.cloud](https://cymais.cloud).
|
||||
If you're an **individual user**, you can sign up for Infinito.Nexus services:
|
||||
- **Register an account** at [infinito.nexus](https://infinito.nexus).
|
||||
- Choose the applications and services you need.
|
||||
- Follow the setup guide and start using CyMaIS services immediately.
|
||||
- Follow the setup guide and start using Infinito.Nexus services immediately.
|
||||
|
||||
## 📚 Learn More
|
||||
Discover more about CyMaIS applications:
|
||||
Discover more about Infinito.Nexus applications:
|
||||
- :doc:`roles/application_glosar`
|
||||
- :doc:`roles/application_categories`
|
||||
|
||||
For further information, visit our **[Information Hub](https://hub.cymais.cloud)** for tutorials, FAQs, and community support.
|
||||
For further information, visit our **[Information Hub](https://hub.infinito.nexus)** for tutorials, FAQs, and community support.
|
||||
|
||||
You can also register for updates and support from our community.
|
@@ -1,6 +1,6 @@
|
||||
# Security Guidelines
|
||||
|
||||
CyMaIS is designed with security in mind. However, while following our guidelines can greatly improve your system’s security, no IT system can be 100% secure. Please report any vulnerabilities as soon as possible.
|
||||
Infinito.Nexus is designed with security in mind. However, while following our guidelines can greatly improve your system’s security, no IT system can be 100% secure. Please report any vulnerabilities as soon as possible.
|
||||
|
||||
For optimal personal security, we **strongly recommend** the following:
|
||||
|
||||
@@ -12,12 +12,12 @@ For optimal personal security, we **strongly recommend** the following:
|
||||
Synchronize your password database across devices using the [Nextcloud Client](https://nextcloud.com/) 📱💻.
|
||||
|
||||
- **Use Encrypted Systems**
|
||||
We recommend running CyMaIS only on systems with full disk encryption. For example, Linux distributions such as [Manjaro](https://manjaro.org/) (based on ArchLinux) with desktop environments like [GNOME](https://en.wikipedia.org/wiki/GNOME) provide excellent security. (Learn more about [disk encryption](https://en.wikipedia.org/wiki/Disk_encryption) on Wikipedia.)
|
||||
We recommend running Infinito.Nexus only on systems with full disk encryption. For example, Linux distributions such as [Manjaro](https://manjaro.org/) (based on ArchLinux) with desktop environments like [GNOME](https://en.wikipedia.org/wiki/GNOME) provide excellent security. (Learn more about [disk encryption](https://en.wikipedia.org/wiki/Disk_encryption) on Wikipedia.)
|
||||
|
||||
- **Beware of Phishing and Social Engineering**
|
||||
Always verify email senders, avoid clicking on unknown links, and never share your passwords or 2FA codes with anyone. (Learn more about [Phishing](https://en.wikipedia.org/wiki/Phishing) and [Social Engineering](https://en.wikipedia.org/wiki/Social_engineering_(security)) on Wikipedia.)
|
||||
|
||||
Following these guidelines will significantly enhance your personal security—but remember, no system is completely immune to risk.
|
||||
|
||||
A tutorial on how to set up secure password management can be found [here](https://blog.veen.world/blog/2025/04/04/%f0%9f%9b%a1%ef%b8%8f-keepassxc-cymais-cloud-the-ultimate-guide-to-cross-device-password-security/)
|
||||
A tutorial on how to set up secure password management can be found [here](https://blog.veen.world/blog/2025/04/04/%f0%9f%9b%a1%ef%b8%8f-keepassxc-infinito-cloud-the-ultimate-guide-to-cross-device-password-security/)
|
||||
---
|
@@ -1,23 +0,0 @@
|
||||
# Company Vision — CyMaIS
|
||||
|
||||
## Empowering Digital Sovereignty for Everyone.
|
||||
|
||||
CyMaIS is more than just software — it is a movement for digital independence, resilience, and transparency.
|
||||
|
||||
We believe that secure, self-hosted IT infrastructure must be accessible to everyone — regardless of company size, technical expertise, or budget.
|
||||
|
||||
### Our Mission
|
||||
- Democratize access to secure IT infrastructure
|
||||
- Enable data sovereignty and privacy for individuals and organizations
|
||||
- Reduce global dependency on monopolistic cloud providers
|
||||
- Promote Open Source, transparency, and community-driven innovation
|
||||
- Build resilient digital ecosystems in uncertain times
|
||||
|
||||
### Long-Term Goal
|
||||
We want to establish CyMaIS as the leading European and global alternative to centralized cloud platforms — open, modular, and self-sovereign.
|
||||
|
||||
Our vision is a future where every person and organization owns their infrastructure — free from control, censorship, and vendor lock-ins.
|
||||
|
||||
---
|
||||
|
||||
> *CyMaIS — Empowering a Sovereign Digital Future.*
|
@@ -1,28 +0,0 @@
|
||||
# Product Vision — CyMaIS Platform
|
||||
|
||||
## The Universal Automation Platform for Self-Hosted IT Infrastructure.
|
||||
|
||||
CyMaIS provides a modular, Open Source infrastructure automation platform that enables secure and scalable IT environments — for individuals, SMEs, NGOs, and enterprises.
|
||||
|
||||
### Key Product Goals
|
||||
- Enterprise-grade infrastructure automation for everyone
|
||||
- Rapid deployment of servers, clients, and cloud-native services
|
||||
- Modular role-based architecture (VPN, Backup, Security, Monitoring, Web Services, IAM)
|
||||
- Seamless integration of existing systems without forced migration
|
||||
- Infrastructure-as-Code and reproducible deployments
|
||||
- Reduced operational IT costs and vendor lock-ins
|
||||
- Security by Design (encryption, 2FA, auditing, hardening)
|
||||
- Support for decentralized protocols like ActivityPub, Matrix, Email
|
||||
|
||||
### Long-Term Product Vision
|
||||
CyMaIS will become the central platform for:
|
||||
|
||||
- Automating any self-hosted infrastructure within minutes
|
||||
- Maintaining full data control and regulatory compliance
|
||||
- Empowering organizations to build their own sovereign cloud ecosystem
|
||||
- Breaking the dependency on centralized and proprietary cloud services
|
||||
|
||||
---
|
||||
|
||||
> *CyMaIS — The Future of Self-Hosted Infrastructure.*
|
||||
> *Secure. Automated. Sovereign.*
|
@@ -1,33 +0,0 @@
|
||||
# Vision Statement
|
||||
|
||||
This is the Vision Statement for [CyMaIS](https://cymais.cloud), outlining our future goals and direction.
|
||||
|
||||
## Short
|
||||
|
||||
CyMaIS aims to empower individuals, businesses, NGOs, and enterprises with a secure, scalable, and decentralized IT infrastructure solution that ensures data sovereignty, promotes Open Source innovation, and reduces reliance on monopolistic cloud providers.
|
||||
|
||||
## Explanation
|
||||
|
||||
At the core of our mission is the development of a groundbreaking tool designed to address the inherent problems in managing IT infrastructure today, for individuals, businesses, non-governmental organizations (NGOs), and large enterprises alike. From the rising costs of monopolistic cloud services to the loss of data sovereignty, security concerns, and dependency on centralized cloud providers, we aim to provide an alternative that empowers users, organizations, and businesses to regain control over their data and infrastructure.
|
||||
|
||||
Our vision is to create a fully automated solution that enables all users, regardless of size or industry, to establish a secure, scalable, and self-managed IT infrastructure. This tool will break down the complexities of IT infrastructure setup, making it faster, simpler, and more secure, while being accessible to everyone—from individuals and grassroots organizations to large-scale enterprises.
|
||||
|
||||
Grounded in Open Source principles, this solution will champion transparency, security, and innovation. It will be adaptable and flexible, offering a digital infrastructure that evolves alongside the diverse needs of businesses, organizations, and communities, all while maintaining a focus on usability and accessibility.
|
||||
|
||||
We envision a future where users and organizations are no longer at the mercy of monopolistic cloud providers, where they can securely manage their own data and infrastructure. This future will see individuals and NGOs empowered with the same capabilities as large enterprises—ensuring that people of all scales can maintain control and sovereignty over their digital lives, free from external manipulation.
|
||||
|
||||
CyMaIS will democratize access to advanced IT infrastructure solutions, providing security, flexibility, and scalability for all—from small NGOs to large multinational enterprises—without the cost and dependence on centralized, proprietary cloud services. By utilizing Open Source, our solution will meet the highest standards of security while fostering a collaborative, community-driven approach to innovation and continuous improvement.
|
||||
|
||||
Moreover, our vision goes beyond just IT infrastructure; it extends to the broader goal of democratizing the internet itself. By integrating decentralized protocols like **ActivityPub**, **email**, and **Matrix**, we aim to restore the foundational principles of a decentralized, resilient internet. In today’s world, marked by political tensions, wars, and uncertainty, the importance of resilient, distributed infrastructures has never been greater. CyMaIS will enable all users—from individuals to NGOs and large enterprises—to remain independent and secure, ensuring that control over data and communications stays in their hands, not under the dominance of monopolistic entities.
|
||||
|
||||
Ultimately, our vision is to redefine the way IT infrastructure is deployed and managed, offering a solution that is swift, secure, and scalable, capable of meeting the needs of businesses, individuals, NGOs, and large enterprises. CyMaIS will empower all stakeholders by providing a foundation for a decentralized, transparent, and resilient digital future—setting a new benchmark for security, reliability, and sovereignty in the digital age.
|
||||
|
||||
## Key Points
|
||||
- Empower people and institutions
|
||||
- Data sovereignty
|
||||
- Control over infrastructure
|
||||
- Automated infrastructure setup
|
||||
- Open Source
|
||||
- Decentralized Services
|
||||
- Scalable
|
||||
- Global resilience and security
|
27
filter_plugins/README.md
Normal file
27
filter_plugins/README.md
Normal file
@@ -0,0 +1,27 @@
|
||||
# Custom Filter Plugins for Infinito.Nexus
|
||||
|
||||
This directory contains custom **Ansible filter plugins** used within the Infinito.Nexus project.
|
||||
|
||||
## When to Use a Filter Plugin
|
||||
|
||||
- **Transform values:** Use filters to transform, extract, reformat, or compute values from existing variables or facts.
|
||||
- **Inline data manipulation:** Filters are designed for inline use in Jinja2 expressions (in templates, tasks, vars, etc.).
|
||||
- **No external lookups:** Filters only operate on data you explicitly pass to them and cannot access external files, the Ansible inventory, or runtime context.
|
||||
|
||||
### Examples
|
||||
|
||||
```jinja2
|
||||
{{ role_name | get_entity_name }}
|
||||
{{ my_list | unique }}
|
||||
{{ user_email | regex_replace('^(.+)@.*$', '\\1') }}
|
||||
````
|
||||
|
||||
## When *not* to Use a Filter Plugin
|
||||
|
||||
* If you need to **load data from an external source** (e.g., file, environment, API), use a lookup plugin instead.
|
||||
* If your logic requires **access to inventory, facts, or host-level information** that is not passed as a parameter.
|
||||
|
||||
## Further Reading
|
||||
|
||||
* [Ansible Filter Plugins Documentation](https://docs.ansible.com/ansible/latest/plugins/filter.html)
|
||||
* [Developing Ansible Filter Plugins](https://docs.ansible.com/ansible/latest/dev_guide/developing_plugins.html#developing-filter-plugins)
|
@@ -1,2 +0,0 @@
|
||||
# Todo
|
||||
- Refactor is_feature_enabled to one function
|
@@ -1,86 +0,0 @@
|
||||
from ansible.errors import AnsibleFilterError
|
||||
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {'alias_domains_map': self.alias_domains_map}
|
||||
|
||||
def alias_domains_map(self, apps, primary_domain):
|
||||
"""
|
||||
Build a map of application IDs to their alias domains.
|
||||
|
||||
- If no `domains` key → []
|
||||
- If `domains` exists but is an empty dict → return the original cfg
|
||||
- Explicit `aliases` are used (default appended if missing)
|
||||
- If only `canonical` defined and it doesn't include default, default is added
|
||||
- Invalid types raise AnsibleFilterError
|
||||
"""
|
||||
def parse_entry(domains_cfg, key, app_id):
|
||||
if key not in domains_cfg:
|
||||
return None
|
||||
entry = domains_cfg[key]
|
||||
if isinstance(entry, dict):
|
||||
values = list(entry.values())
|
||||
elif isinstance(entry, list):
|
||||
values = entry
|
||||
else:
|
||||
raise AnsibleFilterError(
|
||||
f"Unexpected type for 'domains.{key}' in application '{app_id}': {type(entry).__name__}"
|
||||
)
|
||||
for d in values:
|
||||
if not isinstance(d, str) or not d.strip():
|
||||
raise AnsibleFilterError(
|
||||
f"Invalid domain entry in '{key}' for application '{app_id}': {d!r}"
|
||||
)
|
||||
return values
|
||||
|
||||
def default_domain(app_id, primary):
|
||||
return f"{app_id}.{primary}"
|
||||
|
||||
# 1) Precompute canonical domains per app (fallback to default)
|
||||
canonical_map = {}
|
||||
for app_id, cfg in apps.items():
|
||||
domains_cfg = cfg.get('domains') or {}
|
||||
entry = domains_cfg.get('canonical')
|
||||
if entry is None:
|
||||
canonical_map[app_id] = [default_domain(app_id, primary_domain)]
|
||||
elif isinstance(entry, dict):
|
||||
canonical_map[app_id] = list(entry.values())
|
||||
elif isinstance(entry, list):
|
||||
canonical_map[app_id] = list(entry)
|
||||
else:
|
||||
raise AnsibleFilterError(
|
||||
f"Unexpected type for 'domains.canonical' in application '{app_id}': {type(entry).__name__}"
|
||||
)
|
||||
|
||||
# 2) Build alias list per app
|
||||
result = {}
|
||||
for app_id, cfg in apps.items():
|
||||
domains_cfg = cfg.get('domains')
|
||||
|
||||
# no domains key → no aliases
|
||||
if domains_cfg is None:
|
||||
result[app_id] = []
|
||||
continue
|
||||
|
||||
# empty domains dict → return the original cfg
|
||||
if isinstance(domains_cfg, dict) and not domains_cfg:
|
||||
result[app_id] = cfg
|
||||
continue
|
||||
|
||||
# otherwise, compute aliases
|
||||
aliases = parse_entry(domains_cfg, 'aliases', app_id) or []
|
||||
default = default_domain(app_id, primary_domain)
|
||||
has_aliases = 'aliases' in domains_cfg
|
||||
has_canon = 'canonical' in domains_cfg
|
||||
|
||||
if has_aliases:
|
||||
if default not in aliases:
|
||||
aliases.append(default)
|
||||
elif has_canon:
|
||||
canon = canonical_map.get(app_id, [])
|
||||
if default not in canon and default not in aliases:
|
||||
aliases.append(default)
|
||||
|
||||
result[app_id] = aliases
|
||||
|
||||
return result
|
@@ -1,21 +1,76 @@
|
||||
from ansible.errors import AnsibleFilterError
|
||||
import sys
|
||||
import os
|
||||
|
||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
||||
from module_utils.entity_name_utils import get_entity_name
|
||||
from module_utils.role_dependency_resolver import RoleDependencyResolver
|
||||
from typing import Iterable
|
||||
|
||||
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {'canonical_domains_map': self.canonical_domains_map}
|
||||
|
||||
def canonical_domains_map(self, apps, primary_domain):
|
||||
def canonical_domains_map(
|
||||
self,
|
||||
apps,
|
||||
PRIMARY_DOMAIN,
|
||||
*,
|
||||
recursive: bool = False,
|
||||
roles_base_dir: str | None = None,
|
||||
seed: Iterable[str] | None = None,
|
||||
):
|
||||
"""
|
||||
Maps applications to their canonical domains, checking for conflicts
|
||||
and ensuring all domains are valid and unique across applications.
|
||||
Build { app_id: [canonical domains...] }.
|
||||
|
||||
Rekursiv werden nur include_role, import_role und meta/main.yml:dependencies verfolgt.
|
||||
'run_after' wird hier absichtlich ignoriert.
|
||||
"""
|
||||
if not isinstance(apps, dict):
|
||||
raise AnsibleFilterError(f"'apps' must be a dict, got {type(apps).__name__}")
|
||||
|
||||
app_keys = set(apps.keys())
|
||||
seed_keys = set(seed) if seed is not None else app_keys
|
||||
|
||||
if recursive:
|
||||
roles_base_dir = roles_base_dir or os.path.join(os.getcwd(), "roles")
|
||||
if not os.path.isdir(roles_base_dir):
|
||||
raise AnsibleFilterError(
|
||||
f"roles_base_dir '{roles_base_dir}' not found or not a directory."
|
||||
)
|
||||
|
||||
resolver = RoleDependencyResolver(roles_base_dir)
|
||||
discovered_roles = resolver.resolve_transitively(
|
||||
start_roles=seed_keys,
|
||||
resolve_include_role=True,
|
||||
resolve_import_role=True,
|
||||
resolve_dependencies=True,
|
||||
resolve_run_after=False,
|
||||
max_depth=None,
|
||||
)
|
||||
# all discovered roles that actually have config entries in `apps`
|
||||
target_apps = discovered_roles & app_keys
|
||||
else:
|
||||
target_apps = seed_keys
|
||||
|
||||
result = {}
|
||||
seen_domains = {}
|
||||
|
||||
for app_id, cfg in apps.items():
|
||||
domains_cfg = cfg.get('domains')
|
||||
for app_id in sorted(target_apps):
|
||||
cfg = apps.get(app_id)
|
||||
if cfg is None:
|
||||
continue
|
||||
if not str(app_id).startswith(("web-", "svc-db-")):
|
||||
continue
|
||||
if not isinstance(cfg, dict):
|
||||
raise AnsibleFilterError(
|
||||
f"Invalid configuration for application '{app_id}': expected dict, got {cfg!r}"
|
||||
)
|
||||
|
||||
domains_cfg = cfg.get('server', {}).get('domains', {})
|
||||
if not domains_cfg or 'canonical' not in domains_cfg:
|
||||
self._add_default_domain(app_id, primary_domain, seen_domains, result)
|
||||
self._add_default_domain(app_id, PRIMARY_DOMAIN, seen_domains, result)
|
||||
continue
|
||||
|
||||
canonical_domains = domains_cfg['canonical']
|
||||
@@ -23,12 +78,9 @@ class FilterModule(object):
|
||||
|
||||
return result
|
||||
|
||||
def _add_default_domain(self, app_id, primary_domain, seen_domains, result):
|
||||
"""
|
||||
Add the default domain for an application if no canonical domains are defined.
|
||||
Ensures the domain is unique across applications.
|
||||
"""
|
||||
default_domain = f"{app_id}.{primary_domain}"
|
||||
def _add_default_domain(self, app_id, PRIMARY_DOMAIN, seen_domains, result):
|
||||
entity_name = get_entity_name(app_id)
|
||||
default_domain = f"{entity_name}.{PRIMARY_DOMAIN}"
|
||||
if default_domain in seen_domains:
|
||||
raise AnsibleFilterError(
|
||||
f"Domain '{default_domain}' is already configured for "
|
||||
@@ -38,40 +90,21 @@ class FilterModule(object):
|
||||
result[app_id] = [default_domain]
|
||||
|
||||
def _process_canonical_domains(self, app_id, canonical_domains, seen_domains, result):
|
||||
"""
|
||||
Process the canonical domains for an application, handling both lists and dicts,
|
||||
and ensuring each domain is unique.
|
||||
"""
|
||||
if isinstance(canonical_domains, dict):
|
||||
self._process_canonical_domains_dict(app_id, canonical_domains, seen_domains, result)
|
||||
for _, domain in canonical_domains.items():
|
||||
self._validate_and_check_domain(app_id, domain, seen_domains)
|
||||
result[app_id] = canonical_domains.copy()
|
||||
elif isinstance(canonical_domains, list):
|
||||
self._process_canonical_domains_list(app_id, canonical_domains, seen_domains, result)
|
||||
for domain in canonical_domains:
|
||||
self._validate_and_check_domain(app_id, domain, seen_domains)
|
||||
result[app_id] = list(canonical_domains)
|
||||
else:
|
||||
raise AnsibleFilterError(
|
||||
f"Unexpected type for 'domains.canonical' in application '{app_id}': "
|
||||
f"Unexpected type for 'server.domains.canonical' in application '{app_id}': "
|
||||
f"{type(canonical_domains).__name__}"
|
||||
)
|
||||
|
||||
def _process_canonical_domains_dict(self, app_id, domains_dict, seen_domains, result):
|
||||
"""
|
||||
Process a dictionary of canonical domains for an application.
|
||||
"""
|
||||
for name, domain in domains_dict.items():
|
||||
self._validate_and_check_domain(app_id, domain, seen_domains)
|
||||
result[app_id] = domains_dict.copy()
|
||||
|
||||
def _process_canonical_domains_list(self, app_id, domains_list, seen_domains, result):
|
||||
"""
|
||||
Process a list of canonical domains for an application.
|
||||
"""
|
||||
for domain in domains_list:
|
||||
self._validate_and_check_domain(app_id, domain, seen_domains)
|
||||
result[app_id] = list(domains_list)
|
||||
|
||||
def _validate_and_check_domain(self, app_id, domain, seen_domains):
|
||||
"""
|
||||
Validate the domain and check if it has already been assigned to another application.
|
||||
"""
|
||||
if not isinstance(domain, str) or not domain.strip():
|
||||
raise AnsibleFilterError(
|
||||
f"Invalid domain entry in 'canonical' for application '{app_id}': {domain!r}"
|
||||
|
@@ -1,6 +1,14 @@
|
||||
from ansible.errors import AnsibleFilterError
|
||||
import hashlib
|
||||
import base64
|
||||
import sys
|
||||
import os
|
||||
|
||||
# Ensure module_utils is importable when this filter runs from Ansible
|
||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
||||
from module_utils.config_utils import get_app_conf
|
||||
from module_utils.get_url import get_url
|
||||
|
||||
|
||||
class FilterModule(object):
|
||||
"""
|
||||
@@ -12,18 +20,36 @@ class FilterModule(object):
|
||||
'build_csp_header': self.build_csp_header,
|
||||
}
|
||||
|
||||
# -------------------------------
|
||||
# Helpers
|
||||
# -------------------------------
|
||||
|
||||
@staticmethod
|
||||
def is_feature_enabled(applications: dict, feature: str, application_id: str) -> bool:
|
||||
"""
|
||||
Return True if applications[application_id].features[feature] is truthy.
|
||||
Returns True if applications[application_id].features[feature] is truthy.
|
||||
"""
|
||||
app = applications.get(application_id, {})
|
||||
return bool(app.get('features', {}).get(feature, False))
|
||||
return get_app_conf(
|
||||
applications,
|
||||
application_id,
|
||||
'features.' + feature,
|
||||
False,
|
||||
False
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def get_csp_whitelist(applications, application_id, directive):
|
||||
app = applications.get(application_id, {})
|
||||
wl = app.get('csp', {}).get('whitelist', {}).get(directive, [])
|
||||
"""
|
||||
Returns a list of additional whitelist entries for a given directive.
|
||||
Accepts both scalar and list in config; always returns a list.
|
||||
"""
|
||||
wl = get_app_conf(
|
||||
applications,
|
||||
application_id,
|
||||
'server.csp.whitelist.' + directive,
|
||||
False,
|
||||
[]
|
||||
)
|
||||
if isinstance(wl, list):
|
||||
return wl
|
||||
if wl:
|
||||
@@ -33,26 +59,45 @@ class FilterModule(object):
|
||||
@staticmethod
|
||||
def get_csp_flags(applications, application_id, directive):
|
||||
"""
|
||||
Dynamically extract all CSP flags for a given directive and return them as tokens,
|
||||
e.g., "'unsafe-eval'", "'unsafe-inline'", etc.
|
||||
Returns CSP flag tokens (e.g., "'unsafe-eval'", "'unsafe-inline'") for a directive,
|
||||
merging sane defaults with app config.
|
||||
Default: 'unsafe-inline' is enabled for style-src and style-src-elem.
|
||||
"""
|
||||
app = applications.get(application_id, {})
|
||||
flags = app.get('csp', {}).get('flags', {}).get(directive, {})
|
||||
tokens = []
|
||||
# Defaults that apply to all apps
|
||||
default_flags = {}
|
||||
if directive in ('style-src', 'style-src-elem'):
|
||||
default_flags = {'unsafe-inline': True}
|
||||
|
||||
for flag_name, enabled in flags.items():
|
||||
configured = get_app_conf(
|
||||
applications,
|
||||
application_id,
|
||||
'server.csp.flags.' + directive,
|
||||
False,
|
||||
{}
|
||||
)
|
||||
|
||||
# Merge defaults with configured flags (configured overrides defaults)
|
||||
merged = {**default_flags, **configured}
|
||||
|
||||
tokens = []
|
||||
for flag_name, enabled in merged.items():
|
||||
if enabled:
|
||||
tokens.append(f"'{flag_name}'")
|
||||
|
||||
return tokens
|
||||
|
||||
@staticmethod
|
||||
def get_csp_inline_content(applications, application_id, directive):
|
||||
"""
|
||||
Return inline script/style snippets to hash for a given CSP directive.
|
||||
Returns inline script/style snippets to hash for a given directive.
|
||||
Accepts both scalar and list in config; always returns a list.
|
||||
"""
|
||||
app = applications.get(application_id, {})
|
||||
snippets = app.get('csp', {}).get('hashes', {}).get(directive, [])
|
||||
snippets = get_app_conf(
|
||||
applications,
|
||||
application_id,
|
||||
'server.csp.hashes.' + directive,
|
||||
False,
|
||||
[]
|
||||
)
|
||||
if isinstance(snippets, list):
|
||||
return snippets
|
||||
if snippets:
|
||||
@@ -62,7 +107,7 @@ class FilterModule(object):
|
||||
@staticmethod
|
||||
def get_csp_hash(content):
|
||||
"""
|
||||
Compute the SHA256 hash of the given inline content and return
|
||||
Computes the SHA256 hash of the given inline content and returns
|
||||
a CSP token like "'sha256-<base64>'".
|
||||
"""
|
||||
try:
|
||||
@@ -72,6 +117,10 @@ class FilterModule(object):
|
||||
except Exception as exc:
|
||||
raise AnsibleFilterError(f"get_csp_hash failed: {exc}")
|
||||
|
||||
# -------------------------------
|
||||
# Main builder
|
||||
# -------------------------------
|
||||
|
||||
def build_csp_header(
|
||||
self,
|
||||
applications,
|
||||
@@ -81,68 +130,80 @@ class FilterModule(object):
|
||||
matomo_feature_name='matomo'
|
||||
):
|
||||
"""
|
||||
Build the Content-Security-Policy header value dynamically based on application settings.
|
||||
Inline hashes are read from applications[application_id].csp.hashes
|
||||
Builds the Content-Security-Policy header value dynamically based on application settings.
|
||||
- Flags (e.g., 'unsafe-eval', 'unsafe-inline') are read from server.csp.flags.<directive>,
|
||||
with sane defaults applied in get_csp_flags (always 'unsafe-inline' for style-src and style-src-elem).
|
||||
- Inline hashes are read from server.csp.hashes.<directive>.
|
||||
- Whitelists are read from server.csp.whitelist.<directive>.
|
||||
- Inline hashes are added only if the final tokens do NOT include 'unsafe-inline'.
|
||||
"""
|
||||
try:
|
||||
directives = [
|
||||
'default-src',
|
||||
'connect-src',
|
||||
'frame-ancestors',
|
||||
'frame-src',
|
||||
'script-src',
|
||||
'script-src-elem',
|
||||
'style-src',
|
||||
'font-src',
|
||||
'worker-src',
|
||||
'manifest-src',
|
||||
'media-src',
|
||||
'default-src', # Fallback source list for content types not explicitly listed
|
||||
'connect-src', # Allowed URLs for XHR, WebSockets, EventSource, fetch()
|
||||
'frame-ancestors', # Who may embed this page
|
||||
'frame-src', # Sources for nested browsing contexts (e.g., <iframe>)
|
||||
'script-src', # Sources for script execution
|
||||
'script-src-elem', # Sources for <script> elements
|
||||
'style-src', # Sources for inline styles and <style>/<link> elements
|
||||
'style-src-elem', # Sources for <style> and <link rel="stylesheet">
|
||||
'font-src', # Sources for fonts
|
||||
'worker-src', # Sources for workers
|
||||
'manifest-src', # Sources for web app manifests
|
||||
'media-src', # Sources for audio and video
|
||||
]
|
||||
|
||||
parts = []
|
||||
|
||||
for directive in directives:
|
||||
tokens = ["'self'"]
|
||||
|
||||
# unsafe-eval / unsafe-inline flags
|
||||
# 1) Load flags (includes defaults from get_csp_flags)
|
||||
flags = self.get_csp_flags(applications, application_id, directive)
|
||||
tokens += flags
|
||||
|
||||
# Matomo integration
|
||||
if (
|
||||
self.is_feature_enabled(applications, matomo_feature_name, application_id)
|
||||
and directive in ['script-src-elem', 'connect-src']
|
||||
):
|
||||
matomo_domain = domains.get('matomo')[0]
|
||||
if matomo_domain:
|
||||
tokens.append(f"{web_protocol}://{matomo_domain}")
|
||||
# 2) Allow fetching from internal CDN by default for selected directives
|
||||
if directive in ['script-src-elem', 'connect-src', 'style-src-elem']:
|
||||
tokens.append(get_url(domains, 'web-svc-cdn', web_protocol))
|
||||
|
||||
# ReCaptcha integration: allow loading scripts from Google if feature enabled
|
||||
# 3) Matomo integration if feature is enabled
|
||||
if directive in ['script-src-elem', 'connect-src']:
|
||||
if self.is_feature_enabled(applications, matomo_feature_name, application_id):
|
||||
tokens.append(get_url(domains, 'web-app-matomo', web_protocol))
|
||||
|
||||
# 4) ReCaptcha integration (scripts + frames) if feature is enabled
|
||||
if self.is_feature_enabled(applications, 'recaptcha', application_id):
|
||||
if directive in ['script-src-elem',"frame-src"]:
|
||||
if directive in ['script-src-elem', 'frame-src']:
|
||||
tokens.append('https://www.gstatic.com')
|
||||
tokens.append('https://www.google.com')
|
||||
|
||||
# Enable loading via ancestors
|
||||
if (
|
||||
self.is_feature_enabled(applications, 'portfolio_iframe', application_id)
|
||||
and directive == 'frame-ancestors'
|
||||
):
|
||||
domain = domains.get('web-app-port-ui')[0]
|
||||
sld_tld = ".".join(domain.split(".")[-2:]) # yields "example.com"
|
||||
tokens.append(f"{sld_tld}") # yields "*.example.com"
|
||||
# 5) Frame ancestors handling (desktop + logout support)
|
||||
if directive == 'frame-ancestors':
|
||||
if self.is_feature_enabled(applications, 'desktop', application_id):
|
||||
# Allow being embedded by the desktop app domain (and potentially its parent)
|
||||
domain = domains.get('web-app-desktop')[0]
|
||||
sld_tld = ".".join(domain.split(".")[-2:]) # e.g., example.com
|
||||
tokens.append(f"{sld_tld}")
|
||||
if self.is_feature_enabled(applications, 'logout', application_id):
|
||||
# Allow embedding via logout proxy and Keycloak app
|
||||
tokens.append(get_url(domains, 'web-svc-logout', web_protocol))
|
||||
tokens.append(get_url(domains, 'web-app-keycloak', web_protocol))
|
||||
|
||||
# whitelist
|
||||
# 6) Custom whitelist entries
|
||||
tokens += self.get_csp_whitelist(applications, application_id, directive)
|
||||
|
||||
# only add hashes if 'unsafe-inline' is NOT in flags
|
||||
if "'unsafe-inline'" not in flags:
|
||||
# 7) Add inline content hashes ONLY if final tokens do NOT include 'unsafe-inline'
|
||||
# (Check tokens, not flags, to include defaults and later modifications.)
|
||||
if "'unsafe-inline'" not in tokens:
|
||||
for snippet in self.get_csp_inline_content(applications, application_id, directive):
|
||||
tokens.append(self.get_csp_hash(snippet))
|
||||
|
||||
# Append directive
|
||||
parts.append(f"{directive} {' '.join(tokens)};")
|
||||
|
||||
# static img-src
|
||||
# 8) Static img-src directive (kept permissive for data/blob and any host)
|
||||
parts.append("img-src * data: blob:;")
|
||||
|
||||
return ' '.join(parts)
|
||||
|
||||
except Exception as exc:
|
||||
|
@@ -13,7 +13,8 @@ def append_csp_hash(applications, application_id, code_one_liner):
|
||||
|
||||
apps = copy.deepcopy(applications)
|
||||
app = apps[application_id]
|
||||
csp = app.setdefault('csp', {})
|
||||
server = app.setdefault('server', {})
|
||||
csp = server.setdefault('csp', {})
|
||||
hashes = csp.setdefault('hashes', {})
|
||||
|
||||
existing = hashes.get('script-src-elem', [])
|
||||
|
25
filter_plugins/docker_service_enabled.py
Normal file
25
filter_plugins/docker_service_enabled.py
Normal file
@@ -0,0 +1,25 @@
|
||||
class FilterModule(object):
|
||||
''' Custom filter to safely check if a docker service is enabled for an application_id '''
|
||||
|
||||
def filters(self):
|
||||
return {
|
||||
'is_docker_service_enabled': self.is_docker_service_enabled
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def is_docker_service_enabled(applications, application_id, service_name):
|
||||
"""
|
||||
Returns True if applications[application_id].docker.services[service_name].enabled is truthy,
|
||||
otherwise returns False (even if intermediate keys are missing).
|
||||
"""
|
||||
try:
|
||||
return bool(
|
||||
applications
|
||||
and application_id in applications
|
||||
and applications[application_id].get('docker', {})
|
||||
.get('services', {})
|
||||
.get(service_name, {})
|
||||
.get('enabled', False)
|
||||
)
|
||||
except Exception:
|
||||
return False
|
@@ -1,10 +1,13 @@
|
||||
from ansible.errors import AnsibleFilterError
|
||||
import sys, os
|
||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
||||
from module_utils.entity_name_utils import get_entity_name
|
||||
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {'domain_mappings': self.domain_mappings}
|
||||
|
||||
def domain_mappings(self, apps, primary_domain):
|
||||
def domain_mappings(self, apps, PRIMARY_DOMAIN):
|
||||
"""
|
||||
Build a flat list of redirect mappings for all apps:
|
||||
- source: each alias domain
|
||||
@@ -30,38 +33,39 @@ class FilterModule(object):
|
||||
)
|
||||
return values
|
||||
|
||||
def default_domain(app_id, primary):
|
||||
return f"{app_id}.{primary}"
|
||||
def default_domain(app_id:str, primary:str):
|
||||
subdomain = get_entity_name(app_id)
|
||||
return f"{subdomain}.{primary}"
|
||||
|
||||
# 1) Compute canonical domains per app (always as a list)
|
||||
canonical_map = {}
|
||||
for app_id, cfg in apps.items():
|
||||
domains_cfg = cfg.get('domains') or {}
|
||||
domains_cfg = cfg.get('server',{}).get('domains',{})
|
||||
entry = domains_cfg.get('canonical')
|
||||
if entry is None:
|
||||
canonical_map[app_id] = [default_domain(app_id, primary_domain)]
|
||||
canonical_map[app_id] = [default_domain(app_id, PRIMARY_DOMAIN)]
|
||||
elif isinstance(entry, dict):
|
||||
canonical_map[app_id] = list(entry.values())
|
||||
elif isinstance(entry, list):
|
||||
canonical_map[app_id] = list(entry)
|
||||
else:
|
||||
raise AnsibleFilterError(
|
||||
f"Unexpected type for 'domains.canonical' in application '{app_id}': {type(entry).__name__}"
|
||||
f"Unexpected type for 'server.domains.canonical' in application '{app_id}': {type(entry).__name__}"
|
||||
)
|
||||
|
||||
# 2) Compute alias domains per app
|
||||
alias_map = {}
|
||||
for app_id, cfg in apps.items():
|
||||
domains_cfg = cfg.get('domains')
|
||||
domains_cfg = cfg.get('server',{}).get('domains',{})
|
||||
if domains_cfg is None:
|
||||
alias_map[app_id] = []
|
||||
continue
|
||||
if isinstance(domains_cfg, dict) and not domains_cfg:
|
||||
alias_map[app_id] = [default_domain(app_id, primary_domain)]
|
||||
alias_map[app_id] = [default_domain(app_id, PRIMARY_DOMAIN)]
|
||||
continue
|
||||
|
||||
aliases = parse_entry(domains_cfg, 'aliases', app_id) or []
|
||||
default = default_domain(app_id, primary_domain)
|
||||
default = default_domain(app_id, PRIMARY_DOMAIN)
|
||||
has_aliases = 'aliases' in domains_cfg
|
||||
has_canonical = 'canonical' in domains_cfg
|
||||
|
||||
@@ -80,7 +84,7 @@ class FilterModule(object):
|
||||
mappings = []
|
||||
for app_id, sources in alias_map.items():
|
||||
canon_list = canonical_map.get(app_id, [])
|
||||
target = canon_list[0] if canon_list else default_domain(app_id, primary_domain)
|
||||
target = canon_list[0] if canon_list else default_domain(app_id, PRIMARY_DOMAIN)
|
||||
for src in sources:
|
||||
if src == target:
|
||||
# skip self-redirects
|
||||
|
19
filter_plugins/domain_tools.py
Normal file
19
filter_plugins/domain_tools.py
Normal file
@@ -0,0 +1,19 @@
|
||||
# filter_plugins/domain_tools.py
|
||||
# Returns the DNS zone (SLD.TLD) from a hostname.
|
||||
# Pure-Python, no external deps; handles simple cases. For exotic TLDs use tldextract (see note).
|
||||
from ansible.errors import AnsibleFilterError
|
||||
|
||||
def to_zone(hostname: str) -> str:
|
||||
if not isinstance(hostname, str) or not hostname.strip():
|
||||
raise AnsibleFilterError("to_zone: hostname must be a non-empty string")
|
||||
parts = hostname.strip(".").split(".")
|
||||
if len(parts) < 2:
|
||||
raise AnsibleFilterError(f"to_zone: '{hostname}' has no TLD part")
|
||||
# naive default: last two labels -> SLD.TLD
|
||||
return ".".join(parts[-2:])
|
||||
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {
|
||||
"to_zone": to_zone,
|
||||
}
|
54
filter_plugins/get_all_invokable_apps.py
Normal file
54
filter_plugins/get_all_invokable_apps.py
Normal file
@@ -0,0 +1,54 @@
|
||||
import os
|
||||
import yaml
|
||||
|
||||
def get_all_invokable_apps(
|
||||
categories_file=None,
|
||||
roles_dir=None
|
||||
):
|
||||
"""
|
||||
Return all application_ids (or role names) for roles whose directory names match invokable paths from categories.yml.
|
||||
:param categories_file: Path to categories.yml (default: roles/categories.yml at project root)
|
||||
:param roles_dir: Path to roles directory (default: roles/ at project root)
|
||||
:return: List of application_ids (or role names)
|
||||
"""
|
||||
# Resolve defaults
|
||||
here = os.path.dirname(os.path.abspath(__file__))
|
||||
project_root = os.path.abspath(os.path.join(here, '..'))
|
||||
if not categories_file:
|
||||
categories_file = os.path.join(project_root, 'roles', 'categories.yml')
|
||||
if not roles_dir:
|
||||
roles_dir = os.path.join(project_root, 'roles')
|
||||
|
||||
# Get invokable paths
|
||||
from filter_plugins.invokable_paths import get_invokable_paths
|
||||
invokable_paths = get_invokable_paths(categories_file)
|
||||
if not invokable_paths:
|
||||
return []
|
||||
|
||||
result = []
|
||||
if not os.path.isdir(roles_dir):
|
||||
return []
|
||||
|
||||
for role in sorted(os.listdir(roles_dir)):
|
||||
role_path = os.path.join(roles_dir, role)
|
||||
if not os.path.isdir(role_path):
|
||||
continue
|
||||
if any(role == p or role.startswith(p + '-') for p in invokable_paths):
|
||||
vars_file = os.path.join(role_path, 'vars', 'main.yml')
|
||||
if os.path.isfile(vars_file):
|
||||
try:
|
||||
with open(vars_file, 'r', encoding='utf-8') as f:
|
||||
data = yaml.safe_load(f) or {}
|
||||
app_id = data.get('application_id', role)
|
||||
except Exception:
|
||||
app_id = role
|
||||
else:
|
||||
app_id = role
|
||||
result.append(app_id)
|
||||
return sorted(result)
|
||||
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {
|
||||
'get_all_invokable_apps': get_all_invokable_apps
|
||||
}
|
10
filter_plugins/get_app_conf.py
Normal file
10
filter_plugins/get_app_conf.py
Normal file
@@ -0,0 +1,10 @@
|
||||
import sys, os
|
||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
||||
from module_utils.config_utils import get_app_conf, AppConfigKeyError,ConfigEntryNotSetError
|
||||
|
||||
class FilterModule(object):
|
||||
''' Infinito.Nexus application config extraction filters '''
|
||||
def filters(self):
|
||||
return {
|
||||
'get_app_conf': get_app_conf,
|
||||
}
|
@@ -1,51 +0,0 @@
|
||||
# filter_plugins/get_application_id.py
|
||||
|
||||
import os
|
||||
import re
|
||||
import yaml
|
||||
from ansible.errors import AnsibleFilterError
|
||||
|
||||
|
||||
def get_application_id(role_name):
|
||||
"""
|
||||
Jinja2/Ansible filter: given a role name, load its vars/main.yml and return the application_id value.
|
||||
"""
|
||||
# Construct path: assumes current working directory is project root
|
||||
vars_file = os.path.join(os.getcwd(), 'roles', role_name, 'vars', 'main.yml')
|
||||
|
||||
if not os.path.isfile(vars_file):
|
||||
raise AnsibleFilterError(f"Vars file not found for role '{role_name}': {vars_file}")
|
||||
|
||||
try:
|
||||
# Read entire file content to avoid lazy stream issues
|
||||
with open(vars_file, 'r', encoding='utf-8') as f:
|
||||
content = f.read()
|
||||
data = yaml.safe_load(content)
|
||||
except Exception as e:
|
||||
raise AnsibleFilterError(f"Error reading YAML from {vars_file}: {e}")
|
||||
|
||||
# Ensure parsed data is a mapping
|
||||
if not isinstance(data, dict):
|
||||
raise AnsibleFilterError(
|
||||
f"Error reading YAML from {vars_file}: expected mapping, got {type(data).__name__}"
|
||||
)
|
||||
|
||||
# Detect malformed YAML: no valid identifier-like keys
|
||||
valid_key_pattern = re.compile(r'^[A-Za-z_][A-Za-z0-9_]*$')
|
||||
if data and not any(valid_key_pattern.match(k) for k in data.keys()):
|
||||
raise AnsibleFilterError(f"Error reading YAML from {vars_file}: invalid top-level keys")
|
||||
|
||||
if 'application_id' not in data:
|
||||
raise AnsibleFilterError(f"Key 'application_id' not found in {vars_file}")
|
||||
|
||||
return data['application_id']
|
||||
|
||||
|
||||
class FilterModule(object):
|
||||
"""
|
||||
Ansible filter plugin entry point.
|
||||
"""
|
||||
def filters(self):
|
||||
return {
|
||||
'get_application_id': get_application_id,
|
||||
}
|
31
filter_plugins/get_category_entries.py
Normal file
31
filter_plugins/get_category_entries.py
Normal file
@@ -0,0 +1,31 @@
|
||||
# Custom Ansible filter to get all role names under "roles/" with a given prefix.
|
||||
|
||||
import os
|
||||
|
||||
def get_category_entries(prefix, roles_path="roles"):
|
||||
"""
|
||||
Returns a list of role names under the given roles_path
|
||||
that start with the specified prefix.
|
||||
|
||||
:param prefix: String prefix to match role names.
|
||||
:param roles_path: Path to the roles directory (default: 'roles').
|
||||
:return: List of matching role names.
|
||||
"""
|
||||
if not os.path.isdir(roles_path):
|
||||
return []
|
||||
|
||||
roles = []
|
||||
for entry in os.listdir(roles_path):
|
||||
full_path = os.path.join(roles_path, entry)
|
||||
if os.path.isdir(full_path) and entry.startswith(prefix):
|
||||
roles.append(entry)
|
||||
|
||||
return sorted(roles)
|
||||
|
||||
class FilterModule(object):
|
||||
""" Custom filters for Ansible """
|
||||
|
||||
def filters(self):
|
||||
return {
|
||||
"get_category_entries": get_category_entries
|
||||
}
|
@@ -1,16 +1,15 @@
|
||||
def is_feature_enabled(applications: dict, feature: str, application_id: str) -> bool:
|
||||
"""
|
||||
Return True if applications[application_id].features[feature] is truthy.
|
||||
"""
|
||||
app = applications.get(application_id, {})
|
||||
return bool(app.get('features', {}).get(feature, False))
|
||||
import sys, os
|
||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
||||
from module_utils.entity_name_utils import get_entity_name
|
||||
|
||||
def get_docker_compose(path_docker_compose_instances: str, application_id: str) -> dict:
|
||||
def get_docker_paths(application_id: str, path_docker_compose_instances: str) -> dict:
|
||||
"""
|
||||
Build the docker_compose dict based on
|
||||
path_docker_compose_instances and application_id.
|
||||
Uses get_entity_name to extract the entity name from application_id.
|
||||
"""
|
||||
base = f"{path_docker_compose_instances}{application_id}/"
|
||||
entity = get_entity_name(application_id)
|
||||
base = f"{path_docker_compose_instances}{entity}/"
|
||||
|
||||
return {
|
||||
'directories': {
|
||||
@@ -30,6 +29,5 @@ def get_docker_compose(path_docker_compose_instances: str, application_id: str)
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {
|
||||
'is_feature_enabled': is_feature_enabled,
|
||||
'get_docker_compose': get_docker_compose,
|
||||
'get_docker_paths': get_docker_paths,
|
||||
}
|
9
filter_plugins/get_entity_name.py
Normal file
9
filter_plugins/get_entity_name.py
Normal file
@@ -0,0 +1,9 @@
|
||||
import sys, os
|
||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
||||
from module_utils.entity_name_utils import get_entity_name
|
||||
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {
|
||||
'get_entity_name': get_entity_name,
|
||||
}
|
37
filter_plugins/get_service_name.py
Normal file
37
filter_plugins/get_service_name.py
Normal file
@@ -0,0 +1,37 @@
|
||||
"""
|
||||
Custom Ansible filter to build a systemctl unit name (always lowercase).
|
||||
|
||||
Rules:
|
||||
- If `systemctl_id` ends with '@': drop the '@' and return
|
||||
"{systemctl_id_without_at}.{software_name}@{suffix_handling}".
|
||||
- Else: return "{systemctl_id}.{software_name}{suffix_handling}".
|
||||
|
||||
Suffix handling:
|
||||
- Default "" → automatically pick:
|
||||
- ".service" if no '@' in systemctl_id
|
||||
- ".timer" if '@' in systemctl_id
|
||||
- Explicit False → no suffix at all
|
||||
- Any string → ".{suffix}" (lowercased)
|
||||
"""
|
||||
|
||||
def get_service_name(systemctl_id, software_name, suffix=""):
|
||||
sid = str(systemctl_id).strip().lower()
|
||||
software_name = str(software_name).strip().lower()
|
||||
|
||||
# Determine suffix
|
||||
if suffix is False:
|
||||
sfx = "" # no suffix at all
|
||||
elif suffix == "" or suffix is None:
|
||||
sfx = ".service"
|
||||
else:
|
||||
sfx = str(suffix).strip().lower()
|
||||
|
||||
if sid.endswith("@"):
|
||||
base = sid[:-1] # drop the trailing '@'
|
||||
return f"{base}.{software_name}@{sfx}"
|
||||
else:
|
||||
return f"{sid}.{software_name}{sfx}"
|
||||
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {"get_service_name": get_service_name}
|
24
filter_plugins/get_service_script_path.py
Normal file
24
filter_plugins/get_service_script_path.py
Normal file
@@ -0,0 +1,24 @@
|
||||
# filter_plugins/get_service_script_path.py
|
||||
# Custom Ansible filter to generate service script paths.
|
||||
|
||||
def get_service_script_path(systemctl_id, script_type):
|
||||
"""
|
||||
Build the path to a service script based on systemctl_id and type.
|
||||
|
||||
:param systemctl_id: The identifier of the system service.
|
||||
:param script_type: The script type/extension (e.g., sh, py, yml).
|
||||
:return: The full path string.
|
||||
"""
|
||||
if not systemctl_id or not script_type:
|
||||
raise ValueError("Both systemctl_id and script_type are required")
|
||||
|
||||
return f"/opt/scripts/systemctl/{systemctl_id}/script.{script_type}"
|
||||
|
||||
|
||||
class FilterModule(object):
|
||||
""" Custom filters for Ansible """
|
||||
|
||||
def filters(self):
|
||||
return {
|
||||
"get_service_script_path": get_service_script_path
|
||||
}
|
@@ -1,27 +1,11 @@
|
||||
#!/usr/bin/python
|
||||
import os
|
||||
import sys
|
||||
from ansible.errors import AnsibleFilterError
|
||||
import sys, os
|
||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
||||
from module_utils.get_url import get_url
|
||||
|
||||
class FilterModule(object):
|
||||
''' Infinito.Nexus application config extraction filters '''
|
||||
def filters(self):
|
||||
return {'get_url': self.get_url}
|
||||
|
||||
def get_url(self, domains, application_id, protocol):
|
||||
# 1) module_util-Verzeichnis in den Pfad aufnehmen
|
||||
plugin_dir = os.path.dirname(__file__)
|
||||
project_root = os.path.dirname(plugin_dir)
|
||||
module_utils = os.path.join(project_root, 'module_utils')
|
||||
if module_utils not in sys.path:
|
||||
sys.path.append(module_utils)
|
||||
|
||||
# 2) jetzt domain_utils importieren
|
||||
try:
|
||||
from domain_utils import get_domain
|
||||
except ImportError as e:
|
||||
raise AnsibleFilterError(f"could not import domain_utils: {e}")
|
||||
|
||||
# 3) Validierung und Aufruf
|
||||
if not isinstance(protocol, str):
|
||||
raise AnsibleFilterError("Protocol must be a string")
|
||||
return f"{protocol}://{ get_domain(domains, application_id) }"
|
||||
return {
|
||||
'get_url': get_url,
|
||||
}
|
||||
|
14
filter_plugins/has_env.py
Normal file
14
filter_plugins/has_env.py
Normal file
@@ -0,0 +1,14 @@
|
||||
import os
|
||||
|
||||
def has_env(application_id, base_dir='.'):
|
||||
"""
|
||||
Check if env.j2 exists under roles/{{ application_id }}/templates/env.j2
|
||||
"""
|
||||
path = os.path.join(base_dir, 'roles', application_id, 'templates', 'env.j2')
|
||||
return os.path.isfile(path)
|
||||
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {
|
||||
'has_env': has_env,
|
||||
}
|
@@ -1,122 +0,0 @@
|
||||
import os
|
||||
import yaml
|
||||
import re
|
||||
from ansible.errors import AnsibleFilterError
|
||||
|
||||
# in-memory cache: application_id → (parsed_yaml, is_nested)
|
||||
_cfg_cache = {}
|
||||
|
||||
def load_configuration(application_id, key):
|
||||
if not isinstance(key, str):
|
||||
raise AnsibleFilterError("Key must be a dotted-string, e.g. 'features.matomo'")
|
||||
|
||||
# locate roles/
|
||||
here = os.path.dirname(__file__)
|
||||
root = os.path.abspath(os.path.join(here, '..'))
|
||||
roles_dir = os.path.join(root, 'roles')
|
||||
if not os.path.isdir(roles_dir):
|
||||
raise AnsibleFilterError(f"Roles directory not found at {roles_dir}")
|
||||
|
||||
# first time? load & cache
|
||||
if application_id not in _cfg_cache:
|
||||
config_path = None
|
||||
|
||||
# 1) primary: vars/main.yml declares it
|
||||
for role in os.listdir(roles_dir):
|
||||
mv = os.path.join(roles_dir, role, 'vars', 'main.yml')
|
||||
if os.path.exists(mv):
|
||||
try:
|
||||
md = yaml.safe_load(open(mv)) or {}
|
||||
except Exception:
|
||||
md = {}
|
||||
if md.get('application_id') == application_id:
|
||||
cf = os.path.join(roles_dir, role, "config" , "main.yml")
|
||||
if not os.path.exists(cf):
|
||||
raise AnsibleFilterError(
|
||||
f"Role '{role}' declares '{application_id}' but missing config/main.yml"
|
||||
)
|
||||
config_path = cf
|
||||
break
|
||||
|
||||
# 2) fallback nested
|
||||
if config_path is None:
|
||||
for role in os.listdir(roles_dir):
|
||||
cf = os.path.join(roles_dir, role, "config" , "main.yml")
|
||||
if not os.path.exists(cf):
|
||||
continue
|
||||
try:
|
||||
dd = yaml.safe_load(open(cf)) or {}
|
||||
except Exception:
|
||||
dd = {}
|
||||
if isinstance(dd, dict) and application_id in dd:
|
||||
config_path = cf
|
||||
break
|
||||
|
||||
# 3) fallback flat
|
||||
if config_path is None:
|
||||
for role in os.listdir(roles_dir):
|
||||
cf = os.path.join(roles_dir, role, "config" , "main.yml")
|
||||
if not os.path.exists(cf):
|
||||
continue
|
||||
try:
|
||||
dd = yaml.safe_load(open(cf)) or {}
|
||||
except Exception:
|
||||
dd = {}
|
||||
# flat style: dict with all non-dict values
|
||||
if isinstance(dd, dict) and not any(isinstance(v, dict) for v in dd.values()):
|
||||
config_path = cf
|
||||
break
|
||||
|
||||
if config_path is None:
|
||||
return None
|
||||
|
||||
# parse once
|
||||
try:
|
||||
parsed = yaml.safe_load(open(config_path)) or {}
|
||||
except Exception as e:
|
||||
raise AnsibleFilterError(f"Error loading config/main.yml at {config_path}: {e}")
|
||||
|
||||
# detect nested vs flat
|
||||
is_nested = isinstance(parsed, dict) and (application_id in parsed)
|
||||
_cfg_cache[application_id] = (parsed, is_nested)
|
||||
|
||||
parsed, is_nested = _cfg_cache[application_id]
|
||||
|
||||
# pick base entry
|
||||
entry = parsed[application_id] if is_nested else parsed
|
||||
|
||||
# resolve dotted key
|
||||
key_parts = key.split('.')
|
||||
for part in key_parts:
|
||||
# Check if part has an index (e.g., domains.canonical[0])
|
||||
match = re.match(r'([^\[]+)\[([0-9]+)\]', part)
|
||||
if match:
|
||||
part, index = match.groups()
|
||||
index = int(index)
|
||||
if isinstance(entry, dict) and part in entry:
|
||||
entry = entry[part]
|
||||
# Check if entry is a list and access the index
|
||||
if isinstance(entry, list) and 0 <= index < len(entry):
|
||||
entry = entry[index]
|
||||
else:
|
||||
raise AnsibleFilterError(
|
||||
f"Index '{index}' out of range for key '{part}' in application '{application_id}'"
|
||||
)
|
||||
else:
|
||||
raise AnsibleFilterError(
|
||||
f"Key '{part}' not found under application '{application_id}'"
|
||||
)
|
||||
else:
|
||||
if isinstance(entry, dict) and part in entry:
|
||||
entry = entry[part]
|
||||
else:
|
||||
raise AnsibleFilterError(
|
||||
f"Key '{part}' not found under application '{application_id}'"
|
||||
)
|
||||
|
||||
return entry
|
||||
|
||||
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {'load_configuration': load_configuration}
|
39
filter_plugins/merge_with_defaults.py
Normal file
39
filter_plugins/merge_with_defaults.py
Normal file
@@ -0,0 +1,39 @@
|
||||
def merge_with_defaults(defaults, customs):
|
||||
"""
|
||||
Recursively merge two dicts (customs into defaults).
|
||||
For each top-level key in customs, ensure all dict keys from defaults are present (at least empty dict).
|
||||
Customs always take precedence.
|
||||
"""
|
||||
def merge_dict(d1, d2):
|
||||
# Recursively merge d2 into d1, d2 wins
|
||||
result = dict(d1) if d1 else {}
|
||||
for k, v in (d2 or {}).items():
|
||||
if k in result and isinstance(result[k], dict) and isinstance(v, dict):
|
||||
result[k] = merge_dict(result[k], v)
|
||||
else:
|
||||
result[k] = v
|
||||
return result
|
||||
|
||||
merged = {}
|
||||
# Union of all app-keys
|
||||
all_keys = set(defaults or {}).union(set(customs or {}))
|
||||
for app_key in all_keys:
|
||||
base = (defaults or {}).get(app_key, {})
|
||||
override = (customs or {}).get(app_key, {})
|
||||
|
||||
# Step 1: merge override into base
|
||||
result = merge_dict(base, override)
|
||||
|
||||
# Step 2: ensure all dict keys from base exist in result (at least {})
|
||||
for k, v in (base or {}).items():
|
||||
if isinstance(v, dict) and k not in result:
|
||||
result[k] = {}
|
||||
merged[app_key] = result
|
||||
return merged
|
||||
|
||||
class FilterModule(object):
|
||||
'''Custom merge filter for Infinito.Nexus: merge_with_defaults'''
|
||||
def filters(self):
|
||||
return {
|
||||
'merge_with_defaults': merge_with_defaults,
|
||||
}
|
@@ -19,8 +19,8 @@ class FilterModule(object):
|
||||
Usage in Jinja:
|
||||
{{ redirect_list
|
||||
| add_redirect_if_group('lam',
|
||||
'ldap.' ~ primary_domain,
|
||||
domains | get_domain('lam'),
|
||||
'ldap.' ~ PRIMARY_DOMAIN,
|
||||
domains | get_domain('web-app-lam'),
|
||||
group_names) }}
|
||||
"""
|
||||
try:
|
||||
|
@@ -1,5 +1,3 @@
|
||||
# filter_plugins/role_path_by_app_id.py
|
||||
|
||||
import os
|
||||
import glob
|
||||
import yaml
|
||||
|
@@ -1,55 +0,0 @@
|
||||
from jinja2 import Undefined
|
||||
|
||||
|
||||
def safe_placeholders(template: str, mapping: dict = None) -> str:
|
||||
"""
|
||||
Format a template like "{url}/logo.png".
|
||||
If mapping is provided (not None) and ANY placeholder is missing or maps to None/empty string, the function will raise KeyError.
|
||||
If mapping is None, missing placeholders or invalid templates return empty string.
|
||||
Numerical zero or False are considered valid values.
|
||||
Any other formatting errors return an empty string.
|
||||
"""
|
||||
# Non-string templates yield empty
|
||||
if not isinstance(template, str):
|
||||
return ''
|
||||
|
||||
class SafeDict(dict):
|
||||
def __getitem__(self, key):
|
||||
val = super().get(key, None)
|
||||
# Treat None or empty string as missing
|
||||
if val is None or (isinstance(val, str) and val == ''):
|
||||
raise KeyError(key)
|
||||
return val
|
||||
def __missing__(self, key):
|
||||
raise KeyError(key)
|
||||
|
||||
silent = mapping is None
|
||||
data = mapping or {}
|
||||
try:
|
||||
return template.format_map(SafeDict(data))
|
||||
except KeyError:
|
||||
if silent:
|
||||
return ''
|
||||
raise
|
||||
except Exception:
|
||||
return ''
|
||||
|
||||
def safe_var(value):
|
||||
"""
|
||||
Ansible filter: returns the value unchanged unless it's Undefined or None,
|
||||
in which case returns an empty string.
|
||||
Catches all exceptions and yields ''.
|
||||
"""
|
||||
try:
|
||||
if isinstance(value, Undefined) or value is None:
|
||||
return ''
|
||||
return value
|
||||
except Exception:
|
||||
return ''
|
||||
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {
|
||||
'safe_var': safe_var,
|
||||
'safe_placeholders': safe_placeholders,
|
||||
}
|
@@ -1,29 +0,0 @@
|
||||
# file: filter_plugins/safe_join.py
|
||||
"""
|
||||
Ansible filter plugin that joins a base string and a tail path safely.
|
||||
If the base is falsy (None, empty, etc.), returns an empty string.
|
||||
"""
|
||||
|
||||
def safe_join(base, tail):
|
||||
"""
|
||||
Safely join base and tail into a path or URL.
|
||||
|
||||
- base: the base string. If falsy, returns ''.
|
||||
- tail: the string to append. Leading/trailing slashes are handled.
|
||||
- On any exception, returns ''.
|
||||
"""
|
||||
try:
|
||||
if not base:
|
||||
return ''
|
||||
base_str = str(base).rstrip('/')
|
||||
tail_str = str(tail).lstrip('/')
|
||||
return f"{base_str}/{tail_str}"
|
||||
except Exception:
|
||||
return ''
|
||||
|
||||
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {
|
||||
'safe_join': safe_join,
|
||||
}
|
@@ -1,5 +1,3 @@
|
||||
# filter_plugins/text_filters.py
|
||||
|
||||
from ansible.errors import AnsibleFilterError
|
||||
import re
|
||||
|
||||
|
67
filter_plugins/timeout_start_sec_for_domains.py
Normal file
67
filter_plugins/timeout_start_sec_for_domains.py
Normal file
@@ -0,0 +1,67 @@
|
||||
# filter_plugins/timeout_start_sec_for_domains.py (nur Kern geändert)
|
||||
from ansible.errors import AnsibleFilterError
|
||||
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {
|
||||
"timeout_start_sec_for_domains": self.timeout_start_sec_for_domains,
|
||||
}
|
||||
|
||||
def timeout_start_sec_for_domains(
|
||||
self,
|
||||
domains_dict,
|
||||
include_www=True,
|
||||
per_domain_seconds=25,
|
||||
overhead_seconds=30,
|
||||
min_seconds=120,
|
||||
max_seconds=3600,
|
||||
):
|
||||
"""
|
||||
Args:
|
||||
domains_dict (dict | list[str] | str): Either the domain mapping dict
|
||||
(values can be str | list[str] | dict[str,str]) or an already
|
||||
flattened list of domains, or a single domain string.
|
||||
include_www (bool): If true, add 'www.<domain>' for non-www entries.
|
||||
...
|
||||
"""
|
||||
try:
|
||||
# Local flattener for dict inputs (like your generate_all_domains source)
|
||||
def _flatten_from_dict(domains_map):
|
||||
flat = []
|
||||
for v in (domains_map or {}).values():
|
||||
if isinstance(v, str):
|
||||
flat.append(v)
|
||||
elif isinstance(v, list):
|
||||
flat.extend(v)
|
||||
elif isinstance(v, dict):
|
||||
flat.extend(v.values())
|
||||
return flat
|
||||
|
||||
# Accept dict | list | str
|
||||
if isinstance(domains_dict, dict):
|
||||
flat = _flatten_from_dict(domains_dict)
|
||||
elif isinstance(domains_dict, list):
|
||||
flat = list(domains_dict)
|
||||
elif isinstance(domains_dict, str):
|
||||
flat = [domains_dict]
|
||||
else:
|
||||
raise AnsibleFilterError(
|
||||
"Expected 'domains_dict' to be dict | list | str."
|
||||
)
|
||||
|
||||
if include_www:
|
||||
base_unique = sorted(set(flat))
|
||||
www_variants = [f"www.{d}" for d in base_unique if not str(d).lower().startswith("www.")]
|
||||
flat.extend(www_variants)
|
||||
|
||||
unique_domains = sorted(set(flat))
|
||||
count = len(unique_domains)
|
||||
|
||||
raw = overhead_seconds + per_domain_seconds * count
|
||||
clamped = max(min_seconds, min(max_seconds, int(raw)))
|
||||
return clamped
|
||||
|
||||
except AnsibleFilterError:
|
||||
raise
|
||||
except Exception as exc:
|
||||
raise AnsibleFilterError(f"timeout_start_sec_for_domains failed: {exc}")
|
30
filter_plugins/to_primary_domain.py
Normal file
30
filter_plugins/to_primary_domain.py
Normal file
@@ -0,0 +1,30 @@
|
||||
from ansible.errors import AnsibleFilterError
|
||||
|
||||
try:
|
||||
import tld
|
||||
from tld.exceptions import TldDomainNotFound, TldBadUrl
|
||||
except ImportError:
|
||||
raise AnsibleFilterError("The 'tld' Python package is required for the to_primary_domain filter. Install with 'pip install tld'.")
|
||||
|
||||
class FilterModule(object):
|
||||
''' Custom filter to extract the primary/zone domain from a full domain name '''
|
||||
|
||||
def filters(self):
|
||||
return {
|
||||
'to_primary_domain': self.to_primary_domain,
|
||||
}
|
||||
|
||||
def to_primary_domain(self, domain):
|
||||
"""
|
||||
Converts a full domain or subdomain into its primary/zone domain.
|
||||
E.g. 'foo.bar.example.co.uk' -> 'example.co.uk'
|
||||
"""
|
||||
if not isinstance(domain, str):
|
||||
raise AnsibleFilterError("Input to to_primary_domain must be a string")
|
||||
try:
|
||||
res = tld.get_fld(domain, fix_protocol=True)
|
||||
if not res:
|
||||
raise AnsibleFilterError(f"Could not extract primary domain from: {domain}")
|
||||
return res
|
||||
except (TldDomainNotFound, TldBadUrl) as exc:
|
||||
raise AnsibleFilterError(str(exc))
|
146
filter_plugins/url_join.py
Normal file
146
filter_plugins/url_join.py
Normal file
@@ -0,0 +1,146 @@
|
||||
"""
|
||||
Ansible filter plugin that safely joins URL components from a list.
|
||||
- Requires a valid '<scheme>://' in the first element (any RFC-3986-ish scheme)
|
||||
- Preserves the double slash after the scheme, collapses other duplicate slashes
|
||||
- Supports query parts introduced by elements starting with '?' or '&'
|
||||
* first query element uses '?', subsequent use '&' (regardless of given prefix)
|
||||
* each query element must be exactly one 'key=value' pair
|
||||
* query elements may only appear after path elements; once query starts, no more path parts
|
||||
- Raises specific AnsibleFilterError messages for common misuse
|
||||
"""
|
||||
|
||||
import re
|
||||
from ansible.errors import AnsibleFilterError
|
||||
|
||||
_SCHEME_RE = re.compile(r'^([a-zA-Z][a-zA-Z0-9+.\-]*://)(.*)$')
|
||||
_QUERY_PAIR_RE = re.compile(r'^[^&=?#]+=[^&?#]*$') # key=value (no '&', no extra '?' or '#')
|
||||
|
||||
def _to_str_or_error(obj, index):
|
||||
"""Cast to str, raising a specific AnsibleFilterError with index context."""
|
||||
try:
|
||||
return str(obj)
|
||||
except Exception as e:
|
||||
raise AnsibleFilterError(
|
||||
f"url_join: unable to convert part at index {index} to string: {e}"
|
||||
)
|
||||
|
||||
def url_join(parts):
|
||||
"""
|
||||
Join a list of URL parts, URL-aware (scheme, path, query).
|
||||
|
||||
Args:
|
||||
parts (list|tuple): URL segments. First element MUST include '<scheme>://'.
|
||||
Path elements are plain strings.
|
||||
Query elements must start with '?' or '&' and contain exactly one 'key=value'.
|
||||
|
||||
Returns:
|
||||
str: Joined URL.
|
||||
|
||||
Raises:
|
||||
AnsibleFilterError: with specific, descriptive messages.
|
||||
"""
|
||||
# --- basic input validation ---
|
||||
if parts is None:
|
||||
raise AnsibleFilterError("url_join: parts must be a non-empty list; got None")
|
||||
if not isinstance(parts, (list, tuple)):
|
||||
raise AnsibleFilterError(
|
||||
f"url_join: parts must be a list/tuple; got {type(parts).__name__}"
|
||||
)
|
||||
if len(parts) == 0:
|
||||
raise AnsibleFilterError("url_join: parts must be a non-empty list")
|
||||
|
||||
# --- first element must carry a scheme ---
|
||||
first_raw = parts[0]
|
||||
if first_raw is None:
|
||||
raise AnsibleFilterError(
|
||||
"url_join: first element must include a scheme like 'https://'; got None"
|
||||
)
|
||||
|
||||
first_str = _to_str_or_error(first_raw, 0)
|
||||
m = _SCHEME_RE.match(first_str)
|
||||
if not m:
|
||||
raise AnsibleFilterError(
|
||||
"url_join: first element must start with '<scheme>://', e.g. 'https://example.com'; "
|
||||
f"got '{first_str}'"
|
||||
)
|
||||
|
||||
scheme = m.group(1) # e.g., 'https://', 'ftp://', 'myapp+v1://'
|
||||
after_scheme = m.group(2).lstrip('/') # strip only leading slashes right after scheme
|
||||
|
||||
# --- iterate parts: collect path parts until first query part; then only query parts allowed ---
|
||||
path_parts = []
|
||||
query_pairs = []
|
||||
in_query = False
|
||||
|
||||
for i, p in enumerate(parts):
|
||||
if p is None:
|
||||
# skip None silently (consistent with path_join-ish behavior)
|
||||
continue
|
||||
|
||||
s = _to_str_or_error(p, i)
|
||||
|
||||
# disallow additional scheme in later parts
|
||||
if i > 0 and "://" in s:
|
||||
raise AnsibleFilterError(
|
||||
f"url_join: only the first element may contain a scheme; part at index {i} "
|
||||
f"looks like a URL with scheme ('{s}')."
|
||||
)
|
||||
|
||||
# first element: replace with remainder after scheme and continue
|
||||
if i == 0:
|
||||
s = after_scheme
|
||||
|
||||
# check if this is a query element (starts with ? or &)
|
||||
if s.startswith('?') or s.startswith('&'):
|
||||
in_query = True
|
||||
raw_pair = s[1:] # strip the leading ? or &
|
||||
if raw_pair == '':
|
||||
raise AnsibleFilterError(
|
||||
f"url_join: query element at index {i} is empty; expected '?key=value' or '&key=value'"
|
||||
)
|
||||
# Disallow multiple pairs in a single element; enforce exactly one key=value
|
||||
if '&' in raw_pair:
|
||||
raise AnsibleFilterError(
|
||||
f"url_join: query element at index {i} must contain exactly one 'key=value' pair "
|
||||
f"without '&'; got '{s}'"
|
||||
)
|
||||
if not _QUERY_PAIR_RE.match(raw_pair):
|
||||
raise AnsibleFilterError(
|
||||
f"url_join: query element at index {i} must match 'key=value' (no extra '?', '&', '#'); got '{s}'"
|
||||
)
|
||||
query_pairs.append(raw_pair)
|
||||
else:
|
||||
# non-query element
|
||||
if in_query:
|
||||
# once query started, no more path parts allowed
|
||||
raise AnsibleFilterError(
|
||||
f"url_join: path element found at index {i} after query parameters started; "
|
||||
f"query parts must come last"
|
||||
)
|
||||
# normal path part: strip slashes to avoid duplicate '/'
|
||||
path_parts.append(s.strip('/'))
|
||||
|
||||
# normalize path: remove empty chunks
|
||||
path_parts = [p for p in path_parts if p != '']
|
||||
|
||||
# --- build result ---
|
||||
# path portion
|
||||
if path_parts:
|
||||
joined_path = "/".join(path_parts)
|
||||
base = scheme + joined_path
|
||||
else:
|
||||
# no path beyond scheme
|
||||
base = scheme
|
||||
|
||||
# query portion
|
||||
if query_pairs:
|
||||
base = base + "?" + "&".join(query_pairs)
|
||||
|
||||
return base
|
||||
|
||||
|
||||
class FilterModule(object):
|
||||
def filters(self):
|
||||
return {
|
||||
'url_join': url_join,
|
||||
}
|
21
filter_plugins/volume_path.py
Normal file
21
filter_plugins/volume_path.py
Normal file
@@ -0,0 +1,21 @@
|
||||
from ansible.errors import AnsibleFilterError
|
||||
|
||||
def docker_volume_path(volume_name: str) -> str:
|
||||
"""
|
||||
Returns the absolute filesystem path of a Docker volume.
|
||||
|
||||
Example:
|
||||
"akaunting_data" -> "/var/lib/docker/volumes/akaunting_data/_data/"
|
||||
"""
|
||||
if not volume_name or not isinstance(volume_name, str):
|
||||
raise AnsibleFilterError(f"Invalid volume name: {volume_name}")
|
||||
|
||||
return f"/var/lib/docker/volumes/{volume_name}/_data/"
|
||||
|
||||
class FilterModule(object):
|
||||
"""Docker volume path filters."""
|
||||
|
||||
def filters(self):
|
||||
return {
|
||||
"docker_volume_path": docker_volume_path,
|
||||
}
|
@@ -1,11 +1,20 @@
|
||||
CYMAIS_ENVIRONMENT: "production"
|
||||
SOFTWARE_NAME: "Infinito.Nexus" # Name of the software
|
||||
|
||||
# Deployment
|
||||
ENVIRONMENT: "production" # Possible values: production, development
|
||||
DEPLOYMENT_MODE: "single" # Use single, if you deploy on one server. Use cluster if you setup in cluster mode.
|
||||
|
||||
# If true, sensitive credentials will be masked or hidden from all Ansible task logs
|
||||
# Recommendet to set to true
|
||||
# @todo needs to be implemented everywhere
|
||||
MASK_CREDENTIALS_IN_LOGS: true
|
||||
|
||||
HOST_CURRENCY: "EUR"
|
||||
HOST_TIMEZONE: "UTC"
|
||||
|
||||
# https://en.wikipedia.org/wiki/ISO_639
|
||||
HOST_LL: "en" # Some applications are case sensitive
|
||||
HOST_LL_CC: "{{HOST_LL}}_{{HOST_LL | upper }}"
|
||||
HOST_LL_CC: "{{HOST_LL}}_GB"
|
||||
|
||||
HOST_DATE_FORMAT: "YYYY-MM-DD"
|
||||
HOST_TIME_FORMAT: "HH:mm"
|
||||
@@ -13,49 +22,63 @@ HOST_TIME_FORMAT: "HH:mm"
|
||||
HOST_THOUSAND_SEPARATOR: "."
|
||||
HOST_DECIMAL_MARK: ","
|
||||
|
||||
# Deployment mode
|
||||
deployment_mode: "single" # Use single, if you deploy on one server. Use cluster if you setup in cluster mode.
|
||||
# Web
|
||||
WEB_PROTOCOL: "https" # Web protocol type. Use https or http. If you run local you need to change it to http
|
||||
WEB_PORT: "{{ 443 if WEB_PROTOCOL == 'https' else 80 }}" # Default port web applications will listen to
|
||||
|
||||
web_protocol: "https" # Web protocol type. Use https or http. If you run local you need to change it to http
|
||||
web_port: "{{ 443 if web_protocol == 'https' else 80 }}" # Default port web applications will listen to
|
||||
# Websocket
|
||||
WEBSOCKET_PROTOCOL: "{{ 'wss' if WEB_PROTOCOL == 'https' else 'ws' }}"
|
||||
|
||||
## Domain
|
||||
primary_domain_tld: "localhost" # Top Level Domain of the server
|
||||
primary_domain_sld: "cymais" # Second Level Domain of the server
|
||||
primary_domain: "{{primary_domain_sld}}.{{primary_domain_tld}}" # Primary Domain of the server
|
||||
# Domain
|
||||
PRIMARY_DOMAIN: "localhost" # Primary Domain of the server
|
||||
|
||||
# Server Tact Variables
|
||||
DNS_PROVIDER: cloudflare # The DNS Provider\Registrar for the domain
|
||||
|
||||
## Ours in which the server is "awake" (100% working). Rest of the time is reserved for maintanance
|
||||
hours_server_awake: "0..23"
|
||||
|
||||
## Random delay for systemd timers to avoid peak loads.
|
||||
randomized_delay_sec: "5min"
|
||||
|
||||
# Runtime Variables for Process Control
|
||||
activate_all_timers: false # Activates all timers, independend if the handlers had been triggered
|
||||
|
||||
# This enables debugging in ansible and in the apps
|
||||
# You SHOULD NOT enable this on production servers
|
||||
enable_debug: false
|
||||
|
||||
dns_provider: cloudflare # The DNS Provider\Registrar for the domain
|
||||
HOSTING_PROVIDER: hetzner # Provider which hosts the server
|
||||
|
||||
# Which ACME method to use: webroot, cloudflare, or hetzner
|
||||
certbot_acme_challenge_method: "cloudflare"
|
||||
certbot_credentials_dir: /etc/certbot
|
||||
certbot_credentials_file: "{{ certbot_credentials_dir }}/{{ certbot_acme_challenge_method }}.ini"
|
||||
# certbot_dns_api_token # Define in inventory file
|
||||
certbot_dns_propagation_wait_seconds: 40 # How long should the script wait for DNS propagation before continuing
|
||||
certbot_flavor: san # Possible options: san (recommended, with a dns flavor like cloudflare, or hetzner), wildcard(doesn't function with www redirect), deicated
|
||||
certbot_webroot_path: "/var/lib/letsencrypt/" # Path used by Certbot to serve HTTP-01 ACME challenges
|
||||
certbot_cert_path: "/etc/letsencrypt/live" # Path containing active certificate symlinks for domains
|
||||
CERTBOT_ACME_CHALLENGE_METHOD: "cloudflare"
|
||||
CERTBOT_CREDENTIALS_DIR: /etc/certbot
|
||||
CERTBOT_CREDENTIALS_FILE: "{{ CERTBOT_CREDENTIALS_DIR }}/{{ CERTBOT_ACME_CHALLENGE_METHOD }}.ini"
|
||||
CERTBOT_DNS_PROPAGATION_WAIT_SECONDS: 300 # How long should the script wait for DNS propagation before continuing
|
||||
CERTBOT_FLAVOR: san # Possible options: san (recommended, with a dns flavor like cloudflare, or hetzner), wildcard(doesn't function with www redirect), dedicated
|
||||
|
||||
## Docker Role Specific Parameters
|
||||
docker_restart_policy: "unless-stopped"
|
||||
# Letsencrypt
|
||||
LETSENCRYPT_WEBROOT_PATH: "/var/lib/letsencrypt/" # Path where Certbot stores challenge webroot files
|
||||
LETSENCRYPT_BASE_PATH: "/etc/letsencrypt/" # Base directory containing Certbot configuration, account data, and archives
|
||||
LETSENCRYPT_LIVE_PATH: "{{ LETSENCRYPT_BASE_PATH }}live/" # Symlink directory for the current active certificate and private key
|
||||
|
||||
# helper
|
||||
_applications_nextcloud_oidc_flavor: "{{ applications.nextcloud.oidc.flavor | default('oidc_login' if applications.nextcloud.features.ldap | default(true) else 'sociallogin') }}"
|
||||
## Docker
|
||||
DOCKER_RESTART_POLICY: "unless-stopped" # Default restart parameter for docker containers
|
||||
DOCKER_VARS_FILE: "{{ playbook_dir }}/roles/docker-compose/vars/docker-compose.yml" # File containing docker compose variables used by other services
|
||||
DOCKER_WHITELISTET_ANON_VOLUMES: [] # Volumes which should be ignored during docker anonymous health check
|
||||
|
||||
# Asyn Confitguration
|
||||
ASYNC_ENABLED: "{{ not MODE_DEBUG | bool }}" # Activate async, deactivated for debugging
|
||||
ASYNC_TIME: "{{ 300 if ASYNC_ENABLED | bool else omit }}" # Run for mnax 5min
|
||||
ASYNC_POLL: "{{ 0 if ASYNC_ENABLED | bool else 10 }}" # Don't wait for task
|
||||
|
||||
# default value if not set via CLI (-e) or in playbook vars
|
||||
allowed_applications: []
|
||||
|
||||
# helper
|
||||
_applications_nextcloud_oidc_flavor: >-
|
||||
{{
|
||||
applications
|
||||
| get_app_conf(
|
||||
'web-app-nextcloud',
|
||||
'oidc.flavor',
|
||||
False,
|
||||
'oidc_login'
|
||||
if applications
|
||||
| get_app_conf('web-app-nextcloud','features.ldap',False, True)
|
||||
else 'sociallogin'
|
||||
)
|
||||
}}
|
||||
|
||||
# Role-based access control
|
||||
# @See https://en.wikipedia.org/wiki/Role-based_access_control
|
||||
RBAC:
|
||||
GROUP:
|
||||
NAME: "/roles" # Name of the group which holds the RBAC roles
|
||||
CLAIM: "groups" # Name of the claim containing the RBAC groups
|
@@ -1,8 +1,10 @@
|
||||
# Mode
|
||||
|
||||
# The following modes can be combined with each other
|
||||
mode_reset: false # Cleans up all CyMaIS files. It's necessary to run to whole playbook and not particial roles when using this function.
|
||||
mode_test: false # Executes test routines instead of productive routines
|
||||
mode_update: true # Executes updates
|
||||
mode_backup: true # Activates the backup before the update procedure
|
||||
mode_cleanup: true # Cleanup unused files and configurations
|
||||
MODE_TEST: false # Executes test routines instead of productive routines
|
||||
MODE_UPDATE: true # Executes updates
|
||||
MODE_DEBUG: false # This enables debugging in ansible and in the apps, You SHOULD NOT enable this on production servers
|
||||
MODE_RESET: false # Cleans up all Infinito.Nexus files. It's necessary to run to whole playbook and not particial roles when using this function.
|
||||
MODE_BACKUP: "{{ MODE_UPDATE }}" # Activates the backup before the update procedure
|
||||
MODE_CLEANUP: "{{ MODE_DEBUG }}" # Cleanup unused files and configurations
|
||||
MODE_ASSERT: "{{ MODE_DEBUG }}" # Executes validation tasks during the run.
|
||||
|
8
group_vars/all/02_email.yml
Normal file
8
group_vars/all/02_email.yml
Normal file
@@ -0,0 +1,8 @@
|
||||
# Email Configuration
|
||||
DEFAULT_SYSTEM_EMAIL:
|
||||
DOMAIN: "{{ PRIMARY_DOMAIN }}"
|
||||
HOST: "mail.{{ PRIMARY_DOMAIN }}"
|
||||
PORT: 465
|
||||
TLS: true # true for TLS and false for SSL
|
||||
START_TLS: false
|
||||
SMTP: true
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user