mirror of https://github.com/kevinveenbirkenbach/computer-playbook.git (synced 2025-09-09 19:57:16 +02:00)

Compare commits: a7b9467304...master (806 commits)
.dockerignore (new file, 13 lines)
@@ -0,0 +1,13 @@
# The .gitignore is the single point of truth for files which should be ignored.
# Add patterns, files and folders to the .gitignore and execute 'make build'
# NEVER TOUCH THE .dockerignore, BECAUSE IT ANYHOW WILL BE OVERWRITTEN

site.retry
*__pycache__
venv
*.log
*.bak
*tree.json
roles/list.json
*.pyc
.git
.gitattributes (new file, vendored, 1 line)
@@ -0,0 +1 @@
* text=auto eol=lf
.github/workflows/TODO.md (new file, vendored, 4 lines)
@@ -0,0 +1,4 @@
# Todo
- Create workflow test-server, which tests all server roles
- Create workflow test-desktop, which tests all desktop roles
- For the backup services, keep in mind to set up a tandem which pulls the backups from each other, to verify that this also works
.github/workflows/test-cli.yml (new file, vendored, 32 lines)
@@ -0,0 +1,32 @@
name: Build & Test Infinito.Nexus CLI in Docker Container

on:
  push:
    branches:
      - master
  pull_request:

jobs:
  build-and-test:
    runs-on: ubuntu-latest
    timeout-minutes: 15

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Build Docker image
        run: |
          docker build -t infinito:latest .

      - name: Clean build artifacts
        run: |
          docker run --rm infinito:latest make clean

      - name: Generate project outputs
        run: |
          docker run --rm infinito:latest make build

      - name: Run tests
        run: |
          docker run --rm infinito:latest make test
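The same checks can be replayed locally before pushing; a minimal sketch reusing the workflow's exact commands:

```sh
docker build -t infinito:latest .            # Build Docker image
docker run --rm infinito:latest make clean   # Clean build artifacts
docker run --rm infinito:latest make build   # Generate project outputs
docker run --rm infinito:latest make test    # Run tests
```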
.gitignore (vendored)
@@ -1,5 +1,12 @@
+# The .gitignore is the single point of truth for files which should be ignored.
+# Add patterns, files and folders to the .gitignore and execute 'make build'
+# NEVER TOUCH THE .dockerignore, BECAUSE IT ANYHOW WILL BE OVERWRITTEN
+
 site.retry
 *__pycache__
 venv
 *.log
 *.bak
+*tree.json
+roles/list.json
+*.pyc
@@ -1,6 +1,6 @@
 # Code of Conduct
 
-In order to foster a welcoming, open, and respectful community for everyone, we expect all contributors and participants in the CyMaIS project to abide by the following Code of Conduct.
+In order to foster a welcoming, open, and respectful community for everyone, we expect all contributors and participants in the Infinito.Nexus project to abide by the following Code of Conduct.
 
 ## Our Pledge
 
@@ -29,10 +29,10 @@ Our project maintainers and community leaders will review all reports and take a
 
 ## Scope
 
-This Code of Conduct applies to all spaces managed by the CyMaIS project, including GitHub repositories, mailing lists, chat rooms, and other communication channels.
+This Code of Conduct applies to all spaces managed by the Infinito.Nexus project, including GitHub repositories, mailing lists, chat rooms, and other communication channels.
 
 ## Acknowledgment
 
-By participating in the CyMaIS project, you agree to adhere to this Code of Conduct. We appreciate your cooperation in helping us build a positive and productive community.
+By participating in the Infinito.Nexus project, you agree to adhere to this Code of Conduct. We appreciate your cooperation in helping us build a positive and productive community.
 
-Thank you for contributing to a safe and inclusive CyMaIS community!
+Thank you for contributing to a safe and inclusive Infinito.Nexus community!
@@ -2,13 +2,13 @@
 
 <img src="https://cybermaster.space/wp-content/uploads/sites/7/2023/11/FVG_8364BW-scaled.jpg" width="300" style="float: right; margin-left: 30px;">
 
-My name is Kevin Veen-Birkenbach and I'm the author and founder of CyMaIS.
+My name is Kevin Veen-Birkenbach and I'm the author and founder of Infinito.Nexus.
 
-I'm glad to assist you in the implementation of your secure and scalable IT infrastrucutre solution with CyMaIS.
+I'm glad to assist you in the implementation of your secure and scalable IT infrastructure solution with Infinito.Nexus.
 
 My expertise in server administration, digital corporate infrastructure, custom software, and information security, all underpinned by a commitment to Open Source solutions, guarantees that your IT setup meets the highest industry standards.
 
-Discover how CyMaIS can transform your IT landscape.
+Discover how Infinito.Nexus can transform your IT landscape.
 
 Contact me for more details:
 
@@ -1,14 +1,14 @@
 # Contributing
 
-Thank you for your interest in contributing to CyMaIS! We welcome contributions from the community to help improve and enhance this project. Your input makes the project stronger and more adaptable to a wide range of IT infrastructure needs.
+Thank you for your interest in contributing to Infinito.Nexus! We welcome contributions from the community to help improve and enhance this project. Your input makes the project stronger and more adaptable to a wide range of IT infrastructure needs.
 
 ## How to Contribute
 
 There are several ways you can help:
-- **Reporting Issues:** Found a bug or have a feature request? Please open an issue on our [GitHub Issues page](https://github.com/kevinveenbirkenbach/cymais/issues) with a clear description and steps to reproduce the problem.
+- **Reporting Issues:** Found a bug or have a feature request? Please open an issue on our [GitHub Issues page](https://s.infinito.nexus/issues) with a clear description and steps to reproduce the problem.
 - **Code Contributions:** If you'd like to contribute code, fork the repository, create a new branch for your feature or bug fix, and submit a pull request. Ensure your code adheres to our coding style and includes tests where applicable.
-- **Documentation:** Improving the documentation is a great way to contribute. Whether it's clarifying an existing section or adding new guides, your contributions help others understand and use CyMaIS effectively.
-- **Financial Contributions:** If you appreciate CyMaIS and want to support its ongoing development, consider making a financial contribution. For more details, please see our [donate options](12_DONATE.md).
+- **Documentation:** Improving the documentation is a great way to contribute. Whether it's clarifying an existing section or adding new guides, your contributions help others understand and use Infinito.Nexus effectively.
+- **Financial Contributions:** If you appreciate Infinito.Nexus and want to support its ongoing development, consider making a financial contribution. For more details, please see our [donate options](12_DONATE.md).
 
 ## Code of Conduct
 
@@ -40,7 +40,7 @@ Please follow these guidelines when contributing code:
 
 ## License and Commercial Use
 
-CyMaIS is primarily designed for private use. Commercial use of CyMaIS is not permitted without a proper licensing agreement. By contributing to this project, you agree that your contributions will be licensed under the same terms as the rest of the project.
+Infinito.Nexus is primarily designed for private use. Commercial use of Infinito.Nexus is not permitted without a proper licensing agreement. By contributing to this project, you agree that your contributions will be licensed under the same terms as the rest of the project.
 
 ## Getting Started
 
@@ -54,4 +54,4 @@ CyMaIS is primarily designed for private use. Commercial use of CyMaIS is not pe
 
 If you have any questions or need help, feel free to open an issue or join our community discussions. We appreciate your efforts and are here to support you.
 
-Thank you for contributing to CyMaIS and helping us build a better, more efficient IT infrastructure solution!
+Thank you for contributing to Infinito.Nexus and helping us build a better, more efficient IT infrastructure solution!
@@ -1,8 +1,8 @@
 # Support Us
 
-CyMaIS is an Open Source Based transformative tool designed to redefine IT infrastructure setup for organizations and individuals alike. Your contributions directly support the ongoing development and innovation behind CyMaIS, ensuring that it continues to grow and serve its community effectively.
+Infinito.Nexus is an Open Source Based transformative tool designed to redefine IT infrastructure setup for organizations and individuals alike. Your contributions directly support the ongoing development and innovation behind Infinito.Nexus, ensuring that it continues to grow and serve its community effectively.
 
-If you enjoy using CyMaIS and would like to contribute to its improvement, please consider donating. Every contribution, no matter the size, helps us maintain and expand this project.
+If you enjoy using Infinito.Nexus and would like to contribute to its improvement, please consider donating. Every contribution, no matter the size, helps us maintain and expand this project.
 
 [](https://github.com/sponsors/kevinveenbirkenbach) [](https://www.patreon.com/c/kevinveenbirkenbach) [](https://buymeacoffee.com/kevinveenbirkenbach) [](https://s.veen.world/paypaldonate)
 
Dockerfile (new file, 69 lines)
@@ -0,0 +1,69 @@
FROM archlinux:latest

# 1) Update system and install build/runtime deps
RUN pacman -Syu --noconfirm \
      base-devel \
      git \
      python \
      python-pip \
      python-setuptools \
      alsa-lib \
      go \
      rsync \
    && pacman -Scc --noconfirm

# 2) Stub out systemctl & yay so post-install hooks and AUR calls never fail
RUN printf '#!/bin/sh\nexit 0\n' > /usr/bin/systemctl \
    && chmod +x /usr/bin/systemctl \
    && printf '#!/bin/sh\nexit 0\n' > /usr/bin/yay \
    && chmod +x /usr/bin/yay

# 3) Build & install python-simpleaudio from AUR manually (as non-root)
RUN useradd -m aur_builder \
    && su aur_builder -c "git clone https://aur.archlinux.org/python-simpleaudio.git /home/aur_builder/psa && \
       cd /home/aur_builder/psa && \
       makepkg --noconfirm --skippgpcheck" \
    && pacman -U --noconfirm /home/aur_builder/psa/*.pkg.tar.zst \
    && rm -rf /home/aur_builder/psa

# 4) Clone Kevin’s Package Manager and create its venv
ENV PKGMGR_REPO=/opt/package-manager \
    PKGMGR_VENV=/root/.venvs/pkgmgr

RUN git clone https://github.com/kevinveenbirkenbach/package-manager.git $PKGMGR_REPO \
    && python -m venv $PKGMGR_VENV \
    && $PKGMGR_VENV/bin/pip install --upgrade pip \
    # install pkgmgr’s own deps + the ansible Python library so infinito import yaml & ansible.plugins.lookup work
    && $PKGMGR_VENV/bin/pip install --no-cache-dir -r $PKGMGR_REPO/requirements.txt ansible \
    # drop a thin wrapper so `pkgmgr` always runs inside that venv
    && printf '#!/bin/sh\n. %s/bin/activate\nexec python %s/main.py "$@"\n' \
       "$PKGMGR_VENV" "$PKGMGR_REPO" > /usr/local/bin/pkgmgr \
    && chmod +x /usr/local/bin/pkgmgr

# 5) Ensure pkgmgr venv bin and user-local bin are on PATH
ENV PATH="$PKGMGR_VENV/bin:/root/.local/bin:${PATH}"

# 6) Copy local Infinito.Nexus source into the image for override
COPY . /opt/infinito-src

# 7) Install Infinito.Nexus via pkgmgr (clone-mode https)
RUN pkgmgr install infinito --clone-mode https

# 8) Override installed Infinito.Nexus with local source and clean ignored files
RUN INFINITO_PATH=$(pkgmgr path infinito) && \
    rm -rf "$INFINITO_PATH"/* && \
    rsync -a --delete --exclude='.git' /opt/infinito-src/ "$INFINITO_PATH"/

# 9) Symlink the infinito script into /usr/local/bin so ENTRYPOINT works
RUN INFINITO_PATH=$(pkgmgr path infinito) && \
    ln -sf "$INFINITO_PATH"/main.py /usr/local/bin/infinito && \
    chmod +x /usr/local/bin/infinito

# 10) Run integration tests
# This needed to be deactivated because it doesn't work with the GitHub workflow
#RUN INFINITO_PATH=$(pkgmgr path infinito) && \
#    cd "$INFINITO_PATH" && \
#    make test

ENTRYPOINT ["infinito"]
CMD ["--help"]
@@ -1,9 +1,9 @@
 # License Agreement
 
-## CyMaIS NonCommercial License (CNCL)
+## Infinito.Nexus NonCommercial License
 
 ### Definitions
-- **"Software":** Refers to *"[CyMaIS - Cyber Master Infrastructure Solution](https://cymais.cloud/)"* and its associated source code.
+- **"Software":** Refers to *"[Infinito.Nexus](https://infinito.nexus/)"* and its associated source code.
 - **"Commercial Use":** Any use of the Software intended for direct or indirect financial gain, including but not limited to sales, rentals, or provision of services.
 
 ### Provisions
 
Makefile
@@ -1,35 +1,85 @@
-ROLES_DIR            := ./roles
-APPLICATIONS_OUT     := ./group_vars/all/04_applications.yml
-APPLICATIONS_SCRIPT  := ./cli/generate_applications.py
-USERS_OUT            := ./group_vars/all/03_users.yml
-USERS_SCRIPT         := ./cli/generate_users.py
-INCLUDES_OUT         := ./tasks/utils/docker-roles.yml
-INCLUDES_SCRIPT      := ./cli/generate_playbook.py
+ROLES_DIR            := ./roles
+APPLICATIONS_OUT     := ./group_vars/all/04_applications.yml
+APPLICATIONS_SCRIPT  := ./cli/build/defaults/applications.py
+USERS_OUT            := ./group_vars/all/03_users.yml
+USERS_SCRIPT         := ./cli/build/defaults/users.py
+INCLUDES_SCRIPT      := ./cli/build/role_include.py
+
+INCLUDE_GROUPS := $(shell python3 main.py meta categories invokable -s "-" --no-signal | tr '\n' ' ')
+
+# Directory where these include-files will be written
+INCLUDES_OUT_DIR     := ./tasks/groups
+
+# Compute extra users as before
 EXTRA_USERS := $(shell \
-  find $(ROLES_DIR) -maxdepth 1 -type d -name 'docker*' -printf '%f\n' \
-  | sed -E 's/^docker[_-]?//' \
-  | grep -E -x '[a-z0-9]+' \
-  | paste -sd, - \
+  find $(ROLES_DIR) -maxdepth 1 -type d -printf '%f\n' \
+  | sed -E 's/.*-//' \
+  | grep -E -x '[a-z0-9]+' \
+  | sort -u \
+  | paste -sd, - \
 )
 
 .PHONY: build install test
 
-build:
-	@echo "🔧 Generating applications defaults → $(APPLICATIONS_OUT) from roles in $(ROLES_DIR)…"
-	python3 $(APPLICATIONS_SCRIPT) --roles-dir $(ROLES_DIR) --output-file $(APPLICATIONS_OUT)
-	@echo "✅ Applications defaults written to $(APPLICATIONS_OUT)\n"
-	@echo "🔧 Generating users defaults → $(USERS_OUT) from roles in $(ROLES_DIR)…"
-	python3 $(USERS_SCRIPT) --roles-dir $(ROLES_DIR) --output $(USERS_OUT) --extra-users "$(EXTRA_USERS)"
-	@echo "✅ Users defaults written to $(USERS_OUT)\n"
-	@echo "🔧 Generating Docker role includes → $(INCLUDES_OUT)…"
-	@mkdir -p $(dir $(INCLUDES_OUT))
-	python3 $(INCLUDES_SCRIPT) $(ROLES_DIR) -o $(INCLUDES_OUT) -p docker-
-	@echo "✅ Docker role includes written to $(INCLUDES_OUT)"
+clean-keep-logs:
+	@echo "🧹 Cleaning ignored files but keeping logs/…"
+	git clean -fdX -- ':!logs' ':!logs/**'
+
+clean:
+	@echo "Removing ignored git files"
+	git clean -fdX
+
+list:
+	@echo Generating the roles list
+	python3 main.py build roles_list
+
+tree:
+	@echo Generating Tree
+	python3 main.py build tree -D 2 --no-signal
+
+mig: list tree
+	@echo Creating meta data for meta infinity graph
+
+dockerignore:
+	@echo Create dockerignore
+	cat .gitignore > .dockerignore
+	echo ".git" >> .dockerignore
+
+messy-build: dockerignore
+	@echo "🔧 Generating users defaults → $(USERS_OUT)…"
+	python3 $(USERS_SCRIPT) \
+		--roles-dir $(ROLES_DIR) \
+		--output $(USERS_OUT) \
+		--extra-users "$(EXTRA_USERS)"
+	@echo "✅ Users defaults written to $(USERS_OUT)\n"
+
+	@echo "🔧 Generating applications defaults → $(APPLICATIONS_OUT)…"
+	python3 $(APPLICATIONS_SCRIPT) \
+		--roles-dir $(ROLES_DIR) \
+		--output-file $(APPLICATIONS_OUT)
+	@echo "✅ Applications defaults written to $(APPLICATIONS_OUT)\n"
+
+	@echo "🔧 Generating role-include files for each group…"
+	@mkdir -p $(INCLUDES_OUT_DIR)
+	@$(foreach grp,$(INCLUDE_GROUPS), \
+		out=$(INCLUDES_OUT_DIR)/$(grp)roles.yml; \
+		echo "→ Building $$out (pattern: '$(grp)')…"; \
+		python3 $(INCLUDES_SCRIPT) $(ROLES_DIR) \
+			-p $(grp) -o $$out; \
+		echo "  ✅ $$out"; \
+	)
+
+messy-test:
+	@echo "🧪 Running Python tests…"
+	PYTHONPATH=. python -m unittest discover -s tests
+	@echo "📑 Checking Ansible syntax…"
+	ansible-playbook playbook.yml --syntax-check
 
 install: build
	@echo "⚙️ Install complete."
 
-test:
-	@echo "🧪 Running Tests..."
-	python -m unittest discover -s tests
+build: clean messy-build
+	@echo "Full build with cleanup before was executed."
+
+test: build messy-test
+	@echo "Full test with build before was executed."
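Read together, the new targets chain into two entry points; a quick usage sketch based on the targets above:

```sh
make build   # clean (git clean -fdX), then messy-build: regenerate users/applications defaults and per-group role includes
make test    # build first, then messy-test: Python unit tests plus an ansible-playbook syntax check
```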
README.md
@@ -1,38 +1,94 @@
-# CyMaIS - Cyber Master Infrastructure Solution 🚀
+# Infinito.Nexus 🚀
+
+**🔐 One login. ♾️ Infinite applications.**
 
-[](https://github.com/sponsors/kevinveenbirkenbach) [](https://www.patreon.com/c/kevinveenbirkenbach) [](https://buymeacoffee.com/kevinveenbirkenbach) [](https://s.veen.world/paypaldonate)
-
-
-
-Welcome to **CyMaIS (Cyber Master Infrastructure Solution)**, a powerful automation framework that simplifies IT infrastructure setup and management. Whether you are an **end-user** looking to access cloud services securely or an **administrator** responsible for deploying and maintaining infrastructure, CyMaIS provides a seamless and secure solution.
-
-## What is CyMaIS? 📌
-CyMaIS leverages **Docker, Linux, and Ansible** to provide an automated and modular infrastructure solution. With more then **150 pre-configured roles**, it supports a wide range of applications, from cloud services to local server management and desktop workstation setups.
-
-## Guides 📖
-- **[User Guide](docs/guides/user/Readme.md)** - For end-users accessing cloud apps like Nextcloud, Matrix, and more.
-- **[Administrator Guide](docs/guides/administrator/Readme.md)** - For system administrators deploying CyMaIS.
-- **[Customer Guide](docs/guides/customer/Readme.md)** - For customers which are interested in an infrastructure setup
-- **[Developer Guide](docs/guides/developer/index)** - For developers which are interested in participating
-- **[Investor Guide](docs/guides/investor/Readme.md)** - For investors which like to get a share in the project
+---
+
+## What is Infinito.Nexus? 📌
+
+
+
+**Infinito.Nexus** is an **automated, modular infrastructure framework** built on **Docker**, **Linux**, and **Ansible**, equally suited for cloud services, local server management, and desktop workstations. At its core lies a **web-based desktop with single sign-on**—backed by an **LDAP directory** and **OIDC**—granting **seamless access** to an almost limitless portfolio of self-hosted applications. It fully supports **ActivityPub applications** and is **Fediverse-compatible**, while integrated **monitoring**, **alerting**, **cleanup**, **self-healing**, **automated updates**, and **backup solutions** provide everything an organization needs to run at scale.
+
+| 📚 | 🔗 |
+|---|---|
+| 🌐 Try It Live | [](https://infinito.nexus) |
+| 🔧 Request Your Setup | [](https://cybermaster.space) |
+| 📖 About This Project | [](https://github.com/sponsors/kevinveenbirkenbach) [](https://github.com/kevinveenbirkenbach/infinito-nexus/actions/workflows/test-cli.yml) [](https://s.infinito.nexus/code) |
+| ☕️ Support Us | [](https://www.patreon.com/c/kevinveenbirkenbach) [](https://buymeacoffee.com/kevinveenbirkenbach) [](https://s.veen.world/paypaldonate) [](https://github.com/sponsors/kevinveenbirkenbach) |
+
+---
 
 ## Key Features 🎯
-- **Automated IT deployment** 📦 - Pre-built roles for server and PC setups
-- **Enterprise-ready security** 🔒 - Supports LDAP, Keycloak, 2FA, and encrypted storage
-- **Scalability & flexibility** 📈 - Modular approach for small teams to large enterprises
-- **Backup & recovery solutions** 💾 - Automate data security and prevent loss
-- **Infrastructure monitoring & maintenance** 📊 - Keep your system running optimally
+
+* **Automated Deployment** 📦
+  Turn up servers and workstations in minutes with ready-made Ansible roles.
+
+* **Enterprise-Grade Security** 🔒
+  Centralized user management via LDAP & OIDC (Keycloak), plus optional 2FA and encrypted storage.
+
+* **Modular Scalability** 📈
+  Grow from small teams to global enterprises by composing only the services you need.
+
+* **Fediverse & ActivityPub Support** 🌐
+  Seamlessly integrate Mastodon, Peertube, Matrix and other ActivityPub apps out of the box.
+
+* **Self-Healing & Maintenance** ⚙️
+  Automated cleanup, container healing, and auto-updates keep infrastructure healthy without human intervention.
+
+* **Monitoring, Alerting & Analytics** 📊
+  Built-in system, application, and security monitoring with multi-channel notifications.
+
+* **Backup & Disaster Recovery** 💾
+  Scheduled backups and scripted recovery processes to safeguard your data.
+
+* **Continuous Updates** 🔄
+  Automatic patching and version upgrades across the stack.
+
+* **Application Ecosystem** 🚀
+  A curated suite of self-hosted apps—from **project management**, **version control**, and **CI/CD** to **chat**, **video conferencing**, **CMS**, **e-learning**, **social networking**, and **e-commerce**—all seamlessly integrated.
+
+You will find more information about the features [here](docs/overview/Features.md).
 
 ---
 
 ## Get Started 🚀
-1. **Install CyMaIS** via [Kevin's Package Manager](https://github.com/kevinveenbirkenbach/package-manager)
-2. **Setup CyMaIS** using:
+
+### Use it online 🌐
+
+Try [Infinito.Nexus](https://infinito.nexus) – sign up in seconds, explore the platform, and discover what our solution can do for you! 🚀🔧✨
+
+### Install locally 💻
+1. **Install Infinito.Nexus** via [Kevin's Package Manager](https://github.com/kevinveenbirkenbach/package-manager)
+2. **Setup Infinito.Nexus** using:
    ```sh
-   pkgmgr setup cymais
+   pkgmgr install infinito
    ```
 3. **Explore Commands** with:
    ```sh
-   cymais --help
+   infinito --help
    ```
+
+---
+
+### Setup with Docker 🚢
+
+Get Infinito.Nexus up and running inside Docker in just a few steps. For detailed build options and troubleshooting, see the [Docker Guide](docs/Docker.md).
+
+```bash
+# 1. Build the Docker image:
+docker build -t infinito:latest .
+
+# 2. Run the CLI interactively:
+docker run --rm -it infinito:latest infinito --help
+```
+
+---
+
+## License ⚖️
+
+Infinito.Nexus is distributed under the **Infinito.Nexus NonCommercial License**. Please see [LICENSE.md](LICENSE.md) for full terms.
+
+---
+
+## Professional Setup & Support 💼
+
+For expert installation and configuration visit [cybermaster.space](https://cybermaster.space/) or write to us at **[contact@cymais.cloud](mailto:contact@cymais.cloud)**.
TODO.md (new file, 5 lines)
@@ -0,0 +1,5 @@
# Todos
- Implement multi-language support
- Implement RBAC administration interface
- Implement ``MASK_CREDENTIALS_IN_LOGS`` for all sensitive tasks
- [Enable IP6 for docker](https://chatgpt.com/share/68a0acb8-db20-800f-9d2c-b34e38b5cdee).
Todo.md (deleted, 3 lines)
@@ -1,3 +0,0 @@
-# Todos
-- Implement multi language
-- Implement rbac administration interface
ansible.cfg
@@ -1,4 +1,33 @@
 [defaults]
-lookup_plugins  = ./lookup_plugins
+# --- Performance & Behavior ---
+forks                 = 25
+strategy              = linear
+gathering             = smart
+timeout               = 120
+retry_files_enabled   = False
+host_key_checking     = True
+deprecation_warnings  = True
+interpreter_python    = auto_silent
+
+# --- Output & Profiling ---
+stdout_callback       = yaml
+callbacks_enabled     = profile_tasks,timer
+
+# --- Plugin paths ---
 filter_plugins  = ./filter_plugins
 module_utils    = ./module_utils
+lookup_plugins  = ./lookup_plugins
+module_utils    = ./module_utils
+
+[ssh_connection]
+# Multiplexing: safer socket path in HOME instead of /tmp
+ssh_args = -o ControlMaster=auto -o ControlPersist=20s -o ControlPath=~/.ssh/ansible-%h-%p-%r \
+           -o ServerAliveInterval=15 -o ServerAliveCountMax=3 -o StrictHostKeyChecking=accept-new \
+           -o PreferredAuthentications=publickey,password,keyboard-interactive
+
+# Pipelining boosts speed; works fine if sudoers does not enforce "requiretty"
+pipelining = True
+scp_if_ssh = smart
+
+[persistent_connection]
+connect_timeout = 30
+command_timeout = 60
Binary file not shown. (image: 162 KiB before → 157 KiB after)
Binary file not shown. (image: 1.4 MiB before → 1015 KiB after)
cli/build/defaults/applications.py (new file, 110 lines)
@@ -0,0 +1,110 @@
#!/usr/bin/env python3
import argparse
import yaml
import sys
import time
from pathlib import Path

# Ensure project root on PYTHONPATH so module_utils is importable
repo_root = Path(__file__).resolve().parent.parent.parent.parent
sys.path.insert(0, str(repo_root))

# Add lookup_plugins for application_gid
plugin_path = repo_root / "lookup_plugins"
sys.path.insert(0, str(plugin_path))

from module_utils.dict_renderer import DictRenderer
from application_gid import LookupModule

def load_yaml_file(path: Path) -> dict:
    if not path.exists():
        return {}
    with path.open("r", encoding="utf-8") as f:
        return yaml.safe_load(f) or {}

class DefaultsGenerator:
    def __init__(self, roles_dir: Path, output_file: Path, verbose: bool, timeout: float):
        self.roles_dir = roles_dir
        self.output_file = output_file
        self.verbose = verbose
        self.renderer = DictRenderer(verbose=verbose, timeout=timeout)
        self.gid_lookup = LookupModule()

    def log(self, message: str):
        if self.verbose:
            print(f"[DefaultsGenerator] {message}")

    def run(self):
        result = {"defaults_applications": {}}

        for role_dir in sorted(self.roles_dir.iterdir()):
            role_name = role_dir.name
            vars_main = role_dir / "vars" / "main.yml"
            config_file = role_dir / "config" / "main.yml"

            if not vars_main.exists():
                self.log(f"Skipping {role_name}: vars/main.yml missing")
                continue

            vars_data = load_yaml_file(vars_main)
            application_id = vars_data.get("application_id")
            if not application_id:
                self.log(f"Skipping {role_name}: application_id not defined")
                continue

            if not config_file.exists():
                self.log(f"Config missing for {role_name}, adding empty defaults for '{application_id}'")
                result["defaults_applications"][application_id] = {}
                continue

            config_data = load_yaml_file(config_file)
            if config_data:
                try:
                    gid_number = self.gid_lookup.run([application_id], roles_dir=str(self.roles_dir))[0]
                except Exception as e:
                    print(f"Warning: failed to determine gid for '{application_id}': {e}", file=sys.stderr)
                    sys.exit(1)

                config_data["group_id"] = gid_number
                result["defaults_applications"][application_id] = config_data

                # Inject users mapping as Jinja2 references
                users_meta = load_yaml_file(role_dir / "users" / "main.yml")
                users_data = users_meta.get("users", {})
                transformed = {user: f"{{{{ users[\"{user}\"] }}}}" for user in users_data}
                if transformed:
                    result["defaults_applications"][application_id]["users"] = transformed

        # Render placeholders in entire result context
        self.log("Starting placeholder rendering...")
        try:
            result = self.renderer.render(result)
        except Exception as e:
            print(f"Error during rendering: {e}", file=sys.stderr)
            sys.exit(1)

        # Write output
        self.output_file.parent.mkdir(parents=True, exist_ok=True)
        with self.output_file.open("w", encoding="utf-8") as f:
            yaml.dump(result, f, sort_keys=False)

        # Print location of generated file (absolute if not under cwd)
        try:
            rel = self.output_file.relative_to(Path.cwd())
        except ValueError:
            rel = self.output_file
        print(f"✅ Generated: {rel}")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Generate defaults_applications YAML...")
    parser.add_argument("--roles-dir", default="roles", help="Path to the roles directory")
    parser.add_argument("--output-file", required=True, help="Path to output YAML file")
    parser.add_argument("--verbose", action="store_true", help="Enable verbose logging")
    parser.add_argument("--timeout", type=float, default=10.0, help="Timeout for rendering")

    args = parser.parse_args()
    cwd = Path.cwd()
    roles_dir = (cwd / args.roles_dir).resolve()
    output_file = (cwd / args.output_file).resolve()

    DefaultsGenerator(roles_dir, output_file, args.verbose, args.timeout).run()
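Judging from the messy-build target in the Makefile above, the generator is invoked along these lines (a usage sketch, not part of the diff; --verbose and --timeout are optional flags defined in the script):

```sh
python3 cli/build/defaults/applications.py \
  --roles-dir roles \
  --output-file group_vars/all/04_applications.yml
```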
cli/build/defaults/users.py (new file, 241 lines)
@@ -0,0 +1,241 @@
#!/usr/bin/env python3
import os
import sys
import argparse
import yaml
import glob
from collections import OrderedDict


def represent_str(dumper, data):
    """
    Custom YAML string representer that forces double quotes around any string
    containing a Jinja2 placeholder ({{ ... }}).
    """
    if isinstance(data, str) and '{{' in data:
        return dumper.represent_scalar(
            'tag:yaml.org,2002:str',
            data,
            style='"'
        )
    return dumper.represent_scalar(
        'tag:yaml.org,2002:str',
        data
    )


def build_users(defs, primary_domain, start_id, become_pwd):
    """
    Construct user entries with auto-incremented UID/GID, default username/email,
    and optional description.

    Args:
        defs (OrderedDict): Mapping of user keys to their override settings.
        primary_domain (str): The primary domain for email addresses (e.g. 'example.com').
        start_id (int): Starting number for UID/GID allocation (e.g. 1001).
        become_pwd (str): Default password string for users without an override.

    Returns:
        OrderedDict: Complete user definitions with all required fields filled in.

    Raises:
        ValueError: If there are duplicate UIDs, usernames, or emails.
    """
    users = OrderedDict()
    used_uids = set()

    # Collect any preset UIDs to avoid collisions
    for key, overrides in defs.items():
        if 'uid' in overrides:
            uid = overrides['uid']
            if uid in used_uids:
                raise ValueError(f"Duplicate uid {uid} for user '{key}'")
            used_uids.add(uid)

    next_uid = start_id
    def allocate_uid():
        nonlocal next_uid
        # Find the next free UID not already used
        while next_uid in used_uids:
            next_uid += 1
        free_uid = next_uid
        used_uids.add(free_uid)
        next_uid += 1
        return free_uid

    # Build each user entry
    for key, overrides in defs.items():
        username = overrides.get('username', key)
        email = overrides.get('email', f"{username}@{primary_domain}")
        description = overrides.get('description')
        roles = overrides.get('roles', [])
        password = overrides.get('password', become_pwd)

        # Determine UID and GID
        if 'uid' in overrides:
            uid = overrides['uid']
        else:
            uid = allocate_uid()
        gid = overrides.get('gid', uid)

        entry = {
            'username': username,
            'email': email,
            'password': password,
            'uid': uid,
            'gid': gid,
            'roles': roles
        }
        if description is not None:
            entry['description'] = description

        users[key] = entry

    # Ensure uniqueness of usernames and emails
    seen_usernames = set()
    seen_emails = set()

    for key, entry in users.items():
        un = entry['username']
        em = entry['email']
        if un in seen_usernames:
            raise ValueError(f"Duplicate username '{un}' in merged users")
        if em in seen_emails:
            raise ValueError(f"Duplicate email '{em}' in merged users")
        seen_usernames.add(un)
        seen_emails.add(em)

    return users


def load_user_defs(roles_directory):
    """
    Scan all roles/*/users/main.yml files and merge any 'users:' sections.

    Args:
        roles_directory (str): Path to the directory containing role subdirectories.

    Returns:
        OrderedDict: Merged user definitions from all roles.

    Raises:
        ValueError: On invalid format or conflicting override values.
    """
    pattern = os.path.join(roles_directory, '*/users/main.yml')
    files = sorted(glob.glob(pattern))
    merged = OrderedDict()

    for filepath in files:
        with open(filepath, 'r') as f:
            data = yaml.safe_load(f) or {}
        users = data.get('users', {})
        if not isinstance(users, dict):
            continue

        for key, overrides in users.items():
            if not isinstance(overrides, dict):
                raise ValueError(f"Invalid definition for user '{key}' in {filepath}")

            if key not in merged:
                merged[key] = overrides.copy()
            else:
                existing = merged[key]
                for field, value in overrides.items():
                    if field in existing and existing[field] != value:
                        raise ValueError(
                            f"Conflict for user '{key}': field '{field}' has existing value '{existing[field]}', tried to set '{value}' in {filepath}"
                        )
                existing.update(overrides)

    return merged


def dictify(data):
    """
    Recursively convert OrderedDict to regular dict for YAML dumping.
    """
    if isinstance(data, OrderedDict):
        return {k: dictify(v) for k, v in data.items()}
    if isinstance(data, dict):
        return {k: dictify(v) for k, v in data.items()}
    if isinstance(data, list):
        return [dictify(v) for v in data]
    return data


def parse_args():
    parser = argparse.ArgumentParser(
        description='Generate a users.yml by merging all roles/*/users/main.yml definitions.'
    )
    parser.add_argument(
        '--roles-dir', '-r', required=True,
        help='Directory containing roles (e.g., roles/*/users/main.yml).'
    )
    parser.add_argument(
        '--output', '-o', required=True,
        help='Path to the output YAML file (e.g., users.yml).'
    )
    parser.add_argument(
        '--start-id', '-s', type=int, default=1001,
        help='Starting UID/GID number (default: 1001).'
    )
    parser.add_argument(
        '--extra-users', '-e',
        help='Comma-separated list of additional usernames to include.',
        default=None
    )
    return parser.parse_args()


def main():
    args = parse_args()
    primary_domain = '{{ SYSTEM_EMAIL.DOMAIN }}'
    become_pwd = '{{ lookup("password", "/dev/null length=42 chars=ascii_letters,digits") }}'

    try:
        definitions = load_user_defs(args.roles_dir)
    except ValueError as e:
        print(f"Error merging user definitions: {e}", file=sys.stderr)
        sys.exit(1)

    # Add extra users if specified
    if args.extra_users:
        for name in args.extra_users.split(','):
            user_key = name.strip()
            if not user_key:
                continue
            if user_key in definitions:
                print(f"Warning: extra user '{user_key}' already defined; skipping.", file=sys.stderr)
            else:
                definitions[user_key] = {}

    try:
        users = build_users(
            definitions,
            primary_domain,
            args.start_id,
            become_pwd
        )
    except ValueError as e:
        print(f"Error building user entries: {e}", file=sys.stderr)
        sys.exit(1)

    # Convert OrderedDict into plain dict for YAML
    default_users = {'default_users': users}
    plain_data = dictify(default_users)

    # Register custom string representer
    yaml.SafeDumper.add_representer(str, represent_str)

    # Dump the YAML file
    with open(args.output, 'w') as f:
        yaml.safe_dump(
            plain_data,
            f,
            default_flow_style=False,
            sort_keys=False,
            width=120
        )


if __name__ == '__main__':
    main()
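A usage sketch mirroring the Makefile's users invocation; the extra-users value shown here is a hypothetical placeholder (the Makefile derives it from role directory names):

```sh
python3 cli/build/defaults/users.py \
  --roles-dir roles \
  --output group_vars/all/03_users.yml \
  --extra-users "alice,bob"   # hypothetical extra users
```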
173
cli/build/graph.py
Normal file
173
cli/build/graph.py
Normal file
@@ -0,0 +1,173 @@
|
||||
#!/usr/bin/env python3
|
||||
import os
|
||||
import argparse
|
||||
import yaml
|
||||
import json
|
||||
import re
|
||||
from typing import List, Dict, Any, Set
|
||||
|
||||
|
||||
JINJA_PATTERN = re.compile(r'{{.*}}')
|
||||
ALL_DEP_TYPES = ['run_after', 'dependencies', 'include_tasks', 'import_tasks', 'include_role', 'import_role']
|
||||
ALL_DIRECTIONS = ['to', 'from']
|
||||
ALL_KEYS = [f"{dep}_{dir}" for dep in ALL_DEP_TYPES for dir in ALL_DIRECTIONS]
|
||||
|
||||
|
||||
def find_role_meta(roles_dir: str, role: str) -> str:
|
||||
path = os.path.join(roles_dir, role, 'meta', 'main.yml')
|
||||
if not os.path.isfile(path):
|
||||
raise FileNotFoundError(f"Metadata not found for role: {role}")
|
||||
return path
|
||||
|
||||
|
||||
def find_role_tasks(roles_dir: str, role: str) -> str:
|
||||
path = os.path.join(roles_dir, role, 'tasks', 'main.yml')
|
||||
    if not os.path.isfile(path):
        raise FileNotFoundError(f"Tasks not found for role: {role}")
    return path


def load_meta(path: str) -> Dict[str, Any]:
    with open(path, 'r') as f:
        data = yaml.safe_load(f) or {}

    galaxy_info = data.get('galaxy_info', {}) or {}
    return {
        'galaxy_info': galaxy_info,
        'run_after': galaxy_info.get('run_after', []) or [],
        'dependencies': data.get('dependencies', []) or []
    }


def load_tasks(path: str, dep_type: str) -> List[str]:
    with open(path, 'r') as f:
        data = yaml.safe_load(f) or []

    included_roles = []

    for task in data:
        if dep_type in task:
            entry = task[dep_type]
            if isinstance(entry, dict):
                entry = entry.get('name', '')
            if entry and not JINJA_PATTERN.search(entry):
                included_roles.append(entry)

    return included_roles


def build_single_graph(
    start_role: str,
    dep_type: str,
    direction: str,
    roles_dir: str,
    max_depth: int
) -> Dict[str, Any]:
    nodes: Dict[str, Dict[str, Any]] = {}
    links: List[Dict[str, str]] = []

    def traverse(role: str, depth: int, path: Set[str]):
        if role not in nodes:
            meta = load_meta(find_role_meta(roles_dir, role))
            node = {'id': role}
            node.update(meta['galaxy_info'])
            node['doc_url'] = f"https://docs.infinito.nexus/roles/{role}/README.html"
            node['source_url'] = f"https://s.infinito.nexus/code/tree/master/roles/{role}"
            nodes[role] = node

        if max_depth > 0 and depth >= max_depth:
            return

        neighbors = []
        if dep_type in ['run_after', 'dependencies']:
            meta = load_meta(find_role_meta(roles_dir, role))
            neighbors = meta.get(dep_type, [])
        else:
            try:
                neighbors = load_tasks(find_role_tasks(roles_dir, role), dep_type)
            except FileNotFoundError:
                neighbors = []

        if direction == 'to':
            for tgt in neighbors:
                links.append({'source': role, 'target': tgt, 'type': dep_type})
                if tgt in path:
                    continue
                traverse(tgt, depth + 1, path | {tgt})

        else:  # direction == 'from'
            for other in os.listdir(roles_dir):
                try:
                    other_neighbors = []
                    if dep_type in ['run_after', 'dependencies']:
                        meta_o = load_meta(find_role_meta(roles_dir, other))
                        other_neighbors = meta_o.get(dep_type, [])
                    else:
                        other_neighbors = load_tasks(find_role_tasks(roles_dir, other), dep_type)

                    if role in other_neighbors:
                        links.append({'source': other, 'target': role, 'type': dep_type})
                        if other in path:
                            continue
                        traverse(other, depth + 1, path | {other})

                except FileNotFoundError:
                    continue

    traverse(start_role, depth=0, path={start_role})
    return {'nodes': list(nodes.values()), 'links': links}


def build_mappings(
    start_role: str,
    roles_dir: str,
    max_depth: int
) -> Dict[str, Any]:
    result: Dict[str, Any] = {}
    for key in ALL_KEYS:
        dep_type, direction = key.rsplit('_', 1)
        try:
            result[key] = build_single_graph(start_role, dep_type, direction, roles_dir, max_depth)
        except Exception:
            result[key] = {'nodes': [], 'links': []}
    return result


def output_graph(graph_data: Any, fmt: str, start: str, key: str):
    base = f"{start}_{key}"
    if fmt == 'console':
        print(f"--- {base} ---")
        print(yaml.safe_dump(graph_data, sort_keys=False))
    elif fmt in ('yaml', 'json'):
        path = f"{base}.{fmt}"
        with open(path, 'w') as f:
            if fmt == 'yaml':
                yaml.safe_dump(graph_data, f, sort_keys=False)
            else:
                json.dump(graph_data, f, indent=2)
        print(f"Wrote {path}")
    else:
        raise ValueError(f"Unknown format: {fmt}")


def main():
    script_dir = os.path.dirname(os.path.abspath(__file__))
    default_roles_dir = os.path.abspath(os.path.join(script_dir, '..', '..', 'roles'))

    parser = argparse.ArgumentParser(description="Generate dependency graphs")
    parser.add_argument('-r', '--role', required=True, help="Starting role name")
    parser.add_argument('-D', '--depth', type=int, default=0, help="Max recursion depth")
    parser.add_argument('-o', '--output', choices=['yaml', 'json', 'console'], default='console')
    parser.add_argument('--roles-dir', default=default_roles_dir, help="Roles directory")

    args = parser.parse_args()

    graphs = build_mappings(args.role, args.roles_dir, args.depth)

    for key in ALL_KEYS:
        graph_data = graphs.get(key, {'nodes': [], 'links': []})
        output_graph(graph_data, args.output, args.role, key)


if __name__ == '__main__':
    main()
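A minimal consumption sketch for the graphs built above, run from the project root. The role name is hypothetical, and the exact key set comes from ALL_KEYS at the top of cli/build/graph.py ('dependencies_to' is assumed here):

# Sketch: consuming build_mappings() output (role name is hypothetical).
from cli.build.graph import build_mappings

graphs = build_mappings(start_role='web-app-akaunting', roles_dir='roles', max_depth=2)
deps = graphs.get('dependencies_to', {'nodes': [], 'links': []})
for link in deps['links']:
    print(f"{link['source']} -> {link['target']} ({link['type']})")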
cli/build/inventory/full.py (new file, 127 lines)
@@ -0,0 +1,127 @@
#!/usr/bin/env python3
# cli/build/inventory/full.py

import argparse
import sys
import os

try:
    from filter_plugins.get_all_invokable_apps import get_all_invokable_apps
except ImportError:
    sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..')))
    from filter_plugins.get_all_invokable_apps import get_all_invokable_apps

import yaml
import json

def build_group_inventory(apps, host):
    """
    Build an Ansible inventory in which each application is a group containing the given host.
    """
    groups = {app: {"hosts": [host]} for app in apps}
    inventory = {
        "all": {
            "hosts": [host],
            "children": {app: {} for app in apps},
        },
        **groups
    }
    return inventory

def build_hostvar_inventory(apps, host):
    """
    Alternative: Build an inventory where all invokable apps are set as a host variable (as a list).
    """
    return {
        "all": {
            "hosts": [host],
        },
        "_meta": {
            "hostvars": {
                host: {
                    "invokable_applications": apps
                }
            }
        }
    }

def main():
    parser = argparse.ArgumentParser(
        description='Build a dynamic Ansible inventory for a given host with all invokable applications.'
    )
    parser.add_argument(
        '--host',
        required=True,
        help='Hostname to assign to all invokable application groups'
    )
    parser.add_argument(
        '-f', '--format',
        choices=['json', 'yaml'],
        default='yaml',
        help='Output format (yaml [default], json)'
    )
    parser.add_argument(
        '--inventory-style',
        choices=['group', 'hostvars'],
        default='group',
        help='Inventory style: group (default, one group per app) or hostvars (list as hostvar)'
    )
    parser.add_argument(
        '-c', '--categories-file',
        default=os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'roles', 'categories.yml')),
        help='Path to roles/categories.yml (default: roles/categories.yml at project root)'
    )
    parser.add_argument(
        '-r', '--roles-dir',
        default=os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'roles')),
        help='Path to roles/ directory (default: roles/ at project root)'
    )
    parser.add_argument(
        '-o', '--output',
        help='Write output to file instead of stdout'
    )
    parser.add_argument(
        '-i', '--ignore',
        action='append',
        default=[],
        help='Application ID(s) to ignore (can be specified multiple times or comma-separated)'
    )
    args = parser.parse_args()

    try:
        apps = get_all_invokable_apps(
            categories_file=args.categories_file,
            roles_dir=args.roles_dir
        )
    except Exception as e:
        sys.stderr.write(f"Error: {e}\n")
        sys.exit(1)

    # Combine all ignore arguments into a flat set
    ignore_ids = set()
    for entry in args.ignore:
        ignore_ids.update(i.strip() for i in entry.split(',') if i.strip())

    if ignore_ids:
        apps = [app for app in apps if app not in ignore_ids]

    # Build the requested inventory style
    if args.inventory_style == 'group':
        inventory = build_group_inventory(apps, args.host)
    else:
        inventory = build_hostvar_inventory(apps, args.host)

    # Output in the chosen format
    if args.format == 'json':
        output = json.dumps(inventory, indent=2)
    else:
        output = yaml.safe_dump(inventory, default_flow_style=False)

    if args.output:
        with open(args.output, 'w') as f:
            f.write(output)
    else:
        print(output)

if __name__ == '__main__':
    main()
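For reference, a sketch of the group-style inventory this produces (host and application IDs are hypothetical):

# Sketch: build_group_inventory() output shape.
from cli.build.inventory.full import build_group_inventory

inv = build_group_inventory(['web-app-akaunting', 'web-app-nextcloud'], 'echoserver')
assert inv['all']['children'] == {'web-app-akaunting': {}, 'web-app-nextcloud': {}}
assert inv['web-app-akaunting'] == {'hosts': ['echoserver']}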
cli/build/role_include.py (new file, 224 lines)
@@ -0,0 +1,224 @@
#!/usr/bin/env python3

import os
import sys
import yaml
import argparse
from collections import defaultdict, deque

def find_roles(roles_dir, prefixes=None):
    """
    Find all roles in the given directory whose names start with
    any of the provided prefixes. If prefixes is empty or None,
    include all roles.
    """
    for entry in os.listdir(roles_dir):
        if prefixes:
            if not any(entry.startswith(pref) for pref in prefixes):
                continue
        path = os.path.join(roles_dir, entry)
        meta_file = os.path.join(path, 'meta', 'main.yml')
        if os.path.isdir(path) and os.path.isfile(meta_file):
            yield path, meta_file

def load_run_after(meta_file):
    """Load the 'run_after' list from the meta/main.yml of a role."""
    with open(meta_file, 'r') as f:
        data = yaml.safe_load(f) or {}
    return data.get('galaxy_info', {}).get('run_after', [])

def load_application_id(role_path):
    """Load the application_id from the vars/main.yml of the role."""
    vars_file = os.path.join(role_path, 'vars', 'main.yml')
    if os.path.exists(vars_file):
        with open(vars_file, 'r') as f:
            data = yaml.safe_load(f) or {}
        return data.get('application_id')
    return None

def build_dependency_graph(roles_dir, prefixes=None):
    """
    Build a dependency graph where each key is a role name and
    its value is a list of roles that depend on it.
    Also return in_degree counts and the roles metadata map.
    """
    graph = defaultdict(list)
    in_degree = defaultdict(int)
    roles = {}

    for role_path, meta_file in find_roles(roles_dir, prefixes):
        run_after = load_run_after(meta_file)
        application_id = load_application_id(role_path)
        role_name = os.path.basename(role_path)

        roles[role_name] = {
            'role_name': role_name,
            'run_after': run_after,
            'application_id': application_id,
            'path': role_path
        }

        for dependency in run_after:
            graph[dependency].append(role_name)
            in_degree[role_name] += 1

        if role_name not in in_degree:
            in_degree[role_name] = 0

    return graph, in_degree, roles

def find_cycle(roles):
    """
    Detect a cycle in the run_after relations:
    roles: dict mapping role_name -> { 'run_after': [...], ... }
    Returns a list of role_names forming the cycle (with the start repeated at the end), or None.
    """
    visited = set()
    stack = set()

    def dfs(node, path):
        visited.add(node)
        stack.add(node)
        path.append(node)
        for dep in roles.get(node, {}).get('run_after', []):
            if dep not in visited:
                res = dfs(dep, path)
                if res:
                    return res
            elif dep in stack:
                idx = path.index(dep)
                return path[idx:] + [dep]
        stack.remove(node)
        path.pop()
        return None

    for role in roles:
        if role not in visited:
            cycle = dfs(role, [])
            if cycle:
                return cycle
    return None

def topological_sort(graph, in_degree, roles=None):
    """
    Perform a topological sort on the dependency graph.
    If a cycle is detected, raise an Exception with detailed debug info.
    """
    queue = deque([r for r, d in in_degree.items() if d == 0])
    sorted_roles = []
    local_in = dict(in_degree)

    while queue:
        role = queue.popleft()
        sorted_roles.append(role)
        for nbr in graph.get(role, []):
            local_in[nbr] -= 1
            if local_in[nbr] == 0:
                queue.append(nbr)

    if len(sorted_roles) != len(in_degree):
        # Something went wrong: likely a cycle
        cycle = find_cycle(roles or {})
        unsorted = [r for r in in_degree if r not in sorted_roles]

        header = "❌ Dependency resolution failed"
        if cycle:
            reason = f"Circular dependency detected: {' -> '.join(cycle)}"
        else:
            reason = "Unresolved dependencies among roles (possible cycle or missing role)."

        details = []
        if unsorted:
            details.append("Unsorted roles and their declared run_after dependencies:")
            for r in unsorted:
                deps = roles.get(r, {}).get('run_after', [])
                details.append(f"  - {r} depends on {deps!r}")

        graph_repr = f"Full dependency graph: {dict(graph)!r}"

        raise Exception("\n".join([header, reason] + details + [graph_repr]))

    return sorted_roles

def print_dependency_tree(graph):
    """Print the dependency tree visually on the console."""
    def print_node(role, indent=0):
        print("  " * indent + role)
        for dep in graph.get(role, []):
            print_node(dep, indent + 1)

    all_roles = set(graph.keys())
    dependent = {r for deps in graph.values() for r in deps}
    roots = all_roles - dependent

    for root in roots:
        print_node(root)

def gen_condi_role_incl(roles_dir, prefixes=None):
    """
    Generate playbook entries based on the sorted order.
    Raises a ValueError if application_id is missing.
    """
    graph, in_degree, roles = build_dependency_graph(roles_dir, prefixes)
    sorted_names = topological_sort(graph, in_degree, roles)

    entries = []
    for role_name in sorted_names:
        role = roles[role_name]

        if role.get('application_id') is None:
            vars_file = os.path.join(role['path'], 'vars', 'main.yml')
            raise ValueError(f"'application_id' missing in {vars_file}")

        app_id = role['application_id']
        entries.append(
            f"- name: setup {app_id}\n"
            f"  when: ('{app_id}' | application_allowed(group_names, allowed_applications))\n"
            f"  include_role:\n"
            f"    name: {role_name}\n"
        )
        entries.append(
            f"- name: flush handlers after {app_id}\n"
            f"  meta: flush_handlers\n"
        )

    return entries

def main():
    parser = argparse.ArgumentParser(
        description='Generate an Ansible playbook include file from Docker roles, sorted by run_after order.'
    )
    parser.add_argument('roles_dir', help='Path to directory containing role folders')
    parser.add_argument(
        '-p', '--prefix',
        action='append',
        help='Only include roles whose names start with any of these prefixes; can be specified multiple times'
    )
    parser.add_argument('-o', '--output', default=None,
                        help='Output file path (default: stdout)')
    parser.add_argument('-t', '--tree', action='store_true',
                        help='Display the dependency tree of roles and exit')

    args = parser.parse_args()
    prefixes = args.prefix or []

    if args.tree:
        graph, _, _ = build_dependency_graph(args.roles_dir, prefixes)
        print_dependency_tree(graph)
        sys.exit(0)

    entries = gen_condi_role_incl(args.roles_dir, prefixes)
    output = ''.join(entries)

    if args.output:
        os.makedirs(os.path.dirname(args.output), exist_ok=True)
        with open(args.output, 'w') as f:
            f.write(output)
        print(f"Playbook entries written to {args.output}")
    else:
        print(output)

if __name__ == '__main__':
    main()
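A sketch of the sorting contract, assuming a roles/ tree is present; role names are hypothetical:

# Sketch: dependencies always precede their dependents in the sorted order.
from cli.build.role_include import build_dependency_graph, topological_sort

graph, in_degree, roles = build_dependency_graph('roles', prefixes=['web-app-'])
order = topological_sort(graph, in_degree, roles)
# e.g. ['web-app-base', 'web-app-akaunting']: any role declaring
# run_after: [web-app-base] appears after web-app-base.
print(order)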
cli/build/roles_list.py (new file, 65 lines)
@@ -0,0 +1,65 @@
#!/usr/bin/env python3
"""
Generate a JSON file listing all Ansible role directories.

Usage:
  python roles_list.py [--roles-dir path/to/roles] [--output path/to/roles/list.json | console]
"""
import os
import json
import argparse


def find_roles(roles_dir: str):
    """Return a sorted list of role names under roles_dir."""
    return sorted([
        entry for entry in os.listdir(roles_dir)
        if os.path.isdir(os.path.join(roles_dir, entry))
    ])


def write_roles_list(roles, out_file):
    """Write the list of roles to out_file as JSON."""
    os.makedirs(os.path.dirname(out_file), exist_ok=True)
    with open(out_file, 'w', encoding='utf-8') as f:
        json.dump(roles, f, indent=2)
    print(f"Wrote roles list to {out_file}")


def main():
    # Determine the default roles_dir relative to this script: two levels up -> roles/
    script_dir = os.path.dirname(os.path.abspath(__file__))
    default_roles_dir = os.path.abspath(
        os.path.join(script_dir, '..', '..', 'roles')
    )
    default_output = os.path.join(default_roles_dir, 'list.json')

    parser = argparse.ArgumentParser(description='Generate roles/list.json')
    parser.add_argument(
        '--roles-dir', '-r',
        default=default_roles_dir,
        help=f'Directory containing role subfolders (default: {default_roles_dir})'
    )
    parser.add_argument(
        '--output', '-o',
        default=default_output,
        help=(
            'Output path for roles list JSON '
            '(or "console" to print to stdout, default: %(default)s)'
        )
    )
    args = parser.parse_args()

    if not os.path.isdir(args.roles_dir):
        parser.error(f"Roles directory not found: {args.roles_dir}")

    roles = find_roles(args.roles_dir)

    if args.output.lower() == 'console':
        # Print JSON to stdout
        print(json.dumps(roles, indent=2))
    else:
        write_roles_list(roles, args.output)


if __name__ == '__main__':
    main()
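A self-contained sketch of the directory filter (the temporary tree stands in for roles/; names are hypothetical):

# Sketch: find_roles() ignores plain files and returns sorted names.
import os, tempfile
from cli.build.roles_list import find_roles

with tempfile.TemporaryDirectory() as d:
    os.makedirs(os.path.join(d, 'web-app-demo'))
    os.makedirs(os.path.join(d, 'sys-service'))
    open(os.path.join(d, 'list.json'), 'w').close()
    print(find_roles(d))  # ['sys-service', 'web-app-demo']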
cli/build/tree.py (new file, 104 lines)
@@ -0,0 +1,104 @@
#!/usr/bin/env python3
import os
import argparse
import json
from typing import Dict, Any

from cli.build.graph import build_mappings, output_graph
from module_utils.role_dependency_resolver import RoleDependencyResolver


def find_roles(roles_dir: str):
    for entry in os.listdir(roles_dir):
        path = os.path.join(roles_dir, entry)
        if os.path.isdir(path):
            yield entry, path


def main():
    script_dir = os.path.dirname(os.path.abspath(__file__))
    default_roles_dir = os.path.abspath(os.path.join(script_dir, "..", "..", "roles"))

    parser = argparse.ArgumentParser(
        description="Generate all graphs for each role and write meta/tree.json"
    )
    parser.add_argument("-d", "--role_dir", default=default_roles_dir,
                        help=f"Path to roles directory (default: {default_roles_dir})")
    parser.add_argument("-D", "--depth", type=int, default=0,
                        help="Max recursion depth (>0) or <=0 to stop on cycle")
    parser.add_argument("-o", "--output", choices=["yaml", "json", "console"],
                        default="json", help="Output format")
    parser.add_argument("-p", "--preview", action="store_true",
                        help="Preview graphs to console instead of writing files")
    parser.add_argument("-s", "--shadow-folder", type=str, default=None,
                        help="If set, writes tree.json to this shadow folder instead of the role's actual meta/ folder")
    parser.add_argument("-v", "--verbose", action="store_true", help="Enable verbose logging")

    # Toggles
    parser.add_argument("--no-include-role", action="store_true", help="Do not scan include_role")
    parser.add_argument("--no-import-role", action="store_true", help="Do not scan import_role")
    parser.add_argument("--no-dependencies", action="store_true", help="Do not read meta/main.yml dependencies")
    parser.add_argument("--no-run-after", action="store_true",
                        help="Do not read galaxy_info.run_after from meta/main.yml")

    args = parser.parse_args()

    if args.verbose:
        print(f"Roles directory: {args.role_dir}")
        print(f"Max depth: {args.depth}")
        print(f"Output format: {args.output}")
        print(f"Preview mode: {args.preview}")
        print(f"Shadow folder: {args.shadow_folder}")

    resolver = RoleDependencyResolver(args.role_dir)

    for role_name, role_path in find_roles(args.role_dir):
        if args.verbose:
            print(f"Processing role: {role_name}")

        graphs: Dict[str, Any] = build_mappings(
            start_role=role_name,
            roles_dir=args.role_dir,
            max_depth=args.depth
        )

        # Direct deps (depth=1), collected separately for the buckets below
        inc_roles, imp_roles = resolver._scan_tasks(role_path)
        meta_deps = resolver._extract_meta_dependencies(role_path)
        run_after = set()
        if not args.no_run_after:
            run_after = resolver._extract_meta_run_after(role_path)

        if any([not args.no_include_role and inc_roles,
                not args.no_import_role and imp_roles,
                not args.no_dependencies and meta_deps,
                not args.no_run_after and run_after]):
            deps_root = graphs.setdefault("dependencies", {})
            if not args.no_include_role and inc_roles:
                deps_root["include_role"] = sorted(inc_roles)
            if not args.no_import_role and imp_roles:
                deps_root["import_role"] = sorted(imp_roles)
            if not args.no_dependencies and meta_deps:
                deps_root["dependencies"] = sorted(meta_deps)
            if not args.no_run_after and run_after:
                deps_root["run_after"] = sorted(run_after)

        if args.preview:
            for key, data in graphs.items():
                if args.verbose:
                    print(f"Previewing graph '{key}' for role '{role_name}'")
                output_graph(data, "console", role_name, key)
        else:
            if args.shadow_folder:
                tree_file = os.path.join(args.shadow_folder, role_name, "meta", "tree.json")
            else:
                tree_file = os.path.join(role_path, "meta", "tree.json")
            os.makedirs(os.path.dirname(tree_file), exist_ok=True)
            with open(tree_file, "w", encoding="utf-8") as f:
                json.dump(graphs, f, indent=2)
            print(f"Wrote {tree_file}")


if __name__ == "__main__":
    main()
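A sketch of reading one generated file back (the role name is hypothetical; which buckets exist depends on the toggles and on what the role declares):

# Sketch: consuming roles/<role>/meta/tree.json.
import json

with open('roles/web-app-akaunting/meta/tree.json', encoding='utf-8') as f:
    tree = json.load(f)

print(sorted(tree.keys()))           # graph buckets from build_mappings()
print(tree.get('dependencies', {}))  # flat direct-dependency buckets, if any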
cli/create/credentials.py (new file, 240 lines)
@@ -0,0 +1,240 @@
#!/usr/bin/env python3
"""
Selectively add & vault NEW credentials in your inventory, preserving comments
and formatting. Existing values are left untouched unless --force is used.

Usage example:
  infinito create credentials \
    --role-path roles/web-app-akaunting \
    --inventory-file host_vars/echoserver.yml \
    --vault-password-file .pass/echoserver.txt \
    --set credentials.database_password=mysecret
"""

import argparse
import sys
from pathlib import Path
from typing import Dict, Any, Union

from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap

from module_utils.manager.inventory import InventoryManager
from module_utils.handler.vault import VaultHandler  # uses your existing handler


# ---------- helpers ----------

def ask_for_confirmation(key: str) -> bool:
    """Prompt the user for confirmation to overwrite an existing value."""
    confirmation = input(
        f"Are you sure you want to overwrite the value for '{key}'? (y/n): "
    ).strip().lower()
    return confirmation == 'y'


def ensure_map(node: CommentedMap, key: str) -> CommentedMap:
    """
    Ensure node[key] exists and is a mapping (CommentedMap) for round-trip safety.
    """
    if key not in node or not isinstance(node.get(key), CommentedMap):
        node[key] = CommentedMap()
    return node[key]


def _is_ruamel_vault(val: Any) -> bool:
    """Detect if a ruamel scalar already carries the !vault tag."""
    try:
        return getattr(val, 'tag', None) == '!vault'
    except Exception:
        return False


def _is_vault_encrypted(val: Any) -> bool:
    """
    Detect if a value is already a vault string or a ruamel !vault scalar.
    Accepts both '$ANSIBLE_VAULT' and '!vault' markers.
    """
    if _is_ruamel_vault(val):
        return True
    if isinstance(val, str) and ("$ANSIBLE_VAULT" in val or "!vault" in val):
        return True
    return False


def _vault_body(text: str) -> str:
    """
    Return only the vault body starting from the first line that contains
    '$ANSIBLE_VAULT'. If not found, return the original text.
    Also strips any leading '!vault |' header if present.
    """
    lines = text.splitlines()
    for i, ln in enumerate(lines):
        if "$ANSIBLE_VAULT" in ln:
            return "\n".join(lines[i:])
    return text


def _make_vault_scalar_from_text(text: str) -> Any:
    """
    Build a ruamel object representing a literal block scalar tagged with !vault
    by parsing a tiny YAML snippet. This avoids depending on yaml_set_tag().
    """
    body = _vault_body(text)
    indented = "  " + body.replace("\n", "\n  ")  # proper block scalar indentation
    snippet = f"v: !vault |\n{indented}\n"
    y = YAML(typ="rt")
    return y.load(snippet)["v"]


def to_vault_block(vault_handler: VaultHandler, value: Union[str, Any], label: str) -> Any:
    """
    Return a ruamel scalar tagged as !vault. If the input value is already
    vault-encrypted (string contains $ANSIBLE_VAULT or is a !vault scalar), reuse/wrap it.
    Otherwise, encrypt the plaintext via ansible-vault.
    """
    # Already a ruamel !vault scalar → reuse
    if _is_ruamel_vault(value):
        return value

    # Already an encrypted string (may include '!vault |' or just the header)
    if isinstance(value, str) and ("$ANSIBLE_VAULT" in value or "!vault" in value):
        return _make_vault_scalar_from_text(value)

    # Plaintext → encrypt now
    snippet = vault_handler.encrypt_string(str(value), label)
    return _make_vault_scalar_from_text(snippet)


def parse_overrides(pairs: list[str]) -> Dict[str, str]:
    """
    Parse --set key=value pairs into a dict.
    Supports both 'credentials.key=val' and 'key=val' (short) forms.
    """
    out: Dict[str, str] = {}
    for pair in pairs:
        k, v = pair.split("=", 1)
        out[k.strip()] = v.strip()
    return out


# ---------- main ----------

def main() -> int:
    parser = argparse.ArgumentParser(
        description="Selectively add & vault NEW credentials in your inventory, preserving comments/formatting."
    )
    parser.add_argument("--role-path", required=True, help="Path to your role")
    parser.add_argument("--inventory-file", required=True, help="Host vars file to update")
    parser.add_argument("--vault-password-file", required=True, help="Vault password file")
    parser.add_argument(
        "--set", nargs="*", default=[],
        help="Override values key[.subkey]=VALUE (applied to NEW keys; with --force also to existing)"
    )
    parser.add_argument(
        "-f", "--force", action="store_true",
        help="Allow overrides to replace existing values (will ask per key unless combined with --yes)"
    )
    parser.add_argument(
        "-y", "--yes", action="store_true",
        help="Non-interactive: assume 'yes' for all overwrite confirmations when --force is used"
    )
    args = parser.parse_args()

    overrides = parse_overrides(args.set)

    # Initialize inventory manager (provides schema + app_id + vault)
    manager = InventoryManager(
        role_path=Path(args.role_path),
        inventory_path=Path(args.inventory_file),
        vault_pw=args.vault_password_file,
        overrides=overrides
    )

    # 1) Load existing inventory with ruamel (round-trip)
    yaml_rt = YAML(typ="rt")
    yaml_rt.preserve_quotes = True

    with open(args.inventory_file, "r", encoding="utf-8") as f:
        data = yaml_rt.load(f)  # CommentedMap or None
    if data is None:
        data = CommentedMap()

    # 2) Get schema-applied structure (defaults etc.) for *non-destructive* merge
    schema_inventory: Dict[str, Any] = manager.apply_schema()

    # 3) Ensure structural path exists
    apps = ensure_map(data, "applications")
    app_block = ensure_map(apps, manager.app_id)
    creds = ensure_map(app_block, "credentials")

    # 4) Determine defaults we could add
    schema_apps = schema_inventory.get("applications", {})
    schema_app_block = schema_apps.get(manager.app_id, {})
    schema_creds = schema_app_block.get("credentials", {}) if isinstance(schema_app_block, dict) else {}

    # 5) Add ONLY missing credential keys
    newly_added_keys = set()
    for key, default_val in schema_creds.items():
        if key in creds:
            # existing → do not touch (preserve plaintext/vault/formatting/comments)
            continue

        # Value to use for the new key
        # Priority: --set exact key → default from schema → empty string
        ov = overrides.get(f"credentials.{key}", None)
        if ov is None:
            ov = overrides.get(key, None)

        if ov is not None:
            value_for_new_key: Union[str, Any] = ov
        else:
            if _is_vault_encrypted(default_val):
                # Schema already provides a vault value → take it as-is
                creds[key] = to_vault_block(manager.vault_handler, default_val, key)
                newly_added_keys.add(key)
                continue
            value_for_new_key = "" if default_val is None else str(default_val)

        # Insert as !vault literal (encrypt if needed)
        creds[key] = to_vault_block(manager.vault_handler, value_for_new_key, key)
        newly_added_keys.add(key)

    # 6) ansible_become_password: only add if missing;
    #    never rewrite an existing one unless --force (+ confirm/--yes) and an override is provided.
    if "ansible_become_password" not in data:
        val = overrides.get("ansible_become_password", None)
        if val is not None:
            data["ansible_become_password"] = to_vault_block(
                manager.vault_handler, val, "ansible_become_password"
            )
    else:
        if args.force and "ansible_become_password" in overrides:
            do_overwrite = args.yes or ask_for_confirmation("ansible_become_password")
            if do_overwrite:
                data["ansible_become_password"] = to_vault_block(
                    manager.vault_handler, overrides["ansible_become_password"], "ansible_become_password"
                )

    # 7) Overrides for existing credential keys (only with --force)
    if args.force:
        for ov_key, ov_val in overrides.items():
            # Accept both 'credentials.key' and bare 'key'
            key = ov_key.split(".", 1)[1] if ov_key.startswith("credentials.") else ov_key
            if key in creds:
                # If we just added it in this run, don't ask again or rewrap
                if key in newly_added_keys:
                    continue
                if args.yes or ask_for_confirmation(key):
                    creds[key] = to_vault_block(manager.vault_handler, ov_val, key)

    # 8) Write back with ruamel (preserve formatting & comments)
    with open(args.inventory_file, "w", encoding="utf-8") as f:
        yaml_rt.dump(data, f)

    print(f"✅ Added new credentials without touching existing formatting/comments → {args.inventory_file}")
    return 0


if __name__ == "__main__":
    sys.exit(main())
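The non-destructive editing relies on ruamel's round-trip mode preserving the !vault tag and literal block style. A self-contained sketch (vault body shortened and hypothetical):

# Sketch: a !vault literal survives a ruamel round-trip unchanged.
import sys
from ruamel.yaml import YAML

snippet = (
    "credentials:\n"
    "  database_password: !vault |\n"
    "    $ANSIBLE_VAULT;1.1;AES256\n"
    "    62313365396662343061393464336163\n"
)
y = YAML(typ="rt")
data = y.load(snippet)
y.dump(data, sys.stdout)  # emits the same !vault block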
cli/create/role.py (new file, 166 lines)
@@ -0,0 +1,166 @@
#!/usr/bin/env python3
import argparse
import shutil
import ipaddress
import difflib
from jinja2 import Environment, FileSystemLoader
from ruamel.yaml import YAML

import sys, os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from module_utils.entity_name_utils import get_entity_name

# Paths to the group-vars files
PORTS_FILE = './group_vars/all/10_ports.yml'
NETWORKS_FILE = './group_vars/all/09_networks.yml'
ROLE_TEMPLATE_DIR = './templates/roles/web-app'
ROLES_DIR = './roles'

yaml = YAML()
yaml.preserve_quotes = True


def load_yaml_with_comments(path):
    with open(path) as f:
        return yaml.load(f)


def dump_yaml_with_comments(data, path):
    with open(path, 'w') as f:
        yaml.dump(data, f)


def get_next_network(networks_dict, prefixlen):
    """Select the next contiguous subnet: the highest existing subnet plus one network offset."""
    nets = []
    local = networks_dict['defaults_networks']['local']
    for name, info in local.items():
        # info is a dict with a 'subnet' key
        net = ipaddress.ip_network(info['subnet'])
        if net.prefixlen == prefixlen:
            nets.append(net)
    if not nets:
        raise RuntimeError(f"No existing /{prefixlen} subnets to base allocation on.")
    nets.sort(key=lambda n: int(n.network_address))
    last = nets[-1]
    offset = last.num_addresses
    next_net = ipaddress.ip_network((int(last.network_address) + offset, prefixlen))
    return next_net


def get_next_port(ports_dict, category):
    """Assign the next port by taking the max existing plus one."""
    loc = ports_dict['ports']['localhost'][category]
    existing = [int(v) for v in loc.values()]
    return (max(existing) + 1) if existing else 1


def prompt_conflict(dst_file):
    print(f"Conflict detected: {dst_file}")
    print("[1] overwrite, [2] skip, [3] merge")
    choice = None
    while choice not in ('1', '2', '3'):
        choice = input("Enter 1, 2, or 3: ").strip()
    return choice


def render_templates(src_dir, dst_dir, context):
    env = Environment(loader=FileSystemLoader(src_dir), keep_trailing_newline=True, autoescape=False)
    env.filters['bool'] = lambda x: bool(x)
    env.filters['get_entity_name'] = get_entity_name

    for root, _, files in os.walk(src_dir):
        rel = os.path.relpath(root, src_dir)
        target = os.path.join(dst_dir, rel)
        os.makedirs(target, exist_ok=True)
        for fn in files:
            tpl = env.get_template(os.path.join(rel, fn))
            rendered = tpl.render(**context)
            out = fn[:-3] if fn.endswith('.j2') else fn
            dst_file = os.path.join(target, out)

            if os.path.exists(dst_file):
                choice = prompt_conflict(dst_file)
                if choice == '2':
                    print(f"Skipping {dst_file}")
                    continue
                if choice == '3':
                    with open(dst_file) as f_old:
                        old_lines = f_old.readlines()
                    new_lines = rendered.splitlines(keepends=True)
                    additions = [l for l in new_lines if l not in old_lines]
                    if additions:
                        with open(dst_file, 'a') as f:
                            f.writelines(additions)
                        print(f"Merged {len(additions)} lines into {dst_file}")
                    else:
                        print(f"No new lines to merge into {dst_file}")
                    continue
                # overwrite
                print(f"Overwriting {dst_file}")
                with open(dst_file, 'w') as f:
                    f.write(rendered)
            else:
                # create new file
                with open(dst_file, 'w') as f:
                    f.write(rendered)


def main():
    # Load dynamic port categories
    ports_data = load_yaml_with_comments(PORTS_FILE)
    categories = list(ports_data['ports']['localhost'].keys())

    parser = argparse.ArgumentParser(
        description="Create or update a Docker Ansible role, and globally assign network and ports with comments preserved"
    )
    parser.add_argument('-a', '--application-id', required=True, help="Unique application ID")
    parser.add_argument('-n', '--network', choices=['24', '28'], required=True, help="Network prefix length (/24 or /28)")
    parser.add_argument('-p', '--ports', nargs='+', choices=categories, required=True, help=f"Port categories to assign (allowed: {', '.join(categories)})")
    args = parser.parse_args()

    app = args.application_id
    role = f"web-app-{app}"
    role_dir = os.path.join(ROLES_DIR, role)

    if os.path.exists(role_dir):
        if input(f"Role {role} exists. Continue? [y/N]: ").strip().lower() != 'y':
            print("Aborting.")
            sys.exit(1)
    else:
        os.makedirs(role_dir)

    # 1) Render all templates with conflict handling
    render_templates(ROLE_TEMPLATE_DIR, role_dir, {'application_id': app, 'role_name': role, 'database_type': 0})
    print(f"→ Templates applied to {role_dir}")

    # 2) Update the global networks file, preserving comments
    networks = load_yaml_with_comments(NETWORKS_FILE)
    prefix = int(args.network)
    new_net = get_next_network(networks, prefix)
    networks['defaults_networks']['local'][app] = {'subnet': str(new_net)}
    shutil.copy(NETWORKS_FILE, NETWORKS_FILE + '.bak')
    dump_yaml_with_comments(networks, NETWORKS_FILE)
    print(f"→ Assigned network {new_net} in {NETWORKS_FILE}")

    # 3) Update the global ports file, preserving comments
    ports_data = load_yaml_with_comments(PORTS_FILE)
    assigned = {}
    for cat in args.ports:
        loc = ports_data['ports']['localhost'].setdefault(cat, {})
        if app in loc:
            print(f"→ Existing port for {cat} and {app}: {loc[app]}, skipping.")
        else:
            pnum = get_next_port(ports_data, cat)
            loc[app] = pnum
            assigned[cat] = pnum

    if assigned:
        shutil.copy(PORTS_FILE, PORTS_FILE + '.bak')
        dump_yaml_with_comments(ports_data, PORTS_FILE)
        print(f"→ Assigned ports {assigned} in {PORTS_FILE}")
    else:
        print("→ No new ports assigned.")


if __name__ == '__main__':
    main()
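The subnet allocation is plain address arithmetic; a sketch with hypothetical subnets:

# Sketch: the step get_next_network() takes from the highest existing /24.
import ipaddress

last = ipaddress.ip_network('192.168.102.0/24')
next_net = ipaddress.ip_network((int(last.network_address) + last.num_addresses, 24))
print(next_net)  # 192.168.103.0/24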
@@ -1,115 +0,0 @@
import argparse
import subprocess
import sys
from pathlib import Path
import yaml
from typing import Dict, Any
from utils.manager.inventory import InventoryManager
from utils.handler.vault import VaultHandler, VaultScalar
from utils.handler.yaml import YamlHandler
from yaml.dumper import SafeDumper


def ask_for_confirmation(key: str) -> bool:
    """Prompt the user for confirmation to overwrite an existing value."""
    confirmation = input(
        f"Are you sure you want to overwrite the value for '{key}'? (y/n): "
    ).strip().lower()
    return confirmation == 'y'


def main():
    parser = argparse.ArgumentParser(
        description="Selectively vault credentials + become-password in your inventory."
    )
    parser.add_argument(
        "--role-path", required=True, help="Path to your role"
    )
    parser.add_argument(
        "--inventory-file", required=True, help="Host vars file to update"
    )
    parser.add_argument(
        "--vault-password-file", required=True, help="Vault password file"
    )
    parser.add_argument(
        "--set", nargs="*", default=[], help="Override values key.subkey=VALUE"
    )
    parser.add_argument(
        "-f", "--force", action="store_true",
        help="Force overwrite without confirmation"
    )
    args = parser.parse_args()

    # Parse overrides
    overrides = {
        k.strip(): v.strip()
        for pair in args.set for k, v in [pair.split("=", 1)]
    }

    # Initialize inventory manager
    manager = InventoryManager(
        role_path=Path(args.role_path),
        inventory_path=Path(args.inventory_file),
        vault_pw=args.vault_password_file,
        overrides=overrides
    )

    # Load existing credentials to preserve
    existing_apps = manager.inventory.get("applications", {})
    existing_creds = {}
    if manager.app_id in existing_apps:
        existing_creds = existing_apps[manager.app_id].get("credentials", {}).copy()

    # Apply schema (may generate defaults)
    updated_inventory = manager.apply_schema()

    # Restore existing database_password if present
    apps = updated_inventory.setdefault("applications", {})
    app_block = apps.setdefault(manager.app_id, {})
    creds = app_block.setdefault("credentials", {})
    if "database_password" in existing_creds:
        creds["database_password"] = existing_creds["database_password"]

    # Store original plaintext values
    original_plain = {key: str(val) for key, val in creds.items()}

    for key, raw_val in list(creds.items()):
        # Skip if already vaulted
        if isinstance(raw_val, VaultScalar) or str(raw_val).lstrip().startswith("$ANSIBLE_VAULT"):
            continue

        # Determine plaintext
        plain = original_plain.get(key, "")
        if key in overrides and (args.force or ask_for_confirmation(key)):
            plain = overrides[key]

        # Encrypt the plaintext
        encrypted = manager.vault_handler.encrypt_string(plain, key)
        lines = encrypted.splitlines()
        indent = len(lines[1]) - len(lines[1].lstrip())
        body = "\n".join(line[indent:] for line in lines[1:])
        creds[key] = VaultScalar(body)

    # Vault top-level become password if present
    if "ansible_become_password" in updated_inventory:
        val = str(updated_inventory["ansible_become_password"])
        if val.lstrip().startswith("$ANSIBLE_VAULT"):
            updated_inventory["ansible_become_password"] = VaultScalar(val)
        else:
            snippet = manager.vault_handler.encrypt_string(
                val, "ansible_become_password"
            )
            lines = snippet.splitlines()
            indent = len(lines[1]) - len(lines[1].lstrip())
            body = "\n".join(line[indent:] for line in lines[1:])
            updated_inventory["ansible_become_password"] = VaultScalar(body)

    # Write back to file
    with open(args.inventory_file, "w", encoding="utf-8") as f:
        yaml.dump(updated_inventory, f, sort_keys=False, Dumper=SafeDumper)

    print(f"✅ Inventory selectively vaulted → {args.inventory_file}")


if __name__ == "__main__":
    main()
@@ -1,163 +0,0 @@
#!/usr/bin/env python3
import argparse
import os
import shutil
import sys
import ipaddress
import difflib
from jinja2 import Environment, FileSystemLoader
from ruamel.yaml import YAML

# Paths to the group-vars files
PORTS_FILE = './group_vars/all/09_ports.yml'
NETWORKS_FILE = './group_vars/all/10_networks.yml'
ROLE_TEMPLATE_DIR = './templates/docker_role'
ROLES_DIR = './roles'

yaml = YAML()
yaml.preserve_quotes = True


def load_yaml_with_comments(path):
    with open(path) as f:
        return yaml.load(f)


def dump_yaml_with_comments(data, path):
    with open(path, 'w') as f:
        yaml.dump(data, f)


def get_next_network(networks_dict, prefixlen):
    """Select the next contiguous subnet, based on the highest existing subnet + one network offset."""
    nets = []
    local = networks_dict['defaults_networks']['local']
    for name, info in local.items():
        # info is a dict with 'subnet' key
        net = ipaddress.ip_network(info['subnet'])
        if net.prefixlen == prefixlen:
            nets.append(net)
    if not nets:
        raise RuntimeError(f"No existing /{prefixlen} subnets to base allocation on.")
    nets.sort(key=lambda n: int(n.network_address))
    last = nets[-1]
    offset = last.num_addresses
    next_net = ipaddress.ip_network((int(last.network_address) + offset, prefixlen))
    return next_net


def get_next_port(ports_dict, category):
    """Assign the next port by taking the max existing plus one."""
    loc = ports_dict['ports']['localhost'][category]
    existing = [int(v) for v in loc.values()]
    return (max(existing) + 1) if existing else 1


def prompt_conflict(dst_file):
    print(f"Conflict detected: {dst_file}")
    print("[1] overwrite, [2] skip, [3] merge")
    choice = None
    while choice not in ('1', '2', '3'):
        choice = input("Enter 1, 2, or 3: ").strip()
    return choice


def render_templates(src_dir, dst_dir, context):
    env = Environment(loader=FileSystemLoader(src_dir), keep_trailing_newline=True, autoescape=False)
    env.filters['bool'] = lambda x: bool(x)

    for root, _, files in os.walk(src_dir):
        rel = os.path.relpath(root, src_dir)
        target = os.path.join(dst_dir, rel)
        os.makedirs(target, exist_ok=True)
        for fn in files:
            tpl = env.get_template(os.path.join(rel, fn))
            rendered = tpl.render(**context)
            out = fn[:-3] if fn.endswith('.j2') else fn
            dst_file = os.path.join(target, out)

            if os.path.exists(dst_file):
                choice = prompt_conflict(dst_file)
                if choice == '2':
                    print(f"Skipping {dst_file}")
                    continue
                if choice == '3':
                    with open(dst_file) as f_old:
                        old_lines = f_old.readlines()
                    new_lines = rendered.splitlines(keepends=True)
                    additions = [l for l in new_lines if l not in old_lines]
                    if additions:
                        with open(dst_file, 'a') as f:
                            f.writelines(additions)
                        print(f"Merged {len(additions)} lines into {dst_file}")
                    else:
                        print(f"No new lines to merge into {dst_file}")
                    continue
                # overwrite
                print(f"Overwriting {dst_file}")
                with open(dst_file, 'w') as f:
                    f.write(rendered)
            else:
                # create new file
                with open(dst_file, 'w') as f:
                    f.write(rendered)


def main():
    # Load dynamic port categories
    ports_data = load_yaml_with_comments(PORTS_FILE)
    categories = list(ports_data['ports']['localhost'].keys())

    parser = argparse.ArgumentParser(
        description="Create or update a Docker Ansible role, and globally assign network and ports with comments preserved"
    )
    parser.add_argument('-a', '--application-id', required=True, help="Unique application ID")
    parser.add_argument('-n', '--network', choices=['24', '28'], required=True, help="Network prefix length (/24 or /28)")
    parser.add_argument('-p', '--ports', nargs='+', choices=categories, required=True, help=f"Port categories to assign (allowed: {', '.join(categories)})")
    args = parser.parse_args()

    app = args.application_id
    role = f"docker-{app}"
    role_dir = os.path.join(ROLES_DIR, role)

    if os.path.exists(role_dir):
        if input(f"Role {role} exists. Continue? [y/N]: ").strip().lower() != 'y':
            print("Aborting.")
            sys.exit(1)
    else:
        os.makedirs(role_dir)

    # 1) Render all templates with conflict handling
    render_templates(ROLE_TEMPLATE_DIR, role_dir, {'application_id': app, 'role_name': role, 'database_type': 0})
    print(f"→ Templates applied to {role_dir}")

    # 2) Update global networks file, preserving comments
    networks = load_yaml_with_comments(NETWORKS_FILE)
    prefix = int(args.network)
    new_net = get_next_network(networks, prefix)
    networks['defaults_networks']['local'][app] = {'subnet': str(new_net)}
    shutil.copy(NETWORKS_FILE, NETWORKS_FILE + '.bak')
    dump_yaml_with_comments(networks, NETWORKS_FILE)
    print(f"→ Assigned network {new_net} in {NETWORKS_FILE}")

    # 3) Update global ports file, preserving comments
    ports_data = load_yaml_with_comments(PORTS_FILE)
    assigned = {}
    for cat in args.ports:
        loc = ports_data['ports']['localhost'].setdefault(cat, {})
        if app in loc:
            print(f"→ Existing port for {cat} and {app}: {loc[app]}, skipping.")
        else:
            pnum = get_next_port(ports_data, cat)
            loc[app] = pnum
            assigned[cat] = pnum

    if assigned:
        shutil.copy(PORTS_FILE, PORTS_FILE + '.bak')
        dump_yaml_with_comments(ports_data, PORTS_FILE)
        print(f"→ Assigned ports {assigned} in {PORTS_FILE}")
    else:
        print("→ No new ports assigned.")


if __name__ == '__main__':
    main()
cli/deploy.py (153 lines)
@@ -6,29 +6,72 @@ import os
 import datetime
 import sys
 
 
-def run_ansible_playbook(inventory, playbook, modes, limit=None, allowed_applications=None, password_file=None, verbose=0, skip_tests=False):
+def run_ansible_playbook(
+    inventory,
+    modes,
+    limit=None,
+    allowed_applications=None,
+    password_file=None,
+    verbose=0,
+    skip_tests=False,
+    skip_validation=False,
+    skip_build=False,
+    cleanup=False,
+    logs=False
+):
     start_time = datetime.datetime.now()
     print(f"\n▶️ Script started at: {start_time.isoformat()}\n")
 
-    print("\n🛠️ Building project (make build)...\n")
-    subprocess.run(["make", "build"], check=True)
+    if cleanup:
+        cleanup_command = ["make", "clean-keep-logs"] if logs else ["make", "clean"]
+        print("\n🧹 Cleaning up project (" + " ".join(cleanup_command) + ")...\n")
+        subprocess.run(cleanup_command, check=True)
+    else:
+        print("\n⚠️ Skipping build as requested.\n")
+
+    if not skip_build:
+        print("\n🛠️ Building project (make messy-build)...\n")
+        subprocess.run(["make", "messy-build"], check=True)
+    else:
+        print("\n⚠️ Skipping build as requested.\n")
+
+    script_dir = os.path.dirname(os.path.realpath(__file__))
+    playbook = os.path.join(os.path.dirname(script_dir), "playbook.yml")
+
+    # Inventory validation step
+    if not skip_validation:
+        print("\n🔍 Validating inventory before deployment...\n")
+        try:
+            subprocess.run(
+                [sys.executable,
+                 os.path.join(script_dir, "validate/inventory.py"),
+                 os.path.dirname(inventory)
+                 ],
+                check=True
+            )
+        except subprocess.CalledProcessError:
+            print(
+                "\n❌ Inventory validation failed. Deployment aborted.\n",
+                file=sys.stderr
+            )
+            sys.exit(1)
+    else:
+        print("\n⚠️ Skipping inventory validation as requested.\n")
 
     if not skip_tests:
-        print("\n🧪 Running tests (make test)...\n")
-        subprocess.run(["make", "test"], check=True)
+        print("\n🧪 Running tests (make messy-test)...\n")
+        subprocess.run(["make", "messy-test"], check=True)
 
     # Build ansible-playbook command
     cmd = ["ansible-playbook", "-i", inventory, playbook]
 
     if limit:
         cmd.extend(["--limit", limit])
 
     # Pass application IDs parameter as extra var if provided
     if allowed_applications:
         joined = ",".join(allowed_applications)
         cmd.extend(["-e", f"allowed_applications={joined}"])
 
     # Pass other mode flags
     for key, value in modes.items():
         val = str(value).lower() if isinstance(value, bool) else str(value)
         cmd.extend(["-e", f"{key}={val}"])
@@ -50,9 +93,26 @@ def run_ansible_playbook(inventory, playbook, modes, limit=None, allowed_applica
     duration = end_time - start_time
     print(f"⏱️ Total execution time: {duration}\n")
 
+
+def validate_application_ids(inventory, app_ids):
+    """
+    Abort the script if any application IDs are invalid, with detailed reasons.
+    """
+    from module_utils.valid_deploy_id import ValidDeployId
+    validator = ValidDeployId()
+    invalid = validator.validate(inventory, app_ids)
+    if invalid:
+        print("\n❌ Detected invalid application_id(s):\n")
+        for app_id, status in invalid.items():
+            reasons = []
+            if not status['in_roles']:
+                reasons.append("not defined in roles (infinito)")
+            if not status['in_inventory']:
+                reasons.append("not found in inventory file")
+            print(f"  - {app_id}: " + ", ".join(reasons))
+        sys.exit(1)
+
 
 def main():
-    script_dir = os.path.dirname(os.path.realpath(__file__))
     parser = argparse.ArgumentParser(
         description="Run the central Ansible deployment script to manage infrastructure, updates, and tests."
     )
@@ -62,97 +122,100 @@ def main():
         help="Path to the inventory file (INI or YAML) containing hosts and variables."
     )
     parser.add_argument(
-        "--limit",
+        "-l", "--limit",
         help="Restrict execution to a specific host or host group from the inventory."
     )
     parser.add_argument(
-        "--host-type",
-        choices=["server", "personal-computer"],
+        "-T", "--host-type",
+        choices=["server", "desktop"],
         default="server",
         help="Specify whether the target is a server or a personal computer. Affects role selection and variables."
     )
     parser.add_argument(
-        "--reset", action="store_true",
-        help="Reset all CyMaIS files and configurations, and run the entire playbook (not just individual roles)."
+        "-r", "--reset", action="store_true",
+        help="Reset all Infinito.Nexus files and configurations, and run the entire playbook (not just individual roles)."
     )
     parser.add_argument(
-        "--test", action="store_true",
+        "-t", "--test", action="store_true",
         help="Run test routines instead of production tasks. Useful for local testing and CI pipelines."
     )
     parser.add_argument(
-        "--update", action="store_true",
+        "-u", "--update", action="store_true",
         help="Enable the update procedure to bring software and roles up to date."
     )
     parser.add_argument(
-        "--backup", action="store_true",
+        "-b", "--backup", action="store_true",
        help="Perform a full backup of critical data and configurations before the update process."
     )
     parser.add_argument(
-        "--cleanup", action="store_true",
-        help="Clean up unused files and outdated configurations after all tasks are complete."
+        "-c", "--cleanup", action="store_true",
+        help="Clean up unused files and outdated configurations after all tasks are complete. Also cleans up the repository before the deployment procedure."
     )
     parser.add_argument(
-        "--debug", action="store_true",
+        "-d", "--debug", action="store_true",
         help="Enable detailed debug output for Ansible and this script."
     )
     parser.add_argument(
-        "--password-file",
+        "-p", "--password-file",
        help="Path to the file containing the Vault password. If not provided, prompts for the password interactively."
     )
     parser.add_argument(
-        "--skip-tests", action="store_true",
+        "-s", "--skip-tests", action="store_true",
        help="Skip running 'make test' even if tests are normally enabled."
     )
     parser.add_argument(
-        "--skip-validation", action="store_true",
+        "-V", "--skip-validation", action="store_true",
        help="Skip inventory validation before deployment."
     )
     parser.add_argument(
-        "--id",
+        "-B", "--skip-build", action="store_true",
+        help="Skip running 'make build' before deployment."
+    )
+    parser.add_argument(
+        "-i", "--id",
         nargs="+",
         default=[],
         dest="id",
         help="List of application_id's for partial deploy. If not set, all application IDs defined in the inventory will be executed."
     )
     parser.add_argument(
         "-v", "--verbose", action="count", default=0,
         help="Increase verbosity level. Multiple -v flags increase detail (e.g., -vvv for maximum log output)."
     )
+    parser.add_argument(
+        "--logs", action="store_true",
+        help="Keep the CLI logs during cleanup command"
+    )
 
     args = parser.parse_args()
 
-    if not args.skip_validation:
-        print("\n🔍 Validating inventory before deployment...\n")
-        try:
-            subprocess.run(
-                [sys.executable, os.path.join(script_dir, "validate_inventory.py"), os.path.dirname(args.inventory)],
-                check=True
-            )
-        except subprocess.CalledProcessError:
-            print("\n❌ Inventory validation failed. Deployment aborted.\n", file=sys.stderr)
-            sys.exit(1)
+    validate_application_ids(args.inventory, args.id)
 
     modes = {
-        "mode_reset": args.reset,
-        "mode_test": args.test,
-        "mode_update": args.update,
-        "mode_backup": args.backup,
-        "mode_cleanup": args.cleanup,
-        "enable_debug": args.debug,
+        "MODE_RESET": args.reset,
+        "MODE_TEST": args.test,
+        "MODE_UPDATE": args.update,
+        "MODE_BACKUP": args.backup,
+        "MODE_CLEANUP": args.cleanup,
+        "MODE_LOGS": args.logs,
+        "MODE_DEBUG": args.debug,
+        "MODE_ASSERT": not args.skip_validation,
        "host_type": args.host_type
    }
 
-    playbook_file = os.path.join(os.path.dirname(script_dir), "playbook.yml")
-
     run_ansible_playbook(
         inventory=args.inventory,
-        playbook=playbook_file,
         modes=modes,
         limit=args.limit,
         allowed_applications=args.id,
         password_file=args.password_file,
         verbose=args.verbose,
-        skip_tests=args.skip_tests
+        skip_tests=args.skip_tests,
+        skip_validation=args.skip_validation,
+        skip_build=args.skip_build,
+        cleanup=args.cleanup,
+        logs=args.logs
     )
 
 
 if __name__ == "__main__":
     main()
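A sketch of the reworked flag surface (assuming the inventory path is the positional argument; host and application ID are hypothetical):

# Sketch: partial deploy with the new short flags.
import subprocess

subprocess.run([
    "python3", "cli/deploy.py", "inventories/echoserver.yml",
    "-l", "echoserver",         # --limit
    "-i", "web-app-akaunting",  # partial deploy by application_id
    "-B",                       # skip 'make messy-build'
    "-c", "--logs",             # clean up but keep CLI logs
], check=True)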
cli/encrypt/inventory.py (new file, 66 lines)
@@ -0,0 +1,66 @@
|
||||
import argparse
import subprocess
import sys
from pathlib import Path
import yaml
from typing import Dict, Any
from module_utils.handler.vault import VaultHandler, VaultScalar
from module_utils.handler.yaml import YamlHandler
from yaml.dumper import SafeDumper


def ask_for_confirmation(key: str) -> bool:
    """Prompt the user for confirmation to encrypt a value."""
    confirmation = input(f"Do you want to encrypt the value for '{key}'? (y/n): ").strip().lower()
    return confirmation == 'y'


def encrypt_recursively(data: Any, vault_handler: VaultHandler, ask_confirmation: bool = True, prefix: str = "") -> Any:
    """Recursively encrypt values in the data."""
    if isinstance(data, dict):
        for key, value in data.items():
            new_prefix = f"{prefix}.{key}" if prefix else key
            data[key] = encrypt_recursively(value, vault_handler, ask_confirmation, new_prefix)
    elif isinstance(data, list):
        for i, item in enumerate(data):
            data[i] = encrypt_recursively(item, vault_handler, ask_confirmation, prefix)
    elif isinstance(data, str):
        # Only encrypt if it's not already vaulted
        if not data.lstrip().startswith("$ANSIBLE_VAULT"):
            if ask_confirmation:
                # Ask for confirmation before encrypting if not `--all`
                if not ask_for_confirmation(prefix):
                    print(f"Skipping encryption for '{prefix}'.")
                    return data
            encrypted_value = vault_handler.encrypt_string(data, prefix)
            lines = encrypted_value.splitlines()
            indent = len(lines[1]) - len(lines[1].lstrip())
            body = "\n".join(line[indent:] for line in lines[1:])
            return VaultScalar(body)  # Store encrypted value as VaultScalar
    return data


def main():
    parser = argparse.ArgumentParser(
        description="Encrypt all fields, asking for confirmation unless --all is specified."
    )
    parser.add_argument("--inventory-file", required=True, help="Host vars file to update")
    parser.add_argument("--vault-password-file", required=True, help="Vault password file")
    parser.add_argument("--all", action="store_true", help="Encrypt all fields without confirmation")
    args = parser.parse_args()

    # Initialize the VaultHandler and load the inventory
    vault_handler = VaultHandler(vault_password_file=args.vault_password_file)
    updated_inventory = YamlHandler.load_yaml(Path(args.inventory_file))

    # 1) Encrypt all fields recursively
    updated_inventory = encrypt_recursively(updated_inventory, vault_handler, ask_confirmation=not args.all)

    # 2) Save the updated inventory to file
    with open(args.inventory_file, "w", encoding="utf-8") as f:
        yaml.dump(updated_inventory, f, sort_keys=False, Dumper=SafeDumper)

    print(f"✅ Inventory selectively vaulted → {args.inventory_file}")


if __name__ == "__main__":
    main()
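A minimal sketch of the recursion above, with a stub standing in for VaultHandler and the confirmation prompt disabled. The stub, the sample data, and the plain-string return are simplifications for illustration; the real script wraps the de-indented body in a VaultScalar.

class StubVault:
    def encrypt_string(self, value, key):
        # Mimics `ansible-vault encrypt_string` output: header line, then indented body.
        return f"{key}: !vault |\n          $ANSIBLE_VAULT;1.1;AES256\n          63616665..."

def encrypt_plain(data, vault, prefix=""):
    if isinstance(data, dict):
        return {k: encrypt_plain(v, vault, f"{prefix}.{k}" if prefix else k) for k, v in data.items()}
    if isinstance(data, list):
        return [encrypt_plain(v, vault, prefix) for v in data]
    if isinstance(data, str) and not data.lstrip().startswith("$ANSIBLE_VAULT"):
        lines = vault.encrypt_string(data, prefix).splitlines()
        indent = len(lines[1]) - len(lines[1].lstrip())
        return "\n".join(line[indent:] for line in lines[1:])  # de-indented vault body
    return data

print(encrypt_plain({"db": {"password": "secret"}}, StubVault()))
# {'db': {'password': '$ANSIBLE_VAULT;1.1;AES256\n63616665...'}}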
@@ -1,66 +0,0 @@
import argparse
import subprocess
import sys
from pathlib import Path
import yaml
from typing import Dict, Any
from utils.handler.vault import VaultHandler, VaultScalar
from utils.handler.yaml import YamlHandler
from yaml.dumper import SafeDumper


def ask_for_confirmation(key: str) -> bool:
    """Prompt the user for confirmation to overwrite an existing value."""
    confirmation = input(f"Do you want to encrypt the value for '{key}'? (y/n): ").strip().lower()
    return confirmation == 'y'


def encrypt_recursively(data: Any, vault_handler: VaultHandler, ask_confirmation: bool = True, prefix: str = "") -> Any:
    """Recursively encrypt values in the data."""
    if isinstance(data, dict):
        for key, value in data.items():
            new_prefix = f"{prefix}.{key}" if prefix else key
            data[key] = encrypt_recursively(value, vault_handler, ask_confirmation, new_prefix)
    elif isinstance(data, list):
        for i, item in enumerate(data):
            data[i] = encrypt_recursively(item, vault_handler, ask_confirmation, prefix)
    elif isinstance(data, str):
        # Only encrypt if it's not already vaulted
        if not data.lstrip().startswith("$ANSIBLE_VAULT"):
            if ask_confirmation:
                # Ask for confirmation before encrypting if not `--all`
                if not ask_for_confirmation(prefix):
                    print(f"Skipping encryption for '{prefix}'.")
                    return data
            encrypted_value = vault_handler.encrypt_string(data, prefix)
            lines = encrypted_value.splitlines()
            indent = len(lines[1]) - len(lines[1].lstrip())
            body = "\n".join(line[indent:] for line in lines[1:])
            return VaultScalar(body)  # Store encrypted value as VaultScalar
    return data


def main():
    parser = argparse.ArgumentParser(
        description="Encrypt all fields, ask for confirmation unless --all is specified."
    )
    parser.add_argument("--inventory-file", required=True, help="Host vars file to update")
    parser.add_argument("--vault-password-file", required=True, help="Vault password file")
    parser.add_argument("--all", action="store_true", help="Encrypt all fields without confirmation")
    args = parser.parse_args()

    # Initialize the VaultHandler and load the inventory
    vault_handler = VaultHandler(vault_password_file=args.vault_password_file)
    updated_inventory = YamlHandler.load_yaml(Path(args.inventory_file))

    # 1) Encrypt all fields recursively
    updated_inventory = encrypt_recursively(updated_inventory, vault_handler, ask_confirmation=not args.all)

    # 2) Save the updated inventory to file
    with open(args.inventory_file, "w", encoding="utf-8") as f:
        yaml.dump(updated_inventory, f, sort_keys=False, Dumper=SafeDumper)

    print(f"✅ Inventory selectively vaulted → {args.inventory_file}")


if __name__ == "__main__":
    main()
cli/fix/ini_py.py (new file, 47 lines)
@@ -0,0 +1,47 @@
#!/usr/bin/env python3

"""
This script creates __init__.py files in every subdirectory under the specified
folder relative to the project root.
"""

import os
import argparse


def create_init_files(root_folder):
    """
    Walk through all subdirectories of root_folder and create an __init__.py file
    in each directory if it doesn't already exist.
    """
    for dirpath, dirnames, filenames in os.walk(root_folder):
        init_file = os.path.join(dirpath, '__init__.py')
        if not os.path.exists(init_file):
            open(init_file, 'w').close()
            print(f"Created: {init_file}")
        else:
            print(f"Skipped (already exists): {init_file}")


def main():
    parser = argparse.ArgumentParser(
        description='Create __init__.py files in every subdirectory.'
    )
    parser.add_argument(
        'folder',
        help='Relative path to the target folder (e.g., cli/fix)'
    )
    args = parser.parse_args()

    # Determine the absolute path based on the current working directory
    root_folder = os.path.abspath(args.folder)

    if not os.path.isdir(root_folder):
        print(f"Error: The folder '{args.folder}' does not exist or is not a directory.")
        exit(1)

    create_init_files(root_folder)


if __name__ == '__main__':
    main()
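A quick, throwaway way to watch the walk-and-touch behaviour without pointing the script at the repo (temporary tree; the paths are illustrative only):

import os, tempfile

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, "pkg", "sub"))

# Same logic as create_init_files above: touch __init__.py in every directory.
for dirpath, _dirs, _files in os.walk(root):
    init_file = os.path.join(dirpath, "__init__.py")
    if not os.path.exists(init_file):
        open(init_file, "w").close()
        print(f"Created: {init_file}")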
cli/fix/move_unnecessary_dependencies.py (new file, 480 lines)
@@ -0,0 +1,480 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Move unnecessary meta dependencies to guarded include_role/import_role
for better performance, while preserving YAML comments, quotes, and layout.

Heuristic (matches tests/integration/test_unnecessary_role_dependencies.py):
- A dependency is considered UNNECESSARY if:
  * The consumer does NOT use provider variables in defaults/vars/handlers
    (no early-var need), AND
  * In tasks, any usage of provider vars or provider-handler notifications
    occurs only AFTER an include/import of the provider in the same file,
    OR there is no usage at all.

Action:
- Remove such dependencies from roles/<role>/meta/main.yml.
- Prepend a guarded include block to roles/<role>/tasks/01_core.yml (preferred)
  or roles/<role>/tasks/main.yml if 01_core.yml is absent.
- If multiple dependencies are moved for a role, use a loop over include_role.

Notes:
- Creates .bak backups for modified YAML files.
- Requires ruamel.yaml to preserve comments/quotes everywhere.
"""

import argparse
import glob
import os
import re
import shutil
import sys
from typing import Dict, Set, List, Tuple, Optional

# --- Require ruamel.yaml for full round-trip preservation ---
try:
    from ruamel.yaml import YAML
    from ruamel.yaml.comments import CommentedMap, CommentedSeq
    from ruamel.yaml.scalarstring import SingleQuotedScalarString
    _HAVE_RUAMEL = True
except Exception:
    _HAVE_RUAMEL = False

if not _HAVE_RUAMEL:
    print("[ERR] ruamel.yaml is required to preserve comments/quotes. Install with: pip install ruamel.yaml", file=sys.stderr)
    sys.exit(3)

yaml_rt = YAML()
yaml_rt.preserve_quotes = True
yaml_rt.width = 10**9  # prevent line wrapping
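# Illustration (not part of the script): why round-trip mode is hard-required.
# Plain yaml.safe_dump would drop the comment and the quoting below; ruamel's
# round-trip loader keeps both. A self-contained check, with made-up YAML:
#
#     from io import StringIO
#     y = YAML(); y.preserve_quotes = True
#     src = "dependencies:\n  - 'svc-db-postgres'   # keep this comment\n"
#     buf = StringIO(); y.dump(y.load(src), buf)
#     print(buf.getvalue())   # single quotes and the comment survive the cycle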
# ---------------- Utilities ----------------

def _backup(path: str):
    if os.path.exists(path):
        shutil.copy2(path, path + ".bak")


def read_text(path: str) -> str:
    try:
        with open(path, "r", encoding="utf-8") as f:
            return f.read()
    except Exception:
        return ""


def load_yaml_rt(path: str):
    try:
        with open(path, "r", encoding="utf-8") as f:
            data = yaml_rt.load(f)
            return data if data is not None else CommentedMap()
    except FileNotFoundError:
        return CommentedMap()
    except Exception as e:
        print(f"[WARN] Failed to parse YAML: {path}: {e}", file=sys.stderr)
        return CommentedMap()


def dump_yaml_rt(data, path: str):
    _backup(path)
    with open(path, "w", encoding="utf-8") as f:
        yaml_rt.dump(data, f)


def roles_root(project_root: str) -> str:
    return os.path.join(project_root, "roles")


def iter_role_dirs(project_root: str) -> List[str]:
    root = roles_root(project_root)
    return [d for d in glob.glob(os.path.join(root, "*")) if os.path.isdir(d)]


def role_name_from_dir(role_dir: str) -> str:
    return os.path.basename(role_dir.rstrip(os.sep))


def path_if_exists(*parts) -> Optional[str]:
    p = os.path.join(*parts)
    return p if os.path.exists(p) else None


def gather_yaml_files(base: str, patterns: List[str]) -> List[str]:
    files: List[str] = []
    for pat in patterns:
        files.extend(glob.glob(os.path.join(base, pat), recursive=True))
    return [f for f in files if os.path.isfile(f)]


def sq(v: str):
    """Return a single-quoted scalar (ruamel) for consistent quoting."""
    return SingleQuotedScalarString(v)


# ---------------- Providers: vars & handlers ----------------

def flatten_keys(data) -> Set[str]:
    out: Set[str] = set()
    if isinstance(data, dict):
        for k, v in data.items():
            if isinstance(k, str):
                out.add(k)
            out |= flatten_keys(v)
    elif isinstance(data, list):
        for item in data:
            out |= flatten_keys(item)
    return out


def collect_role_defined_vars(role_dir: str) -> Set[str]:
    """Vars a role 'provides': defaults/vars keys + set_fact keys in tasks."""
    provided: Set[str] = set()

    for rel in ("defaults/main.yml", "vars/main.yml"):
        p = path_if_exists(role_dir, rel)
        if p:
            data = load_yaml_rt(p)
            provided |= flatten_keys(data)

    # set_fact keys
    task_files = gather_yaml_files(os.path.join(role_dir, "tasks"), ["**/*.yml", "*.yml"])
    for tf in task_files:
        data = load_yaml_rt(tf)
        if isinstance(data, list):
            for task in data:
                if isinstance(task, dict) and "set_fact" in task and isinstance(task["set_fact"], dict):
                    provided |= set(task["set_fact"].keys())

    noisy = {"when", "name", "vars", "tags", "register"}
    return {v for v in provided if isinstance(v, str) and v and v not in noisy}


def collect_role_handler_names(role_dir: str) -> Set[str]:
    """Handler names defined by a role (for notify detection)."""
    handler_file = path_if_exists(role_dir, "handlers/main.yml")
    if not handler_file:
        return set()
    data = load_yaml_rt(handler_file)
    names: Set[str] = set()
    if isinstance(data, list):
        for task in data:
            if isinstance(task, dict):
                nm = task.get("name")
                if isinstance(nm, str) and nm.strip():
                    names.add(nm.strip())
    return names


# ---------------- Consumers: usage scanning ----------------

def find_var_positions(text: str, varname: str) -> List[int]:
    """Return byte offsets for occurrences of varname (word-ish boundary)."""
    positions: List[int] = []
    if not varname:
        return positions
    pattern = re.compile(rf"(?<!\w){re.escape(varname)}(?!\w)")
    for m in pattern.finditer(text):
        positions.append(m.start())
    return positions


def first_var_use_offset_in_text(text: str, provided_vars: Set[str]) -> Optional[int]:
    first: Optional[int] = None
    for v in provided_vars:
        for off in find_var_positions(text, v):
            if first is None or off < first:
                first = off
    return first


def first_include_offset_for_role(text: str, producer_role: str) -> Optional[int]:
    """
    Find earliest include/import of a given role in this YAML text.
    Handles compact dict and block styles.
    """
    pattern = re.compile(
        r"(include_role|import_role)\s*:\s*\{[^}]*\bname\s*:\s*['\"]?"
        + re.escape(producer_role) + r"['\"]?[^}]*\}"
        r"|"
        r"(include_role|import_role)\s*:\s*\n(?:\s+[a-z_]+\s*:\s*.*\n)*\s*name\s*:\s*['\"]?"
        + re.escape(producer_role) + r"['\"]?",
        re.IGNORECASE,
    )
    m = pattern.search(text)
    return m.start() if m else None


def find_notify_offsets_for_handlers(text: str, handler_names: Set[str]) -> List[int]:
    """
    Heuristic: for each handler name, find occurrences where 'notify' appears within
    the preceding ~200 chars. Works for single string or list-style notify blocks.
    """
    if not handler_names:
        return []
    offsets: List[int] = []
    for h in handler_names:
        for m in re.finditer(re.escape(h), text):
            start = m.start()
            back = max(0, start - 200)
            context = text[back:start]
            if re.search(r"notify\s*:", context):
                offsets.append(start)
    return sorted(offsets)

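# Illustration (not part of the script): the offset comparison made concrete.
# For a fabricated task file where a provider variable is used before the
# include, the variable's offset is smaller than the include's offset, so the
# dependency must stay in meta (role and variable names are invented):
#
#     text = ('- debug:\n    msg: "{{ postgres_port }}"\n\n'
#             '- include_role:\n    name: svc-db-postgres\n')
#     find_var_positions(text, "postgres_port")               # early offset
#     first_include_offset_for_role(text, "svc-db-postgres")  # later offset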
def parse_meta_dependencies(role_dir: str) -> List[str]:
    meta = path_if_exists(role_dir, "meta/main.yml")
    if not meta:
        return []
    data = load_yaml_rt(meta)
    dd = data.get("dependencies")
    deps: List[str] = []
    if isinstance(dd, list):
        for item in dd:
            if isinstance(item, str):
                deps.append(item)
            elif isinstance(item, dict) and "role" in item:
                deps.append(str(item["role"]))
            elif isinstance(item, dict) and "name" in item:
                deps.append(str(item["name"]))
    return deps


# ---------------- Fix application ----------------

def sanitize_run_once_var(role_name: str) -> str:
    """
    Generate run_once variable name from role name.
    Example: 'sys-front-inj-logout' -> 'run_once_sys_front_inj_logout'
    """
    return "run_once_" + role_name.replace("-", "_")


def build_include_block_yaml(consumer_role: str, moved_deps: List[str]) -> List[dict]:
    """
    Build a guarded block that includes one or many roles.
    This block will be prepended to tasks/01_core.yml or tasks/main.yml.
    """
    guard_var = sanitize_run_once_var(consumer_role)

    if len(moved_deps) == 1:
        inner_tasks = [
            {
                "name": f"Include dependency '{moved_deps[0]}'",
                "include_role": {"name": moved_deps[0]},
            }
        ]
    else:
        inner_tasks = [
            {
                "name": "Include dependencies",
                "include_role": {"name": "{{ item }}"},
                "loop": moved_deps,
            }
        ]

    # Always set the run_once fact at the end
    inner_tasks.append({"set_fact": {guard_var: True}})

    # Correct Ansible block structure
    block_task = {
        "name": "Load former meta dependencies once",
        "block": inner_tasks,
        "when": f"{guard_var} is not defined",
    }

    return [block_task]

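# Illustration (not part of the script): rendered shape of the guarded block
# for a consumer with two moved dependencies (role names invented):
#
#     build_include_block_yaml("web-app-nextcloud",
#                              ["svc-db-postgres", "svc-proxy-nginx"])
#     # -> [{'name': 'Load former meta dependencies once',
#     #      'block': [{'name': 'Include dependencies',
#     #                 'include_role': {'name': '{{ item }}'},
#     #                 'loop': ['svc-db-postgres', 'svc-proxy-nginx']},
#     #                {'set_fact': {'run_once_web_app_nextcloud': True}}],
#     #      'when': 'run_once_web_app_nextcloud is not defined'}]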
def prepend_tasks(tasks_path: str, new_tasks, dry_run: bool):
    """
    Prepend new_tasks (CommentedSeq) to an existing tasks YAML list while preserving comments.
    If the file does not exist, create it with new_tasks.
    """
    if os.path.exists(tasks_path):
        existing = load_yaml_rt(tasks_path)
        if isinstance(existing, list):
            combined = CommentedSeq()
            for item in new_tasks:
                combined.append(item)
            for item in existing:
                combined.append(item)
        elif isinstance(existing, dict):
            # Rare case: tasks file with a single mapping; coerce to list
            combined = CommentedSeq()
            for item in new_tasks:
                combined.append(item)
            combined.append(existing)
        else:
            combined = new_tasks
    else:
        os.makedirs(os.path.dirname(tasks_path), exist_ok=True)
        combined = new_tasks

    if dry_run:
        print(f"[DRY-RUN] Would write {tasks_path} with {len(new_tasks)} prepended task(s).")
        return

    dump_yaml_rt(combined, tasks_path)
    print(f"[OK] Updated {tasks_path} (prepended {len(new_tasks)} task(s)).")


def update_meta_remove_deps(meta_path: str, remove: List[str], dry_run: bool):
    """
    Remove entries from meta.dependencies while leaving the rest of the file intact.
    Quotes, comments, key order, and line breaks are preserved.
    Returns True if a change would be made (or was made when not in dry-run).
    """
    if not os.path.exists(meta_path):
        return False

    doc = load_yaml_rt(meta_path)
    deps = doc.get("dependencies")
    if not isinstance(deps, list):
        return False

    def dep_name(item):
        if isinstance(item, dict):
            return item.get("role") or item.get("name")
        return item

    keep = CommentedSeq()
    removed = []
    for item in deps:
        name = dep_name(item)
        if name in remove:
            removed.append(name)
        else:
            keep.append(item)

    if not removed:
        return False

    if keep:
        doc["dependencies"] = keep
    else:
        if "dependencies" in doc:
            del doc["dependencies"]

    if dry_run:
        print(f"[DRY-RUN] Would rewrite {meta_path}; removed: {', '.join(removed)}")
        return True

    dump_yaml_rt(doc, meta_path)
    print(f"[OK] Rewrote {meta_path}; removed: {', '.join(removed)}")
    return True

def dependency_is_unnecessary(consumer_dir: str,
                              consumer_name: str,
                              producer_name: str,
                              provider_vars: Set[str],
                              provider_handlers: Set[str]) -> bool:
    """Apply heuristic to decide if we can move this dependency."""
    # 1) Early usage in defaults/vars/handlers? If yes -> necessary
    defaults_files = [p for p in [
        path_if_exists(consumer_dir, "defaults/main.yml"),
        path_if_exists(consumer_dir, "vars/main.yml"),
        path_if_exists(consumer_dir, "handlers/main.yml"),
    ] if p]
    for p in defaults_files:
        text = read_text(p)
        if first_var_use_offset_in_text(text, provider_vars) is not None:
            return False  # needs meta dep

    # 2) Tasks: any usage before include/import? If yes -> keep meta dep
    task_files = gather_yaml_files(os.path.join(consumer_dir, "tasks"), ["**/*.yml", "*.yml"])
    for p in task_files:
        text = read_text(p)
        if not text:
            continue
        include_off = first_include_offset_for_role(text, producer_name)
        var_use_off = first_var_use_offset_in_text(text, provider_vars)
        notify_offs = find_notify_offsets_for_handlers(text, provider_handlers)

        if var_use_off is not None:
            if include_off is None or include_off > var_use_off:
                return False  # used before include

        for noff in notify_offs:
            if include_off is None or include_off > noff:
                return False  # notify before include

    # If we get here: no early use, and either no usage at all or usage after include
    return True

def process_role(role_dir: str,
                 providers_index: Dict[str, Tuple[Set[str], Set[str]]],
                 only_role: Optional[str],
                 dry_run: bool) -> bool:
    """
    Returns True if any change suggested/made for this role.
    """
    consumer_name = role_name_from_dir(role_dir)
    if only_role and only_role != consumer_name:
        return False

    meta_deps = parse_meta_dependencies(role_dir)
    if not meta_deps:
        return False

    # Build provider vars/handlers accessors
    moved: List[str] = []
    for producer in meta_deps:
        # Only consider local roles we can analyze
        producer_dir = path_if_exists(os.path.dirname(role_dir), producer) or path_if_exists(os.path.dirname(roles_root(os.path.dirname(role_dir))), "roles", producer)
        if producer not in providers_index:
            # Unknown/external role → skip (we cannot verify safety)
            continue
        pvars, phandlers = providers_index[producer]
        if dependency_is_unnecessary(role_dir, consumer_name, producer, pvars, phandlers):
            moved.append(producer)

    if not moved:
        return False

    # 1) Remove from meta
    meta_path = os.path.join(role_dir, "meta", "main.yml")
    update_meta_remove_deps(meta_path, moved, dry_run=dry_run)

    # 2) Prepend include block to tasks/01_core.yml or tasks/main.yml
    target_tasks = path_if_exists(role_dir, "tasks/01_core.yml")
    if not target_tasks:
        target_tasks = os.path.join(role_dir, "tasks", "main.yml")
    include_block = build_include_block_yaml(consumer_name, moved)
    prepend_tasks(target_tasks, include_block, dry_run=dry_run)
    return True


def build_providers_index(all_roles: List[str]) -> Dict[str, Tuple[Set[str], Set[str]]]:
    """
    Map role_name -> (provided_vars, handler_names)
    """
    index: Dict[str, Tuple[Set[str], Set[str]]] = {}
    for rd in all_roles:
        rn = role_name_from_dir(rd)
        index[rn] = (collect_role_defined_vars(rd), collect_role_handler_names(rd))
    return index


def main():
    parser = argparse.ArgumentParser(
        description="Move unnecessary meta dependencies to guarded include_role for performance (preserve comments/quotes)."
    )
    parser.add_argument(
        "--project-root",
        default=os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")),
        help="Path to project root (default: two levels up from this script).",
    )
    parser.add_argument(
        "--role",
        dest="only_role",
        default=None,
        help="Only process a specific role name (e.g., 'docker-core').",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Analyze and print planned changes without modifying files.",
    )
    args = parser.parse_args()

    roles = iter_role_dirs(args.project_root)
    if not roles:
        print(f"[ERR] No roles found under {roles_root(args.project_root)}", file=sys.stderr)
        sys.exit(2)

    providers_index = build_providers_index(roles)

    changed_any = False
    for role_dir in roles:
        changed = process_role(role_dir, providers_index, args.only_role, args.dry_run)
        changed_any = changed_any or changed

    if not changed_any:
        print("[OK] No unnecessary meta dependencies to move (per heuristic).")
    else:
        if args.dry_run:
            print("[DRY-RUN] Completed analysis. No files were changed.")
        else:
            print("[OK] Finished moving unnecessary dependencies.")


if __name__ == "__main__":
    main()
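A hedged invocation sketch for the mover (flags taken from the argparse definition above; the role name is a placeholder):

import subprocess, sys

# Analyze a single role from the repo root without touching any files.
subprocess.run(
    [sys.executable, "cli/fix/move_unnecessary_dependencies.py",
     "--project-root", ".",
     "--role", "web-app-nextcloud",   # placeholder role name
     "--dry-run"],
    check=True,
)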
cli/fix/replace_by_get_app_config.sh (new executable file, 5 lines)
@@ -0,0 +1,5 @@
# Just a little refactoring script, you can delete it later
ATTR="$1"
OLD="applications[application_id].$ATTR"
NEW="applications | get_app_conf(application_id, '$ATTR', True)"
bsr ./ "$OLD" -rFfc -n "$NEW"
cli/fix/vars_main_files.py (new file, 89 lines)
@@ -0,0 +1,89 @@
#!/usr/bin/env python3
"""
Script to ensure each Ansible role under ../roles/ with a given prefix has a vars/main.yml
containing the correct application_id. Can preview actions or overwrite mismatches.
"""
import argparse
import sys
import yaml
from pathlib import Path

# Directory containing roles; can be overridden by tests
MODULE_DIR = Path(__file__).resolve().parent
ROLES_DIR = (MODULE_DIR.parent.parent / "roles").resolve()

def process_role(role_dir: Path, prefix: str, preview: bool, overwrite: bool):
    name = role_dir.name
    if not name.startswith(prefix):
        return
    # Expected application_id is role name minus prefix
    expected_id = name[len(prefix):]
    vars_dir = role_dir / "vars"
    vars_file = vars_dir / "main.yml"
    if vars_file.exists():
        # Load existing variables
        try:
            existing = yaml.safe_load(vars_file.read_text()) or {}
        except yaml.YAMLError as e:
            print(f"Error parsing YAML in {vars_file}: {e}", file=sys.stderr)
            return
        actual_id = existing.get("application_id")
        if actual_id == expected_id:
            # Already correct
            return
        if overwrite:
            # Update only application_id
            existing["application_id"] = expected_id
            if preview:
                print(f"[PREVIEW] Would update {vars_file}: application_id -> {expected_id}")
            else:
                with open(vars_file, "w") as f:
                    yaml.safe_dump(existing, f, default_flow_style=False, sort_keys=False)
                print(f"Updated {vars_file}: application_id -> {expected_id}")
        else:
            print(f"Mismatch in {vars_file}: application_id='{actual_id}', expected='{expected_id}'")
    else:
        # Create new vars/main.yml
        if preview:
            print(f"[PREVIEW] Would create {vars_file} with application_id: {expected_id}")
        else:
            vars_dir.mkdir(parents=True, exist_ok=True)
            content = {"application_id": expected_id}
            with open(vars_file, "w") as f:
                yaml.safe_dump(content, f, default_flow_style=False, sort_keys=False)
            print(f"Created {vars_file} with application_id: {expected_id}")


def run(prefix: str, preview: bool = False, overwrite: bool = False):
    """
    Ensure vars/main.yml for roles under ROLES_DIR with the given prefix has correct application_id.
    """
    for role in sorted(Path(ROLES_DIR).iterdir()):
        if role.is_dir():
            process_role(role, prefix, preview, overwrite)


def main():
    parser = argparse.ArgumentParser(
        description="Ensure vars/main.yml for roles with a given prefix has correct application_id"
    )
    parser.add_argument(
        "--prefix", required=True,
        help="Role name prefix to filter (e.g. 'web-', 'svc-', 'desk-')"
    )
    parser.add_argument(
        "--preview", action="store_true",
        help="Show what would be done without making changes"
    )
    parser.add_argument(
        "--overwrite", action="store_true",
        help="If vars/main.yml exists but application_id mismatches, overwrite only that key"
    )
    args = parser.parse_args()

    # Run processing
    run(prefix=args.prefix, preview=args.preview, overwrite=args.overwrite)


if __name__ == "__main__":
    main()
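Because the logic lives in run(), a preview pass can also be driven from Python directly (assumes the cli packages are importable, e.g. after cli/fix/ini_py.py has created the __init__.py files):

from cli.fix.vars_main_files import run

# Report planned creations/updates for all 'web-' roles without writing anything.
run(prefix="web-", preview=True)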
@@ -1,106 +0,0 @@
#!/usr/bin/env python3

import argparse
import os
import yaml
import sys
from pathlib import Path

plugin_path = Path(__file__).resolve().parent / ".." / "lookup_plugins"
sys.path.insert(0, str(plugin_path))

from application_gid import LookupModule


def load_yaml_file(path):
    """Load a YAML file if it exists, otherwise return an empty dict."""
    if not path.exists():
        return {}
    with path.open("r", encoding="utf-8") as f:
        return yaml.safe_load(f) or {}


def main():
    parser = argparse.ArgumentParser(
        description="Generate defaults_applications YAML from docker roles and include users meta data for each role."
    )
    parser.add_argument(
        "--roles-dir",
        help="Path to the roles directory (default: roles)"
    )
    parser.add_argument(
        "--output-file",
        help="Path to output YAML file"
    )

    args = parser.parse_args()
    cwd = Path.cwd()
    roles_dir = (cwd / args.roles_dir).resolve()
    output_file = (cwd / args.output_file).resolve()
    # Ensure output directory exists
    output_file.parent.mkdir(parents=True, exist_ok=True)

    # Initialize result structure
    result = {"defaults_applications": {}}

    gid_lookup = LookupModule()
    # Process each role for application configs
    for role_dir in sorted(roles_dir.iterdir()):
        role_name = role_dir.name
        vars_main = role_dir / "vars" / "main.yml"
        config_file = role_dir / "vars" / "configuration.yml"

        if not vars_main.exists():
            print(f"[!] Skipping {role_name}: vars/main.yml missing")
            continue

        vars_data = load_yaml_file(vars_main)
        try:
            application_id = vars_data.get("application_id")
        except Exception as e:
            print(
                f"Warning: failed to read application_id from {vars_main}\nException: {e}",
                file=sys.stderr
            )
            sys.exit(1)

        if not application_id:
            print(f"[!] Skipping {role_name}: application_id not defined in vars/main.yml")
            continue

        if not config_file.exists():
            print(f"[!] Skipping {role_name}: vars/configuration.yml missing")
            continue

        config_data = load_yaml_file(config_file)
        if config_data:
            try:
                gid_number = gid_lookup.run([application_id], roles_dir=str(roles_dir))[0]
            except Exception as e:
                print(f"Warning: failed to determine gid for '{application_id}': {e}", file=sys.stderr)
                sys.exit(1)
            config_data["group_id"] = gid_number
            result["defaults_applications"][application_id] = config_data
            users_meta_file = role_dir / "meta" / "users.yml"
            transformed_users = {}
            if users_meta_file.exists():
                users_meta = load_yaml_file(users_meta_file)
                users_data = users_meta.get("users", {})
                for user, role_user_attrs in users_data.items():
                    transformed_users[user] = f"{{{{ users[\"{user}\"] }}}}"

                # Attach transformed users under each application
                if transformed_users:
                    result["defaults_applications"][application_id]["users"] = transformed_users

    # Write out result YAML
    with output_file.open("w", encoding="utf-8") as f:
        yaml.dump(result, f, sort_keys=False)

    try:
        print(f"✅ Generated: {output_file.relative_to(cwd)}")
    except ValueError:
        print(f"✅ Generated: {output_file}")


if __name__ == "__main__":
    main()
@@ -1,166 +0,0 @@
import os
import yaml
import argparse
from collections import defaultdict, deque

def find_roles(roles_dir, prefix=None):
    """Find all roles in the given directory."""
    for entry in os.listdir(roles_dir):
        if prefix and not entry.startswith(prefix):
            continue
        path = os.path.join(roles_dir, entry)
        meta_file = os.path.join(path, 'meta', 'main.yml')
        if os.path.isdir(path) and os.path.isfile(meta_file):
            yield path, meta_file

def load_run_after(meta_file):
    """Load the 'run_after' from the meta/main.yml of a role."""
    with open(meta_file, 'r') as f:
        data = yaml.safe_load(f) or {}
    return data.get('galaxy_info', {}).get('run_after', [])

def load_application_id(role_path):
    """Load the application_id from the vars/main.yml of the role."""
    vars_file = os.path.join(role_path, 'vars', 'main.yml')
    if os.path.exists(vars_file):
        with open(vars_file, 'r') as f:
            data = yaml.safe_load(f) or {}
        return data.get('application_id')
    return None

def build_dependency_graph(roles_dir, prefix=None):
    """Build a dependency graph where each role points to the roles it depends on."""
    graph = defaultdict(list)
    in_degree = defaultdict(int)
    roles = {}

    for role_path, meta_file in find_roles(roles_dir, prefix):
        run_after = load_run_after(meta_file)
        application_id = load_application_id(role_path)
        role_name = os.path.basename(role_path)
        roles[role_name] = {
            'role_name': role_name,
            'run_after': run_after,
            'application_id': application_id,
            'path': role_path
        }

        # If the role has dependencies, build the graph
        for dependency in run_after:
            graph[dependency].append(role_name)
            in_degree[role_name] += 1

        # Ensure roles with no dependencies have an in-degree of 0
        if role_name not in in_degree:
            in_degree[role_name] = 0

    return graph, in_degree, roles

def topological_sort(graph, in_degree):
    """Perform topological sort on the dependency graph."""
    # Queue for roles with no incoming dependencies (in_degree == 0)
    queue = deque([role for role, degree in in_degree.items() if degree == 0])
    sorted_roles = []

    while queue:
        role = queue.popleft()
        sorted_roles.append(role)

        # Reduce in-degree for roles dependent on the current role
        for neighbor in graph[role]:
            in_degree[neighbor] -= 1
            if in_degree[neighbor] == 0:
                queue.append(neighbor)

    if len(sorted_roles) != len(in_degree):
        # If the number of sorted roles doesn't match the number of roles,
        # there was a cycle in the graph (not all roles could be sorted)
        raise Exception("Circular dependency detected among the roles!")

    return sorted_roles

def print_dependency_tree(graph):
    """Print the dependency tree visually on the console."""
    def print_node(role, indent=0):
        print(" " * indent + role)
        for dependency in graph[role]:
            print_node(dependency, indent + 1)

    # Print the tree starting from roles with no dependencies
    all_roles = set(graph.keys())
    dependent_roles = {role for dependencies in graph.values() for role in dependencies}
    root_roles = all_roles - dependent_roles

    printed_roles = []

    def collect_roles(role, indent=0):
        printed_roles.append(role)
        for dependency in graph[role]:
            collect_roles(dependency, indent + 1)

    for root in root_roles:
        collect_roles(root)

    return printed_roles

def generate_playbook_entries(roles_dir, prefix=None):
    """Generate playbook entries based on the sorted order."""
    graph, in_degree, roles = build_dependency_graph(roles_dir, prefix)

    # Detect cycles and get correct topological order
    sorted_role_names = topological_sort(graph, in_degree)

    entries = []
    for role_name in sorted_role_names:
        role = roles[role_name]
        entries.append(
            f"- name: setup {role['application_id']}\n"
            f"  when: ('{role['application_id']}' | application_allowed(group_names, allowed_applications))\n"
            f"  include_role:\n"
            f"    name: {role['role_name']}\n"
        )
        entries.append(
            f"- name: flush handlers after {role['application_id']}\n"
            f"  meta: flush_handlers\n"
        )

    return entries

def main():
    parser = argparse.ArgumentParser(
        description='Generate an Ansible playbook include file from Docker roles, sorted by run_after order.'
    )
    parser.add_argument(
        'roles_dir',
        help='Path to directory containing role folders'
    )
    parser.add_argument(
        '-p', '--prefix',
        help='Only include roles whose names start with this prefix (e.g. docker-, client-)',
        default=None
    )
    parser.add_argument(
        '-o', '--output',
        help='Output file path (default: stdout)',
        default=None
    )
    parser.add_argument(
        '-t', '--tree',
        action='store_true',
        help='Display the dependency tree of roles visually'
    )
    args = parser.parse_args()

    # Generate and output the playbook entries
    entries = generate_playbook_entries(args.roles_dir, args.prefix)
    output = ''.join(entries)

    if args.output:
        with open(args.output, 'w') as f:
            f.write(output)
        print(f"Playbook entries written to {args.output}")
    else:
        print(output)

if __name__ == '__main__':
    main()
@@ -1,241 +0,0 @@
#!/usr/bin/env python3
import os
import sys
import argparse
import yaml
import glob
from collections import OrderedDict


def represent_str(dumper, data):
    """
    Custom YAML string representer that forces double quotes around any string
    containing a Jinja2 placeholder ({{ ... }}).
    """
    if isinstance(data, str) and '{{' in data:
        return dumper.represent_scalar(
            'tag:yaml.org,2002:str',
            data,
            style='"'
        )
    return dumper.represent_scalar(
        'tag:yaml.org,2002:str',
        data
    )


def build_users(defs, primary_domain, start_id, become_pwd):
    """
    Construct user entries with auto-incremented UID/GID, default username/email,
    and optional description.

    Args:
        defs (OrderedDict): Mapping of user keys to their override settings.
        primary_domain (str): The primary domain for email addresses (e.g. 'example.com').
        start_id (int): Starting number for UID/GID allocation (e.g. 1001).
        become_pwd (str): Default password string for users without an override.

    Returns:
        OrderedDict: Complete user definitions with all required fields filled in.

    Raises:
        ValueError: If there are duplicate UIDs, usernames, or emails.
    """
    users = OrderedDict()
    used_uids = set()

    # Collect any preset UIDs to avoid collisions
    for key, overrides in defs.items():
        if 'uid' in overrides:
            uid = overrides['uid']
            if uid in used_uids:
                raise ValueError(f"Duplicate uid {uid} for user '{key}'")
            used_uids.add(uid)

    next_uid = start_id
    def allocate_uid():
        nonlocal next_uid
        # Find the next free UID not already used
        while next_uid in used_uids:
            next_uid += 1
        free_uid = next_uid
        used_uids.add(free_uid)
        next_uid += 1
        return free_uid

    # Build each user entry
    for key, overrides in defs.items():
        username = overrides.get('username', key)
        email = overrides.get('email', f"{username}@{primary_domain}")
        description = overrides.get('description')
        roles = overrides.get('roles', [])
        password = overrides.get('password', become_pwd)

        # Determine UID and GID
        if 'uid' in overrides:
            uid = overrides['uid']
        else:
            uid = allocate_uid()
        gid = overrides.get('gid', uid)

        entry = {
            'username': username,
            'email': email,
            'password': password,
            'uid': uid,
            'gid': gid,
            'roles': roles
        }
        if description is not None:
            entry['description'] = description

        users[key] = entry

    # Ensure uniqueness of usernames and emails
    seen_usernames = set()
    seen_emails = set()

    for key, entry in users.items():
        un = entry['username']
        em = entry['email']
        if un in seen_usernames:
            raise ValueError(f"Duplicate username '{un}' in merged users")
        if em in seen_emails:
            raise ValueError(f"Duplicate email '{em}' in merged users")
        seen_usernames.add(un)
        seen_emails.add(em)

    return users


def load_user_defs(roles_directory):
    """
    Scan all roles/*/meta/users.yml files and merge any 'users:' sections.

    Args:
        roles_directory (str): Path to the directory containing role subdirectories.

    Returns:
        OrderedDict: Merged user definitions from all roles.

    Raises:
        ValueError: On invalid format or conflicting override values.
    """
    pattern = os.path.join(roles_directory, '*/meta/users.yml')
    files = sorted(glob.glob(pattern))
    merged = OrderedDict()

    for filepath in files:
        with open(filepath, 'r') as f:
            data = yaml.safe_load(f) or {}
        users = data.get('users', {})
        if not isinstance(users, dict):
            continue

        for key, overrides in users.items():
            if not isinstance(overrides, dict):
                raise ValueError(f"Invalid definition for user '{key}' in {filepath}")

            if key not in merged:
                merged[key] = overrides.copy()
            else:
                existing = merged[key]
                for field, value in overrides.items():
                    if field in existing and existing[field] != value:
                        raise ValueError(
                            f"Conflict for user '{key}': field '{field}' has existing value '{existing[field]}', tried to set '{value}' in {filepath}"
                        )
                existing.update(overrides)

    return merged


def dictify(data):
    """
    Recursively convert OrderedDict to regular dict for YAML dumping.
    """
    if isinstance(data, OrderedDict):
        return {k: dictify(v) for k, v in data.items()}
    if isinstance(data, dict):
        return {k: dictify(v) for k, v in data.items()}
    if isinstance(data, list):
        return [dictify(v) for v in data]
    return data


def parse_args():
    parser = argparse.ArgumentParser(
        description='Generate a users.yml by merging all roles/*/meta/users.yml definitions.'
    )
    parser.add_argument(
        '--roles-dir', '-r', required=True,
        help='Directory containing roles (e.g., roles/*/meta/users.yml).'
    )
    parser.add_argument(
        '--output', '-o', required=True,
        help='Path to the output YAML file (e.g., users.yml).'
    )
    parser.add_argument(
        '--start-id', '-s', type=int, default=1001,
        help='Starting UID/GID number (default: 1001).'
    )
    parser.add_argument(
        '--extra-users', '-e',
        help='Comma-separated list of additional usernames to include.',
        default=None
    )
    return parser.parse_args()


def main():
    args = parse_args()
    primary_domain = '{{ primary_domain }}'
    become_pwd = '{{ lookup("password", "/dev/null length=42 chars=ascii_letters,digits") }}'

    try:
        definitions = load_user_defs(args.roles_dir)
    except ValueError as e:
        print(f"Error merging user definitions: {e}", file=sys.stderr)
        sys.exit(1)

    # Add extra users if specified
    if args.extra_users:
        for name in args.extra_users.split(','):
            user_key = name.strip()
            if not user_key:
                continue
            if user_key in definitions:
                print(f"Warning: extra user '{user_key}' already defined; skipping.", file=sys.stderr)
            else:
                definitions[user_key] = {}

    try:
        users = build_users(
            definitions,
            primary_domain,
            args.start_id,
            become_pwd
        )
    except ValueError as e:
        print(f"Error building user entries: {e}", file=sys.stderr)
        sys.exit(1)

    # Convert OrderedDict into plain dict for YAML
    default_users = {'default_users': users}
    plain_data = dictify(default_users)

    # Register custom string representer
    yaml.SafeDumper.add_representer(str, represent_str)

    # Dump the YAML file
    with open(args.output, 'w') as f:
        yaml.safe_dump(
            plain_data,
            f,
            default_flow_style=False,
            sort_keys=False,
            width=120
        )

if __name__ == '__main__':
    main()
cli/integration/deploy_localhost.py (new file, 126 lines)
@@ -0,0 +1,126 @@
#!/usr/bin/env python3
"""
Run the full localhost integration flow entirely inside the infinito Docker container,
without writing any artifacts to the host filesystem.
Catches missing schema/config errors during credential vaulting and skips those apps.
"""
import subprocess
import os
import sys

def main():
    repo = os.path.abspath(os.getcwd())

    bash_script = '''
set -e

ART=/integration-artifacts
mkdir -p "$ART"
echo testpassword > "$ART/vaultpw.txt"

# 1) Generate inventory
python3 -m cli.build.inventory.full \
    --host localhost \
    --inventory-style hostvars \
    --format yaml \
    --output "$ART/inventory.yml"

# 2) Credentials per-app
apps=$(python3 <<EOF
import yaml
inv = yaml.safe_load(open('/integration-artifacts/inventory.yml'))
print(' '.join(inv['_meta']['hostvars']['localhost']['invokable_applications']))
EOF
)
for app in $apps; do
  echo "⏳ Vaulting credentials for $app..."
  output=$(python3 -m cli.create.credentials \
      --role-path "/repo/roles/$app" \
      --inventory-file "$ART/inventory.yml" \
      --vault-password-file "$ART/vaultpw.txt" \
      --force 2>&1) || rc=$?; rc=${rc:-0}

  if [ "$rc" -eq 0 ]; then
    echo "✅ Credentials generated for $app"
  elif echo "$output" | grep -q "No such file or directory"; then
    echo "⚠️ Skipping $app (no schema/config)"
  elif echo "$output" | grep -q "Plain algorithm for"; then
    # Collect all plain-algo keys
    keys=( $(echo "$output" | grep -oP "Plain algorithm for '\K[^']+") )
    overrides=()
    for key in "${keys[@]}"; do
      if [[ "$key" == *api_key ]]; then
        val=$(python3 - << 'PY'
import random, string
print(''.join(random.choices(string.ascii_letters+string.digits, k=32)))
PY
)
      elif [[ "$key" == *password ]]; then
        val=$(python3 - << 'PY'
import random, string
print(''.join(random.choices(string.ascii_letters+string.digits, k=12)))
PY
)
      else
        val=$(python3 - << 'PY'
import random, string
print(''.join(random.choices(string.ascii_letters+string.digits, k=16)))
PY
)
      fi
      echo "  → Overriding $key=$val"
      overrides+=("--set" "$key=$val")
    done
    # Retry with overrides
    echo "🔄 Retrying with overrides..."
    retry_out=$(python3 -m cli.create.credentials \
        --role-path "/repo/roles/$app" \
        --inventory-file "$ART/inventory.yml" \
        --vault-password-file "$ART/vaultpw.txt" \
        "${overrides[@]}" \
        --force 2>&1) || retry_rc=$?; retry_rc=${retry_rc:-0}
    if [ "$retry_rc" -eq 0 ]; then
      echo "✅ Credentials generated for $app (with overrides)"
    else
      echo "❌ Override failed for $app:"
      echo "$retry_out"
    fi
  else
    echo "❌ Credential error for $app:"
    echo "$output"
  fi
done

# 3) Show generated files
ls -R "$ART" 2>/dev/null

echo "
===== inventory.yml ====="
cat "$ART/inventory.yml"

echo "
===== vaultpw.txt ====="
cat "$ART/vaultpw.txt"

# 4) Deploy
python3 -m cli.deploy \
    "$ART/inventory.yml" \
    --limit localhost \
    --vault-password-file "$ART/vaultpw.txt" \
    --verbose
'''

    cmd = [
        "docker", "run", "--rm",
        "-v", f"{repo}:/repo",
        "-w", "/repo",
        "--entrypoint", "bash",
        "infinito:latest",
        "-c", bash_script
    ]
    print(f"\033[96m> {' '.join(cmd)}\033[0m")
    rc = subprocess.call(cmd)
    sys.exit(rc)

if __name__ == '__main__':
    main()
cli/make.py (new file, 50 lines)
@@ -0,0 +1,50 @@
#!/usr/bin/env python3
"""
CLI wrapper for Makefile targets within Infinito.Nexus.
Invokes `make` commands in the project root directory.
"""
import argparse
import os
import subprocess
import sys


def main():
    parser = argparse.ArgumentParser(
        prog='infinito make',
        description='Run Makefile targets for Infinito.Nexus project'
    )
    parser.add_argument(
        'targets',
        nargs=argparse.REMAINDER,
        help='Make targets and options to pass to `make`'
    )
    args = parser.parse_args()

    # Default to 'build' if no target is specified
    make_args = args.targets or ['build']

    # Determine repository root (one level up from cli/)
    script_dir = os.path.dirname(os.path.realpath(__file__))
    repo_root = os.path.abspath(os.path.join(script_dir, os.pardir))

    # Check for Makefile
    makefile_path = os.path.join(repo_root, 'Makefile')
    if not os.path.isfile(makefile_path):
        print(f"Error: Makefile not found in {repo_root}", file=sys.stderr)
        sys.exit(1)

    # Invoke make in repo root
    cmd = ['make'] + make_args
    try:
        result = subprocess.run(cmd, cwd=repo_root)
        sys.exit(result.returncode)
    except FileNotFoundError:
        print("Error: 'make' command not found. Please install make.", file=sys.stderr)
        sys.exit(1)
    except KeyboardInterrupt:
        sys.exit(1)


if __name__ == '__main__':
    main()
cli/meta/applications/all.py (new file, 40 lines)
@@ -0,0 +1,40 @@
#!/usr/bin/env python3
# cli/meta/applications/all.py

import argparse
import sys

# Import the Ansible filter implementation
try:
    from filter_plugins.get_all_application_ids import get_all_application_ids
except ImportError:
    sys.stderr.write("Filter plugin `get_all_application_ids` not found. Ensure `filter_plugins/get_all_application_ids.py` is in your PYTHONPATH.\n")
    sys.exit(1)


def find_application_ids():
    """
    Legacy function retained for reference.
    Delegates to the `get_all_application_ids` filter plugin.
    """
    return get_all_application_ids()


def main():
    parser = argparse.ArgumentParser(
        description='Output a list of all application_id values defined in roles/*/vars/main.yml'
    )
    parser.parse_args()

    try:
        ids = find_application_ids()
    except Exception as e:
        sys.stderr.write(f"Error retrieving application IDs: {e}\n")
        sys.exit(1)

    for app_id in ids:
        print(app_id)


if __name__ == '__main__':
    main()
cli/meta/applications/in_group_deps.py (new file, 107 lines)
@@ -0,0 +1,107 @@
#!/usr/bin/env python3
"""
CLI wrapper for applications_if_group_and_deps filter.
"""
import argparse
import sys
import os
import yaml
from filter_plugins.applications_if_group_and_deps import FilterModule


def find_role_dirs_by_app_id(app_ids, roles_dir):
    """
    Map application_ids to role directory names based on vars/main.yml in each role.
    """
    mapping = {}
    for role in os.listdir(roles_dir):
        role_path = os.path.join(roles_dir, role)
        vars_file = os.path.join(role_path, 'vars', 'main.yml')
        if not os.path.isfile(vars_file):
            continue
        try:
            with open(vars_file) as f:
                data = yaml.safe_load(f) or {}
        except Exception:
            continue
        app_id = data.get('application_id')
        if isinstance(app_id, str) and app_id:
            mapping[app_id] = role
    # Translate each requested app_id to role dir if exists
    dirs = []
    for gid in app_ids:
        if gid in mapping:
            dirs.append(mapping[gid])
        else:
            # keep original if it matches a directory
            if os.path.isdir(os.path.join(roles_dir, gid)):
                dirs.append(gid)
    return dirs


def main():
    parser = argparse.ArgumentParser(
        description="Filter applications by group names (role dirs or application_ids) and their recursive role dependencies."
    )
    parser.add_argument(
        "-a", "--applications",
        type=str,
        required=True,
        help="Path to YAML file defining the applications dict."
    )
    parser.add_argument(
        "-g", "--groups",
        nargs='+',
        required=True,
        help="List of group names to filter by (role directory names or application_ids)."
    )
    args = parser.parse_args()

    # Load applications
    try:
        with open(args.applications) as f:
            data = yaml.safe_load(f)
    except Exception as e:
        print(f"Error loading applications file: {e}", file=sys.stderr)
        sys.exit(1)

    # Unwrap under 'applications' key if present
    if isinstance(data, dict) and 'applications' in data and isinstance(data['applications'], dict):
        applications = data['applications']
    else:
        applications = data

    if not isinstance(applications, dict):
        print(
            f"Expected applications YAML to contain a mapping (or 'applications' mapping), got {type(applications).__name__}",
            file=sys.stderr
        )
        sys.exit(1)

    # Determine roles_dir relative to project root
    script_dir = os.path.dirname(__file__)
    project_root = os.path.abspath(os.path.join(script_dir, '..', '..', '..'))
    roles_dir = os.path.join(project_root, 'roles')

    # Map user-provided groups (which may be application_ids) to role directory names
    group_dirs = find_role_dirs_by_app_id(args.groups, roles_dir)
    if not group_dirs:
        print(f"No matching role directories found for groups: {args.groups}", file=sys.stderr)
        sys.exit(1)

    # Run filter using role directory names
    try:
        filtered = FilterModule().applications_if_group_and_deps(
            applications,
            group_dirs
        )
    except Exception as e:
        print(f"Error running filter: {e}", file=sys.stderr)
        sys.exit(1)

    # Output result as YAML
    print(yaml.safe_dump(filtered, default_flow_style=False))


if __name__ == '__main__':
    main()
cli/meta/applications/invokable.py (new file, 49 lines)
@@ -0,0 +1,49 @@
#!/usr/bin/env python3
# cli/meta/applications/invokable.py

import argparse
import sys
import os

# Import filter plugin for get_all_invokable_apps
try:
    from filter_plugins.get_all_invokable_apps import get_all_invokable_apps
except ImportError:
    # Try to adjust sys.path if running outside Ansible
    sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..')))
    try:
        from filter_plugins.get_all_invokable_apps import get_all_invokable_apps
    except ImportError:
        sys.stderr.write("Could not import filter_plugins.get_all_invokable_apps. Check your PYTHONPATH.\n")
        sys.exit(1)

def main():
    parser = argparse.ArgumentParser(
        description='List all invokable applications (application_ids) based on invokable paths from categories.yml and available roles.'
    )
    parser.add_argument(
        '-c', '--categories-file',
        default=os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'roles', 'categories.yml')),
        help='Path to roles/categories.yml (default: roles/categories.yml at project root)'
    )
    parser.add_argument(
        '-r', '--roles-dir',
        default=os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'roles')),
        help='Path to roles/ directory (default: roles/ at project root)'
    )
    args = parser.parse_args()

    try:
        result = get_all_invokable_apps(
            categories_file=args.categories_file,
            roles_dir=args.roles_dir
        )
    except Exception as e:
        sys.stderr.write(f"Error: {e}\n")
        sys.exit(1)

    for app_id in result:
        print(app_id)

if __name__ == '__main__':
    main()
74
cli/meta/applications/role_name.py
Normal file
@@ -0,0 +1,74 @@
#!/usr/bin/env python3
"""
CLI Script: role_name.py

This script determines the appropriate Ansible role folder based on the provided application_id
by inspecting each role's vars/main.yml within the roles directory. By default, it assumes the
roles directory is located at the project root, relative to this script's location.
"""
import os
import sys
import argparse
import yaml


def get_role(application_id, roles_path):
    """
    Find the role directory under `roles_path` whose vars/main.yml contains the specified application_id.

    :param application_id: The application_id to match.
    :param roles_path: Path to the roles directory.
    :return: The name of the matching role directory.
    :raises RuntimeError: If no match is found or if an error occurs while reading files.
    """
    if not os.path.isdir(roles_path):
        raise RuntimeError(f"Roles path not found: {roles_path}")

    for role in sorted(os.listdir(roles_path)):
        role_dir = os.path.join(roles_path, role)
        vars_file = os.path.join(role_dir, 'vars', 'main.yml')
        if os.path.isfile(vars_file):
            try:
                with open(vars_file, 'r') as f:
                    data = yaml.safe_load(f) or {}
            except Exception as e:
                raise RuntimeError(f"Failed to load {vars_file}: {e}")

            if data.get('application_id') == application_id:
                return role

    raise RuntimeError(f"No role found with application_id '{application_id}' in {roles_path}")


def main():
    parser = argparse.ArgumentParser(
        description='Determine the Ansible role folder by application_id'
    )
    parser.add_argument(
        'application_id',
        help='The application_id defined in vars/main.yml to search for'
    )
    parser.add_argument(
        '-r', '--roles-path',
        default=os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            os.pardir, os.pardir, os.pardir,
            'roles'
        ),
        help='Path to the roles directory (default: roles/ at project root)'
    )

    args = parser.parse_args()

    try:
        folder = get_role(args.application_id, args.roles_path)
        print(folder)
        sys.exit(0)
    except RuntimeError as err:
        print(f"Error: {err}", file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
78
cli/meta/categories/invokable.py
Executable file
@@ -0,0 +1,78 @@
#!/usr/bin/env python3
"""
CLI for extracting invokable or non-invokable role paths from a nested roles YAML file using argparse.
Assumes a default roles file at the project root if none is provided.
"""

import os
import sys

# ─── Determine project root ───
if "__file__" in globals():
    # The project root is three directories above this file (cli/meta/categories/).
    project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
else:
    project_root = os.getcwd()

# Ensure project root on PYTHONPATH so 'filter_plugins' can be imported
sys.path.insert(0, project_root)

import argparse
import yaml
from filter_plugins.invokable_paths import get_invokable_paths, get_non_invokable_paths


def main():
    parser = argparse.ArgumentParser(
        description="Extract invokable or non-invokable role paths from a nested roles YAML file."
    )
    parser.add_argument(
        "roles_file",
        nargs='?',
        default=None,
        help="Path to the roles YAML file (default: roles/categories.yml at project root)"
    )
    parser.add_argument(
        "--suffix", "-s",
        help="Optional suffix to append to each path.",
        default=None
    )

    mode_group = parser.add_mutually_exclusive_group()
    mode_group.add_argument(
        "--non-invokable", "-n",
        action='store_true',
        help="List paths where 'invokable' is False or not set."
    )
    mode_group.add_argument(
        "--invokable", "-i",
        action='store_true',
        help="List paths where 'invokable' is True. (default behavior)"
    )

    args = parser.parse_args()

    # Default to invokable if neither flag is provided
    list_non = args.non_invokable
    list_inv = args.invokable or not (args.non_invokable or args.invokable)

    try:
        if list_non:
            paths = get_non_invokable_paths(args.roles_file, args.suffix)
        else:
            paths = get_invokable_paths(args.roles_file, args.suffix)
    except FileNotFoundError as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)
    except yaml.YAMLError as e:
        print(f"Error parsing YAML: {e}", file=sys.stderr)
        sys.exit(1)
    except ValueError as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)

    for p in paths:
        print(p)


if __name__ == "__main__":
    main()
76
cli/meta/j2/compiler.py
Executable file
@@ -0,0 +1,76 @@
#!/usr/bin/env python3
import argparse
import os
import re
import sys

# Project root: four levels above this file
PROJECT_ROOT = os.path.dirname(
    os.path.dirname(
        os.path.dirname(
            os.path.dirname(__file__)
        )
    )
)

INCLUDE_RE = re.compile(r"^(\s*)\{%\s*include\s*['\"]([^'\"]+)['\"]\s*%\}")

def expand_includes(rel_path, seen=None):
    """
    Read the file rel_path (relative to PROJECT_ROOT) and recursively
    replace every "{% include 'path' %}" line with the content of the
    included file, preserving the surrounding indentation.
    """
    if seen is None:
        seen = set()
    rp = rel_path.replace("\\", "/")
    if rp in seen:
        raise RuntimeError(f"Circular include detected: {rp}")
    seen.add(rp)

    abs_path = os.path.join(PROJECT_ROOT, rp)
    if not os.path.isfile(abs_path):
        raise FileNotFoundError(f"Template not found: {rp}")

    output_lines = []
    for line in open(abs_path, encoding="utf-8"):
        m = INCLUDE_RE.match(line)
        if not m:
            output_lines.append(line.rstrip("\n"))
        else:
            indent, inc_rel = m.group(1), m.group(2)
            # recursive call
            for inc_line in expand_includes(inc_rel, seen):
                output_lines.append(indent + inc_line)
    seen.remove(rp)
    return output_lines

def parse_args():
    p = argparse.ArgumentParser(
        description="Expand all {% include '...' %} directives in a Jinja2 template (no variable rendering)."
    )
    p.add_argument("template", help="Template path relative to project root")
    p.add_argument(
        "--out",
        help="If given, write output to this file instead of stdout",
        default=None
    )
    return p.parse_args()

def main():
    args = parse_args()

    try:
        lines = expand_includes(args.template)
        text = "\n".join(lines)
        if args.out:
            with open(args.out, "w", encoding="utf-8") as f:
                f.write(text + "\n")
        else:
            print(text)
    except Exception as e:
        sys.stderr.write(f"Error: {e}\n")
        sys.exit(1)

if __name__ == "__main__":
    main()
@@ -1,105 +0,0 @@
import argparse
import subprocess
from ansible.parsing.vault import VaultLib, VaultSecret
import sys
import yaml
import re
from utils.handler.vault import VaultScalar
from yaml.loader import SafeLoader
from yaml.dumper import SafeDumper

# Register the custom constructor and representer for VaultScalar in PyYAML
SafeLoader.add_constructor('!vault', lambda loader, node: VaultScalar(node.value))
SafeDumper.add_representer(VaultScalar, lambda dumper, data: dumper.represent_scalar('!vault', data))

def is_vault_encrypted_data(data: str) -> bool:
    """Check if the given data is encrypted with Ansible Vault by looking for the vault header."""
    return data.lstrip().startswith('$ANSIBLE_VAULT')

def decrypt_vault_data(encrypted_data: str, vault_secret: VaultSecret) -> str:
    """
    Decrypt the given encrypted data using the provided vault_secret.
    :param encrypted_data: Encrypted string to be decrypted
    :param vault_secret: The VaultSecret instance used to decrypt the data
    :return: Decrypted data as a string
    """
    vault = VaultLib()
    decrypted_data = vault.decrypt(encrypted_data, vault_secret)
    return decrypted_data

def decrypt_vault_file(vault_file: str, vault_password_file: str):
    """
    Decrypt the Ansible Vault file and return its contents.
    :param vault_file: Path to the encrypted Ansible Vault file
    :param vault_password_file: Path to the file containing the Vault password
    :return: Decrypted contents of the Vault file
    """
    # Read the vault password
    with open(vault_password_file, 'r') as f:
        vault_password = f.read().strip()

    # Create a VaultSecret instance from the password
    vault_secret = VaultSecret(vault_password.encode())

    # Read the encrypted file
    with open(vault_file, 'r') as f:
        file_content = f.read()

    # If the file is partially encrypted, we'll decrypt only the encrypted values
    decrypted_data = file_content  # Start with the unmodified content

    # Find all vault-encrypted values (i.e., values starting with $ANSIBLE_VAULT)
    encrypted_values = re.findall(r'^\s*([\w\.\-_]+):\s*["\']?\$ANSIBLE_VAULT[^\n]+', file_content, flags=re.MULTILINE)

    # If there are encrypted values, decrypt them
    for value in encrypted_values:
        # Extract the encrypted value and decrypt it
        encrypted_value = re.search(r'(["\']?\$ANSIBLE_VAULT[^\n]+)', value)
        if encrypted_value:
            # Remove any newlines or extra spaces from the encrypted value
            encrypted_value = encrypted_value.group(0).replace('\n', '').replace('\r', '')
            decrypted_value = decrypt_vault_data(encrypted_value, vault_secret)
            # Replace the encrypted value with the decrypted value in the content
            decrypted_data = decrypted_data.replace(encrypted_value, decrypted_value.strip())

    return decrypted_data

def decrypt_and_display(vault_file: str, vault_password_file: str):
    """
    Decrypt the Ansible Vault file and its values, then display the result.
    Supports both full-file and partial value encryption.
    :param vault_file: Path to the encrypted Ansible Vault file
    :param vault_password_file: Path to the file containing the Vault password
    """
    decrypted_data = decrypt_vault_file(vault_file, vault_password_file)

    # Convert the decrypted data to a string format (YAML or JSON)
    output_data = yaml.dump(yaml.safe_load(decrypted_data), default_flow_style=False)

    # Use subprocess to call `less` for paginated, scrollable output
    subprocess.run(["less"], input=output_data, text=True)

def main():
    # Set up the argument parser
    parser = argparse.ArgumentParser(description="Decrypt and display variables from an Ansible Vault file.")

    # Add arguments for the vault file and vault password file
    parser.add_argument(
        'vault_file',
        type=str,
        help="Path to the encrypted Ansible Vault file"
    )
    parser.add_argument(
        'vault_password_file',
        type=str,
        help="Path to the file containing the Vault password"
    )

    # Parse the arguments
    args = parser.parse_args()

    # Display vault variables in a scrollable manner
    decrypt_and_display(args.vault_file, args.vault_password_file)

if __name__ == "__main__":
    main()
124
cli/sounds.py
@@ -1,124 +0,0 @@
import numpy as np
import simpleaudio as sa

class Sound:
    """
    Sound effects for the application with enhanced complexity.
    Each sound uses at least 6 distinct tones and lasts no more than max_length seconds,
    except the intro sound: a detailed ~26-second Berlin-techno-style piece consisting of a build-up, a 12-second celebration with a descending-fifth sequence of 7 chords, and a breakdown with melodic background.
    Transitions between phases crossfade over 3 seconds for a smoother flow.
    """

    fs = 44100  # Sampling rate (samples per second)
    complexity_factor = 10  # Number of harmonics to sum for richer timbres
    max_length = 2.0  # Maximum total duration of any sound in seconds

    @staticmethod
    def _generate_complex_wave(frequency: float, duration: float, harmonics: int = None) -> np.ndarray:
        if harmonics is None:
            harmonics = Sound.complexity_factor
        t = np.linspace(0, duration, int(Sound.fs * duration), False)
        wave = np.zeros_like(t)
        for n in range(1, harmonics + 1):
            wave += (1 / n) * np.sin(2 * np.pi * frequency * n * t)
        # ADSR envelope
        attack = int(0.02 * Sound.fs)
        release = int(0.05 * Sound.fs)
        env = np.ones_like(wave)
        env[:attack] = np.linspace(0, 1, attack)
        env[-release:] = np.linspace(1, 0, release)
        wave *= env
        wave /= np.max(np.abs(wave))
        return (wave * (2**15 - 1)).astype(np.int16)

    @staticmethod
    def _crossfade(w1: np.ndarray, w2: np.ndarray, fade_len: int) -> np.ndarray:
        # Ensure fade_len does not exceed the length of either input
        fade_len = min(fade_len, len(w1), len(w2))
        fade_out = np.linspace(1, 0, fade_len)
        fade_in = np.linspace(0, 1, fade_len)
        w1_end = w1[-fade_len:] * fade_out
        w2_start = w2[:fade_len] * fade_in
        middle = (w1_end + w2_start).astype(np.int16)
        return np.concatenate([w1[:-fade_len], middle, w2[fade_len:]])

    @staticmethod
    def _play(wave: np.ndarray):
        play_obj = sa.play_buffer(wave, 1, 2, Sound.fs)
        play_obj.wait_done()

    @classmethod
    def play_cymais_intro_sound(cls):
        # Phase durations
        build_time = 10.0
        celebr_time = 12.0
        breakdown_time = 10.0
        overlap = 3.0  # seconds of crossfade
        bass_seg = 0.125  # 1/8 s kick
        melody_seg = 0.25  # 2/8 s melody
        bass_freq = 65.41  # C2 kick
        melody_freqs = [261.63, 293.66, 329.63, 392.00, 440.00, 523.25]

        # Build-up phase
        steps = int(build_time / (bass_seg + melody_seg))
        build_seq = []
        for i in range(steps):
            amp = (i + 1) / steps
            b = cls._generate_complex_wave(bass_freq, bass_seg).astype(np.float32) * amp
            m = cls._generate_complex_wave(melody_freqs[i % len(melody_freqs)], melody_seg).astype(np.float32) * amp
            build_seq.append(b.astype(np.int16))
            build_seq.append(m.astype(np.int16))
        build_wave = np.concatenate(build_seq)

        # Celebration phase: 7 descending-fifth chords
        roots = [523.25, 349.23, 233.08, 155.56, 103.83, 69.30, 46.25]
        chord_time = celebr_time / len(roots)
        celebr_seq = []
        for root in roots:
            t = np.linspace(0, chord_time, int(cls.fs * chord_time), False)
            chord = sum(np.sin(2 * np.pi * f * t) for f in [root, root * 5/4, root * 3/2])
            chord /= np.max(np.abs(chord))
            celebr_seq.append((chord * (2**15 - 1)).astype(np.int16))
        celebr_wave = np.concatenate(celebr_seq)

        # Breakdown phase (mirror of build-up)
        breakdown_wave = np.concatenate(list(reversed(build_seq)))

        # Crossfade transitions
        fade_samples = int(overlap * cls.fs)
        bc = cls._crossfade(build_wave, celebr_wave, fade_samples)
        full = cls._crossfade(bc, breakdown_wave, fade_samples)

        cls._play(full)

    @classmethod
    def play_start_sound(cls):
        freqs = [523.25, 659.26, 783.99, 880.00, 1046.50, 1174.66]
        cls._prepare_and_play(freqs)

    @classmethod
    def play_finished_successfully_sound(cls):
        freqs = [523.25, 587.33, 659.26, 783.99, 880.00, 987.77]
        cls._prepare_and_play(freqs)

    @classmethod
    def play_finished_failed_sound(cls):
        freqs = [880.00, 830.61, 783.99, 659.26, 622.25, 523.25]
        durations = [0.4, 0.3, 0.25, 0.25, 0.25, 0.25]
        cls._prepare_and_play(freqs, durations)

    @classmethod
    def play_warning_sound(cls):
        freqs = [700.00, 550.00, 750.00, 500.00, 800.00, 450.00]
        cls._prepare_and_play(freqs)

    @classmethod
    def _prepare_and_play(cls, freqs, durations=None):
        count = len(freqs)
        if durations is None:
            durations = [cls.max_length / count] * count
        else:
            total = sum(durations)
            durations = [d * cls.max_length / total for d in durations]
        waves = [cls._generate_complex_wave(f, d) for f, d in zip(freqs, durations)]
        cls._play(np.concatenate(waves))
@@ -1,165 +0,0 @@
import secrets
import hashlib
import bcrypt
from pathlib import Path
from typing import Dict
from utils.handler.yaml import YamlHandler
from utils.handler.vault import VaultHandler, VaultScalar
import string
import sys
import base64

class InventoryManager:
    def __init__(self, role_path: Path, inventory_path: Path, vault_pw: str, overrides: Dict[str, str]):
        """Initialize the Inventory Manager."""
        self.role_path = role_path
        self.inventory_path = inventory_path
        self.vault_pw = vault_pw
        self.overrides = overrides
        self.inventory = YamlHandler.load_yaml(inventory_path)
        self.schema = YamlHandler.load_yaml(role_path / "meta" / "schema.yml")
        self.app_id = self.load_application_id(role_path)

        self.vault_handler = VaultHandler(vault_pw)

    def load_application_id(self, role_path: Path) -> str:
        """Load the application ID from the role's vars/main.yml file."""
        vars_file = role_path / "vars" / "main.yml"
        data = YamlHandler.load_yaml(vars_file)
        app_id = data.get("application_id")
        if not app_id:
            print(f"ERROR: 'application_id' missing in {vars_file}", file=sys.stderr)
            sys.exit(1)
        return app_id

    def apply_schema(self) -> Dict:
        """Apply the schema and return the updated inventory."""
        apps = self.inventory.setdefault("applications", {})
        target = apps.setdefault(self.app_id, {})

        # Load the configuration from vars/configuration.yml
        vars_file = self.role_path / "vars" / "configuration.yml"
        data = YamlHandler.load_yaml(vars_file)

        # Check whether 'central_database' is enabled in the features section
        if "features" in data:
            if "central_database" in data["features"] and \
               data["features"]["central_database"]:
                # Add a 'database_password' value to the credentials
                target.setdefault("credentials", {})["database_password"] = self.generate_value("alphanumeric")
            if "oauth2" in data["features"] and \
               data["features"]["oauth2"]:
                target.setdefault("credentials", {})["oauth2_proxy_cookie_secret"] = self.generate_value("random_hex_16")

        # Apply recursion only for the `credentials` section
        self.recurse_credentials(self.schema, target)
        return self.inventory

    def recurse_credentials(self, branch: dict, dest: dict, prefix: str = ""):
        """Recursively process only the 'credentials' section and generate values."""
        for key, meta in branch.items():
            full_key = f"{prefix}.{key}" if prefix else key

            # Only process the 'credentials' section for encryption
            if prefix == "credentials" and isinstance(meta, dict) and all(k in meta for k in ("description", "algorithm", "validation")):
                alg = meta["algorithm"]
                if alg == "plain":
                    # Must be supplied via --set
                    if full_key not in self.overrides:
                        print(f"ERROR: Plain algorithm for '{full_key}' requires override via --set {full_key}=<value>", file=sys.stderr)
                        sys.exit(1)
                    plain = self.overrides[full_key]
                else:
                    plain = self.overrides.get(full_key, self.generate_value(alg))

                # Check if the value is already vaulted or if it's a dictionary
                existing_value = dest.get(key)

                # If existing_value is a dictionary, print a warning and skip encryption
                if isinstance(existing_value, dict):
                    print(f"Skipping encryption for '{key}', as it is a dictionary.")
                    continue

                # Check if the value is a VaultScalar and already vaulted
                if existing_value and isinstance(existing_value, VaultScalar):
                    print(f"Skipping encryption for '{key}', as it is already vaulted.")
                    continue

                # Encrypt only if it's not already vaulted
                snippet = self.vault_handler.encrypt_string(plain, key)
                lines = snippet.splitlines()
                indent = len(lines[1]) - len(lines[1].lstrip())
                body = "\n".join(line[indent:] for line in lines[1:])
                dest[key] = VaultScalar(body)

            elif isinstance(meta, dict):
                sub = dest.setdefault(key, {})
                self.recurse_credentials(meta, sub, full_key)
            else:
                dest[key] = meta


    def generate_secure_alphanumeric(self, length: int) -> str:
        """Generate a cryptographically secure random alphanumeric string of the given length."""
        characters = string.ascii_letters + string.digits  # a-zA-Z0-9
        return ''.join(secrets.choice(characters) for _ in range(length))

    def generate_value(self, algorithm: str) -> str:
        """
        Generate a random secret value according to the specified algorithm.

        Supported algorithms:
          • "random_hex"
            – Returns a 64-byte (512-bit) secure random string, encoded as 128 hexadecimal characters.
            – Use when you need maximum entropy in a hex-only format.

          • "sha256"
            – Generates 32 random bytes, hashes them with SHA-256, and returns a 64-character hex digest.
            – Good for when you want a fixed-length (256-bit) hash output.

          • "sha1"
            – Generates 20 random bytes, hashes them with SHA-1, and returns a 40-character hex digest.
            – Only use in legacy contexts; SHA-1 is considered weaker than SHA-256.

          • "bcrypt"
            – Creates a random 16-byte URL-safe password, then applies a bcrypt hash.
            – Suitable for storing user-style passwords where bcrypt verification is needed.

          • "alphanumeric"
            – Produces a 64-character string drawn from [A–Z, a–z, 0–9].
            – Offers ≈380 bits of entropy; human-friendly charset.

          • "base64_prefixed_32"
            – Generates 32 random bytes, encodes them in Base64, and prefixes the result with "base64:".
            – Useful when downstream systems expect a Base64 format.

          • "random_hex_16"
            – Returns 16 random bytes (128 bits) encoded as 32 hexadecimal characters.
            – Handy for shorter tokens or salts.

        Returns:
            A securely generated string according to the chosen algorithm.
        """
        if algorithm == "random_hex":
            return secrets.token_hex(64)

        if algorithm == "sha256":
            return hashlib.sha256(secrets.token_bytes(32)).hexdigest()
        if algorithm == "sha1":
            return hashlib.sha1(secrets.token_bytes(20)).hexdigest()
        if algorithm == "bcrypt":
            # Generate a random password and hash it with bcrypt
            pw = secrets.token_urlsafe(16).encode()
            raw_hash = bcrypt.hashpw(pw, bcrypt.gensalt()).decode()
            # Replace every '$' with a random lowercase alphanumeric character
            alnum = string.digits + string.ascii_lowercase
            escaped = "".join(secrets.choice(alnum) if ch == '$' else ch for ch in raw_hash)
            return escaped
        if algorithm == "alphanumeric":
            return self.generate_secure_alphanumeric(64)
        if algorithm == "base64_prefixed_32":
            return "base64:" + base64.b64encode(secrets.token_bytes(32)).decode()
        if algorithm == "random_hex_16":
            # 16 bytes → 32 hex characters
            return secrets.token_hex(16)
        return "undefined"
154
cli/validate/inventory.py
Normal file
@@ -0,0 +1,154 @@
#!/usr/bin/env python3
import argparse
import sys
import yaml
import re
from pathlib import Path

# Ensure imports work when run directly
script_dir = Path(__file__).resolve().parent
repo_root = script_dir.parent.parent
sys.path.insert(0, str(repo_root))

from cli.meta.applications.all import find_application_ids

def load_yaml_file(path):
    try:
        with open(path, 'r', encoding='utf-8') as f:
            content = f.read()
            content = re.sub(r'(?m)^([ \t]*[^\s:]+):\s*!vault[\s\S]+?(?=^\S|\Z)', r"\1: \"<vaulted>\"\n", content)
            return yaml.safe_load(content)
    except Exception as e:
        print(f"Warning: Could not parse {path}: {e}", file=sys.stderr)
        return None


def recursive_keys(d, prefix=''):
    keys = set()
    if isinstance(d, dict):
        for k, v in d.items():
            full = f"{prefix}.{k}" if prefix else k
            keys.add(full)
            keys.update(recursive_keys(v, full))
    return keys


def compare_application_keys(applications, defaults, source):
    errs = []
    for app_id, conf in applications.items():
        if app_id not in defaults:
            errs.append(f"{source}: Unknown application '{app_id}' (not in defaults_applications)")
            continue
        default = defaults[app_id]
        app_keys = recursive_keys(conf)
        def_keys = recursive_keys(default)
        for key in app_keys:
            if key.startswith('credentials'):
                continue
            if key not in def_keys:
                errs.append(f"{source}: Missing default for {app_id}: {key}")
    return errs


def compare_user_keys(users, default_users, source):
    errs = []
    for user, conf in users.items():
        if user not in default_users:
            print(f"Warning: {source}: Unknown user '{user}' (not in default_users)", file=sys.stderr)
            continue
        def_conf = default_users[user]
        for key in conf:
            if key in ('password', 'credentials', 'mailu_token'):
                continue
            if key not in def_conf:
                errs.append(f"Missing default for user '{user}': key '{key}'")
    return errs


def load_inventory_files(inv_dir):
    all_data = {}
    p = Path(inv_dir)
    for f in p.glob('*.yml'):
        data = load_yaml_file(f)
        if isinstance(data, dict):
            apps = data.get('applications') or data.get('defaults_applications')
            if apps:
                all_data[str(f)] = apps
    for d in p.glob('*_vars'):
        if d.is_dir():
            for f in d.rglob('*.yml'):
                data = load_yaml_file(f)
                if isinstance(data, dict):
                    apps = data.get('applications') or data.get('defaults_applications')
                    if apps:
                        all_data[str(f)] = apps
    return all_data


def validate_host_keys(app_ids, inv_dir):
    errs = []
    p = Path(inv_dir)
    # Scan all top-level YAMLs for 'all.children'
    for f in p.glob('*.yml'):
        data = load_yaml_file(f)
        if not isinstance(data, dict):
            continue
        all_node = data.get('all', {})
        children = all_node.get('children')
        if not isinstance(children, dict):
            continue
        for grp in children.keys():
            if grp not in app_ids:
                errs.append(f"{f}: Invalid group '{grp}' (not in application_ids)")
    return errs


def find_single_file(pattern):
    c = list(Path('group_vars/all').glob(pattern))
    if len(c) != 1:
        raise RuntimeError(f"Expected exactly one {pattern} in group_vars/all, found {len(c)}")
    return c[0]


def main():
    p = argparse.ArgumentParser()
    p.add_argument('inventory_dir')
    args = p.parse_args()
    # defaults
    dfile = find_single_file('*_applications.yml')
    ufile = find_single_file('*users.yml')
    ddata = load_yaml_file(dfile) or {}
    udata = load_yaml_file(ufile) or {}
    defaults = ddata.get('defaults_applications', {})
    default_users = udata.get('default_users', {})
    if not defaults:
        print(f"Error: No 'defaults_applications' found in {dfile}", file=sys.stderr)
        sys.exit(1)
    if not default_users:
        print(f"Error: No 'default_users' found in {ufile}", file=sys.stderr)
        sys.exit(1)
    app_errs = []
    inv_files = load_inventory_files(args.inventory_dir)
    for src, apps in inv_files.items():
        app_errs.extend(compare_application_keys(apps, defaults, src))
    user_errs = []
    for fpath in Path(args.inventory_dir).rglob('*.yml'):
        data = load_yaml_file(fpath)
        if isinstance(data, dict) and 'users' in data:
            errs = compare_user_keys(data['users'], default_users, str(fpath))
            for e in errs:
                print(e, file=sys.stderr)
            user_errs.extend(errs)
    host_errs = validate_host_keys(find_application_ids(), args.inventory_dir)
    app_errs.extend(host_errs)
    if app_errs or user_errs:
        if app_errs:
            print('Validation failed with the following issues:')
            for e in app_errs:
                print(f"- {e}")
        sys.exit(1)
    print('Inventory directory is valid against defaults and hosts.')
    sys.exit(0)

if __name__ == '__main__':
    main()
@@ -1,144 +0,0 @@
#!/usr/bin/env python3
import argparse
import sys
import yaml
import re
from pathlib import Path


def load_yaml_file(path):
    try:
        with open(path, "r", encoding="utf-8") as f:
            content = f.read()
            content = re.sub(r'(?m)^([ \t]*[^\s:]+):\s*!vault[\s\S]+?(?=^\S|\Z)', r'\1: "<vaulted>"\n', content)
            return yaml.safe_load(content)
    except Exception as e:
        print(f"Warning: Could not parse {path}: {e}", file=sys.stderr)
        return None


def recursive_keys(d, prefix=""):
    keys = set()
    if isinstance(d, dict):
        for k, v in d.items():
            full_key = f"{prefix}.{k}" if prefix else k
            keys.add(full_key)
            keys.update(recursive_keys(v, full_key))
    return keys


def compare_application_keys(applications, defaults, source_file):
    errors = []
    for app_id, app_conf in applications.items():
        if app_id not in defaults:
            errors.append(f"{source_file}: Unknown application '{app_id}' (not in defaults_applications)")
            continue

        default_conf = defaults.get(app_id, {})
        app_keys = recursive_keys(app_conf)
        default_keys = recursive_keys(default_conf)

        for key in app_keys:
            if key.startswith("credentials."):
                continue  # explicitly ignore credentials
            if key not in default_keys:
                errors.append(f"{source_file}: Missing default for {app_id}: {key}")
    return errors


def compare_user_keys(users, default_users, source_file):
    errors = []
    for username, user_conf in users.items():
        if username not in default_users:
            print(f"Warning: {source_file}: Unknown user '{username}' (not in default_users)", file=sys.stderr)
            continue

        default_conf = default_users.get(username, {})
        for key in user_conf:
            if key in ("password", "credentials", "mailu_token"):
                continue  # ignore credentials/password
            if key not in default_conf:
                raise Exception(f"{source_file}: Missing default for user '{username}': key '{key}'")
    return errors


def load_inventory_files(inventory_dir):
    all_data = {}
    inventory_path = Path(inventory_dir)

    for path in inventory_path.glob("*.yml"):
        data = load_yaml_file(path)
        if isinstance(data, dict):
            applications = data.get("applications") or data.get("defaults_applications")
            if applications:
                all_data[path] = applications

    for vars_folder in inventory_path.glob("*_vars"):
        if vars_folder.is_dir():
            for subfile in vars_folder.rglob("*.yml"):
                data = load_yaml_file(subfile)
                if isinstance(data, dict):
                    applications = data.get("applications") or data.get("defaults_applications")
                    if applications:
                        all_data[subfile] = applications

    return all_data


def find_single_file(pattern):
    candidates = list(Path("group_vars/all").glob(pattern))
    if len(candidates) != 1:
        raise RuntimeError(f"Expected exactly one {pattern} file in group_vars/all, found {len(candidates)}")
    return candidates[0]


def main():
    parser = argparse.ArgumentParser(description="Verify application and user variable consistency with defaults.")
    parser.add_argument("inventory_dir", help="Path to inventory directory (contains inventory.yml and *_vars/)")
    args = parser.parse_args()

    defaults_path = find_single_file("*_applications.yml")
    users_path = find_single_file("*users.yml")

    defaults_data = load_yaml_file(defaults_path)
    default_users_data = load_yaml_file(users_path)

    defaults = defaults_data.get("defaults_applications", {}) if defaults_data else {}
    default_users = default_users_data.get("default_users", {}) if default_users_data else {}

    if not defaults:
        print(f"Error: No 'defaults_applications' found in {defaults_path}.", file=sys.stderr)
        sys.exit(1)
    if not default_users:
        print(f"Error: No 'default_users' found in {users_path}.", file=sys.stderr)
        sys.exit(1)

    all_errors = []

    inventory_files = load_inventory_files(args.inventory_dir)
    for source_path, app_data in inventory_files.items():
        errors = compare_application_keys(app_data, defaults, str(source_path))
        all_errors.extend(errors)

    # Load all users.yml files from inventory
    for path in Path(args.inventory_dir).rglob("*.yml"):
        data = load_yaml_file(path)
        if isinstance(data, dict) and "users" in data:
            try:
                compare_user_keys(data["users"], default_users, str(path))
            except Exception as e:
                print(e, file=sys.stderr)
                sys.exit(1)

    if all_errors:
        print("Validation failed with the following issues:")
        for err in all_errors:
            print("-", err)
        sys.exit(1)
    else:
        print("Inventory directory is valid against defaults.")
        sys.exit(0)


if __name__ == "__main__":
    main()
@@ -1,8 +1,8 @@
-# CyMaIS Architecture Overview
+# Infinito.Nexus Architecture

## Introduction

-CyMaIS (Cyber Master Infrastructure Solution) is a modular, open-source IT infrastructure automation platform designed to simplify the deployment, management, and security of self-hosted environments.
+[Infinito.Nexus](https://infinito.nexus) is a modular, open-source IT infrastructure automation platform designed to simplify the deployment, management, and security of self-hosted environments.

It provides a flexible, scalable, and secure architecture based on modern [DevOps](https://en.wikipedia.org/wiki/DevOps) principles, leveraging technologies like [Ansible](https://en.wikipedia.org/wiki/Ansible_(software)), [Docker](https://en.wikipedia.org/wiki/Docker_(software)), and [Infrastructure as Code (IaC)](https://en.wikipedia.org/wiki/Infrastructure_as_code).

@@ -55,4 +55,4 @@ https://github.com/kevinveenbirkenbach/hetzner-arch-luks

---

-> *CyMaIS — Modular. Secure. Automated. Decentralized.*
+> *Infinito.Nexus — Modular. Secure. Automated. Decentralized.*
124
docs/Docker.md
Normal file
@@ -0,0 +1,124 @@
# Docker Build Guide 🚢

This guide explains how to build the **Infinito.Nexus** Docker image with advanced options to avoid common issues (e.g. mirror timeouts) and control build caching.

---

## 1. Enable BuildKit (Optional but Recommended)

Modern versions of Docker support **BuildKit**, which speeds up build processes and offers better caching.

```bash
# On your host, enable BuildKit for the current shell session:
export DOCKER_BUILDKIT=1
```

> **Note:** You only need to set this once per terminal session.

---

## 2. Build Arguments Explained

When you encounter errors like:

```text
:: Synchronizing package databases...
error: failed retrieving file 'core.db' from geo.mirror.pkgbuild.com : Connection timed out after 10002 milliseconds
error: failed to synchronize all databases (failed to retrieve some files)
```

it usually means the default container network cannot reach certain Arch Linux mirrors. To work around this, use:

* `--network=host`
  Routes all build-time network traffic through your host’s network stack.

* `--no-cache`
  Forces a fresh build of every layer by ignoring Docker’s layer cache. Useful if you suspect stale cache entries.

---

## 3. Recommended Build Command

```bash
# 1. (Optional) Enable BuildKit
export DOCKER_BUILDKIT=1

# 2. Build with host networking and no cache
docker build \
  --network=host \
  --no-cache \
  -t infinito:latest \
  .
```

**Flags:**

* `--network=host`
  Ensures all `pacman -Syu` and other network calls hit your host network directly—eliminating mirror connection timeouts.

* `--no-cache`
  Guarantees that changes to package lists or dependencies are picked up immediately by rebuilding every layer.

* `-t infinito:latest`
  Tags the resulting image as `infinito:latest`.

---

## 4. Running the Container

Once built, you can run Infinito.Nexus as usual:

```bash
docker run --rm -it \
  -v "$(pwd)":/opt/infinito \
  -w /opt/infinito \
  infinito:latest --help
```

Mount any host directory into `/opt/infinito/logs` to persist logs across runs.
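
For example, a minimal sketch (the host-side `./logs` directory is an arbitrary choice):

```bash
# Bind-mount a host directory onto the container's logs path:
docker run --rm -it \
  -v "$(pwd)":/opt/infinito \
  -v "$(pwd)/logs":/opt/infinito/logs \
  -w /opt/infinito \
  infinito:latest --help
```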

---

## 5. Further Troubleshooting

* **Mirror selection:** If you still see slow or unreachable mirrors, consider customizing `/etc/pacman.d/mirrorlist` in a local Docker stage or on your host to prioritize faster mirrors (see the sketch below).
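
One possible sketch, assuming the `reflector` package is available on your Arch host or inside a build stage (adjust the flags to taste):

```bash
# Rank the 10 most recently synced mirrors by download rate
# and write them to the mirrorlist used by pacman.
reflector --latest 10 --sort rate --save /etc/pacman.d/mirrorlist
```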

* **Firewall or VPN:** Ensure your host’s firewall or VPN allows outgoing connections on ports 443/80 to Arch mirror servers.

* **Docker daemon config:** On some networks, you may need to configure Docker’s daemon proxy settings under `/etc/docker/daemon.json`, for example as sketched below.
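
A minimal sketch for recent Docker Engine releases (the proxy URL and port are placeholders for your environment):

```bash
# Write a proxy configuration for the Docker daemon (values are placeholders):
sudo tee /etc/docker/daemon.json > /dev/null <<'EOF'
{
  "proxies": {
    "http-proxy": "http://proxy.example.com:3128",
    "https-proxy": "http://proxy.example.com:3128",
    "no-proxy": "localhost,127.0.0.1"
  }
}
EOF

# Restart the daemon so the new settings take effect:
sudo systemctl restart docker
```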

## 6. Live Development via Volume Mount

The Infinito.Nexus installation inside the container always resides at:

```
/root/Repositories/github.com/kevinveenbirkenbach/infinito
```

To apply code changes without rebuilding the image, mount your local installation directory into that static path:

```bash
# 1. Determine the Infinito.Nexus install path on your host
INFINITO_PATH=$(pkgmgr path infinito)

# 2. Launch the container with a bind mount:
docker run --rm -it \
  -v "${INFINITO_PATH}:/root/Repositories/github.com/kevinveenbirkenbach/infinito" \
  -w "/root/Repositories/github.com/kevinveenbirkenbach/infinito" \
  infinito:latest make build
```

Or, to test the CLI help interactively:

```bash
docker run --rm -it \
  -v "${INFINITO_PATH}:/root/Repositories/github.com/kevinveenbirkenbach/infinito" \
  -w "/root/Repositories/github.com/kevinveenbirkenbach/infinito" \
  infinito:latest --help
```

Any edits you make in `${INFINITO_PATH}` on your host are immediately reflected inside the container, eliminating the need for repeated `docker build` cycles.

---

With these options, your Docker builds should complete reliably, even in restrictive network environments. Happy building! 🚀
2
docs/TODO.md
Normal file
@@ -0,0 +1,2 @@
# TODO
- Move these files to https://hub.cymais.cloud
@@ -1,26 +0,0 @@
# Features

**CyMaIS - Cyber Master Infrastructure Solution** revolutionizes IT infrastructure management, making it simpler, safer, and more adaptable for businesses of all sizes. Here’s how it can benefit your organization:

## Effortless Setup and Management 🚀
Setting up and managing IT systems has never been easier. CyMaIS automates complex tasks, whether on Linux servers or personal computers, reducing manual effort and saving valuable time.

## Comprehensive IT Solutions 🛠️
CyMaIS covers everything from essential system setups to advanced configurations, including VPN, Docker, Ansible-based deployments, security optimizations, and monitoring tools. This makes IT management seamless and efficient.

## Tailored for Your Needs 🎯
Every business is unique, and so is CyMaIS! With a modular architecture, it adapts to specific requirements, whether for startups, growing businesses, NGOs, or large enterprises.

## Proactive Monitoring & Maintenance 🔍
With automated updates, system health checks, and security audits, CyMaIS ensures your infrastructure is always up-to-date and running smoothly. Roles such as `health-docker-container`, `health-btrfs`, and `health-nginx` help monitor system integrity.

## Uncompromised Security 🔒
Security is a top priority! CyMaIS includes robust security features like full-disk encryption recommendations, 2FA enforcement, encrypted server deployments (`docker-keycloak`, `docker-ldap`), and secure backup solutions (`backup-remote-to-local`, `backup-data-to-usb`).

## User-Friendly with Expert Support 👩‍💻
No need to be a Linux or Docker expert! CyMaIS simplifies deployment with intuitive role-based automation. Documentation and community support make IT administration accessible to all experience levels.

## Open Source Trust & Transparency 🔓
As an open-source project, CyMaIS guarantees transparency, security, and community-driven development, ensuring continuous improvements and adherence to industry best practices.

For further information, check out the [application glossary](roles/application_glosar), [applications ordered by category](roles/application_categories) and the [detailed Ansible role descriptions](roles/ansible_role_glosar).
@@ -1,34 +0,0 @@
# Situation Analysis

This is the Situation Analysis for [CyMaIS](https://cymais.cloud), highlighting the challenges we aim to address.

## Short

The problem stems from businesses and individuals being dependent on monopolistic cloud providers, losing control over their data, facing security risks, and being vulnerable to geopolitical manipulation, while small businesses struggle to set up secure, enterprise-level IT infrastructures due to a lack of resources and expertise.

## Explanation

In today’s digital landscape, data is predominantly stored in the cloud, controlled by large corporations such as Microsoft, AWS, and other cloud providers. This creates a dependency on these providers, leading to increasingly expensive services and a lack of control over critical business data.

As organizations rely on these monopolistic players for their cloud services, they surrender ownership of their data, becoming vulnerable to the whims of these companies. This dependency puts them at the mercy of cloud and software giants, who not only dictate pricing and service levels but also influence the very governance of data.

Moreover, the ease with which governments, intelligence agencies, and private corporations can access sensitive data is a growing concern. With increasing surveillance capabilities, the privacy of users and businesses is constantly at risk, further amplifying the vulnerability of data stored in centralized cloud infrastructures.

Additionally, the dominance of these companies in sectors like social media further exacerbates the issue, making individuals and organizations susceptible to manipulation and control.

The problem intensifies in times of political unrest or global conflicts. As data is often centrally stored with monopolistic providers, businesses become highly dependent on these entities for accessing their data and services. This dependency increases the risk of coercion or pressure from governments or private corporations, leading to potential **extortion**. Governments may attempt to gain leverage over businesses by threatening access to critical data or services, while private companies may exploit this dependency for their own interests.

In essence, the lack of sovereignty over data and the increasing control of a few monopolistic entities undermine the fundamental values of privacy, security, and independence. Organizations, especially small businesses, are left vulnerable to external pressures, making them pawns in a larger game dominated by these cloud and software giants.

Furthermore, for small businesses, setting up enterprise-level open-source infrastructure with integrated solutions such as **Single Sign-On (SSO)**, **Identity and Access Management (IAM)**, **encryption**, **backup solutions**, and other essential IT services is nearly impossible. These businesses lack the resources, both financial and human, to deploy secure IT infrastructures at an enterprise level.

System administrators in small companies often don’t have the specialized knowledge or the capacity to build and maintain such complex infrastructures, which further exacerbates the challenge of securing sensitive business data while ensuring compliance with industry standards.

## Key Points
- Dependency on monopolists
- Loss of data sovereignty
- Geopolitical vulnerabilities
- Lack of resources
- Limited secure infrastructure expertise
- Centralized data storage risks
- Manipulation through social media
@@ -1,40 +0,0 @@
# Market Analysis for CyMaIS in Berlin

## 1. Introduction
Berlin is recognized as one of Europe's leading innovation and technology hubs. The capital is characterized by a dynamic start-up scene, numerous SMEs, and international corporations that drive digital transformation. This creates a promising market for modular IT infrastructure solutions like CyMaIS.

## 2. Market Overview and Business Landscape
- **Diverse Economic Hub:**
  Berlin is home to an estimated several tens of thousands of companies—from innovative start-ups to established mid-sized businesses and large enterprises.
- **Digital Innovation:**
  The city is known for its high concentration of technology companies, digital service providers, and creative industries constantly seeking efficient IT solutions.
- **Support and Infrastructure:**
  Numerous initiatives, funding programs, and well-developed networks of technology parks and coworking spaces support the city’s digital progress.

## 3. Level of Digitalization and IT Needs
- **Advanced Yet Heterogeneous Digitalization:**
  Many Berlin companies already use modern IT solutions, but traditional businesses often require significant upgrades in integrating advanced infrastructure and cybersecurity measures.
- **Increasing Demands:**
  Rising business process complexity and stricter requirements for data protection and security are driving the need for individualized, scalable IT solutions.

## 4. Overall Market Volume (Estimation)
- **Estimated Market Volume:**
  Considering the diverse company sizes and varying investment levels—from start-ups to large enterprises—the annual overall market volume for IT infrastructure modernization solutions in Berlin is roughly estimated at **€1–2 billion**.
  This figure reflects the aggregate potential of digital transformation initiatives across Berlin’s vibrant business ecosystem.

## 5. Price Segments and Investment Readiness
- **Low-Priced Segment:**
  Many start-ups and small companies are capable of investing approximately €10,000–30,000 to set up basic infrastructures.
- **Mid-Priced Segment:**
  Established SMEs in Berlin are typically prepared to invest between €40,000 and €70,000 in tailored IT solutions that incorporate additional functionalities and security standards.
- **High-Priced Segment:**
  Large enterprises and specialized industrial businesses invest in complex integration solutions starting at around €100,000 to implement comprehensive digital transformation projects.

## 6. Competitive Landscape and Positioning
- **High Innovation Pressure:**
  Berlin's vibrant IT and digital services sector is highly competitive. To stand out, solutions must be flexible, scalable, and seamlessly integrable.
- **CyMaIS Advantages:**
  The modular architecture of CyMaIS is well suited to the individual requirements of Berlin’s diverse businesses—from start-ups to large industrial projects. Additionally, its focus on cybersecurity and continuous updates offers a decisive added value.

## 7. Conclusion
Berlin offers an attractive market potential for IT infrastructure solutions. With a vibrant innovation landscape, a considerable overall market volume estimated at €1–2 billion, and numerous companies needing to take the next step in digital transformation, CyMaIS is well positioned as a powerful, modular solution. The combination of a dynamic start-up ecosystem and established businesses promises attractive long-term growth opportunities.
@@ -1,37 +0,0 @@
# Berlin Market Diagrams

## 1. Digitalization in Berlin (Pie Chart)
```mermaid
pie
    title Berlin: IT Digitalization Status
    "Fully Modernized (25%)": 25
    "Partially Digitalized (45%)": 45
    "Requires Significant Upgrades (30%)": 30
```
*This pie chart displays the estimated IT digitalization status for Berlin-based companies, with 25% fully modernized, 45% partially digitalized, and 30% requiring major upgrades.*

## 2. Investment Segments in Berlin (Flowchart)
```mermaid
flowchart LR
    A[Investment Segments in Berlin]
    B["Low-Priced (€10k-30k): 40%"]
    C["Mid-Priced (€40k-70k): 40%"]
    D["High-Priced (€100k+): 20%"]

    A --> B
    A --> C
    A --> D
```
*This flowchart shows the distribution of investment segments for IT infrastructure projects in Berlin, categorized into low-, mid-, and high-priced solutions.*

## 3. Berlin Market Volume & Drivers (Flowchart)
```mermaid
flowchart TD
    A[Berlin IT Infrastructure Market]
    B[Market Volume: €1-2 Billion]
    C[Drivers: Start-up Ecosystem, Established Firms, Local Initiatives]

    A --> B
    A --> C
```
*This diagram outlines Berlin's overall market volume (estimated at €1–2 billion) and identifies the main drivers such as the vibrant start-up ecosystem and support from local initiatives.*
@@ -1,77 +0,0 @@
# Market Analysis for CyMaIS in Europe

This analysis provides a detailed overview of the potential for CyMaIS – a modular IT infrastructure solution – in the European market.

## 1. Introduction
CyMaIS addresses the growing need for flexible and scalable IT infrastructure solutions that support companies in their digital transformation. The European market, characterized by diverse economic systems, offers a variety of opportunities and challenges.

## 2. Market Overview and Digitalization in Europe
- **Business Landscape:**
  - Europe is home to an estimated 20–25 million companies, most of which are small and medium-sized enterprises (SMEs).
  - Business structures vary significantly between regions: while countries such as the Nordic nations, Estonia, or Germany are highly advanced, other markets lag behind in certain aspects.

- **Degree of Digitalization:**
  - Basic digital technologies have been implemented in many European companies; however, recent studies indicate that only about 50–60% have reached a basic level of digitalization.
  - A large share of companies – approximately 70–80% – faces the challenge of further modernizing their IT infrastructures, particularly in areas like cybersecurity and automation.

## 3. Analysis of the Demand for IT Infrastructure Solutions
- **Target Market:**
  - There is significant demand across Europe for solutions that modernize outdated IT structures while meeting increased requirements for data protection, security, and efficiency.
  - SMEs, as well as larger companies in sectors with high security and compliance needs, can particularly benefit from specialized, modular solutions like CyMaIS.

- **Core Requirements:**
  - Integration of modern IT components
  - Enhancement of cybersecurity
  - Support for automation and data analytics

## 4. Pricing Segments and Cost Structure
CyMaIS offers solutions that can be tailored to different budgets and requirements:

- **Low-Priced Segment (Basic Setup):**
  - **Costs:** Approximately €10,000–30,000
  - **Target Group:** Small companies requiring standardized IT solutions

- **Mid-Priced Segment:**
  - **Costs:** Approximately €40,000–70,000
  - **Target Group:** Medium-sized companies with specific customization needs

- **High-Priced Segment (Complex, Customized Solutions):**
  - **Costs:** From €100,000 upwards
  - **Target Group:** Large companies and projects with extensive integration requirements

## 5. Total Market Volume and Revenue Potential
- **Total Market Volume:**
  - The revenue potential for IT infrastructure solutions in Europe is estimated at approximately **€300–500 billion**.
  - This figure includes investments in hardware, software, consulting and integration services, as well as ongoing IT support services.

- **Growth Drivers:**
  - The continuous need for digital transformation
  - Increasing security requirements (cybersecurity)
  - Government funding programs and initiatives that support digitalization across many European countries

## 6. Competitive Environment and Positioning of CyMaIS
- **Competition:**
  - The European market is fragmented: in addition to major global IT service providers, there are numerous local providers.
  - Cross-border differences create diverse market conditions where specialized, modular solutions can offer a strategic advantage.

- **Competitive Advantages of CyMaIS:**
  - **Modularity and Flexibility:** Enables tailor-made adaptation to individual business requirements
  - **Scalability:** Ranges from basic solutions for SMEs to complex system integrations for large enterprises
  - **Seamless Integration:** Incorporates modern IT components, including advanced security solutions

## 7. Opportunities and Challenges
- **Opportunities:**
  - Increasing investments in digital transformation and cybersecurity
  - High demand in under-served markets and among SMEs needing to modernize their IT infrastructures
  - Potential for international expansion through adaptable, modular solutions

- **Challenges:**
  - Varied levels of digitalization and differing economic conditions across European countries
  - Intense competition and pricing pressure, particularly in mature markets
  - Country-specific regulations and compliance requirements that necessitate customized adaptations

## 8. Conclusion
The European market offers significant potential for CyMaIS. With an estimated total market volume of €300–500 billion and a large number of companies needing to modernize their IT infrastructures, CyMaIS is well positioned as a flexible and scalable solution—ideal for meeting the diverse requirements of the European market. In the long term, ongoing digitalization and increasing security needs present attractive growth opportunities.

## Sources
- Analysis based on an interactive discussion with [ChatGPT](https://chatgpt.com/c/67f95f70-865c-800f-bd97-864a36f9b498) on April 11, 2025.
@@ -1,38 +0,0 @@
# Europe Market Diagrams

## 1. Digitalization Status (Pie Chart)

```mermaid
pie
title Europe: Digitalization Status
"Fully Modernized (20%)": 20
"Partially Digitalized (50%)": 50
"Needs Advanced Modernization (30%)": 30
```

*This pie chart illustrates the digitalization status across European companies, with 20% fully modernized, 50% partially digitalized, and 30% needing advanced modernization.*

## 2. Investment Segments (Flowchart)

```mermaid
flowchart LR
A[European Investment Segments]
B["Low-Priced (€10k-30k): 35%"]
C["Mid-Priced (€40k-70k): 45%"]
D["High-Priced (€100k+): 20%"]

A --> B
A --> C
A --> D
```

*This flowchart depicts the breakdown of IT investment segments in Europe, with approximate percentages for low-, mid-, and high-priced solutions.*

## 3. Overall Market Volume & Drivers (Flowchart)

```mermaid
flowchart TD
A[European IT Infrastructure Market]
B[Market Volume: €300-500 Billion]
C[Drivers: Digital Transformation, Cybersecurity, Govt. Initiatives]

A --> B
A --> C
```

*This diagram presents the European market’s overall volume (estimated at €300–500 billion) and highlights the main growth drivers such as digital transformation initiatives and cybersecurity needs.*
@@ -1,83 +0,0 @@
# Market Analysis for CyMaIS in Germany

This analysis provides a detailed overview of the market potential of CyMaIS – a modular solution for establishing and managing modern IT infrastructures – in the German market.

## 1. Introduction
CyMaIS addresses the increasing need for modern, flexible IT infrastructure solutions in Germany. In particular, small and medium-sized enterprises (SMEs) face the challenge of advancing their digitalization while meeting security requirements. CyMaIS offers modular, customizable solutions ranging from basic setups to complex integration projects.

## 2. Market Overview and Digitalization in Germany
- **Business Landscape:**
  - There are approximately 3.5 million companies in Germany.
  - Over 99% of these companies are SMEs.

- **Degree of Digitalization:**
  - About 60–70% have already implemented basic digital technologies.
  - An estimated 75–85% of companies require additional support to build modern IT infrastructures (including cybersecurity, automation, and data management).

## 3. Analysis of the Demand for IT Infrastructure Solutions
- **Target Market:**
  - Approximately 2.6 to 3 million companies – predominantly SMEs – face the challenge of modernizing outdated or incomplete IT structures.
  - Industries with high security requirements and a strong need for digital transformation particularly benefit from specialized solutions like CyMaIS.

- **Core Requirements:**
  - Integration of modern IT components
  - Enhancement of cybersecurity
  - Support for process automation and data analytics

## 4. Pricing Segments and Cost Structure
CyMaIS caters to different pricing segments in order to meet the diverse needs of companies:

- **Low-Priced Segment (Basic Setup):**
  - **Costs:** Approximately €10,000–30,000
  - **Target Group:** Smaller companies with standardized IT requirements
  - **Market Share:** Estimated 30–40% of potential customers

- **Mid-Priced Segment:**
  - **Costs:** Approximately €40,000–70,000
  - **Target Group:** Medium-sized companies with individual customization needs
  - **Market Share:** Around 20–25% of companies

- **High-Priced Segment (Complex, Customized Solutions):**
  - **Costs:** Starting from €100,000 and above
  - **Target Group:** Large companies and highly specialized projects
  - **Market Share:** About 5–10% of potential customers

## 5. Total Market Volume and Revenue Potential
- **Market Volume:**
  - The total market volume for IT infrastructure solutions in Germany is estimated at approximately **€80–120 billion**.

- **Influencing Factors:**
  - The scope of required solutions
  - Consulting and integration services
  - Ongoing investments in cybersecurity and digitalization

- **Growth Drivers:**
  - Increasing digitalization across all industries
  - Rising security requirements (cybersecurity)
  - Government programs and initiatives supporting digital transformation

## 6. Competitive Environment and Positioning of CyMaIS
- **Competition:**
  - The market for IT infrastructure solutions in Germany is fragmented, with numerous providers offering standardized as well as specialized solutions.

- **Competitive Advantages of CyMaIS:**
  - **Modularity:** Flexible adaptation to individual business needs
  - **Scalability:** From basic setups to complex systems
  - **Integration:** Seamless incorporation of modern IT components, including security solutions

## 7. Opportunities and Challenges
- **Opportunities:**
  - Growing demand for digital transformation and security solutions
  - High market penetration among SMEs that have yet to modernize their IT infrastructures
  - Government funding and initiatives for digitalization

- **Challenges:**
  - Strong competition and pricing pressure
  - Varied IT and digitalization levels across companies
  - Technological complexity and the need for customized adaptations

## 8. Conclusion
The German IT market offers significant potential for CyMaIS. With an estimated market volume of €80–120 billion and approximately 2.6 to 3 million companies needing to modernize their IT infrastructures, CyMaIS is well positioned. The modular and scalable nature of its solutions enables it to serve both small and large companies with individual requirements. In the long term, ongoing digitalization and increasing security demands present attractive growth opportunities for CyMaIS.

## Sources
- Analysis based on a conversation conducted with [ChatGPT](https://chatgpt.com/share/67f9608d-3904-800f-a9ca-9b893e252c05) on April 11, 2025.
@@ -1,37 +0,0 @@
# Germany Market Diagrams

## 1. Digitalization / IT Modernization Need (Pie Chart)

```mermaid
pie
title Germany: IT Modernization Status
"Fully Modernized (20%)": 20
"Partially Digitalized (30%)": 30
"Requires Major Modernization (50%)": 50
```

*This diagram shows the estimated distribution of digitalization among German companies: 20% are fully modernized, 30% are partially digitalized, and 50% need major IT upgrades.*

## 2. Investment/Price Segments (Flowchart)

```mermaid
flowchart LR
A[Investment Segments]
B["Low-Priced (€10k-30k): 40%"]
C["Mid-Priced (€40k-70k): 40%"]
D["High-Priced (€100k+): 20%"]

A --> B
A --> C
A --> D
```

*This flowchart represents the distribution of investment segments in Germany, indicating that approximately 40% of projects fall into the low- and mid-priced categories each, with 20% in the high-priced bracket.*

## 3. Overall Market Volume & Drivers (Flowchart)

```mermaid
flowchart TD
A[German IT Infrastructure Market]
B[Market Volume: €80-120 Billion]
C[Drivers: Digital Transformation, Cybersecurity, Integration]

A --> B
A --> C
```

*This diagram outlines the overall market volume (estimated at €80–120 billion) and the key drivers shaping the demand for IT infrastructure solutions in Germany.*
@@ -1,77 +0,0 @@
# Global Market Analysis for CyMaIS

This analysis provides a detailed overview of the global potential for CyMaIS – a modular IT infrastructure solution – addressing the growing worldwide demand for digital transformation and advanced cybersecurity measures.

## 1. Introduction
CyMaIS is designed to support enterprises in modernizing their IT infrastructures. As digital transformation accelerates globally, organizations of all sizes require scalable and flexible solutions to manage cybersecurity, automation, and data management. This analysis evaluates the global market potential for CyMaIS across diverse economic regions.

## 2. Global Market Overview and Digitalization
- **Business Landscape:**
  - There are estimated to be hundreds of millions of companies worldwide, with tens of millions being small and medium-sized enterprises (SMEs).
  - Developed markets (North America, Europe, parts of Asia) typically exhibit higher digitalization rates, whereas emerging markets are rapidly catching up.

- **Degree of Digitalization:**
  - Many large enterprises have implemented advanced digital technologies, while a significant proportion of SMEs—potentially over 70% globally—still need to progress beyond basic digitalization.
  - This gap is particularly apparent in regions where legacy systems are prevalent or where investment in IT modernization has been historically low.

## 3. Analysis of the Demand for IT Infrastructure Solutions
- **Target Market:**
  - Globally, the demand for modern IT infrastructure solutions is strong due to rising cybersecurity threats, the need for automation, and the increasing reliance on data analytics.
  - Industries across sectors—from finance and manufacturing to healthcare and retail—are actively seeking solutions to overhaul outdated IT systems.

- **Core Requirements:**
  - Seamless integration of modern IT components
  - Robust cybersecurity measures
  - Tools for process automation and data-driven decision-making

## 4. Pricing Segments and Cost Structure
CyMaIS offers a range of solutions tailored to different budget levels and technical needs, including:

- **Low-Priced Segment (Basic Setup):**
  - **Costs:** Approximately €10,000–30,000
  - **Target Group:** Small companies looking for standardized IT solutions

- **Mid-Priced Segment:**
  - **Costs:** Approximately €40,000–70,000
  - **Target Group:** Medium-sized companies with customization requirements

- **High-Priced Segment (Complex, Customized Solutions):**
  - **Costs:** From €100,000 upwards
  - **Target Group:** Large enterprises and projects with extensive integration and security needs

## 5. Total Market Volume and Revenue Potential
- **Global Market Volume:**
  - The overall revenue potential for modern IT infrastructure solutions worldwide is substantial, with estimates ranging between **€1–1.5 trillion**.
  - This figure comprises investments in hardware, software, consulting, integration services, and ongoing IT support.

- **Growth Drivers:**
  - The accelerating pace of digital transformation worldwide
  - Increasing incidence of cybersecurity threats
  - Government initiatives and private-sector investments that promote digitalization

## 6. Competitive Environment and Positioning of CyMaIS
- **Competition:**
  - The global market is highly competitive, featuring major multinational IT service providers as well as numerous regional and niche players.
  - Diverse regulatory environments and economic conditions across regions create both challenges and opportunities for market entrants.

- **Competitive Advantages of CyMaIS:**
  - **Modularity and Flexibility:** Allows tailored solutions to meet a wide range of business needs
  - **Scalability:** Suitable for organizations from startups to multinational corporations
  - **Integration Capabilities:** Supports seamless incorporation of modern IT components along with advanced cybersecurity features

## 7. Opportunities and Challenges
- **Opportunities:**
  - Rapid digital transformation across all regions creates a sustained demand for IT modernization
  - High potential in emerging markets where digital infrastructure is underdeveloped
  - Opportunities for strategic partnerships and government-driven digital initiatives

- **Challenges:**
  - Navigating diverse regulatory landscapes and varying levels of IT maturity
  - Intense global competition and pricing pressures
  - Continuously evolving cybersecurity threats and technological changes that necessitate ongoing innovation

## 8. Conclusion
The global market presents significant opportunities for CyMaIS. With an estimated market volume of €1–1.5 trillion and millions of companies worldwide in need of modernized IT infrastructures, CyMaIS is well positioned to capture a diverse range of customers. Its modular and scalable solutions can meet the unique challenges and requirements of different markets, making it a competitive choice in the rapidly evolving field of digital transformation and cybersecurity.

## Sources
- Analysis based on an interactive discussion with [ChatGPT](https://chat.openai.com) on April 11, 2025.
@@ -1,37 +0,0 @@
# Global Market Diagrams

## 1. Global Digitalization Status (Pie Chart)

```mermaid
pie
title Global Digitalization Status
"Advanced Digitalization (30%)": 30
"Moderate Digitalization (40%)": 40
"Needs Significant Modernization (30%)": 30
```

*This pie chart shows an estimated global digitalization distribution: 30% of companies are advanced, 40% have moderate digitalization, and 30% require significant modernization.*

## 2. Global Investment Segments (Flowchart)

```mermaid
flowchart LR
A[Global Investment Segments]
B["Low-Priced (€10k-30k): 40%"]
C["Mid-Priced (€40k-70k): 40%"]
D["High-Priced (€100k+): 20%"]

A --> B
A --> C
A --> D
```

*This flowchart illustrates the distribution of investment segments globally, indicating that roughly 40% of IT projects fall into both the low- and mid-price categories, with 20% in the high-price category.*

## 3. Overall Global Market Volume & Drivers (Flowchart)

```mermaid
flowchart TD
A[Global IT Infrastructure Market]
B[Market Volume: €1-1.5 Trillion]
C[Drivers: Accelerated Digitalization, Cybersecurity, Global Investments]

A --> B
A --> C
```

*This diagram outlines the global market volume (estimated between €1–1.5 trillion) and the key factors fueling growth, such as digital transformation and cybersecurity initiatives.*
@@ -1,53 +0,0 @@
# Migration Feature

## Seamless Migration of Existing Software Solutions to CyMaIS

CyMaIS is designed to simplify the migration of existing software solutions and IT infrastructures. The focus is on protecting existing investments while enabling the benefits of a modern and unified platform.

---

## Integration of Existing Applications

Existing applications can be easily integrated into the [CyMaIS](https://example.com) dashboard. There is no need to migrate or modify existing software — CyMaIS provides a central interface to access and manage already deployed systems.

---

## Parallel Operation of Existing Infrastructure

CyMaIS supports a parallel operation model, allowing the existing IT infrastructure to run alongside CyMaIS without disruption. This enables a step-by-step migration strategy in which applications and user groups can be transitioned gradually.

---

## Flexible User Management and Single Sign-On (SSO)

CyMaIS offers flexible user management by supporting multiple directory services:

- [Microsoft Active Directory (AD)](https://en.wikipedia.org/wiki/Active_Directory)
- [LDAP (Lightweight Directory Access Protocol)](https://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol)

In both scenarios, centralized authentication is provided through [Keycloak](https://www.keycloak.org/), enabling modern [Single Sign-On (SSO)](https://en.wikipedia.org/wiki/Single_sign-on) capabilities — not only for applications managed by CyMaIS but also for existing external services.

---

## Key Points

- Simple migration of existing software solutions
- Integration of existing apps into the dashboard
- Parallel operation of CyMaIS and existing infrastructure is fully supported
- User management via [Active Directory](https://en.wikipedia.org/wiki/Active_Directory) or [LDAP](https://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol)
- Central authentication with [SSO](https://en.wikipedia.org/wiki/Single_sign-on) using [Keycloak](https://www.keycloak.org/)

---

## Summary of Migration Benefits

| Feature                        | Description                                                       |
|--------------------------------|-------------------------------------------------------------------|
| Easy Application Integration   | Integrate existing applications into the CyMaIS dashboard         |
| Parallel Operation Supported   | Continue using your current infrastructure without disruption     |
| Flexible User Management       | Support for AD and LDAP directory services                        |
| Single Sign-On (SSO)           | Centralized authentication via Keycloak                           |

---

CyMaIS enables a smooth and controlled migration path — customized to the individual needs of your organization.
@@ -2,7 +2,7 @@
## Ansible Vault Basics

CyMaIS uses Ansible Vault to protect sensitive data (e.g. passwords). Use these common commands:
Infinito.Nexus uses Ansible Vault to protect sensitive data (e.g. passwords). Use these common commands:

### Edit an Encrypted File
```bash
# Placeholder path — point this at your encrypted file
ansible-vault edit path/to/encrypted_file.yml
```
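
A few other `ansible-vault` invocations fit the same workflow. This is a minimal sketch using standard `ansible-vault` subcommands; the file paths and variable name are placeholders:

```bash
# Encrypt an existing plaintext file in place
ansible-vault encrypt group_vars/all/secrets.yml

# Decrypt it temporarily for inspection (re-encrypt afterwards)
ansible-vault decrypt group_vars/all/secrets.yml

# Encrypt a single value for inline use in a vars file
ansible-vault encrypt_string 'S3cr3t!' --name 'db_password'
```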
@@ -1,6 +1,6 @@
# 🚀 Deployment Guide

This section explains how to deploy and manage the **Cyber Master Infrastructure Solution (CyMaIS)** using Ansible. CyMaIS uses a collection of Ansible tasks, which are controlled via different **"modes"** — such as **updates**, **backups**, **resets**, and **cleanup** operations.
This section explains how to deploy and manage **[Infinito.Nexus](https://infinito.nexus)** using Ansible. Infinito.Nexus uses a collection of Ansible tasks, which are controlled via different **"modes"** — such as **updates**, **backups**, **resets**, and **cleanup** operations.

---

@@ -9,27 +9,27 @@ This section explains how to deploy and manage the **Cyber Master Infrastructure
Before deploying, ensure the following are in place:

- **🧭 Inventory File:** A valid Ansible inventory file that defines your target systems (servers, personal computers, etc.). Adjust example paths to your environment.
- **📦 CyMaIS Installed:** Install via [Kevin's Package-Manager](https://github.com/kevinveenbirkenbach/package-manager).
- **📦 Infinito.Nexus Installed:** Install via [Kevin's Package-Manager](https://github.com/kevinveenbirkenbach/package-manager).
- **🔐 (Optional) Vault Password File:** If you don't want to enter your vault password interactively, create a password file.

---

## 📘 Show CyMaIS Help
## 📘 Show Infinito.Nexus Help

To get a full overview of available options and usage instructions, run:

```bash
cymais --help
infinito --help
```

---

## 💡 Example Deploy Command

To deploy CyMaIS on a personal computer (e.g., a laptop), you can run:
To deploy Infinito.Nexus on a personal computer (e.g., a laptop), you can run:

```bash
cymais playbook \
infinito playbook \
  --limit hp-spectre-x360 \
  --host-type personal-computer \
  --update \
```
@@ -41,7 +41,7 @@ cymais playbook \
| Parameter | Description |
|----------|-------------|
| `playbook` | Executes the playbook subcommand of CyMaIS. |
| `playbook` | Executes the playbook subcommand of Infinito.Nexus. |
| `--limit hp-spectre-x360` | Limits execution to a specific host (`hp-spectre-x360`). |
| `--host-type personal-computer` | Defines the host type. Default is `server`; here it is set to `personal-computer`. |
| `--update` | Enables update mode to apply software or configuration updates. |
@@ -64,7 +64,7 @@ To avoid typing your vault password interactively, you can provide a file:
## 🔍 Full Command-Line Reference

Here’s a breakdown of all available parameters from `cymais playbook --help`:
Here’s a breakdown of all available parameters from `infinito playbook --help`:

| Argument | Description |
|----------|-------------|
@@ -87,7 +87,7 @@ Here’s a breakdown of all available parameters from `cymais playbook --help`:
You can mix and match modes like this:

```bash
cymais playbook --update --backup --cleanup pcs.yml
infinito playbook --update --backup --cleanup pcs.yml
```

This will update the system, create a backup, and clean up unnecessary files in one run.
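
Since the prerequisites above mention an optional vault password file, here is a minimal sketch of creating one. The shell commands are standard; the `--password-file` flag name is an assumption — consult `infinito playbook --help` for the authoritative option name:

```bash
# Create a password file readable only by you (example content)
install -m 600 /dev/null ~/.infinito_vault_pass
echo 'my-vault-password' > ~/.infinito_vault_pass

# Assumed flag name; verify against the command-line reference above
infinito playbook --update --password-file ~/.infinito_vault_pass pcs.yml
```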
docs/guides/administrator/README.md (new file)
@@ -0,0 +1,22 @@
# Administrator Guide

This guide is for **system administrators** who are deploying and managing Infinito.Nexus infrastructure.

## Setting Up Infinito.Nexus 🏗️
Follow these guides to install and configure Infinito.Nexus:
- [Setup Guide](SETUP_GUIDE.md)
- [Configuration Guide](CONFIGURATION.md)
- [Deployment Guide](DEPLOY.md)

## Key Responsibilities 🔧
- **User Management** - Configure LDAP, Keycloak, and user permissions.
- **Security & Backups** - Set up `sys-bkp-rmt-2-loc`, `svc-bkp-loc-2-usb`, and `core-security` roles.
- **Application Hosting** - Deploy services like `Nextcloud`, `Matrix`, `Gitea`, and more.
- **Networking & VPN** - Configure `WireGuard`, `OpenVPN`, and `Nginx Reverse Proxy`.

## Managing & Updating Infinito.Nexus 🔄
- Regularly update services using `update-pacman` or `update-apt`.
- Monitor system health with `sys-ctl-hlth-btrfs`, `sys-ctl-hlth-webserver`, and `sys-ctl-hlth-docker-container` (see the sketch below).
- Automate system maintenance with `sys-lock`, `sys-ctl-cln-bkps`, and `sys-ctl-rpr-docker-hard`.
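
If these health roles install systemd units of the same name — an assumption, since the unit names are not spelled out here — their status and logs can be checked like this:

```bash
# Assumed unit name, derived from the role name above
systemctl status sys-ctl-hlth-docker-container.service
journalctl -u sys-ctl-hlth-docker-container.service --since today
```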
For more details, refer to the specific guides above.
@@ -1,22 +0,0 @@
# Administrator Guide

This guide is for **system administrators** who are deploying and managing CyMaIS infrastructure.

## Setting Up CyMaIS 🏗️
Follow these guides to install and configure CyMaIS:
- [Setup Guide](SETUP_GUIDE.md)
- [Configuration Guide](CONFIGURATION.md)
- [Deployment Guide](DEPLOY.md)

## Key Responsibilities 🔧
- **User Management** - Configure LDAP, Keycloak, and user permissions.
- **Security & Backups** - Set up `backup-remote-to-local`, `backup-data-to-usb`, and `system-security` roles.
- **Application Hosting** - Deploy services like `Nextcloud`, `Matrix`, `Gitea`, and more.
- **Networking & VPN** - Configure `WireGuard`, `OpenVPN`, and `Nginx Reverse Proxy`.

## Managing & Updating CyMaIS 🔄
- Regularly update services using `update-docker`, `update-pacman`, or `update-apt`.
- Monitor system health with `health-btrfs`, `health-nginx`, and `health-docker-container`.
- Automate system maintenance with `system-maintenance-lock`, `cleanup-backups-service`, and `restart-docker`.

For more details, refer to the specific guides above.
@@ -1,27 +1,27 @@
# Security Guidelines

CyMaIS is designed with security in mind. However, while following our guidelines can greatly improve your system’s security, no IT system can be 100% secure. Please report any vulnerabilities as soon as possible.
Infinito.Nexus is designed with security in mind. However, while following our guidelines can greatly improve your system’s security, no IT system can be 100% secure. Please report any vulnerabilities as soon as possible.

In addition to the user security guidelines, administrators have further responsibilities to secure the entire system:

- **Deploy on an Encrypted Server**
  It is recommended to install CyMaIS on an encrypted server to prevent hosting providers from accessing end-user data. For a practical guide on setting up an encrypted server, refer to the [Hetzner Arch LUKS repository](https://github.com/kevinveenbirkenbach/hetzner-arch-luks) 🔐. (Learn more about [disk encryption](https://en.wikipedia.org/wiki/Disk_encryption) on Wikipedia.)
  It is recommended to install Infinito.Nexus on an encrypted server to prevent hosting providers from accessing end-user data. For a practical guide on setting up an encrypted server, refer to the [Hetzner Arch LUKS repository](https://github.com/kevinveenbirkenbach/hetzner-arch-luks) 🔐. (Learn more about [disk encryption](https://en.wikipedia.org/wiki/Disk_encryption) on Wikipedia.)

- **Centralized User Management & SSO**
  For robust authentication and central user management, set up CyMaIS using Keycloak and LDAP.
  For robust authentication and central user management, set up Infinito.Nexus using Keycloak and LDAP.
  This configuration enables centralized [Single Sign-On (SSO)](https://en.wikipedia.org/wiki/Single_sign-on), simplifying user management and boosting security.

- **Enforce 2FA and Use a Password Manager**
  Administrators should also enforce [2FA](https://en.wikipedia.org/wiki/Multi-factor_authentication) and use a password manager with auto-generated passwords. We again recommend [KeePass](https://keepass.info/). The KeePass database can be stored securely in your Nextcloud instance and synchronized between devices.

- **Avoid Root Logins & Plaintext Passwords**
  CyMaIS forbids logging in via the root user or using simple passwords. Instead, an SSH key must be generated and transferred during system initialization. When executing commands as root, always use `sudo` (or, if necessary, `sudo su`—but only if you understand the risks). (More information on [SSH](https://en.wikipedia.org/wiki/Secure_Shell) and [sudo](https://en.wikipedia.org/wiki/Sudo) is available on Wikipedia.)
  Infinito.Nexus forbids logging in via the root user or using simple passwords. Instead, an SSH key must be generated and transferred during system initialization. When executing commands as root, always use `sudo` (or, if necessary, `sudo su`—but only if you understand the risks). (More information on [SSH](https://en.wikipedia.org/wiki/Secure_Shell) and [sudo](https://en.wikipedia.org/wiki/Sudo) is available on Wikipedia.)
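
  Since an SSH key must be generated and transferred during system initialization, a minimal sketch of that step follows; the user and host names are placeholders:

  ```bash
  # Generate a modern Ed25519 key pair
  ssh-keygen -t ed25519 -C "admin@example.com"

  # Copy the public key to the target host while password login is still enabled
  ssh-copy-id -i ~/.ssh/id_ed25519.pub admin@server.example.com
  ```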

- **Manage Inventories Securely**
  Your inventories for running CyMaIS should be managed in a separate repository and secured with tools such as [Ansible Vault](https://en.wikipedia.org/wiki/Encryption) 🔒. Sensitive credentials must never be stored in plaintext; use a password file to secure these details.
  Your inventories for running Infinito.Nexus should be managed in a separate repository and secured with tools such as [Ansible Vault](https://en.wikipedia.org/wiki/Encryption) 🔒. Sensitive credentials must never be stored in plaintext; use a password file to secure these details.

- **Reporting Vulnerabilities**
  If you discover a security vulnerability in CyMaIS, please report it immediately. We encourage proactive vulnerability reporting so that issues can be addressed as quickly as possible. Contact our security team at [security@cymais.cloud](mailto:security@cymais.cloud)
  If you discover a security vulnerability in Infinito.Nexus, please report it immediately. We encourage proactive vulnerability reporting so that issues can be addressed as quickly as possible. Contact our security team at [security@infinito.nexus](mailto:security@infinito.nexus)
  **DO NOT OPEN AN ISSUE.**

---
@@ -1,26 +1,26 @@
# Setup Guide

To set up CyMaIS, follow these steps:
To set up Infinito.Nexus, follow these steps:

## Prerequisites

Before you set up CyMaIS, you need to install [Kevin's Package Manager](https://github.com/kevinveenbirkenbach/package-manager).
Before you set up Infinito.Nexus, you need to install [Kevin's Package Manager](https://github.com/kevinveenbirkenbach/package-manager).
Follow the installation instructions described [here](https://github.com/kevinveenbirkenbach/package-manager).

## Setup CyMaIS
## Setup Infinito.Nexus

To set up CyMaIS, execute:
To set up Infinito.Nexus, execute:

```bash
pkgmgr install cymais
pkgmgr install infinito
```

This command will set up CyMaIS on your system with the alias **cymais**.
This command will set up Infinito.Nexus on your system with the alias **infinito**.

## Get Help

After you have set up CyMaIS, you can get more help by executing:
After you have set up Infinito.Nexus, you can get more help by executing:

```bash
cymais --help
infinito --help
```
@@ -1,6 +1,6 @@
## 📖 CyMaIS.Cloud Ansible & Python Directory Guide
## 📖 Infinito.Nexus Ansible & Python Directory Guide

This document provides a **decision matrix** for when to use each default Ansible plugin and module directory in the context of **CyMaIS.Cloud development** with Ansible and Python. It links to official docs, explains use-cases, and points back to our conversation.
This document provides a **decision matrix** for when to use each default Ansible plugin and module directory in the context of **Infinito.Nexus development** with Ansible and Python. It links to official docs, explains use-cases, and points back to our conversation.

---
@@ -31,12 +31,12 @@ ansible-repo/
### 🎯 Decision Matrix: Which Folder for What?

| Folder | Type | Use-Case | Example (CyMaIS.Cloud) | Emoji |
| Folder | Type | Use-Case | Example (Infinito.Nexus) | Emoji |
| -------------------- | -------------------- | ---------------------------------------- | ----------------------------------------------------- | ----- |
| `library/` | **Module** | Write idempotent actions | `cloud_network.py`: manage VPCs, subnets | 📦 |
| `filter_plugins/` | **Filter plugin** | Jinja2 data transforms in templates/vars | `to_camel_case.py`: convert keys for API calls | 🔍 |
| `lookup_plugins/` | **Lookup plugin** | Fetch external/secure data at runtime | `vault_lookup.py`: pull secrets from CyMaIS Vault | 👉 |
| `module_utils/` | **Utility library** | Shared Python code for modules | `cymais_client.py`: common API client base class | 🛠️ |
| `lookup_plugins/` | **Lookup plugin** | Fetch external/secure data at runtime | `vault_lookup.py`: pull secrets from Infinito.Nexus Vault | 👉 |
| `module_utils/` | **Utility library** | Shared Python code for modules | `infinito_client.py`: common API client base class | 🛠️ |
| `action_plugins/` | **Action plugin** | Complex task orchestration wrappers | `deploy_stack.py`: sequence Terraform + Ansible steps | ⚙️ |
| `callback_plugins/` | **Callback plugin** | Customize log/report behavior | `notify_slack.py`: send playbook status to Slack | 📣 |
| `inventory_plugins/` | **Inventory plugin** | Dynamic host/group sources | `azure_inventory.py`: list hosts from Azure tags | 🌐 |
@@ -96,15 +96,15 @@ ansible-repo/
---

### 🚀 CyMaIS.Cloud Best Practices
### 🚀 Infinito.Nexus Best Practices

* **Organize modules** by service under `library/cloud/` (e.g., `vm`, `network`, `storage`).
* **Shared client code** in `module_utils/cymais/` for authentication, request handling.
* **Secrets lookup** via `lookup_plugins/vault_lookup.py` pointing to CyMaIS Vault.
* **Shared client code** in `module_utils/infinito/` for authentication, request handling.
* **Secrets lookup** via `lookup_plugins/vault_lookup.py` pointing to Infinito.Nexus Vault.
* **Filters** to normalize data formats from cloud APIs (e.g., `snake_to_camel`; see the sketch below).
* **Callbacks** to stream playbook results into CyMaIS Monitoring.
* **Callbacks** to stream playbook results into Infinito.Nexus Monitoring.
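
To make the `filter_plugins/` row above concrete, here is a minimal sketch of a `snake_to_camel` filter; the file name and contents are illustrative, following the standard Ansible `FilterModule` convention:

```python
# filter_plugins/snake_to_camel.py — hypothetical example
def snake_to_camel(value):
    """Convert a snake_case string to camelCase."""
    head, *tail = value.split("_")
    return head + "".join(part.title() for part in tail)


class FilterModule(object):
    """Expose the filter to Jinja2 templates and variable files."""

    def filters(self):
        return {"snake_to_camel": snake_to_camel}
```

In a template this would be used as `{{ 'api_key_name' | snake_to_camel }}`, yielding `apiKeyName`.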

Use this matrix as your **single source of truth** when extending Ansible for CyMaIS.Cloud! 👍
Use this matrix as your **single source of truth** when extending Ansible for Infinito.Nexus! 👍

---
@@ -1,182 +0,0 @@
### Guide to Create a New Docker Role for CyMaIS

This guide will walk you through the steps to add a new Docker role for a service (in this case, `my_service`) in **CyMaIS**. We will cover where to add the application settings, domain, and other required configuration to ensure that your new service is correctly integrated into the CyMaIS environment.

---

### **1. Define the Application Configuration in `templates/vars/applications.yml.j2`**

First, you'll need to add the default configuration for your new service under the `defaults_applications` section in `templates/vars/applications.yml.j2`.

#### **Steps:**
- Open `templates/vars/applications.yml.j2`
- Add the configuration for `my_service` under the `defaults_applications` section.

```yaml
defaults_applications:

  ## My Service Configuration
  my_service:
    version: "latest"          # Version of the service
    features:
      matomo: true             # Enable Matomo tracking for analytics
      css: true                # Enable or disable global CSS styling
      portfolio_iframe: false  # Allow embedding the landing page in an iframe (if true)
      database: true           # Enable central database integration
      ldap: true               # Enable LDAP integration
      oauth2: true             # Enable OAuth2 proxy
      oidc: true               # Enable OIDC
```

---

### **2. Add the Domain for `my_service` in `group_vars/all/03_domains.yml`**

Next, define the domain for your service in the `group_vars/all/03_domains.yml` file. The domain should be dynamic, using the `{{ primary_domain }}` placeholder, which will automatically resolve to the correct domain based on the primary domain used for your environment.

#### **Steps:**
- Open `group_vars/all/03_domains.yml`
- Add the domain for `my_service`.

```yaml
defaults_domains:
  # Other services...
  my_service: "slides.{{ primary_domain }}"  # Domain for the new service
```

---

### **3. Set the Application ID in `vars/main.yml`**

In the `vars/main.yml` file, set the `application_id` to `my_service`. This step is essential as it allows CyMaIS to correctly reference and configure the new service when deploying it via Docker.

#### **Steps:**
- Open `vars/main.yml`
- Add the `application_id` for the new service.

```yaml
application_id: "my_service"  # Set the application ID for the service
```

---

### **4. Create the Docker Role for the New Service**

Now that you have defined the application settings, domain, and application ID, you need to create a Docker role that will build and run the containerized version of `my_service`.

#### **Steps:**
- Create a new directory under the `roles` directory, e.g., `roles/docker-my_service`.
- Inside the `docker-my_service` role, create the following files:

1. **`README.md`**:
   - Provide documentation on the new service and how it works within CyMaIS.

2. **`tasks/main.yml`**:
   - Define the tasks for building and running the Docker container for `my_service`.

   Example `tasks/main.yml`:
   ```yaml
   ---
   # Docker Routines for my_service
   - name: "include docker-compose role"
     include_role:
       name: docker-compose

   - name: install cymais-my_service
     command:
       cmd: "pkgmgr install cymais-my_service --clone-mode https"
     notify: docker compose project build and setup

   - name: Get path of cymais-my_service using pkgmgr
     command: pkgmgr path cymais-my_service
     register: path_cymais_my_service_output

   - name: "include role nginx-domain-setup for {{ application_id }}"
     include_role:
       name: nginx-domain-setup
     vars:
       domain: "{{ domains | get_domain(application_id) }}"
       http_port: "{{ ports.localhost.http[application_id] }}"
   ```

3. **`docker-compose.yml.j2`**:
   - Define the `docker-compose.yml` template for building and running the Docker container for the new service.

   Example `docker-compose.yml.j2`:
   ```yaml
   services:
     my_service:
       build:
         context: {{ path_cymais_my_service_output.stdout }}
         dockerfile: {{ path_cymais_my_service_output.stdout }}/Dockerfile
       ports:
         - "127.0.0.1:{{ ports.localhost.http[application_id] }}:5000"
       volumes:
         - {{ path_cymais_my_service_output.stdout }}:/app
         - {{ path_cymais_output.stdout }}:/source
   ```

4. **`vars/main.yml`**:
   - Define any specific variables for `my_service`.

   Example `vars/main.yml`:
   ```yaml
   application_id: "my_service"
   ```

5. **`meta/main.yml`**:
   - Add metadata for your new role.

   Example `meta/main.yml`:
   ```yaml
   galaxy_info:
     author: "Your Name"
     description: "Docker role to deploy and manage my_service within CyMaIS."
     license: "CyMaIS NonCommercial License (CNCL)"
     company: "Your Company"
     min_ansible_version: "2.9"
     platforms:
       - name: Docker
         versions:
           - all
       - name: Linux
         versions:
           - all
     repository: "https://github.com/yourrepo/my_service"
     documentation: "https://yourdocumentationlink"
   ```

---

### **5. Test the Configuration**

Once you have defined the Docker role, configuration settings, and other necessary files, it is essential to test your changes:

#### **Steps:**
- Run the Ansible playbook for deploying your new service (see the sketch below).
- Check if `my_service` is correctly deployed and if the domain is resolving as expected.
- Verify that the application is accessible via the assigned port (e.g., `http://slides.{{ primary_domain }}:5000`).
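
As a rough sketch of that first step — every name here is a placeholder, and the flags simply mirror the usage documented in the Deployment Guide:

```bash
# Hypothetical deploy run for the new role (inventory and host are examples)
cymais playbook --update --limit myserver servers.yml
```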

---

### **6. Additional Steps for Integration**

- You can add additional configurations or adjust existing settings based on the requirements for `my_service`. For instance:
  - Modify the health check settings in the `docker-compose.yml` template (see the sketch below).
  - Update Nginx or other web servers to properly route traffic to your new service.
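
A minimal sketch of such a health check, assuming the service from the compose template above listens on port 5000 inside the container and that `curl` is available in the image:

```yaml
# Hypothetical healthcheck block for the my_service container
services:
  my_service:
    healthcheck:
      test: ["CMD", "curl", "-f", "http://127.0.0.1:5000/"]  # requires curl inside the image
      interval: 30s
      timeout: 5s
      retries: 3
```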

---

### **Conclusion**

By following this guide, you have successfully added a new Dockerized service (`my_service`) to the CyMaIS platform. You have:
- Configured the service settings in `templates/vars/applications.yml.j2`
- Added the domain for the service in `group_vars/all/03_domains.yml`
- Set the `application_id` in `vars/main.yml`
- Created the necessary Docker role for managing `my_service`.

This process allows you to extend the functionality of CyMaIS with new services while maintaining a consistent and reproducible deployment workflow.

---

For any further details or troubleshooting, please consult the official CyMaIS documentation or reach out to the CyMaIS community for assistance.
@@ -1,11 +1,11 @@
Developer Guide
===============

Welcome to the **CyMaIS Developer Guide**! This guide provides essential information for developers who want to contribute to the CyMaIS open-source project.
Welcome to the **Infinito.Nexus Developer Guide**! This guide provides essential information for developers who want to contribute to the Infinito.Nexus open-source project.

Explore CyMaIS Solutions
Explore Infinito.Nexus Solutions
--------------------------------
CyMaIS offers various solutions for IT infrastructure automation. Learn more about the available applications:
Infinito.Nexus offers various solutions for IT infrastructure automation. Learn more about the available applications:

- :doc:`../../../roles/application_glosar`
- :doc:`../../../roles/application_categories`
@@ -16,21 +16,21 @@ For Developers
Understanding Ansible Roles
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

CyMaIS is powered by **Ansible** roles to automate deployments. Developers can explore the technical details of our roles here:
Infinito.Nexus is powered by **Ansible** roles to automate deployments. Developers can explore the technical details of our roles here:

- :doc:`../../../roles/ansible_role_glosar`

Contributing to CyMaIS
Contributing to Infinito.Nexus
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Want to contribute to the project or explore the source code? Check out our **GitHub repository**:

- `CyMaIS GitHub Repository <https://github.com/kevinveenbirkenbach/cymais/tree/master/roles>`_
- `Infinito.Nexus GitHub Repository <https://s.infinito.nexus/code/tree/master/roles>`_

Contribution Guidelines
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

1. **Fork the Repository** – Start by forking the CyMaIS repository.
1. **Fork the Repository** – Start by forking the Infinito.Nexus repository.
2. **Create a New Branch** – Make changes in a dedicated branch.
3. **Follow Coding Standards** – Ensure your code is well-documented and follows best practices.
4. **Submit a Pull Request** – Once your changes are tested, submit a PR for review.
@@ -42,12 +42,12 @@ For detailed guidelines, refer to:
Community & Support
-------------------
If you have questions or need help, visit the **CyMaIS Information Hub**:
If you have questions or need help, visit the **Infinito.Nexus Information Hub**:

- `hub.cymais.cloud <https://hub.cymais.cloud>`_
- `hub.infinito.nexus <https://hub.infinito.nexus>`_

This is the best place to ask questions, get support, and collaborate with other contributors.

Stay connected, collaborate, and help improve CyMaIS together!
Stay connected, collaborate, and help improve Infinito.Nexus together!

Happy coding! 🚀
docs/guides/enterprise/README.md (new file)
@@ -0,0 +1,17 @@
# Enterprise Guide

Are you looking for a **reliable IT infrastructure** for your business or organization? **Infinito.Nexus** is here to help!

## Who Can Benefit? 🎯
✅ **Small & Medium Businesses** - IT infrastructure with everything you need included, e.g. data clouds, mail servers, VPNs, homepages, documentation tools, etc.
✅ **Enterprises** - Scale the solutions for small & medium businesses up to an unlimited number of users
✅ **NGOs & Organizations** - Secure, cost-effective infrastructure solutions on an open-source base
✅ **Journalists & Content Creators** - Host your content on your own servers, share it via the Fediverse, and avoid censorship

## Why Choose Infinito.Nexus? 🚀
- **Fast Deployment** - Get your IT setup running in minutes
- **Security First** - Encrypted backups, 2FA, and secure logins
- **Scalable & Customizable** - Adapts to your specific needs
- **Cost-Effective** - Open-source, no licensing fees

For enterprise solutions, check [Enterprise Solutions](10_ENTERPRISE_SOLUTIONS.md) or contact [Kevin Veen-Birkenbach](mailto:kevin@veen.world).
@@ -1,17 +0,0 @@
# Enterprise Guide

Are you looking for a **reliable IT infrastructure** for your business or organization? **CyMaIS** is here to help!

## Who Can Benefit? 🎯
✅ **Small & Medium Businesses** - IT infrastructure with everything you need included, e.g. data clouds, mail servers, VPNs, homepages, documentation tools, etc.
✅ **Enterprises** - Scale the solutions for small & medium businesses up to an unlimited number of users
✅ **NGOs & Organizations** - Secure, cost-effective infrastructure solutions on an open-source base
✅ **Journalists & Content Creators** - Host your content on your own servers, share it via the Fediverse, and avoid censorship

## Why Choose CyMaIS? 🚀
- **Fast Deployment** - Get your IT setup running in minutes
- **Security First** - Encrypted backups, 2FA, and secure logins
- **Scalable & Customizable** - Adapts to your specific needs
- **Cost-Effective** - Open-source, no licensing fees

For enterprise solutions, check [Enterprise Solutions](10_ENTERPRISE_SOLUTIONS.md) or contact [Kevin Veen-Birkenbach](mailto:kevin@veen.world).
@@ -1,15 +0,0 @@
# Investor Guide

🚀 **CyMaIS is seeking investors** to expand its reach and continue development. With an increasing demand for automated IT solutions, **CyMaIS has the potential to revolutionize IT infrastructure management.**

## Market Potential 📈
- **$500B+ Global IT Infrastructure Market**
- Growing **open-source adoption** across enterprises
- Increasing need for **automation & cybersecurity**

## Why Invest in CyMaIS? 🔥
- **Unique Automation Approach** - Pre-configured roles for quick IT setup
- **Security & Compliance Focus** - Built-in security best practices
- **Scalability** - Modular framework adaptable to various industries

Interested in investing? Contact **[Kevin Veen-Birkenbach](mailto:kevin@veen.world)** to discuss partnership opportunities.
@@ -1,17 +0,0 @@
# Enterprise Solutions

**CyMaIS** provides powerful **enterprise-grade IT infrastructure solutions**, enabling businesses to scale securely and efficiently.

## How CyMaIS Helps Enterprises 🔧
- **Automated Deployment** - Set up secure servers & workstations effortlessly
- **Advanced Security** - Integrated 2FA, LDAP, encrypted storage
- **High Availability** - Scalable infrastructure for growing enterprises
- **Compliance & Audit Logs** - Maintain regulatory standards

## Use Cases 💼
- ✅ **Cloud-Based Infrastructure** (Docker, Kubernetes, CI/CD pipelines)
- ✅ **Enterprise Networking & VPN** (WireGuard, OpenVPN, Firewall rules)
- ✅ **Database & Business Apps** (PostgreSQL, Nextcloud, ERP systems)
- ✅ **Custom Security Solutions** (Keycloak, LDAP, 2FA enforcement)

Interested? Contact [Kevin Veen-Birkenbach](mailto:kevin@veen.world) to discuss tailored enterprise solutions.
docs/guides/user/README.md (new file)
@@ -0,0 +1,66 @@
# User Guide

Welcome to **Infinito.Nexus**! This guide is designed for **end-users** who want to use cloud services, email, and collaboration tools securely and efficiently. Whether you're an **enterprise user** or an **individual**, Infinito.Nexus provides a wide range of services tailored to your needs.

## What Can Infinito.Nexus Do for You? 💡
Infinito.Nexus enables you to securely and efficiently use a variety of **cloud-based applications**, including:

### 📂 Cloud Storage & File Sharing
- **Nextcloud** – Securely store, sync, and share files across devices.
- **OnlyOffice** – Work on documents, spreadsheets, and presentations directly within Nextcloud.
- **LibreOffice** – A powerful office suite alternative to Microsoft Office.

### 💬 Secure Communication & Collaboration
- **Matrix (Element)** – Encrypted messaging for teams and individuals.
- **XMPP** – Secure instant messaging with various supported clients.
- **Mailu** – A private, self-hosted email solution.
- **Etherpad** – Real-time collaborative document editing.
- **BigBlueButton** – Web conferencing with screen sharing and presentations.
- **Jitsi** – Secure video conferencing without account requirements.

### 🎵 Social Media & Content Sharing
- **Mastodon** – Decentralized microblogging platform (alternative to Twitter/X).
- **Pixelfed** – Decentralized image sharing (alternative to Instagram).
- **Friendica** – Social network supporting federation with Mastodon and others.
- **Peertube** – Decentralized video streaming platform (alternative to YouTube).
- **Funkwhale** – Self-hosted music streaming for individuals and communities.

### 🎮 Entertainment & Media
- **Jellyfin** – Open-source media server for movies, TV, and music.
- **Kodi** – Media center application with extensive plugin support.
- **qBittorrent** – Open-source torrent client with secure remote access.

### 🔒 Privacy & Security
- **WireGuard** – Secure and fast VPN solution.
- **Tor Browser** – Browse the web anonymously and bypass censorship.
- **Bitwarden** – Open-source password manager for secure credential storage.
- **2FA Authentication** – Securely log in to your services with Two-Factor Authentication.

### 🔧 Developer & Productivity Tools
- **Gitea** – Self-hosted Git repository management (alternative to GitHub/GitLab).
- **Jenkins** – Automate software development pipelines.
- **Discourse** – Community discussion forums for support and engagement.
- **MediaWiki** – Create and manage knowledge bases and wikis.

## 🏢 Enterprise Users
### How to Get Started 🏁
If your organization provides Infinito.Nexus services, follow these steps:
- Your **administrator** will provide login credentials.
- Access **cloud services** via a web browser or mobile apps.
- For support, contact your **system administrator**.

## 🏠 Private Users
### How to Get Started 🏁
If you're an **individual user**, you can sign up for Infinito.Nexus services:
- **Register an account** at [infinito.nexus](https://infinito.nexus).
- Choose the applications and services you need.
- Follow the setup guide and start using Infinito.Nexus services immediately.

## 📚 Learn More
Discover more about Infinito.Nexus applications:
- :doc:`roles/application_glosar`
- :doc:`roles/application_categories`

For further information, visit our **[Information Hub](https://hub.infinito.nexus)** for tutorials, FAQs, and community support.

You can also register for updates and support from our community.
@@ -1,66 +0,0 @@
# User Guide

Welcome to **CyMaIS**! This guide is designed for **end-users** who want to use cloud services, email, and collaboration tools securely and efficiently. Whether you're an **enterprise user** or an **individual**, CyMaIS provides a wide range of services tailored to your needs.

## What Can CyMaIS Do for You? 💡
CyMaIS enables you to securely and efficiently use a variety of **cloud-based applications**, including:

### 📂 Cloud Storage & File Sharing
- **Nextcloud** – Securely store, sync, and share files across devices.
- **OnlyOffice** – Work on documents, spreadsheets, and presentations directly within Nextcloud.
- **LibreOffice** – A powerful office suite alternative to Microsoft Office.

### 💬 Secure Communication & Collaboration
- **Matrix (Element)** – Encrypted messaging for teams and individuals.
- **XMPP** – Secure instant messaging with various supported clients.
- **Mailu** – A private, self-hosted email solution.
- **Etherpad** – Real-time collaborative document editing.
- **BigBlueButton** – Web conferencing with screen sharing and presentations.
- **Jitsi** – Secure video conferencing without account requirements.

### 🎵 Social Media & Content Sharing
- **Mastodon** – Decentralized microblogging platform (alternative to Twitter/X).
- **Pixelfed** – Decentralized image sharing (alternative to Instagram).
- **Friendica** – Social network supporting federation with Mastodon and others.
- **Peertube** – Decentralized video streaming platform (alternative to YouTube).
- **Funkwhale** – Self-hosted music streaming for individuals and communities.

### 🎮 Entertainment & Media
- **Jellyfin** – Open-source media server for movies, TV, and music.
- **Kodi** – Media center application with extensive plugin support.
- **qBittorrent** – Open-source torrent client with secure remote access.

### 🔒 Privacy & Security
- **WireGuard** – Secure and fast VPN solution.
- **Tor Browser** – Browse the web anonymously and bypass censorship.
- **Bitwarden** – Open-source password manager for secure credential storage.
- **2FA Authentication** – Securely log in to your services with Two-Factor Authentication.

### 🔧 Developer & Productivity Tools
- **Gitea** – Self-hosted Git repository management (alternative to GitHub/GitLab).
- **Jenkins** – Automate software development pipelines.
- **Discourse** – Community discussion forums for support and engagement.
- **MediaWiki** – Create and manage knowledge bases and wikis.

## 🏢 Enterprise Users
### How to Get Started 🏁
If your organization provides CyMaIS services, follow these steps:
- Your **administrator** will provide login credentials.
- Access **cloud services** via a web browser or mobile apps.
- For support, contact your **system administrator**.

## 🏠 Private Users
### How to Get Started 🏁
If you're an **individual user**, you can sign up for CyMaIS services:
- **Register an account** at [cymais.cloud](https://cymais.cloud).
- Choose the applications and services you need.
- Follow the setup guide and start using CyMaIS services immediately.

## 📚 Learn More
Discover more about CyMaIS applications:
- :doc:`roles/application_glosar`
- :doc:`roles/application_categories`

For further information, visit our **[Information Hub](https://hub.cymais.cloud)** for tutorials, FAQs, and community support.

You can also register for updates and support from our community.
@@ -1,6 +1,6 @@
# Security Guidelines

CyMaIS is designed with security in mind. However, while following our guidelines can greatly improve your system’s security, no IT system can be 100% secure. Please report any vulnerabilities as soon as possible.
Infinito.Nexus is designed with security in mind. However, while following our guidelines can greatly improve your system’s security, no IT system can be 100% secure. Please report any vulnerabilities as soon as possible.

For optimal personal security, we **strongly recommend** the following:

@@ -12,12 +12,12 @@ For optimal personal security, we **strongly recommend** the following:
Synchronize your password database across devices using the [Nextcloud Client](https://nextcloud.com/) 📱💻.

- **Use Encrypted Systems**
We recommend running CyMaIS only on systems with full disk encryption. For example, Linux distributions such as [Manjaro](https://manjaro.org/) (based on ArchLinux) with desktop environments like [GNOME](https://en.wikipedia.org/wiki/GNOME) provide excellent security. (Learn more about [disk encryption](https://en.wikipedia.org/wiki/Disk_encryption) on Wikipedia.)
We recommend running Infinito.Nexus only on systems with full disk encryption. For example, Linux distributions such as [Manjaro](https://manjaro.org/) (based on ArchLinux) with desktop environments like [GNOME](https://en.wikipedia.org/wiki/GNOME) provide excellent security. (Learn more about [disk encryption](https://en.wikipedia.org/wiki/Disk_encryption) on Wikipedia.)

- **Beware of Phishing and Social Engineering**
Always verify email senders, avoid clicking on unknown links, and never share your passwords or 2FA codes with anyone. (Learn more about [Phishing](https://en.wikipedia.org/wiki/Phishing) and [Social Engineering](https://en.wikipedia.org/wiki/Social_engineering_(security)) on Wikipedia.)

Following these guidelines will significantly enhance your personal security—but remember, no system is completely immune to risk.

A tutorial on how to set up secure password management can be found [here](https://blog.veen.world/blog/2025/04/04/%f0%9f%9b%a1%ef%b8%8f-keepassxc-cymais-cloud-the-ultimate-guide-to-cross-device-password-security/)
A tutorial on how to set up secure password management can be found [here](https://blog.veen.world/blog/2025/04/04/%f0%9f%9b%a1%ef%b8%8f-keepassxc-infinito-cloud-the-ultimate-guide-to-cross-device-password-security/)
---
@@ -1,23 +0,0 @@
# Company Vision — CyMaIS

## Empowering Digital Sovereignty for Everyone.

CyMaIS is more than just software — it is a movement for digital independence, resilience, and transparency.

We believe that secure, self-hosted IT infrastructure must be accessible to everyone — regardless of company size, technical expertise, or budget.

### Our Mission
- Democratize access to secure IT infrastructure
- Enable data sovereignty and privacy for individuals and organizations
- Reduce global dependency on monopolistic cloud providers
- Promote Open Source, transparency, and community-driven innovation
- Build resilient digital ecosystems in uncertain times

### Long-Term Goal
We want to establish CyMaIS as the leading European and global alternative to centralized cloud platforms — open, modular, and self-sovereign.

Our vision is a future where every person and organization owns their infrastructure — free from control, censorship, and vendor lock-ins.

---

> *CyMaIS — Empowering a Sovereign Digital Future.*
@@ -1,28 +0,0 @@
# Product Vision — CyMaIS Platform

## The Universal Automation Platform for Self-Hosted IT Infrastructure.

CyMaIS provides a modular, Open Source infrastructure automation platform that enables secure and scalable IT environments — for individuals, SMEs, NGOs, and enterprises.

### Key Product Goals
- Enterprise-grade infrastructure automation for everyone
- Rapid deployment of servers, clients, and cloud-native services
- Modular role-based architecture (VPN, Backup, Security, Monitoring, Web Services, IAM)
- Seamless integration of existing systems without forced migration
- Infrastructure-as-Code and reproducible deployments
- Reduced operational IT costs and vendor lock-ins
- Security by Design (encryption, 2FA, auditing, hardening)
- Support for decentralized protocols like ActivityPub, Matrix, Email

### Long-Term Product Vision
CyMaIS will become the central platform for:

- Automating any self-hosted infrastructure within minutes
- Maintaining full data control and regulatory compliance
- Empowering organizations to build their own sovereign cloud ecosystem
- Breaking the dependency on centralized and proprietary cloud services

---

> *CyMaIS — The Future of Self-Hosted Infrastructure.*
> *Secure. Automated. Sovereign.*
@@ -1,33 +0,0 @@
# Vision Statement

This is the Vision Statement for [CyMaIS](https://cymais.cloud), outlining our future goals and direction.

## Short

CyMaIS aims to empower individuals, businesses, NGOs, and enterprises with a secure, scalable, and decentralized IT infrastructure solution that ensures data sovereignty, promotes Open Source innovation, and reduces reliance on monopolistic cloud providers.

## Explanation

At the core of our mission is the development of a groundbreaking tool designed to address the inherent problems in managing IT infrastructure today, for individuals, businesses, non-governmental organizations (NGOs), and large enterprises alike. From the rising costs of monopolistic cloud services to the loss of data sovereignty, security concerns, and dependency on centralized cloud providers, we aim to provide an alternative that empowers users, organizations, and businesses to regain control over their data and infrastructure.

Our vision is to create a fully automated solution that enables all users, regardless of size or industry, to establish a secure, scalable, and self-managed IT infrastructure. This tool will break down the complexities of IT infrastructure setup, making it faster, simpler, and more secure, while being accessible to everyone—from individuals and grassroots organizations to large-scale enterprises.

Grounded in Open Source principles, this solution will champion transparency, security, and innovation. It will be adaptable and flexible, offering a digital infrastructure that evolves alongside the diverse needs of businesses, organizations, and communities, all while maintaining a focus on usability and accessibility.

We envision a future where users and organizations are no longer at the mercy of monopolistic cloud providers, where they can securely manage their own data and infrastructure. This future will see individuals and NGOs empowered with the same capabilities as large enterprises—ensuring that people of all scales can maintain control and sovereignty over their digital lives, free from external manipulation.

CyMaIS will democratize access to advanced IT infrastructure solutions, providing security, flexibility, and scalability for all—from small NGOs to large multinational enterprises—without the cost and dependence on centralized, proprietary cloud services. By utilizing Open Source, our solution will meet the highest standards of security while fostering a collaborative, community-driven approach to innovation and continuous improvement.

Moreover, our vision goes beyond just IT infrastructure; it extends to the broader goal of democratizing the internet itself. By integrating decentralized protocols like **ActivityPub**, **email**, and **Matrix**, we aim to restore the foundational principles of a decentralized, resilient internet. In today’s world, marked by political tensions, wars, and uncertainty, the importance of resilient, distributed infrastructures has never been greater. CyMaIS will enable all users—from individuals to NGOs and large enterprises—to remain independent and secure, ensuring that control over data and communications stays in their hands, not under the dominance of monopolistic entities.

Ultimately, our vision is to redefine the way IT infrastructure is deployed and managed, offering a solution that is swift, secure, and scalable, capable of meeting the needs of businesses, individuals, NGOs, and large enterprises. CyMaIS will empower all stakeholders by providing a foundation for a decentralized, transparent, and resilient digital future—setting a new benchmark for security, reliability, and sovereignty in the digital age.

## Key Points
- Empower people and institutions
- Data sovereignty
- Control over infrastructure
- Automated infrastructure setup
- Open Source
- Decentralized Services
- Scalable
- Global resilience and security
filter_plugins/README.md (new file, 27 lines)
@@ -0,0 +1,27 @@
# Custom Filter Plugins for Infinito.Nexus

This directory contains custom **Ansible filter plugins** used within the Infinito.Nexus project.

## When to Use a Filter Plugin

- **Transform values:** Use filters to transform, extract, reformat, or compute values from existing variables or facts.
- **Inline data manipulation:** Filters are designed for inline use in Jinja2 expressions (in templates, tasks, vars, etc.).
- **No external lookups:** Filters only operate on data you explicitly pass to them and cannot access external files, the Ansible inventory, or runtime context.

### Examples

```jinja2
{{ role_name | get_entity_name }}
{{ my_list | unique }}
{{ user_email | regex_replace('^(.+)@.*$', '\\1') }}
```
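For orientation, a plugin module follows the `FilterModule` convention used throughout this directory. Below is a minimal illustrative sketch; the `strip_prefix` filter and its module filename are invented for this example and are not part of the project:

```python
# filter_plugins/example_filters.py (illustrative sketch only, not a project file)
from ansible.errors import AnsibleFilterError


def strip_prefix(value, prefix):
    """Return `value` with a leading `prefix` removed, if present."""
    if not isinstance(value, str) or not isinstance(prefix, str):
        raise AnsibleFilterError("strip_prefix expects two strings")
    return value[len(prefix):] if value.startswith(prefix) else value


class FilterModule(object):
    def filters(self):
        # Maps the Jinja2 filter name to the Python callable implementing it.
        return {'strip_prefix': strip_prefix}
```

With such a file in place, `{{ 'web-app-wiki' | strip_prefix('web-app-') }}` would render `wiki`.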
## When *not* to Use a Filter Plugin

* If you need to **load data from an external source** (e.g., file, environment, API), use a lookup plugin instead.
* If your logic requires **access to inventory, facts, or host-level information** that is not passed as a parameter.

## Further Reading

* [Ansible Filter Plugins Documentation](https://docs.ansible.com/ansible/latest/plugins/filter.html)
* [Developing Ansible Filter Plugins](https://docs.ansible.com/ansible/latest/dev_guide/developing_plugins.html#developing-filter-plugins)
@@ -1,2 +0,0 @@
# Todo
- Refactor is_feature_enabled to one function
@@ -1,86 +0,0 @@
from ansible.errors import AnsibleFilterError


class FilterModule(object):
    def filters(self):
        return {'alias_domains_map': self.alias_domains_map}

    def alias_domains_map(self, apps, primary_domain):
        """
        Build a map of application IDs to their alias domains.

        - If no `domains` key → []
        - If `domains` exists but is an empty dict → return the original cfg
        - Explicit `aliases` are used (default appended if missing)
        - If only `canonical` defined and it doesn't include default, default is added
        - Invalid types raise AnsibleFilterError
        """
        def parse_entry(domains_cfg, key, app_id):
            if key not in domains_cfg:
                return None
            entry = domains_cfg[key]
            if isinstance(entry, dict):
                values = list(entry.values())
            elif isinstance(entry, list):
                values = entry
            else:
                raise AnsibleFilterError(
                    f"Unexpected type for 'domains.{key}' in application '{app_id}': {type(entry).__name__}"
                )
            for d in values:
                if not isinstance(d, str) or not d.strip():
                    raise AnsibleFilterError(
                        f"Invalid domain entry in '{key}' for application '{app_id}': {d!r}"
                    )
            return values

        def default_domain(app_id, primary):
            return f"{app_id}.{primary}"

        # 1) Precompute canonical domains per app (fallback to default)
        canonical_map = {}
        for app_id, cfg in apps.items():
            domains_cfg = cfg.get('domains') or {}
            entry = domains_cfg.get('canonical')
            if entry is None:
                canonical_map[app_id] = [default_domain(app_id, primary_domain)]
            elif isinstance(entry, dict):
                canonical_map[app_id] = list(entry.values())
            elif isinstance(entry, list):
                canonical_map[app_id] = list(entry)
            else:
                raise AnsibleFilterError(
                    f"Unexpected type for 'domains.canonical' in application '{app_id}': {type(entry).__name__}"
                )

        # 2) Build alias list per app
        result = {}
        for app_id, cfg in apps.items():
            domains_cfg = cfg.get('domains')

            # no domains key → no aliases
            if domains_cfg is None:
                result[app_id] = []
                continue

            # empty domains dict → return the original cfg
            if isinstance(domains_cfg, dict) and not domains_cfg:
                result[app_id] = cfg
                continue

            # otherwise, compute aliases
            aliases = parse_entry(domains_cfg, 'aliases', app_id) or []
            default = default_domain(app_id, primary_domain)
            has_aliases = 'aliases' in domains_cfg
            has_canon = 'canonical' in domains_cfg

            if has_aliases:
                if default not in aliases:
                    aliases.append(default)
            elif has_canon:
                canon = canonical_map.get(app_id, [])
                if default not in canon and default not in aliases:
                    aliases.append(default)

            result[app_id] = aliases

        return result
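For context, the removed filter could be exercised directly in Python. Here is a minimal sketch with invented application data (the app IDs and domain values below are placeholders):

```python
# Sketch: calling the filter class above with invented data.
apps = {
    'mastodon': {'domains': {'canonical': ['social.example.org']}},
    'pixelfed': {},  # no 'domains' key -> no aliases
}
fm = FilterModule()
alias_map = fm.filters()['alias_domains_map'](apps, 'example.org')
print(alias_map)
# {'mastodon': ['mastodon.example.org'], 'pixelfed': []}
```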
@@ -1,21 +1,76 @@
from ansible.errors import AnsibleFilterError
import sys
import os

sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from module_utils.entity_name_utils import get_entity_name
from module_utils.role_dependency_resolver import RoleDependencyResolver
from typing import Iterable


class FilterModule(object):
    def filters(self):
        return {'canonical_domains_map': self.canonical_domains_map}

    def canonical_domains_map(self, apps, primary_domain):
    def canonical_domains_map(
        self,
        apps,
        PRIMARY_DOMAIN,
        *,
        recursive: bool = False,
        roles_base_dir: str | None = None,
        seed: Iterable[str] | None = None,
    ):
        """
        Maps applications to their canonical domains, checking for conflicts
        and ensuring all domains are valid and unique across applications.
        Build { app_id: [canonical domains...] }.

        Only include_role, import_role, and meta/main.yml:dependencies are followed recursively.
        'run_after' is deliberately ignored here.
        """
        if not isinstance(apps, dict):
            raise AnsibleFilterError(f"'apps' must be a dict, got {type(apps).__name__}")

        app_keys = set(apps.keys())
        seed_keys = set(seed) if seed is not None else app_keys

        if recursive:
            roles_base_dir = roles_base_dir or os.path.join(os.getcwd(), "roles")
            if not os.path.isdir(roles_base_dir):
                raise AnsibleFilterError(
                    f"roles_base_dir '{roles_base_dir}' not found or not a directory."
                )

            resolver = RoleDependencyResolver(roles_base_dir)
            discovered_roles = resolver.resolve_transitively(
                start_roles=seed_keys,
                resolve_include_role=True,
                resolve_import_role=True,
                resolve_dependencies=True,
                resolve_run_after=False,
                max_depth=None,
            )
            # all discovered roles that actually have config entries in `apps`
            target_apps = discovered_roles & app_keys
        else:
            target_apps = seed_keys

        result = {}
        seen_domains = {}

        for app_id, cfg in apps.items():
            domains_cfg = cfg.get('domains')
        for app_id in sorted(target_apps):
            cfg = apps.get(app_id)
            if cfg is None:
                continue
            if not str(app_id).startswith(("web-", "svc-db-")):
                continue
            if not isinstance(cfg, dict):
                raise AnsibleFilterError(
                    f"Invalid configuration for application '{app_id}': expected dict, got {cfg!r}"
                )

            domains_cfg = cfg.get('server', {}).get('domains', {})
            if not domains_cfg or 'canonical' not in domains_cfg:
                self._add_default_domain(app_id, primary_domain, seen_domains, result)
                self._add_default_domain(app_id, PRIMARY_DOMAIN, seen_domains, result)
                continue

            canonical_domains = domains_cfg['canonical']
@@ -23,12 +78,9 @@ class FilterModule(object):

        return result

    def _add_default_domain(self, app_id, primary_domain, seen_domains, result):
        """
        Add the default domain for an application if no canonical domains are defined.
        Ensures the domain is unique across applications.
        """
        default_domain = f"{app_id}.{primary_domain}"
    def _add_default_domain(self, app_id, PRIMARY_DOMAIN, seen_domains, result):
        entity_name = get_entity_name(app_id)
        default_domain = f"{entity_name}.{PRIMARY_DOMAIN}"
        if default_domain in seen_domains:
            raise AnsibleFilterError(
                f"Domain '{default_domain}' is already configured for "
@@ -38,40 +90,21 @@ class FilterModule(object):
        result[app_id] = [default_domain]

    def _process_canonical_domains(self, app_id, canonical_domains, seen_domains, result):
        """
        Process the canonical domains for an application, handling both lists and dicts,
        and ensuring each domain is unique.
        """
        if isinstance(canonical_domains, dict):
            self._process_canonical_domains_dict(app_id, canonical_domains, seen_domains, result)
            for _, domain in canonical_domains.items():
                self._validate_and_check_domain(app_id, domain, seen_domains)
            result[app_id] = canonical_domains.copy()
        elif isinstance(canonical_domains, list):
            self._process_canonical_domains_list(app_id, canonical_domains, seen_domains, result)
            for domain in canonical_domains:
                self._validate_and_check_domain(app_id, domain, seen_domains)
            result[app_id] = list(canonical_domains)
        else:
            raise AnsibleFilterError(
                f"Unexpected type for 'domains.canonical' in application '{app_id}': "
                f"Unexpected type for 'server.domains.canonical' in application '{app_id}': "
                f"{type(canonical_domains).__name__}"
            )

    def _process_canonical_domains_dict(self, app_id, domains_dict, seen_domains, result):
        """
        Process a dictionary of canonical domains for an application.
        """
        for name, domain in domains_dict.items():
            self._validate_and_check_domain(app_id, domain, seen_domains)
        result[app_id] = domains_dict.copy()

    def _process_canonical_domains_list(self, app_id, domains_list, seen_domains, result):
        """
        Process a list of canonical domains for an application.
        """
        for domain in domains_list:
            self._validate_and_check_domain(app_id, domain, seen_domains)
        result[app_id] = list(domains_list)

    def _validate_and_check_domain(self, app_id, domain, seen_domains):
        """
        Validate the domain and check if it has already been assigned to another application.
        """
        if not isinstance(domain, str) or not domain.strip():
            raise AnsibleFilterError(
                f"Invalid domain entry in 'canonical' for application '{app_id}': {domain!r}"
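To make the new keyword-only signature concrete, here is a hedged usage sketch. The app IDs and domains are invented, and it assumes `get_entity_name('web-app-wiki')` yields `wiki`:

```python
# Sketch: non-recursive call of the updated filter with invented data.
fm = FilterModule()
canonical = fm.filters()['canonical_domains_map']

apps = {
    'web-app-demo': {'server': {'domains': {'canonical': ['demo.example.org']}}},
    'web-app-wiki': {},  # no canonical domains -> falls back to "<entity>.<PRIMARY_DOMAIN>"
}

print(canonical(apps, 'example.org'))
# e.g. {'web-app-demo': ['demo.example.org'], 'web-app-wiki': ['wiki.example.org']}

# With recursive=True, roles reachable from the seed roles via include_role,
# import_role, or meta dependencies would be resolved first (requires roles/).
```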