mirror of
https://github.com/kevinveenbirkenbach/computer-playbook.git
synced 2025-09-10 20:37:15 +02:00
Compare commits
1674 Commits
9095b00cfb
...
master
Author | SHA1 | Date | |
---|---|---|---|
445c94788e | |||
aac9704e8b | |||
a57a5f8828 | |||
90843726de | |||
d25da76117 | |||
d48a1b3c0a | |||
2839d2e1a4 | |||
00c99e58e9 | |||
904040589e | |||
9f3d300bca | |||
9e253a2d09 | |||
49120b0dcf | |||
b6f91ab9d3 | |||
77e8e7ed7e | |||
32bc17e0c3 | |||
e294637cb6 | |||
577767bed6 | |||
e77f8da510 | |||
4738b263ec | |||
0a588023a7 | |||
d2fa90774b | |||
0e72dcbe36 | |||
4f8ce598a9 | |||
3769e66d8d | |||
33a5fadf67 | |||
699a6b6f1e | |||
61c29eee60 | |||
d5204fb5c2 | |||
751615b1a4 | |||
e2993d2912 | |||
24b6647bfb | |||
d2dc2eab5f | |||
a1130e33d7 | |||
df122905eb | |||
d093a22d61 | |||
5e550ce3a3 | |||
0ada12e3ca | |||
1a5ce4a7fa | |||
a9abb3ce5d | |||
71ceb339fc | |||
61bba3d2ef | |||
0bde4295c7 | |||
8059f272d5 | |||
7c814e6e83 | |||
d760c042c2 | |||
6cac8085a8 | |||
3a83f3d14e | |||
61d852c508 | |||
188b098503 | |||
bc56940e55 | |||
5dfc2efb5a | |||
7f9dc65b37 | |||
163a925096 | |||
a8c88634b5 | |||
ce3fe1cd51 | |||
7ca8b7c71d | |||
110381e80c | |||
b02d88adc0 | |||
b7065837df | |||
c98a2378c4 | |||
4ae3cee36c | |||
b834f0c95c | |||
9f734dff17 | |||
6fa4d00547 | |||
7254667186 | |||
aaedaab3da | |||
7791bd8c04 | |||
34b3f3b0ad | |||
94fe58b5da | |||
9feb766e6f | |||
231fd567b3 | |||
3f8e7c1733 | |||
3bfab9ef8e | |||
f1870c07be | |||
d0cec9a7d4 | |||
1dbd714a56 | |||
3a17b2979e | |||
bb0530c2ac | |||
aa2eb53776 | |||
5f66c1a622 | |||
b3dfb8bf22 | |||
db642c1c39 | |||
2fccebbd1f | |||
c23fbd8ec4 | |||
2999d9af77 | |||
2809ffb9f0 | |||
cb12114ce8 | |||
ba99e558f7 | |||
2aed0f97d2 | |||
f36c7831b1 | |||
009bee531b | |||
4c7bb6d9db | |||
092869b29a | |||
f4ea6c6c0f | |||
3ed84717a7 | |||
1cfc2b7e23 | |||
01b9648650 | |||
65d3b3040d | |||
28f7ac5aba | |||
19926b0c57 | |||
3a79d9d630 | |||
983287a84a | |||
dd9a9b6d84 | |||
23a2e081bf | |||
4cbd848026 | |||
d67f660152 | |||
5c6349321b | |||
af1ee64246 | |||
d96bfc64a6 | |||
6ea8301364 | |||
92f5bf6481 | |||
58c17bf043 | |||
6c2d5c52c8 | |||
b919f39e35 | |||
9f2cfe65af | |||
fe399c3967 | |||
ef801aa498 | |||
18f3b1042f | |||
dece6228a4 | |||
cb66fb2978 | |||
b9da6908ec | |||
8baec17562 | |||
1401779a9d | |||
707a3fc1d0 | |||
d595d46e2e | |||
73d5651eea | |||
12a267827d | |||
c6cd6430bb | |||
67b2ebf001 | |||
ebb6660473 | |||
f62d09d8f1 | |||
de159db918 | |||
e2c2cf4bcf | |||
6e1e1ad5c5 | |||
06baa4b03a | |||
73e7fbdc8a | |||
bae2bc21ec | |||
a8f4dea9d2 | |||
5aaf2d28dc | |||
5287bb4d74 | |||
5446a1497e | |||
19889a8cfc | |||
d9980c0d8f | |||
35206aaafd | |||
942e8c9c12 | |||
97f4045c68 | |||
c182ecf516 | |||
ce033c370a | |||
a0477ad54c | |||
35c3681f55 | |||
af97e71976 | |||
19a51fd718 | |||
b916173422 | |||
9756a0f75f | |||
e417bc19bd | |||
7ad14673e1 | |||
eb781dbf8b | |||
6016da6f1f | |||
8b2f0ac47b | |||
9d6d64e11d | |||
f1a2967a37 | |||
95a2172fff | |||
dc3f4e05a8 | |||
e33944cda2 | |||
efa68cc1e0 | |||
79e702a3ab | |||
9180182d5b | |||
535094d15d | |||
658003f5b9 | |||
3ff783df17 | |||
3df511aee9 | |||
c27d16322b | |||
7a6e273ea4 | |||
384beae7c1 | |||
ad7e61e8b1 | |||
fa46523433 | |||
f4a380d802 | |||
42d6c1799b | |||
8608d89653 | |||
a4f39ac732 | |||
9cfb8f3a60 | |||
3e5344a46c | |||
ec07d1a20b | |||
594d9417d1 | |||
dc125e4843 | |||
39a54294dd | |||
a57fe718de | |||
b6aec5fe33 | |||
de07d890dc | |||
e27f355697 | |||
790762d397 | |||
4ce681e643 | |||
55cf3d0d8e | |||
2708b67751 | |||
f477ee3731 | |||
6d70f78989 | |||
b867a52471 | |||
78ee3e3c64 | |||
d7ece2a8c3 | |||
3794aa87b0 | |||
4cf996b1bb | |||
79517b2fe9 | |||
a84ee1240a | |||
7019b307c5 | |||
838a8fc7a1 | |||
95aba805c0 | |||
0856c340c7 | |||
b90a2f6c87 | |||
98e045196b | |||
a10dd402b8 | |||
6e538eabc8 | |||
82cc24a7f5 | |||
26b392ea76 | |||
b49fdc509e | |||
b1e8339283 | |||
f5db786878 | |||
7ef20474a0 | |||
83b9f697ab | |||
dd7b5e844c | |||
da01305cac | |||
1082caddae | |||
242347878d | |||
f46aabe884 | |||
d3cc187c3b | |||
0a4b9bc8e4 | |||
2887e54cca | |||
630fd43382 | |||
3114a7b586 | |||
34d771266a | |||
73b7d2728e | |||
fc4df980c5 | |||
763b43b44c | |||
db860e6ae3 | |||
2ba486902f | |||
7848226f83 | |||
185f37af52 | |||
b9461026a6 | |||
bf63e01b98 | |||
4a600ac531 | |||
dc0bb555c1 | |||
5adce08aea | |||
2569abc0be | |||
3a839cfe37 | |||
29f50da226 | |||
a5941763ff | |||
3d7bbabd7b | |||
e4b8c97e03 | |||
29df95ed82 | |||
6443771d93 | |||
d1cd87c843 | |||
5f0762e4f6 | |||
5642793f4a | |||
7d0502ebc5 | |||
20c8d46f54 | |||
a524c52f89 | |||
5c9ca20e04 | |||
bfe18dd83c | |||
0a83f3159a | |||
fb7b3a3c8e | |||
42f9ebad34 | |||
33b2d3f582 | |||
14e868a644 | |||
2a1a956739 | |||
bd2dde3af6 | |||
1126765da2 | |||
2620ee088e | |||
838a55ea94 | |||
1b26f1da8d | |||
43362e1694 | |||
14d3f65a70 | |||
b8ccd50ab2 | |||
4a39cc90c0 | |||
0de26fa6c7 | |||
1bed83078e | |||
7ffd79ebd9 | |||
2b7950920c | |||
f0b323afee | |||
eadcb62f2a | |||
cc2c1dc730 | |||
3b4821f7e7 | |||
5b64b47754 | |||
cb2b9462e1 | |||
03564b34bb | |||
e3b09e7f1a | |||
3adb08fc68 | |||
e9a41bd40c | |||
cb539b038c | |||
3ac9bd9f90 | |||
85a2f4b3d2 | |||
012426cf3b | |||
6c966bce2e | |||
3587531bda | |||
411a1f8931 | |||
cc51629337 | |||
022800425d | |||
0228014d34 | |||
1b638c366e | |||
5c90c252d0 | |||
4a65a254ae | |||
5e00deea19 | |||
bf7b24c3ee | |||
85924ab3c5 | |||
ac293c90f4 | |||
e0f35c4bbd | |||
989bee9522 | |||
2f12d8ea83 | |||
58620f6695 | |||
abc064fa56 | |||
7f42462514 | |||
41cd6b7702 | |||
a40d48bb03 | |||
2fba32d384 | |||
f2a765d69a | |||
c729edb525 | |||
597e9d5222 | |||
db0e030900 | |||
004507e233 | |||
e2014b9b59 | |||
567b1365c0 | |||
e99fa77b91 | |||
80dad1a5ed | |||
03290eafe1 | |||
58c64bd7c6 | |||
e497c001d6 | |||
4fa1c6cfbd | |||
53770f5308 | |||
13d8663796 | |||
f31565e4c5 | |||
a4d8de2152 | |||
c744ebe3f9 | |||
ce029881d0 | |||
94da112736 | |||
b62df5599d | |||
c9a7830953 | |||
53e5c563ae | |||
0b3b3a810a | |||
6d14f16dfd | |||
632d922977 | |||
26b29debc0 | |||
0c4cd283c4 | |||
5d36a806ff | |||
84de85d905 | |||
457f3659fa | |||
4c7ee0441e | |||
140572a0a4 | |||
a30cd4e8b5 | |||
2067804e9f | |||
1a42e8bd14 | |||
8634b5e1b3 | |||
1595a7c4a6 | |||
82aaf7ad74 | |||
7e4a1062af | |||
d5e5f57f92 | |||
f671678720 | |||
2219696c3f | |||
fbaee683fd | |||
b301e58ee6 | |||
de15c42de8 | |||
918355743f | |||
f6e62525d1 | |||
f72ac30884 | |||
1496f1de95 | |||
38de10ba65 | |||
e8c19b4b84 | |||
b0737b1cdb | |||
e4cc928eea | |||
c9b2136578 | |||
5709935c92 | |||
c7badc608a | |||
0e59d35129 | |||
1ba50397db | |||
6318611931 | |||
6e04ac58d2 | |||
b6e571a496 | |||
21b6362bc1 | |||
1fcf072257 | |||
ea0149b5d4 | |||
fe76fe1e62 | |||
3431796283 | |||
b5d8ac5462 | |||
5426014096 | |||
a9d77de2a4 | |||
766ef8619f | |||
66013a4da3 | |||
1cb5a12d85 | |||
6e8ae793e3 | |||
0746acedfd | |||
f5659a44f8 | |||
77816ac4e7 | |||
8779afd1f7 | |||
0074bcbd69 | |||
149c563831 | |||
e9ef62b95d | |||
aeaf84de6f | |||
fdceb0f792 | |||
2fd83eaf55 | |||
|
21eb614912 | ||
b880b98ac3 | |||
acfb1a2ee7 | |||
4885ad7eb4 | |||
d9669fc6dd | |||
8e0341c120 | |||
22c8c395f0 | |||
aae69ea15b | |||
c7b25ed093 | |||
e675aa5886 | |||
14f07adc9d | |||
dba12b89d8 | |||
0607974dac | |||
e8fa22cb43 | |||
eedfe83ece | |||
9f865dd215 | |||
220e3e1c60 | |||
2996c7cbb6 | |||
59bd4ca8eb | |||
da58691d25 | |||
c96f278ac3 | |||
2715479c95 | |||
926640371f | |||
cdc97c8ba5 | |||
4124e97aeb | |||
7f0d40bdc3 | |||
8dc2238ba2 | |||
b9b08feadd | |||
dc437c7621 | |||
7d63d92166 | |||
3eb51a32ce | |||
6272303b55 | |||
dfd7be9d72 | |||
90ad688ca9 | |||
2f02ad6c15 | |||
1257bef61d | |||
3eca5dabdf | |||
5a0684fa2d | |||
051e4accd6 | |||
7f53cc3a12 | |||
9228d51e86 | |||
99c6c9ec92 | |||
34f9d773bd | |||
5edb9d19cf | |||
7a09f223af | |||
f88e57ca52 | |||
7bc11f9b31 | |||
0b25161af6 | |||
14c3ff1253 | |||
234cfea02f | |||
|
69e29029af | ||
bc5374cf52 | |||
|
1660bcd384 | ||
|
41d924af1c | ||
80278f2bb0 | |||
44e0fea0b2 | |||
a9e7ed3605 | |||
f9f76892af | |||
996244b672 | |||
9f61b4e50b | |||
3549f4de32 | |||
552bb1bbae | |||
1b385c5215 | |||
1240d3bfdf | |||
27973c2773 | |||
f62355e490 | |||
f5213fd59c | |||
0472fecd64 | |||
d1fcbedef6 | |||
c8be88e3b1 | |||
5e315f9603 | |||
bab1035a24 | |||
30930c4136 | |||
bba663f95d | |||
c2f83abb60 | |||
3bc64023af | |||
d94254effb | |||
ff18c7cd73 | |||
a84abbdade | |||
5dc8ec2344 | |||
4b9e7dd3b7 | |||
22ff2dc1f3 | |||
16c1a5d834 | |||
b25f7f52b3 | |||
4826de621e | |||
4501c31756 | |||
c185c537cb | |||
809ac1adf4 | |||
1a2451af4e | |||
e78974b469 | |||
b1bf7aaba5 | |||
a1643870db | |||
aeeae776c7 | |||
356c214718 | |||
4717e33649 | |||
ee4ee9a1b7 | |||
57211c2076 | |||
2ffaadfaca | |||
bc5059fe62 | |||
e6db73c02a | |||
4ad6f1f8ea | |||
7e58b825ea | |||
f3aa7625fe | |||
d9c4493e0d | |||
14dde77134 | |||
fd422a14ce | |||
5343536d27 | |||
6e2e3e45a7 | |||
ed866bf177 | |||
a580f41edd | |||
dcb57af6f7 | |||
2699edd197 | |||
257d0c4673 | |||
4cbd29735f | |||
8ea86d2bd7 | |||
3951376a29 | |||
e1d36045da | |||
c572d535e2 | |||
c79dbeec68 | |||
5501e40b7b | |||
e84c7e5612 | |||
be675d5f9e | |||
bf16a44e87 | |||
98cc3d5070 | |||
2db5f75888 | |||
867b377115 | |||
1882fcfef5 | |||
15dc99a221 | |||
6b35454f35 | |||
d86ca6cc0e | |||
1b9775ccb5 | |||
45d9da3125 | |||
8ccfb1dfbe | |||
6a1a83432f | |||
85195e01f9 | |||
45624037b1 | |||
d4fbdb409f | |||
a738199868 | |||
c1da74de3f | |||
c23624e30c | |||
0f1f40f2e0 | |||
d1982af63d | |||
409e659143 | |||
562603a8cd | |||
6d4b7227ce | |||
9a8ef5e047 | |||
ad449c3b6a | |||
9469452275 | |||
fd8ef26b53 | |||
8cda54c46e | |||
90bc52632e | |||
0b8d2e0b40 | |||
40491dbc2e | |||
fac8971982 | |||
c791e86b8b | |||
d222b55f30 | |||
a04a1710d3 | |||
4f06f94023 | |||
2529c7cdb3 | |||
ab12a933f6 | |||
529efc0bd7 | |||
725fea1169 | |||
84322f81ef | |||
fd637c58e3 | |||
bfc42ce2ac | |||
1bdfb71f2f | |||
807fab42c3 | |||
2f45038bef | |||
f263992393 | |||
f4d1f2a303 | |||
3b2190f7ab | |||
7145213f45 | |||
70f7953027 | |||
c155e82f8c | |||
169493179e | |||
dea2669de2 | |||
e4ce3848fc | |||
8113e412dd | |||
94796efae8 | |||
7aed3dd8c2 | |||
1a649568ce | |||
f9f7d9b299 | |||
9d8e48d303 | |||
f9426cfb74 | |||
e56c960900 | |||
41934ab285 | |||
932ce7c8ca | |||
0730c1efd5 | |||
fd370624c7 | |||
4b8b04f29c | |||
2d276cfa5e | |||
241c5c6da8 | |||
af3ea9039c | |||
c8054ffbc3 | |||
54490faca7 | |||
b6eb73dee4 | |||
3fed9eb75a | |||
45c18b69ba | |||
ac3bc5742d | |||
f6c767f122 | |||
5e83f306b4 | |||
2e2501980c | |||
cb9a7b2ade | |||
a6afbaff38 | |||
111d6ac50d | |||
766fe39c4c | |||
8254bc9f07 | |||
a8139c2e72 | |||
f8264b88d5 | |||
779823eb09 | |||
0d5f369755 | |||
4627d9031c | |||
8ac88475d5 | |||
da88871108 | |||
b61f695aac | |||
a6000d7666 | |||
b5b65c4f67 | |||
ea79b9456a | |||
7c9b895dbe | |||
3c759cbb4c | |||
733356b4f7 | |||
21b4fdee47 | |||
294a43bd97 | |||
dd73a87e19 | |||
bb7859ab44 | |||
bbabc58cf9 | |||
959c48c1a1 | |||
253b088cdb | |||
c99def5724 | |||
75a5ab455e | |||
d5c14ad53c | |||
e90c9a18b0 | |||
fff06d52b8 | |||
f02ca50f88 | |||
4acf2137e8 | |||
6a447a1426 | |||
d1c8036fa4 | |||
30d583f0c9 | |||
f7aab39167 | |||
e4028fccf4 | |||
b6ee7b9f98 | |||
67122800f3 | |||
bfd1a2ee70 | |||
076a2058cc | |||
9dc55c5893 | |||
81ef808191 | |||
8161dd1b6d | |||
ac72544b72 | |||
732607bbb6 | |||
c6f49dc6e2 | |||
ce68391b4e | |||
c42d7cdf19 | |||
f012b4fc78 | |||
56f6a2dc3b | |||
632ad14bd8 | |||
fb0ca533ae | |||
6fbe550afe | |||
294d402990 | |||
95cbce93f0 | |||
77b3ca5fa2 | |||
33d14741e2 | |||
ed67ca0501 | |||
8f31b2fbfe | |||
325695777a | |||
4c9ae52fd7 | |||
3c22fb8d36 | |||
ae8a0d608b | |||
f9aa1ed2a4 | |||
8e4e497d2c | |||
24d2c0edb5 | |||
e1d090ce04 | |||
56caecc5d8 | |||
63bf7f7640 | |||
ad60f5fb37 | |||
991ed7d614 | |||
840836702d | |||
9142eeba3c | |||
882cf47c20 | |||
e8992f254c | |||
92245b5935 | |||
a98332bfb9 | |||
422e4c136d | |||
756597668c | |||
4cc4195fab | |||
78031855b9 | |||
5340d580ce | |||
c8669e19cf | |||
a18e888044 | |||
4e3c124f55 | |||
f744747cef | |||
bff6f8b5a0 | |||
99316c1088 | |||
3c701118e8 | |||
f07557c322 | |||
4f5afa1220 | |||
ead60dab84 | |||
066b4d59d6 | |||
0fd5cdb5d6 | |||
f15f498c1d | |||
46bba3564d | |||
e2b5491e1f | |||
32dc27aebd | |||
adec2aed84 | |||
3eb8b54a1a | |||
80ca12938b | |||
3b03c5171d | |||
e174523fc6 | |||
b2e32aacf3 | |||
6db7144b08 | |||
34d5c415bb | |||
c09dec8b0f | |||
f2187e4bc0 | |||
abd2545346 | |||
e14e6b96e9 | |||
44834f9873 | |||
5dcc13cb24 | |||
25e4a50974 | |||
33276263b0 | |||
168c5c0da6 | |||
aa61bf2a44 | |||
25cee9a4c7 | |||
6780950257 | |||
23bbe0520c | |||
3c63936970 | |||
9fa39e5f25 | |||
b494b80520 | |||
691b204512 | |||
7fba13b550 | |||
f9b3fb8cfa | |||
60ab31c623 | |||
80d26ca068 | |||
d43fdc63ea | |||
6e32b20240 | |||
292918da81 | |||
1f4dee49bc | |||
3141166fb5 | |||
dca04540d4 | |||
e6075738b7 | |||
38d83d18d2 | |||
4de60d4162 | |||
c160c58a5c | |||
8457325b5c | |||
74ebb375d0 | |||
12d833d20c | |||
8b2768daea | |||
81ab323c29 | |||
3c3739c234 | |||
e794da47e2 | |||
5a3535187a | |||
c1975faa7b | |||
bafd9e0f23 | |||
3f7a46177b | |||
ff38b86493 | |||
96268e7161 | |||
c94d623f8f | |||
707cc9d2d1 | |||
41d023abee | |||
f3439861bb | |||
7a38241485 | |||
993469fd82 | |||
944707ec41 | |||
0b80ba6f54 | |||
22049cd1ca | |||
b610d211c5 | |||
da0b339995 | |||
5adcc5b931 | |||
338b09b755 | |||
f3939661e4 | |||
c9c73cbdb2 | |||
73329506a9 | |||
e7322a239e | |||
a026681553 | |||
46cf65f296 | |||
af3767fdfa | |||
a69b2c9cb2 | |||
39d2e6c0fa | |||
69ad91ee91 | |||
5cd94c1d0a | |||
a0a61ad304 | |||
50c502d331 | |||
575df76ec3 | |||
db384c6261 | |||
2108702a2b | |||
66198ca1ec | |||
1f43536018 | |||
94bb060a5b | |||
8c411a21c7 | |||
3fdd900ed8 | |||
f548faa80f | |||
9668e74139 | |||
d0bd33fee3 | |||
ae5f021b8d | |||
dd1aab70fb | |||
e4ff99e336 | |||
ed0cd9b8c0 | |||
22b4342300 | |||
7362accab0 | |||
8da2e41463 | |||
563d5fd528 | |||
6b87a049d4 | |||
9cbd2d4f4b | |||
36ff93e64e | |||
cb29a479b3 | |||
e729706ec6 | |||
9159a0c7d3 | |||
a100c9e63d | |||
f254c9711d | |||
cbe9efbdc8 | |||
a6d226769c | |||
e2b0e7b492 | |||
a7b9467304 | |||
8200abad85 | |||
3f87f1fcd8 | |||
63af5b8ef6 | |||
a51bc1f4c7 | |||
b9e5c3a337 | |||
75d603db5b | |||
a1465ef886 | |||
eccace60f4 | |||
634f1835fc | |||
9762de2901 | |||
e25565c517 | |||
ca0602a1c8 | |||
38ed1e94e8 | |||
2ea7a606b6 | |||
e61ef82f17 | |||
acad3f217f | |||
fe9d3aa4aa | |||
86d7be5615 | |||
5919f49741 | |||
ea9cc07112 | |||
03eaf75c4a | |||
0347d238c2 | |||
bd9e43506f | |||
cfeb8a5bf8 | |||
eed72368c1 | |||
c537a1f5b6 | |||
018b4843f7 | |||
b76f0e2190 | |||
7a42e6d9ce | |||
a918dbfe03 | |||
8cd7379419 | |||
dc6454e910 | |||
1df0e38f06 | |||
c3575e5647 | |||
7950a3f517 | |||
52f467c15c | |||
9f1d153053 | |||
1858c1970f | |||
06b864ad52 | |||
ee0561db72 | |||
a9f55579a2 | |||
fe04f1955f | |||
d5af5cd78a | |||
56b3f854c5 | |||
a93e1520d4 | |||
1486862327 | |||
3600874223 | |||
a3fd74c2e0 | |||
e032fd1aa4 | |||
fd63f84f21 | |||
eea0adb764 | |||
0766bb4162 | |||
03db141316 | |||
9cf18cae0e | |||
e807a3e956 | |||
ef663a1356 | |||
821275ce70 | |||
9d1b44319c | |||
cb6fbba8f4 | |||
2ccfdf0de6 | |||
28b41382d2 | |||
4cffddab51 | |||
3ce6e958b4 | |||
ff2b402ea7 | |||
bdc0074542 | |||
abc9a46667 | |||
4963503f2c | |||
15121fd905 | |||
b83d596789 | |||
925f20f1e1 | |||
02d478186c | |||
227c206d69 | |||
68dabf6c97 | |||
1344d1a2ea | |||
e63895b5b7 | |||
c464cc6688 | |||
b3cc070394 | |||
d815b9ee62 | |||
b3e82fa457 | |||
40edaa52ad | |||
bb73e948d3 | |||
6d4723b321 | |||
f86568fb85 | |||
ffcce08f28 | |||
cfc052c129 | |||
20c3fdd455 | |||
68287c3c66 | |||
716c1c40e9 | |||
aa4d54c0c2 | |||
0ca33139b8 | |||
1ed26ab706 | |||
13141ac7d6 | |||
7abfe85021 | |||
c1c2921ce5 | |||
0114b30824 | |||
7dd8fd4a5f | |||
c700ff3ee7 | |||
03192dd4f3 | |||
ceab517dfa | |||
ada1f84c0b | |||
94dd57d5cd | |||
ae25673853 | |||
19a489b3c5 | |||
f5bacf17b3 | |||
8b5c61953e | |||
25ba93cbfd | |||
ced5e27453 | |||
380aa4a37b | |||
4fbf8f505c | |||
fedaa02067 | |||
4c53a95e79 | |||
0767d4175c | |||
66fae5815d | |||
2541cc1c91 | |||
90e9e00205 | |||
04e07b072d | |||
5077f5f6ad | |||
24cd75ac26 | |||
6d857663fb | |||
ebd74db3c4 | |||
cc9b634bb8 | |||
28c298636d | |||
b2df1ef649 | |||
4f0962b4a2 | |||
96e2a0033f | |||
aacc6877cb | |||
70bf9ad3fb | |||
b599a528b8 | |||
9e19a050a6 | |||
cdfd464bce | |||
7a7825cc61 | |||
dad7ee2f20 | |||
2717651189 | |||
c7ff39169a | |||
9d400911f4 | |||
49937f6ecc | |||
6026d7ec03 | |||
97b9e19c5b | |||
9000972de6 | |||
fa3636cf26 | |||
5470be50a9 | |||
5ad8a7e857 | |||
331da375b7 | |||
dfb67918c8 | |||
969a176be1 | |||
5948d7aa93 | |||
a051fde662 | |||
23ccaca3aa | |||
a51a474cb3 | |||
d5dd568994 | |||
2f1d6a5178 | |||
3c7825fd23 | |||
865f3577d4 | |||
f748f9cef1 | |||
efe994a4c5 | |||
03f3a31d21 | |||
1b50f73803 | |||
37dcc5f74e | |||
cc3f5d75ea | |||
ab8b99b2c1 | |||
35446b6d94 | |||
c1b94778b4 | |||
338f66352a | |||
d4f9aa21e5 | |||
624566d34e | |||
08ad58e7c8 | |||
d1a027c1cf | |||
d115a6f139 | |||
3388d3c592 | |||
ad51597e2e | |||
2506065142 | |||
cc399e3899 | |||
1c0224d1df | |||
76f303da27 | |||
ca5c3c6e8a | |||
a3d5bb3277 | |||
7756bbfd62 | |||
d51be05d83 | |||
06238343df | |||
0900126e4a | |||
39b312b997 | |||
ae48aebcd7 | |||
e9e9925bde | |||
02137576bd | |||
94a57312dd | |||
b5e27a4c89 | |||
9dd08396bc | |||
4b56393264 | |||
8ffb6a9cee | |||
52ba4dc3a1 | |||
98346c5988 | |||
a3bc86ad51 | |||
f06ad0af18 | |||
76aef5949b | |||
9c65c320f9 | |||
2302cbfeb4 | |||
9c45d070b4 | |||
2478e4013f | |||
2aac8b5f80 | |||
c492c824b7 | |||
77e32cc5a6 | |||
778c4803ed | |||
c2eed82353 | |||
db095b77dc | |||
9eca1958ec | |||
e8305fa598 | |||
a42d5e39cf | |||
fd698e9cc6 | |||
be0da93c9c | |||
20020cca92 | |||
f7cfd13d5a | |||
1031b61f6a | |||
6b7314baac | |||
779c60ef20 | |||
551c041452 | |||
25d16eb620 | |||
02d773b6e1 | |||
ec5dbc7e80 | |||
0ac9dac658 | |||
7052f8205a | |||
8fac2296fe | |||
ec0d266975 | |||
93af817d4b | |||
026855b197 | |||
d0844ce44f | |||
4ebe7ee918 | |||
13e98beed2 | |||
3cb4cbf0d2 | |||
894e31bc3f | |||
383fb5bd90 | |||
dc4964eda1 | |||
687f9b0703 | |||
b2f11bcf69 | |||
7ce480bd5c | |||
9ea92ea9ec | |||
40dd7ea5c4 | |||
24fb56845b | |||
23496f2fab | |||
72baa9ea28 | |||
1e9a1a6564 | |||
064dd01508 | |||
3405f3a8f9 | |||
0b88ad6585 | |||
8ae99aaf46 | |||
27bfee2438 | |||
e7c193f409 | |||
0d8027c908 | |||
a2e6c9881a | |||
5b47333955 | |||
82f442f40e | |||
9764941c7e | |||
9d9f11cb3d | |||
1d52fcec75 | |||
d5f194b2c0 | |||
f71c9e4b31 | |||
9fb1655111 | |||
7afa368594 | |||
9575ee31ff | |||
858cc770ec | |||
fa9831ef08 | |||
f4db4ca6ea | |||
0f12ffd513 | |||
90f9d97c54 | |||
d38d4204f8 | |||
8d5408bf42 | |||
c950862b80 | |||
9a71ad7af9 | |||
25952fc7e9 | |||
e5e394d470 | |||
d796158c61 | |||
04deeef385 | |||
0fc9c3e495 | |||
3e816130d3 | |||
da89bb6ed1 | |||
cd2f5f8717 | |||
fb96c5b7fb | |||
6c1c728acb | |||
7ae3c6cc51 | |||
39668a428c | |||
2fa5e57c5d | |||
331ff20272 | |||
4958b08ca7 | |||
87262f7373 | |||
4e04f882e5 | |||
edf2be504c | |||
79d6a68dc1 | |||
72deb13d07 | |||
ec79cb8921 | |||
3203151e84 | |||
bab66baedb | |||
ae94dd974a | |||
fca8eee8e7 | |||
36606b5594 | |||
59e985eb3b | |||
f27076a5cc | |||
250f26e03c | |||
c9ab0cd7cc | |||
f892a5b54d | |||
6a1be99f1e | |||
3b3ec5196a | |||
fd0a978a16 | |||
1376930e5c | |||
86dd349e8f | |||
8bb7f607c1 | |||
c26167ce61 | |||
965af4fbaa | |||
3653b3111a | |||
e1df746346 | |||
65b1fef24e | |||
4590331a2b | |||
a6e7303f26 | |||
3eb9163412 | |||
b0dd574c26 | |||
f8c984d6c2 | |||
ec5beff22f | |||
8f8796f598 | |||
b5b4550cfb | |||
7e24d9b1c3 | |||
7d5d69c380 | |||
a50d8159fc | |||
91fba5dbeb | |||
de94fe4ae3 | |||
9c680a26f7 | |||
c03330dcd3 | |||
dfcd35bacc | |||
4ec3037474 | |||
567622f523 | |||
4b824ecd6c | |||
c418399807 | |||
58229e2255 | |||
b5dc999584 | |||
cf44cb59b3 | |||
9e73218535 | |||
7379006327 | |||
7d502e799f | |||
794466d7e3 | |||
c8a91c1c46 | |||
2158309020 | |||
536c3091e5 | |||
2dcf8159e5 | |||
e92c331f44 | |||
8e18a5c1e3 | |||
2183038240 | |||
33a8d8b579 | |||
2ca000795d | |||
150e15625d | |||
557869802a | |||
e3ddc43944 | |||
50e7d75932 | |||
ba67306960 | |||
9107c1926a | |||
385e582fc6 | |||
72becedee5 | |||
09b08c8744 | |||
36bf5af288 | |||
fc552d1130 | |||
1913003ea2 | |||
d1d19830b0 | |||
61ad100ef1 | |||
28e25f0232 | |||
da5962c337 | |||
1be413f20d | |||
adecee43ec | |||
a13bd1883e | |||
a43d1302ac | |||
734ddc44bf | |||
87d5854831 | |||
b00988e792 | |||
aceb111f86 | |||
0b42193d3c | |||
f9b8f86fce | |||
715d5fdb85 | |||
2997fb4f5f | |||
fe39a7f701 | |||
0eaaa73e23 | |||
a9c25a28c6 | |||
df538033d9 | |||
27d9244254 | |||
37418b2658 | |||
be3bd18c34 | |||
4461f73c86 | |||
949feb912a | |||
bfd1839a8b | |||
31462aeec7 | |||
ef2d62adac | |||
8d58aa2364 | |||
ade8053430 | |||
54bef2a091 | |||
3ae3da3673 | |||
d5ba306081 | |||
c31a8fb329 | |||
4407d99a1b | |||
5151a21575 | |||
ccfc23f3fe | |||
b77f116bdf | |||
2c964cfbee | |||
e024542d8e | |||
170636d098 | |||
fa10fe558d | |||
b1d8e21772 | |||
cf823c0061 | |||
b98f032141 | |||
30b138ffa3 | |||
500f8b508d | |||
5dda19cdc5 | |||
a2955a34ab | |||
00c663a694 | |||
d1de427653 | |||
6cbbd4b84b | |||
b4ea171908 | |||
2629603012 | |||
9dc571062c | |||
83007983a5 | |||
98b7b15548 | |||
c628c1edcd | |||
28ebb675a7 | |||
080b46329e | |||
663e5604fd | |||
b488ea949f | |||
7a8a22a3a8 | |||
213b74ff84 | |||
7042f28a79 | |||
526f1c952d | |||
659c24eb14 | |||
887db59a57 | |||
4541e2d636 | |||
99f633d222 | |||
e2166235ad | |||
bb48e8ae36 | |||
12ad339221 | |||
0928245853 | |||
c51f935eff | |||
e4a0473f9e | |||
92000e5d43 | |||
ad9b22a792 | |||
c24694565f | |||
d549923538 | |||
f8512e9e35 | |||
c402583f2b | |||
fe85d4bd37 | |||
da8830493f | |||
9919e1ecc2 | |||
80d99dfd18 | |||
275e4bd453 | |||
096934e795 | |||
4c29fc9f02 | |||
d5f10276ee | |||
6868b0d8ba | |||
6680f64e50 | |||
0611ddda11 | |||
87ca1ccc11 | |||
836a3e0238 | |||
6520350731 | |||
72693e09e2 | |||
f23850068a | |||
061d2c9d17 | |||
d96ca6f799 | |||
57c3cfc8fa | |||
8e0bb19e49 | |||
fd1b160d93 | |||
718bd41666 | |||
e92515184a | |||
e78d6edefa | |||
1620a3f7ed | |||
24cb08b358 | |||
83de47921d | |||
bd4241d74e | |||
dea7052da0 | |||
8be422cc99 | |||
1712088c5a | |||
8d8016a01e | |||
d390b500b4 | |||
be62b72e90 | |||
af27d50214 | |||
e6fa74df61 | |||
4cde1ed9e4 | |||
858b906722 | |||
3a32b65454 | |||
12df136ccf | |||
71cde6bf3f | |||
9dfd019e0a | |||
756407d407 | |||
f21120e9f7 | |||
8055c7b994 | |||
efad73415d | |||
c3988ce812 | |||
75a110fde9 | |||
8b0308589d | |||
9232db4ef7 | |||
c157e93011 | |||
2667f3c259 | |||
7b17362986 | |||
9a30e0f49b | |||
9194abee4a | |||
cf22ff49fc | |||
d84ffc375b | |||
e1da02db3d | |||
bdbaabe4a0 | |||
e005ced3d4 | |||
5677a91e82 | |||
d36fc0c916 | |||
48b32c2816 | |||
4697e71c54 | |||
281413d571 | |||
3cfa69a248 | |||
74edb197de | |||
871dc04fc9 | |||
c4943ef3b3 | |||
6a1d6e40a7 | |||
b6eb9a0dd3 | |||
c9c716e5fe | |||
18fba35173 | |||
3e1a9e9dde | |||
985ed797bd | |||
2095f16402 | |||
c1aa6823b0 | |||
12e0367c66 | |||
cc458753b3 | |||
b446fb5476 | |||
cd21645148 | |||
c264424464 | |||
c7c633621f | |||
9fd96f01c9 | |||
f83d00f955 | |||
164f86a775 | |||
9ec40f8025 | |||
f6a42a4a5d | |||
c3c889784a | |||
b310e176de | |||
ce82a61018 | |||
892403cf8c | |||
17e2c992b3 | |||
abdddc0d42 | |||
26abfd441a | |||
efa139705a | |||
c480e39e39 | |||
2dbaad04d5 | |||
4c186a4204 | |||
8a5997e54e | |||
632b4e9b22 | |||
1689bdfdbc | |||
cc622d060a | |||
d014d25e40 | |||
36c76fd00e | |||
d4850ec8bb | |||
e9aef3468e | |||
0ee2c041a6 | |||
856ffb9fea | |||
5756eb33b9 | |||
de52b1a16e | |||
9c4aeff807 | |||
2a33c6a6dc | |||
d321fa043f | |||
ef74fd5363 | |||
14136a9e69 | |||
863f221780 | |||
9053cced6e | |||
c60a296e3d | |||
2898cc6086 | |||
e3f6b320de | |||
93ff9ea575 | |||
9a49e7aa3b | |||
08334be18d | |||
be0f813394 | |||
1a7e7b3851 | |||
d20e900ab2 | |||
d24b33f045 | |||
fbf95c0f41 | |||
7e4d296092 | |||
1756babbc2 | |||
3d096f1fc7 | |||
2b3cdd58d6 | |||
a9de544a8d | |||
7893ee068c | |||
0471eb5d4a | |||
9720fc1813 | |||
54dab4ba6a | |||
22ce80cd23 | |||
545af78e60 | |||
db7ef0e8a5 | |||
ae1d7c785f | |||
61eb9a3aed | |||
72b787814e | |||
9a9bae4f2c | |||
ab258cb6dd | |||
6d5113b6ea | |||
bc455a4344 | |||
5b9553d042 | |||
aa591de3e5 | |||
1aa6c01c5a | |||
9a6c835244 | |||
cd7568e4f8 | |||
1a951648cb | |||
64f8b56e2d | |||
657918c96e | |||
c2975262ea | |||
8b072c0ed2 | |||
08ee5dff9c | |||
ba49c2a840 | |||
8afb2a3b84 | |||
a61fa3e614 | |||
8c951f6a19 | |||
0805929d41 | |||
10b2ead705 | |||
c4b622ccdb | |||
82e69fc7a6 | |||
40a30cc927 | |||
88194ac3d3 | |||
9f41e25166 | |||
b6eb866b36 | |||
bdeaf14285 | |||
5694023da8 | |||
7ce58a7203 | |||
07beddb5a2 | |||
a39f1914ea | |||
6e8e19523d | |||
b9c518a6ff | |||
86bc5595f2 | |||
63c2538027 | |||
6597d980df | |||
a69f78d336 | |||
97256bfa15 | |||
a4c3bcd6af | |||
0782dc404d | |||
2f76ba32ec | |||
19aa38ac5d | |||
5b40fe1740 | |||
439072c6b1 | |||
b2ccc69628 | |||
4dd694c4e2 | |||
286517b127 | |||
6e02e09471 | |||
a70b0ed48f | |||
5829edf23c | |||
7d24e1d414 | |||
dfa740456f | |||
96f96ebe77 | |||
fd5c10b103 | |||
fce9c1a72d | |||
6f158aa749 | |||
d83c7e280f | |||
e10deb9e54 | |||
0c7af2ce89 | |||
7377aa9c20 | |||
6282330226 | |||
3f284de07b | |||
9762645b6d | |||
916cb6e314 | |||
e4502bbe54 | |||
1d360dfa95 | |||
74d8dad94c | |||
2c4ee620c1 | |||
f84774a390 | |||
bd1395926b | |||
8b1ada7450 | |||
c0916ce25b | |||
0f44e65bf1 | |||
e87c3e2090 | |||
671448dbfc | |||
82bdbbaf57 | |||
fcac18f77f | |||
c7901949cc | |||
0a26499a34 | |||
ff1f7b53af | |||
ba35f43902 | |||
c8debee1ca | |||
2e5a97b0a1 | |||
d03eb9e7f2 | |||
ce01a2f387 | |||
b740b978b5 | |||
77ad71436e | |||
0161f855ae | |||
20f56c6b91 | |||
e874fa41f0 | |||
f7953f74bd | |||
326f048639 | |||
62f9bab052 | |||
c7da4ddd8f | |||
0b98bc1541 | |||
99b262c0a6 | |||
3a6fc106a8 | |||
c5de205e87 | |||
998f4e383d | |||
d276918bcf | |||
e947c203a1 | |||
d947d0a49d | |||
19f6b181bc | |||
eaca564c6f | |||
c687b19a6d | |||
e193e92443 | |||
2a2d70e4da | |||
de475e7347 | |||
f76e7ae8b7 | |||
7fc44b9a35 | |||
a7f50e05de | |||
60c84d57ba | |||
5cb1aa45ad | |||
0c3f088810 | |||
12a390229c | |||
33aa31e55e | |||
0717d386e0 | |||
74683bc1fc | |||
762c564c61 | |||
93542ea34f | |||
87b176a7b7 | |||
26d89a9ac2 | |||
90b9422f65 | |||
490a215dce | |||
eb405b4b25 | |||
401f748509 | |||
5caf9180d1 | |||
d149ae486c | |||
769b43ce07 | |||
bebcdfd2da | |||
c11bcfa10f | |||
043a33acd9 | |||
93d2a862c4 | |||
38e031da13 | |||
c3dd6e6a34 | |||
8b9bd80d20 | |||
26ffabf0a5 | |||
b9d29ee06d | |||
fedc5572f1 | |||
9efca268c9 | |||
44438dab64 | |||
2e338b0506 | |||
20209cce7e | |||
22309acfff | |||
f0760d3969 | |||
c2f97cde59 | |||
b1ff11a0b8 | |||
eaeab18103 | |||
e641539a94 | |||
bd68e42312 | |||
95f3fdb130 | |||
31ee369a90 | |||
f5c9c3edba | |||
4192c153a2 | |||
280ef536da | |||
b788a7e32b | |||
ea30612de9 | |||
7bcd636e44 | |||
208150f797 | |||
dacb3c74fb | |||
53af17b2ab | |||
26942a9265 | |||
e98d18f369 | |||
8e6a383617 | |||
01ae2c3b38 | |||
8083e2b51c | |||
df44ea524c | |||
e50fd54f4e | |||
5503326ea6 | |||
cb6a42e97d | |||
aaac98249c | |||
49aafaf5b8 | |||
207030cb48 | |||
75ff756808 | |||
7781083161 | |||
8a6adf3958 | |||
734d4f8ed3 | |||
ccc87ad24b | |||
8e2c8360d4 | |||
b91a132407 | |||
57649698c2 | |||
b8d73f6b09 | |||
83c84f6e86 | |||
30ccd30fa6 | |||
15c5508239 | |||
d2478120ba | |||
b671f9dd05 | |||
4768d66fbb | |||
b11879650c | |||
ce13beff68 | |||
11752f5e2e | |||
b96d95f3e4 | |||
ca7d28dcf4 | |||
4c0aba2eea | |||
61890dcf1f | |||
45b56a7aaa | |||
ab66a18b53 | |||
c515c2f439 | |||
21728ab60f | |||
6c9a069cf0 | |||
4d1a71befd | |||
eb6fdd29d3 | |||
eaed9837d1 | |||
077e38e6a4 | |||
7ff03ef46b | |||
aca3399e9d | |||
13429d8e68 | |||
ad2d1e574f | |||
72c944d13d | |||
3244b7d62e | |||
7e2966b02c | |||
6f851973fa | |||
69a98c4c24 | |||
c896057400 | |||
2d38f97b17 | |||
f1607b9045 | |||
338c7b5830 | |||
b6836d9bb1 | |||
2ff54d4c50 | |||
cb972b1b91 | |||
54cac88d26 | |||
b742ffd476 | |||
ec5768f3d4 | |||
a474511e27 | |||
49fbdce398 | |||
08b56ec7cd | |||
a53ea09da6 | |||
c35eb10343 | |||
7b9959af21 | |||
d98c9cda23 | |||
221596e524 | |||
48abeda366 | |||
e6292663b4 | |||
27a31725c0 | |||
2e9eb2df1d | |||
d8afb2ec87 | |||
f64da37d92 | |||
946b8a629a | |||
8b8f50b55f | |||
b63b054dee | |||
a8f664d9bb | |||
b9c51d29ae | |||
3f0f666d25 | |||
e8bf6b18c7 | |||
e55b37b54e | |||
936fdbad66 | |||
02ae02634b | |||
299510f6ec | |||
99f9c0e3b1 | |||
70d55829b1 | |||
4cae0ec883 | |||
9181423e84 | |||
421035a1f6 | |||
c70cc32a9d | |||
1b7f0affa9 | |||
c5a7c76800 | |||
c5d4956635 | |||
0d77245b46 | |||
7b490c18d2 | |||
1a117e47b5 | |||
77f4571316 | |||
c37730aa50 | |||
209bb7c504 | |||
3e4da43627 | |||
df5b63fbc5 | |||
5648d3d32a | |||
991046eaeb | |||
6b54c0fcf9 | |||
ea0804be93 | |||
b330c22b39 | |||
124e5df985 | |||
6d62e65a20 | |||
12a599ec49 | |||
0af537187f | |||
7fa950bf8d | |||
dda032ff44 | |||
4bfbb94609 | |||
4ea66bfc76 | |||
efd1b5775e | |||
e94bcc3049 | |||
fb4cc305c2 | |||
b42fb280eb | |||
12e272af22 |
13
.dockerignore
Normal file
13
.dockerignore
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
# The .gitignore is the single point of truth for files which should be ignored.
|
||||||
|
# Add patterns, files and folders to the .gitignore and execute 'make build'
|
||||||
|
# NEVER TOUCH THE .dockerignore, BECAUSE IT ANYHOW WILL BE OVERWRITTEN
|
||||||
|
|
||||||
|
site.retry
|
||||||
|
*__pycache__
|
||||||
|
venv
|
||||||
|
*.log
|
||||||
|
*.bak
|
||||||
|
*tree.json
|
||||||
|
roles/list.json
|
||||||
|
*.pyc
|
||||||
|
.git
|
1
.gitattributes
vendored
Normal file
1
.gitattributes
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
* text=auto eol=lf
|
7
.github/FUNDING.yml
vendored
Normal file
7
.github/FUNDING.yml
vendored
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
github: kevinveenbirkenbach
|
||||||
|
|
||||||
|
patreon: kevinveenbirkenbach
|
||||||
|
|
||||||
|
buy_me_a_coffee: kevinveenbirkenbach
|
||||||
|
|
||||||
|
custom: https://s.veen.world/paypaldonate
|
4
.github/workflows/TODO.md
vendored
Normal file
4
.github/workflows/TODO.md
vendored
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
# Todo
|
||||||
|
- Create workflow test-server, which tests all server roles
|
||||||
|
- Create workflow test-desktop, which tests all desktop roles
|
||||||
|
- For the backup services keep in mind to setup a tandem, which pulls the backups from each other to verify that this also works
|
32
.github/workflows/test-cli.yml
vendored
Normal file
32
.github/workflows/test-cli.yml
vendored
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
name: Build & Test Infinito.Nexus CLI in Docker Container
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
pull_request:
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build-and-test:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
timeout-minutes: 15
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Build Docker image
|
||||||
|
run: |
|
||||||
|
docker build -t infinito:latest .
|
||||||
|
|
||||||
|
- name: Clean build artifacts
|
||||||
|
run: |
|
||||||
|
docker run --rm infinito:latest make clean
|
||||||
|
|
||||||
|
- name: Generate project outputs
|
||||||
|
run: |
|
||||||
|
docker run --rm infinito:latest make build
|
||||||
|
|
||||||
|
- name: Run tests
|
||||||
|
run: |
|
||||||
|
docker run --rm infinito:latest make test
|
13
.gitignore
vendored
13
.gitignore
vendored
@@ -1 +1,12 @@
|
|||||||
site.retry
|
# The .gitignore is the single point of truth for files which should be ignored.
|
||||||
|
# Add patterns, files and folders to the .gitignore and execute 'make build'
|
||||||
|
# NEVER TOUCH THE .dockerignore, BECAUSE IT ANYHOW WILL BE OVERWRITTEN
|
||||||
|
|
||||||
|
site.retry
|
||||||
|
*__pycache__
|
||||||
|
venv
|
||||||
|
*.log
|
||||||
|
*.bak
|
||||||
|
*tree.json
|
||||||
|
roles/list.json
|
||||||
|
*.pyc
|
||||||
|
38
CODE_OF_CONDUCT.md
Normal file
38
CODE_OF_CONDUCT.md
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
# Code of Conduct
|
||||||
|
|
||||||
|
In order to foster a welcoming, open, and respectful community for everyone, we expect all contributors and participants in the Infinito.Nexus project to abide by the following Code of Conduct.
|
||||||
|
|
||||||
|
## Our Pledge
|
||||||
|
|
||||||
|
We are committed to creating a friendly, safe, and inclusive environment for all members of our community—regardless of age, race, gender, sexual orientation, disability, religion, or any other status. We pledge to treat everyone with respect and courtesy.
|
||||||
|
|
||||||
|
## Expected Behavior
|
||||||
|
|
||||||
|
- **Be Respectful:** Communicate and collaborate with courtesy, empathy, and respect. Listen to others’ opinions and value their input.
|
||||||
|
- **Be Inclusive:** Welcome contributions from all backgrounds and experiences. Encourage diverse perspectives and engage in constructive dialogue.
|
||||||
|
- **Practice Professionalism:** Use clear and professional language in all communications. Maintain focus on ideas and project goals rather than personal attributes.
|
||||||
|
- **Be Collaborative:** Foster an environment where everyone can contribute freely. Offer help and constructive feedback, and work together towards common goals.
|
||||||
|
- **Respect Boundaries:** Understand and honor others’ personal and professional boundaries.
|
||||||
|
|
||||||
|
## Unacceptable Behavior
|
||||||
|
|
||||||
|
- **Harassment and Discrimination:** Any form of harassment, hate speech, or discriminatory behavior will not be tolerated.
|
||||||
|
- **Intimidation and Threats:** Verbal or written intimidation, threats, or aggressive behavior toward any community member is strictly prohibited.
|
||||||
|
- **Personal Attacks:** Avoid personal insults or demeaning comments toward any contributor or participant.
|
||||||
|
- **Exclusionary Behavior:** Do not engage in behaviors or comments that might exclude or isolate community members.
|
||||||
|
|
||||||
|
## Reporting and Enforcement
|
||||||
|
|
||||||
|
If you experience or witness any behavior that violates this Code of Conduct, please report it promptly. Reports should be sent to kevin@veen.world. All reports will be treated with discretion and confidentiality.
|
||||||
|
|
||||||
|
Our project maintainers and community leaders will review all reports and take appropriate action, which may include warnings, temporary suspension, or permanent expulsion from the community if necessary.
|
||||||
|
|
||||||
|
## Scope
|
||||||
|
|
||||||
|
This Code of Conduct applies to all spaces managed by the Infinito.Nexus project, including GitHub repositories, mailing lists, chat rooms, and other communication channels.
|
||||||
|
|
||||||
|
## Acknowledgment
|
||||||
|
|
||||||
|
By participating in the Infinito.Nexus project, you agree to adhere to this Code of Conduct. We appreciate your cooperation in helping us build a positive and productive community.
|
||||||
|
|
||||||
|
Thank you for contributing to a safe and inclusive Infinito.Nexus community!
|
@@ -1,81 +0,0 @@
|
|||||||
# Common Applications
|
|
||||||
This section outlines the common applications tailored for both servers and end-users, offering a wide range of functionalities to enhance system performance, security, and usability.
|
|
||||||
|
|
||||||
## Base Setup
|
|
||||||
Key for initial system configuration, this section includes hostname setting, systemd journal management, locale configurations, and swapfile handling. Essential for both server and end-user setups, it ensures a solid foundation for system operations.
|
|
||||||
|
|
||||||
- **[Hostname](./roles/hostname/)**: Sets the system's hostname.
|
|
||||||
- **[Journalctl](./roles/journalctl/)**: Configures systemd journal settings.
|
|
||||||
- **[Locales](./roles/locales/)**: Configures system locales.
|
|
||||||
- **[System-Swapfile](./roles/system-swapfile/)**: Configures swapfile creation and management.
|
|
||||||
|
|
||||||
## Administration Tools
|
|
||||||
These tools are crucial for effective system administration, encompassing Git setup, Linux admin tools, and sudo configuration, suitable for both server environments and power users.
|
|
||||||
|
|
||||||
- **[Git](./roles/git/)**: Basic Git version control system setup.
|
|
||||||
- **[Administrator-Tools](./roles/pc-administrator-tools/)**: Installs basic Linux administration tools.
|
|
||||||
- **[Sudo](./roles/sudo/)**: Installs and configures sudo.
|
|
||||||
|
|
||||||
## Update
|
|
||||||
This category focuses on automated updates and maintenance for the system and its components, including package managers and Docker containers, ensuring systems are up-to-date and secure.
|
|
||||||
|
|
||||||
- **[update](./roles/update/)**: Automates the process of system updates.
|
|
||||||
- **[update-apt](./roles/update-apt/)**: Updates system packages using apt (for Debian-based systems).
|
|
||||||
- **[update-docker](./roles/update-docker/)**: Keeps Docker containers up to date.
|
|
||||||
- **[update-pacman](./roles/update-pacman/)**: Updates system packages using Pacman (for Arch-based systems).
|
|
||||||
- **[update-yay](./roles/update-yay/)**: Updates system packages using yay.
|
|
||||||
|
|
||||||
## Driver
|
|
||||||
Caters to a range of devices and needs for hardware driver installation and configuration, an integral part for both server hardware optimization and end-user device functionality.
|
|
||||||
|
|
||||||
- **[driver-epson-multiprinter](./roles/driver-epson-multiprinter/)**: Installs drivers for Epson multi-function printers.
|
|
||||||
- **[driver-intel](./roles/driver-intel/)**: Installs Intel drivers, typically for graphics and other hardware.
|
|
||||||
- **[driver-msi-keyboard-color](./roles/driver-msi-keyboard-color/)**: Configures MSI keyboard color settings.
|
|
||||||
- **[driver-non-free](./roles/driver-non-free/)**: Installs non-free drivers, generally for specific hardware needs.
|
|
||||||
|
|
||||||
## Security
|
|
||||||
Enhances system security with roles focused on security measures, user configurations, and SSH settings. It's vital for protecting both server environments and end-user systems.
|
|
||||||
- **[System Security](./roles/system-security/)**: Enhances overall system security.
|
|
||||||
- **[User Administrator](./roles/user-administrator/)**: Setup for system administrator user.
|
|
||||||
- **[User Alarm](./roles/user-alarm/)**: Manages the alarm user.
|
|
||||||
- **[PC SSH](./roles/pc-ssh/)**: Configuration of SSH for secure remote access.
|
|
||||||
- **[SSHD](./roles/sshd/)**: Configures SSH daemon settings.
|
|
||||||
- **[System Maintenance Lock](./roles/system-maintenance-lock)**: Locks maintenance services to prevent dangerous interactions between services
|
|
||||||
|
|
||||||
## Virtual Private Network (VPN)
|
|
||||||
Centers on VPN configurations for secure and efficient network connectivity, particularly crucial for remote server access and end-users needing secure connections.
|
|
||||||
- **[client-wireguard](./roles/client-wireguard/)**: Configures Wireguard VPN client.
|
|
||||||
- **[client-wireguard-behind-firewall](./roles/client-wireguard-behind-firewall/)**: Sets up Wireguard client functionality behind a firewall.
|
|
||||||
- **[wireguard](./roles/wireguard/)**: Installs and configures Wireguard for secure VPN connections.
|
|
||||||
|
|
||||||
## Notifier
|
|
||||||
Sets up system event notifications via email and Telegram, a versatile feature for server administrators and end-users alike to stay informed about their system's status.
|
|
||||||
- **[Systemd-Notifier](./roles/systemd-notifier/)**: Notifier service for systemd.
|
|
||||||
- **[Systemd-Notifier-Email](./roles/systemd-notifier-email/)**: Email notifications for systemd services.
|
|
||||||
- **[Systemd-Notifier-Telegram](./roles/systemd-notifier-telegram/)**: Telegram notifications for systemd services.
|
|
||||||
|
|
||||||
## Backup Solutions
|
|
||||||
Focuses on comprehensive backup strategies and cleanup procedures, encompassing data backups, remote server backups, and maintenance of backup storage efficiency, crucial for data integrity in both servers and personal devices.
|
|
||||||
|
|
||||||
### Backups
|
|
||||||
For USB devices, Docker volumes, remote servers, and user configurations.
|
|
||||||
- **[backup-data-to-usb](./roles/backup-data-to-usb/)**: Automates data backup to USB devices.
|
|
||||||
- **[backup-docker-to-local](./roles/backup-docker-to-local/)**: Backs up Docker volumes to local storage.
|
|
||||||
- **[backup-remote-to-local](./roles/backup-remote-to-local/)**: Pulls backups from remote servers for local storage.
|
|
||||||
- **[backups-provider](./roles/backups-provider/)**: Manages backup processes and storage solutions.
|
|
||||||
- **[backups-provider-user](./roles/backups-provider-user/)**: Creates and configures users for backup processes.
|
|
||||||
|
|
||||||
### Backups Cleanup
|
|
||||||
Manages disk space and cleans up old or failed backups.
|
|
||||||
- **[cleanup-backups-service](./roles/cleanup-backups-service/)**: Service to clean up old backups automatically.
|
|
||||||
- **[cleanup-backups-timer](./roles/cleanup-backups-timer/)**: Timer for scheduling the backup cleanup service.
|
|
||||||
- **[cleanup-disc-space](./roles/cleanup-disc-space/)**: Manages and frees up disk space on the system.
|
|
||||||
- **[cleanup-failed-docker-backups](./roles/cleanup-failed-docker-backups/)**: Cleans up failed Docker backups.
|
|
||||||
|
|
||||||
## Other
|
|
||||||
Encompasses miscellaneous essential tools and systems, including package management, spellchecking, and typesetting, beneficial for both server maintenance and enhancing end-user experience.
|
|
||||||
- **[System-Aur-Helper](./roles/system-aur-helper/)**: Installs and configures AUR helper (yay).
|
|
||||||
- **[Hunspell](./roles/hunspell/)**: Installation of Hunspell spellchecker.
|
|
||||||
- **[Latex](./roles/pc-latex/)**: Installation of LaTeX typesetting system.
|
|
||||||
- **[Java](./roles/java/)**: Installs Java Development Kit (JDK).
|
|
||||||
- **[Python Pip](./roles/python-pip/)**: Installation of Python Pip package manager.
|
|
17
CONTACT.md
Normal file
17
CONTACT.md
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
# Contact
|
||||||
|
|
||||||
|
<img src="https://cybermaster.space/wp-content/uploads/sites/7/2023/11/FVG_8364BW-scaled.jpg" width="300" style="float: right; margin-left: 30px;">
|
||||||
|
|
||||||
|
My name is Kevin Veen-Birkenbach and I'm the author and founder of Infinito.Nexus.
|
||||||
|
|
||||||
|
I'm glad to assist you in the implementation of your secure and scalable IT infrastructure solution with Infinito.Nexus.
|
||||||
|
|
||||||
|
My expertise in server administration, digital corporate infrastructure, custom software, and information security, all underpinned by a commitment to Open Source solutions, guarantees that your IT setup meets the highest industry standards.
|
||||||
|
|
||||||
|
Discover how Infinito.Nexus can transform your IT landscape.
|
||||||
|
|
||||||
|
Contact me for more details:
|
||||||
|
|
||||||
|
🌍 Website: [www.CyberMaster.Space](https://cybermaster.space)<br />
|
||||||
|
📧 Email: [kevin@veen.world](mailto:kevin@veen.world)<br />
|
||||||
|
☎️ Phone: [+ 49 178 179 80 23](tel:00491781798023)
|
57
CONTRIBUTING.md
Normal file
57
CONTRIBUTING.md
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
# Contributing
|
||||||
|
|
||||||
|
Thank you for your interest in contributing to Infinito.Nexus! We welcome contributions from the community to help improve and enhance this project. Your input makes the project stronger and more adaptable to a wide range of IT infrastructure needs.
|
||||||
|
|
||||||
|
## How to Contribute
|
||||||
|
|
||||||
|
There are several ways you can help:
|
||||||
|
- **Reporting Issues:** Found a bug or have a feature request? Please open an issue on our [GitHub Issues page](https://s.infinito.nexus/issues) with a clear description and steps to reproduce the problem.
|
||||||
|
- **Code Contributions:** If you'd like to contribute code, fork the repository, create a new branch for your feature or bug fix, and submit a pull request. Ensure your code adheres to our coding style and includes tests where applicable.
|
||||||
|
- **Documentation:** Improving the documentation is a great way to contribute. Whether it's clarifying an existing section or adding new guides, your contributions help others understand and use Infinito.Nexus effectively.
|
||||||
|
- **Financial Contributions:** If you appreciate Infinito.Nexus and want to support its ongoing development, consider making a financial contribution. For more details, please see our [donate options](12_DONATE.md).
|
||||||
|
|
||||||
|
## Code of Conduct
|
||||||
|
|
||||||
|
All contributors are expected to adhere to our [Code of Conduct](CODE_OF_CONDUCT.md). Please review it to ensure that our community remains welcoming and respectful.
|
||||||
|
|
||||||
|
## Pull Request Guidelines
|
||||||
|
|
||||||
|
Before submitting a pull request, please ensure that:
|
||||||
|
- Your code is well-documented and follows the project's coding standards.
|
||||||
|
- All tests pass and, if necessary, new tests are added to cover your changes.
|
||||||
|
- The commit messages are clear and descriptive.
|
||||||
|
- The relevant documentation is updated to reflect your changes.
|
||||||
|
|
||||||
|
## Reporting Issues
|
||||||
|
|
||||||
|
When reporting an issue, please include:
|
||||||
|
- A descriptive title and detailed explanation.
|
||||||
|
- Steps to reproduce the issue.
|
||||||
|
- Information about your environment (e.g., operating system, Docker version, Ansible version, etc.).
|
||||||
|
- Any relevant logs or error messages.
|
||||||
|
|
||||||
|
## Coding Style
|
||||||
|
|
||||||
|
Please follow these guidelines when contributing code:
|
||||||
|
- Write clean, readable code that integrates with the existing codebase.
|
||||||
|
- Use descriptive names for variables and functions.
|
||||||
|
- Comment your code where necessary, especially in complex sections.
|
||||||
|
- Format your code according to the project's style guidelines.
|
||||||
|
|
||||||
|
## License and Commercial Use
|
||||||
|
|
||||||
|
Infinito.Nexus is primarily designed for private use. Commercial use of Infinito.Nexus is not permitted without a proper licensing agreement. By contributing to this project, you agree that your contributions will be licensed under the same terms as the rest of the project.
|
||||||
|
|
||||||
|
## Getting Started
|
||||||
|
|
||||||
|
1. **Fork** the repository on GitHub.
|
||||||
|
2. **Clone** your fork to your local machine.
|
||||||
|
3. **Create** a new branch for your feature or bug fix.
|
||||||
|
4. **Implement** your changes and commit them with clear messages.
|
||||||
|
5. **Push** your branch to GitHub and open a pull request.
|
||||||
|
|
||||||
|
## Community and Support
|
||||||
|
|
||||||
|
If you have any questions or need help, feel free to open an issue or join our community discussions. We appreciate your efforts and are here to support you.
|
||||||
|
|
||||||
|
Thank you for contributing to Infinito.Nexus and helping us build a better, more efficient IT infrastructure solution!
|
9
DONATE.md
Normal file
9
DONATE.md
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
# Support Us
|
||||||
|
|
||||||
|
Infinito.Nexus is an Open Source Based transformative tool designed to redefine IT infrastructure setup for organizations and individuals alike. Your contributions directly support the ongoing development and innovation behind Infinito.Nexus, ensuring that it continues to grow and serve its community effectively.
|
||||||
|
|
||||||
|
If you enjoy using Infinito.Nexus and would like to contribute to its improvement, please consider donating. Every contribution, no matter the size, helps us maintain and expand this project.
|
||||||
|
|
||||||
|
[](https://github.com/sponsors/kevinveenbirkenbach) [](https://www.patreon.com/c/kevinveenbirkenbach) [](https://buymeacoffee.com/kevinveenbirkenbach) [](https://s.veen.world/paypaldonate)
|
||||||
|
|
||||||
|
Thank you for your support!
|
69
Dockerfile
Normal file
69
Dockerfile
Normal file
@@ -0,0 +1,69 @@
|
|||||||
|
FROM archlinux:latest
|
||||||
|
|
||||||
|
# 1) Update system and install build/runtime deps
|
||||||
|
RUN pacman -Syu --noconfirm \
|
||||||
|
base-devel \
|
||||||
|
git \
|
||||||
|
python \
|
||||||
|
python-pip \
|
||||||
|
python-setuptools \
|
||||||
|
alsa-lib \
|
||||||
|
go \
|
||||||
|
rsync \
|
||||||
|
&& pacman -Scc --noconfirm
|
||||||
|
|
||||||
|
# 2) Stub out systemctl & yay so post-install hooks and AUR calls never fail
|
||||||
|
RUN printf '#!/bin/sh\nexit 0\n' > /usr/bin/systemctl \
|
||||||
|
&& chmod +x /usr/bin/systemctl \
|
||||||
|
&& printf '#!/bin/sh\nexit 0\n' > /usr/bin/yay \
|
||||||
|
&& chmod +x /usr/bin/yay
|
||||||
|
|
||||||
|
# 3) Build & install python-simpleaudio from AUR manually (as non-root)
|
||||||
|
RUN useradd -m aur_builder \
|
||||||
|
&& su aur_builder -c "git clone https://aur.archlinux.org/python-simpleaudio.git /home/aur_builder/psa && \
|
||||||
|
cd /home/aur_builder/psa && \
|
||||||
|
makepkg --noconfirm --skippgpcheck" \
|
||||||
|
&& pacman -U --noconfirm /home/aur_builder/psa/*.pkg.tar.zst \
|
||||||
|
&& rm -rf /home/aur_builder/psa
|
||||||
|
|
||||||
|
# 4) Clone Kevin’s Package Manager and create its venv
|
||||||
|
ENV PKGMGR_REPO=/opt/package-manager \
|
||||||
|
PKGMGR_VENV=/root/.venvs/pkgmgr
|
||||||
|
|
||||||
|
RUN git clone https://github.com/kevinveenbirkenbach/package-manager.git $PKGMGR_REPO \
|
||||||
|
&& python -m venv $PKGMGR_VENV \
|
||||||
|
&& $PKGMGR_VENV/bin/pip install --upgrade pip \
|
||||||
|
# install pkgmgr’s own deps + the ansible Python library so infinito import yaml & ansible.plugins.lookup work
|
||||||
|
&& $PKGMGR_VENV/bin/pip install --no-cache-dir -r $PKGMGR_REPO/requirements.txt ansible \
|
||||||
|
# drop a thin wrapper so `pkgmgr` always runs inside that venv
|
||||||
|
&& printf '#!/bin/sh\n. %s/bin/activate\nexec python %s/main.py "$@"\n' \
|
||||||
|
"$PKGMGR_VENV" "$PKGMGR_REPO" > /usr/local/bin/pkgmgr \
|
||||||
|
&& chmod +x /usr/local/bin/pkgmgr
|
||||||
|
|
||||||
|
# 5) Ensure pkgmgr venv bin and user-local bin are on PATH
|
||||||
|
ENV PATH="$PKGMGR_VENV/bin:/root/.local/bin:${PATH}"
|
||||||
|
|
||||||
|
# 6) Copy local Infinito.Nexus source into the image for override
|
||||||
|
COPY . /opt/infinito-src
|
||||||
|
|
||||||
|
# 7) Install Infinito.Nexus via pkgmgr (clone-mode https)
|
||||||
|
RUN pkgmgr install infinito --clone-mode https
|
||||||
|
|
||||||
|
# 8) Override installed Infinito.Nexus with local source and clean ignored files
|
||||||
|
RUN INFINITO_PATH=$(pkgmgr path infinito) && \
|
||||||
|
rm -rf "$INFINITO_PATH"/* && \
|
||||||
|
rsync -a --delete --exclude='.git' /opt/infinito-src/ "$INFINITO_PATH"/
|
||||||
|
|
||||||
|
# 9) Symlink the infinito script into /usr/local/bin so ENTRYPOINT works
|
||||||
|
RUN INFINITO_PATH=$(pkgmgr path infinito) && \
|
||||||
|
ln -sf "$INFINITO_PATH"/main.py /usr/local/bin/infinito && \
|
||||||
|
chmod +x /usr/local/bin/infinito
|
||||||
|
|
||||||
|
# 10) Run integration tests
|
||||||
|
# This needed to be deactivated because it doesn't work with the GitHub workflow
|
||||||
|
#RUN INFINITO_PATH=$(pkgmgr path infinito) && \
|
||||||
|
# cd "$INFINITO_PATH" && \
|
||||||
|
# make test
|
||||||
|
|
||||||
|
ENTRYPOINT ["infinito"]
|
||||||
|
CMD ["--help"]
|
@@ -1,46 +0,0 @@
|
|||||||
# End User Applications
|
|
||||||
End User Applications provide a diverse suite of tools and software designed to enhance the computing experience for personal computer users, including those using desktops and laptops. These applications cover various aspects such as multimedia, productivity, virtualization, and more, catering to the everyday needs of end users.
|
|
||||||
|
|
||||||
## Common Applications
|
|
||||||
In addition to the specialized software found in this document, the [COMMON_APPLICATIONS.md](./COMMON_APPLICATIONS.md) offers a comprehensive range of functionalities that cater to both server and end-user needs. This section enhances system performance, security, and usability with a variety of tools and configurations suitable for diverse computing environments.
|
|
||||||
|
|
||||||
## Desktop
|
|
||||||
This category focuses on tools and configurations that enhance the desktop computing experience. It includes utilities to maintain system activity, and software for optimizing the desktop environment, ensuring a seamless and user-friendly interface for day-to-day computer usage.
|
|
||||||
- **[Caffeine](./roles/pc-caffeine/)**: Utility to keep your computer awake.
|
|
||||||
- **[Gnome](./roles/pc-gnome/)**: Installation and configuration of Gnome desktop environment.
|
|
||||||
|
|
||||||
## Entertainment
|
|
||||||
Geared towards leisure and entertainment, this section includes software for playing Blu-ray media, accessing a vast collection of music, and installing various computer games. It's designed to enrich your personal computing experience with multimedia enjoyment and gaming.
|
|
||||||
- **[Bluray Player Tools](./roles/pc-bluray-player-tools/)**: Software for playing Blu-ray media on personal computers.
|
|
||||||
- **[Spotify](./roles/pc-spotify/)**: Installation of Spotify for music streaming.
|
|
||||||
- **[Games](./roles/pc-games/)**: Installation of various computer games.
|
|
||||||
|
|
||||||
## Office
|
|
||||||
This segment caters to professional productivity needs. It encompasses a range of office-related software, from comprehensive office suites and video conferencing tools to cloud storage solutions, facilitating efficient and organized work in various office environments.
|
|
||||||
- **[LibreOffice](./roles/pc-libreoffice/)**: Installation of the LibreOffice suite.
|
|
||||||
- **[Office](./roles/pc-office/)**: Various office productivity tools.
|
|
||||||
- **[Video Conference](./roles/pc-video-conference/)**: Video conferencing software setup.
|
|
||||||
- **[Nextcloud Client](./roles/pc-nextcloud/)**: Client setup for Nextcloud cloud storage service.
|
|
||||||
- **[GnuCash](./roles/pc-gnucash/)**: Software to manage finances
|
|
||||||
- **[Jrnl](./roles/pc-jrnl/)**: CLI Journaling
|
|
||||||
|
|
||||||
## Anonymization
|
|
||||||
Focusing on privacy and security, the Anonymization section offers tools for secure file sharing and anonymous web browsing. It includes software solutions that prioritize user privacy, ensuring secure online activities and data protection.
|
|
||||||
- **[Qbittorrent](./roles/pc-qbittorrent/)**: Installation of qBittorrent for file sharing.
|
|
||||||
- **[Torbrowser](./roles/pc-torbrowser/)**: Installation of Tor Browser for anonymous browsing.
|
|
||||||
|
|
||||||
## Content Creation
|
|
||||||
Dedicated to creatives and content producers, this category provides tools essential for video streaming, recording, graphic design, and 3D modeling. It's tailored for those involved in digital content creation, offering the necessary software to bring creative projects to life.
|
|
||||||
- **[Streaming Tools](./roles/pc-streaming-tools/)**: Software for video streaming and recording.
|
|
||||||
- **[Designer Tools](./roles/pc-designer-tools/)**: Graphic design and 3D modeling software.
|
|
||||||
|
|
||||||
## Development Environment
|
|
||||||
Targets software developers with tools and environments for various programming languages and development needs.
|
|
||||||
- **[Developer Tools](./roles/pc-developer-tools/)**: Basic developer tools setup.
|
|
||||||
- **[Developer Tools for Arduino](./roles/pc-developer-tools-arduino/)**: Setup for Arduino development.
|
|
||||||
- **[Developer Tools for Bash](./roles/pc-developer-tools-bash/)**: Tools for Bash scripting.
|
|
||||||
- **[Developer Tools for Java](./roles/pc-developer-tools-java/)**: Java development environment setup.
|
|
||||||
- **[Developer Tools for PHP](./roles/pc-developer-tools-php/)**: PHP development environment setup.
|
|
||||||
- **[Developer Tools for Python](./roles/pc-developer-tools-python/)**: Python development environment setup.
|
|
||||||
- **[Virtual Box](./roles/pc-virtual-box/)**: VirtualBox setup for creating virtual machines.
|
|
||||||
- **[Network Analyze Tools](./roles/pc-network-analyze-tools/)**: Network analysis and troubleshooting utilities.
|
|
12
LICENSE.md
12
LICENSE.md
@@ -1,10 +1,12 @@
|
|||||||
# License Agreement
|
# License Agreement
|
||||||
|
|
||||||
## Definitions
|
## Infinito.Nexus NonCommercial License
|
||||||
- **"Software":** Refers to *"[CyMaIS - Cyber Master Infrastructure Solution](https://cymais.cloud/)"* and its associated source code.
|
|
||||||
|
### Definitions
|
||||||
|
- **"Software":** Refers to *"[Infinito.Nexus](https://infinito.nexus/)"* and its associated source code.
|
||||||
- **"Commercial Use":** Any use of the Software intended for direct or indirect financial gain, including but not limited to sales, rentals, or provision of services.
|
- **"Commercial Use":** Any use of the Software intended for direct or indirect financial gain, including but not limited to sales, rentals, or provision of services.
|
||||||
|
|
||||||
## Provisions
|
### Provisions
|
||||||
|
|
||||||
1. **Attribution of the Original Licensor:** In any distribution or publication of the Software or derivative works, the original licensor, *Kevin Veen-Birkenbach, Email: [license@veen.world](mailto:license@veen.world), Website: [https://www.veen.world/](https://www.veen.world/)* must be explicitly named.
|
1. **Attribution of the Original Licensor:** In any distribution or publication of the Software or derivative works, the original licensor, *Kevin Veen-Birkenbach, Email: [license@veen.world](mailto:license@veen.world), Website: [https://www.veen.world/](https://www.veen.world/)* must be explicitly named.
|
||||||
|
|
||||||
@@ -23,5 +25,5 @@
|
|||||||
|
|
||||||
7. **Ownership of Rights:** All rights, including copyright, trademark, and other forms of intellectual property related to the Software, belong exclusively to Kevin Veen-Birkenbach.
|
7. **Ownership of Rights:** All rights, including copyright, trademark, and other forms of intellectual property related to the Software, belong exclusively to Kevin Veen-Birkenbach.
|
||||||
|
|
||||||
## Consent
|
### Consent
|
||||||
By using, modifying, or distributing the Software, you agree to these terms.
|
By using, modifying, or distributing the Software, you agree to these terms.
|
85
Makefile
Normal file
85
Makefile
Normal file
@@ -0,0 +1,85 @@
|
|||||||
|
ROLES_DIR := ./roles
|
||||||
|
APPLICATIONS_OUT := ./group_vars/all/04_applications.yml
|
||||||
|
APPLICATIONS_SCRIPT := ./cli/build/defaults/applications.py
|
||||||
|
USERS_OUT := ./group_vars/all/03_users.yml
|
||||||
|
USERS_SCRIPT := ./cli/build/defaults/users.py
|
||||||
|
INCLUDES_SCRIPT := ./cli/build/role_include.py
|
||||||
|
|
||||||
|
INCLUDE_GROUPS := $(shell python3 main.py meta categories invokable -s "-" --no-signal | tr '\n' ' ')
|
||||||
|
|
||||||
|
# Directory where these include-files will be written
|
||||||
|
INCLUDES_OUT_DIR := ./tasks/groups
|
||||||
|
|
||||||
|
# Compute extra users as before
|
||||||
|
EXTRA_USERS := $(shell \
|
||||||
|
find $(ROLES_DIR) -maxdepth 1 -type d -printf '%f\n' \
|
||||||
|
| sed -E 's/.*-//' \
|
||||||
|
| grep -E -x '[a-z0-9]+' \
|
||||||
|
| sort -u \
|
||||||
|
| paste -sd, - \
|
||||||
|
)
|
||||||
|
|
||||||
|
.PHONY: build install test
|
||||||
|
|
||||||
|
clean-keep-logs:
|
||||||
|
@echo "🧹 Cleaning ignored files but keeping logs/…"
|
||||||
|
git clean -fdX -- ':!logs' ':!logs/**'
|
||||||
|
|
||||||
|
clean:
|
||||||
|
@echo "Removing ignored git files"
|
||||||
|
git clean -fdX
|
||||||
|
|
||||||
|
list:
|
||||||
|
@echo Generating the roles list
|
||||||
|
python3 main.py build roles_list
|
||||||
|
|
||||||
|
tree:
|
||||||
|
@echo Generating Tree
|
||||||
|
python3 main.py build tree -D 2 --no-signal
|
||||||
|
|
||||||
|
mig: list tree
|
||||||
|
@echo Creating meta data for meta infinity graph
|
||||||
|
|
||||||
|
dockerignore:
|
||||||
|
@echo Create dockerignore
|
||||||
|
cat .gitignore > .dockerignore
|
||||||
|
echo ".git" >> .dockerignore
|
||||||
|
|
||||||
|
messy-build: dockerignore
|
||||||
|
@echo "🔧 Generating users defaults → $(USERS_OUT)…"
|
||||||
|
python3 $(USERS_SCRIPT) \
|
||||||
|
--roles-dir $(ROLES_DIR) \
|
||||||
|
--output $(USERS_OUT) \
|
||||||
|
--extra-users "$(EXTRA_USERS)"
|
||||||
|
@echo "✅ Users defaults written to $(USERS_OUT)\n"
|
||||||
|
|
||||||
|
@echo "🔧 Generating applications defaults → $(APPLICATIONS_OUT)…"
|
||||||
|
python3 $(APPLICATIONS_SCRIPT) \
|
||||||
|
--roles-dir $(ROLES_DIR) \
|
||||||
|
--output-file $(APPLICATIONS_OUT)
|
||||||
|
@echo "✅ Applications defaults written to $(APPLICATIONS_OUT)\n"
|
||||||
|
|
||||||
|
@echo "🔧 Generating role-include files for each group…"
|
||||||
|
@mkdir -p $(INCLUDES_OUT_DIR)
|
||||||
|
@$(foreach grp,$(INCLUDE_GROUPS), \
|
||||||
|
out=$(INCLUDES_OUT_DIR)/$(grp)roles.yml; \
|
||||||
|
echo "→ Building $$out (pattern: '$(grp)')…"; \
|
||||||
|
python3 $(INCLUDES_SCRIPT) $(ROLES_DIR) \
|
||||||
|
-p $(grp) -o $$out; \
|
||||||
|
echo " ✅ $$out"; \
|
||||||
|
)
|
||||||
|
|
||||||
|
messy-test:
|
||||||
|
@echo "🧪 Running Python tests…"
|
||||||
|
PYTHONPATH=. python -m unittest discover -s tests
|
||||||
|
@echo "📑 Checking Ansible syntax…"
|
||||||
|
ansible-playbook playbook.yml --syntax-check
|
||||||
|
|
||||||
|
install: build
|
||||||
|
@echo "⚙️ Install complete."
|
||||||
|
|
||||||
|
build: clean messy-build
|
||||||
|
@echo "Full build with cleanup before was executed."
|
||||||
|
|
||||||
|
test: build messy-test
|
||||||
|
@echo "Full test with build before was executed."
|
110
README.md
110
README.md
@@ -1,80 +1,94 @@
|
|||||||
# CyMaIS - Cyber Master Infrastructure Solution
|
# Infinito.Nexus 🚀
|
||||||
|
|
||||||
<img src="https://cybermaster.space/wp-content/uploads/sites/7/2023/12/logo_cymais.png" width="300" style="float: right; margin-left: 10px;">
|
**🔐 One login. ♾️ Infinite application**
|
||||||
|
|
||||||
Welcome to CyMaIS (Cyber Master Infrastructure Solution), a transformative tool designed to redefine IT infrastructure setup for organizations and individuals alike.
|

|
||||||
|
---
|
||||||
|
|
||||||
At its core, CyMaIS leverages the power of Docker, Linux, and Ansible to offer a streamlined, automated solution for deploying and managing IT systems.
|
## What is Infinito.Nexus? 📌
|
||||||
|
|
||||||
Whether you're a small startup, a growing enterprise, or an individual seeking efficient IT management, CyMaIS provides a comprehensive suite of tools that cater to a wide range of needs. From simple system setups to complex server configurations and end-user PC management, CyMaIS simplifies the entire process.
|
**Infinito.Nexus** is an **automated, modular infrastructure framework** built on **Docker**, **Linux**, and **Ansible**, equally suited for cloud services, local server management, and desktop workstations. At its core lies a **web-based desktop with single sign-on**—backed by an **LDAP directory** and **OIDC**—granting **seamless access** to an almost limitless portfolio of self-hosted applications. It fully supports **ActivityPub applications** and is **Fediverse-compatible**, while integrated **monitoring**, **alerting**, **cleanup**, **self-healing**, **automated updates**, and **backup solutions** provide everything an organization needs to run at scale.
|
||||||
|
|
||||||
Our intuitive interface, coupled with in-depth documentation, makes it accessible to both tech-savvy users and those with limited IT experience.
|
| 📚 | 🔗 |
|
||||||
|
|---|---|
|
||||||
|
| 🌐 Try It Live | [](https://infinito.nexus) |
|
||||||
|
| 🔧 Request Your Setup | [](https://cybermaster.space) |
|
||||||
|
| 📖 About This Project | [](https://github.com/sponsors/kevinveenbirkenbach) [](https://github.com/kevinveenbirkenbach/infinito-nexus/actions/workflows/test-cli.yml) [](https://s.infinito.nexus/code) |
|
||||||
|
| ☕️ Support Us | [](https://www.patreon.com/c/kevinveenbirkenbach) [](https://buymeacoffee.com/kevinveenbirkenbach) [](https://s.veen.world/paypaldonate) [](https://github.com/sponsors/kevinveenbirkenbach) |
|
||||||
|
|
||||||
With CyMaIS, setting up a secure, scalable, and robust IT infrastructure is not just faster and easier, but also aligned with the best industry practices, ensuring that your organization stays ahead in the ever-evolving digital landscape.
|
---
|
||||||
|
|
||||||
## Vision
|
## Key Features 🎯
|
||||||
Our project is anchored in the vision of transforming IT infrastructure deployment into a seamless, secure, and scalable experience.
|
|
||||||
|
|
||||||
We are committed to developing a fully automated solution that enables businesses of any size and industry to set up a 100% secure and infinitely scalable IT infrastructure in just 24 hours.
|
* **Automated Deployment** 📦
|
||||||
|
Turn up servers and workstations in minutes with ready-made Ansible roles.
|
||||||
|
|
||||||
Leveraging the power of Open Source, our tool not only promises to uphold the highest standards of security and adaptability but also embodies a commitment to transparency and community-driven innovation.
|
* **Enterprise-Grade Security** 🔒
|
||||||
|
Centralized user management via LDAP & OIDC (Keycloak), plus optional 2FA and encrypted storage.
|
||||||
|
|
||||||
This is not just a step towards simplifying IT management – it's a leap towards democratizing access to advanced technology, ensuring every business can quickly adapt and thrive in the digital age.
|
* **Modular Scalability** 📈
|
||||||
|
Grow from small teams to global enterprises by composing only the services you need.
|
||||||
|
|
||||||
For a deeper understanding of our goals and the ethos driving our project, we invite you to explore our detailed **[Vision Statement](./VISION_STATEMENT.md)**. Here, you'll find the cornerstone principles that guide our development process and our commitment to making a lasting impact in the realm of IT infrastructure.
|
* **Fediverse & ActivityPub Support** 🌐
|
||||||
|
Seamlessly integrate Mastodon, Peertube, Matrix and other ActivityPub apps out of the box.
|
||||||
|
|
||||||
## Solutions Overview
|
* **Self-Healing & Maintenance** ⚙️
|
||||||
|
Automated cleanup, container healing, and auto-updates keep infrastructure healthy without human intervention.
|
||||||
|
|
||||||
To help you navigate through our repository, we have categorized our extensive range of tools and solutions into three key areas:
|
* **Monitoring, Alerting & Analytics** 📊
|
||||||
|
Built-in system, application, and security monitoring with multi-channel notifications.
|
||||||
|
|
||||||
1. **[Server Applications](./SERVER_APPLICATIONS.md)**: Detailed information on server-focused tools and configurations, ideal for managing and optimizing server environments.
|
* **Backup & Disaster Recovery** 💾
|
||||||
|
Scheduled backups and scripted recovery processes to safeguard your data.
|
||||||
2. **[End User Applications](./END_USER_APPLICATIONS.md)**: A guide to applications and tools specifically designed for end-user PCs, enhancing personal computing experience.
|
|
||||||
|
|
||||||
3. **[Common Applications](./COMMON_APPLICATIONS.md)**: A comprehensive list of tools and applications that are versatile and useful across both server and end-user environments.
|
|
||||||
|
|
||||||
Each of these documents provides a tailored overview, ensuring you can find the right tools and information relevant to your specific needs, whether for server management, personal computing, or general IT infrastructure.
|
* **Continuous Updates** 🔄
|
||||||
|
Automatic patching and version upgrades across the stack.
|
||||||
|
|
||||||
## Key Benefits of CyMaIS for Your Business
|
* **Application Ecosystem** 🚀
|
||||||
|
A curated suite of self-hosted apps—from **project management**, **version control**, and **CI/CD** to **chat**, **video conferencing**, **CMS**, **e-learning**, **social networking**, and **e-commerce**—all seamlessly integrated.
|
||||||
|
|
||||||
**CyMaIS - Cyber Master Infrastructure Solution** revolutionizes IT infrastructure management, making it simpler, safer, and more adaptable for businesses of all sizes. Here's how it can benefit your organization:
|
More informations about the features you will find [here](docs/overview/Features.md).
|
||||||
|
|
||||||
1. **Effortless Setup and Management**: CyMaIS makes setting up and managing IT systems a breeze. Whether you're using Linux servers or personal computers, our tool automates the process, saving you time and effort.
|
---
|
||||||
|
|
||||||
2. **Everything You Need in One Place**: From the basics of system setup to advanced features like VPN and Docker, CyMaIS provides a complete range of tools. It's like having an IT expert at your fingertips, offering solutions for every need.
|
## Get Started 🚀
|
||||||
|
|
||||||
3. **Tailored to Your Business**: We understand that every business is unique. That's why CyMaIS is designed to be flexible, with customizable options to fit your specific requirements, whether you're a start-up, a growing business, or an established enterprise.
|
### Use it online 🌐
|
||||||
|
|
||||||
4. **Stay Ahead with Proactive Monitoring**: Our tool doesn't just set up your IT infrastructure; it keeps it running smoothly. With automated updates and proactive monitoring, you can rest assured that your systems are always up-to-date and performing optimally.
|
Try [Infinito.Nexus](https://infinito.nexus) – sign up in seconds, explore the platform, and discover what our solution can do for you! 🚀🔧✨
|
||||||
|
|
||||||
5. **Uncompromised Security and Reliability**: Protecting your data is our top priority. CyMaIS comes with robust security features and comprehensive backup solutions, giving you peace of mind that your business's sensitive information is safe and secure.
|
### Install locally 💻
|
||||||
|
1. **Install Infinito.Nexus** via [Kevin's Package Manager](https://github.com/kevinveenbirkenbach/package-manager)
|
||||||
|
2. **Setup Infinito.Nexus** using:
|
||||||
|
```sh
|
||||||
|
pkgmgr install infinito
|
||||||
|
```
|
||||||
|
3. **Explore Commands** with:
|
||||||
|
```sh
|
||||||
|
infinito --help
|
||||||
|
```
|
||||||
|
---
|
||||||
|
|
||||||
6. **User-Friendly with Expert Support**: While familiarity with Docker, Linux, and Ansible enhances your experience with CyMaIS, it's not a requirement. Our comprehensive roles for servers and end-user PCs simplify the setup process. With these intuitive tools and our detailed guides, managing your IT infrastructure becomes more accessible, even if you're not a seasoned IT professional. Plus, our support team is always ready to assist you, bridging any knowledge gaps and ensuring a smooth operation of your systems.
|
### Setup with Docker🚢
|
||||||
|
|
||||||
7. **Open Source Trust and Transparency**: With CyMaIS, you benefit from the reliability and security of open-source software. Our tool is transparent, community-driven, and aligned with the highest standards of software ethics and security.
|
Get Infinito.Nexus up and running inside Docker in just a few steps. For detailed build options and troubleshooting, see the [Docker Guide](docs/Docker.md).
|
||||||
|
|
||||||
CyMaIS is more than just an IT solution; it's a commitment to empowering your business with the technology it needs to thrive in today’s digital landscape, effortlessly and securely.
|
```bash
|
||||||
|
# 1. Build the Docker image: the Docker image:
|
||||||
|
docker build -t infinito:latest .
|
||||||
|
|
||||||
## Professional CyMaIS Implementation
|
# 2. Run the CLI interactively:
|
||||||
<img src="https://cybermaster.space/wp-content/uploads/sites/7/2023/11/FVG_8364BW-scaled.jpg" width="300" style="float: right; margin-left: 30px;">
|
docker run --rm -it infinito:latest infinito --help
|
||||||
|
```
|
||||||
|
|
||||||
My name is Kevin Veen-Birkenbach and I'm glad to assist you in the implementation of your secure and scalable IT infrastrucutre solution with CyMaIS.
|
---
|
||||||
|
|
||||||
My expertise in server administration, digital corporate infrastructure, custom software, and information security, all underpinned by a commitment to Open Source solutions, guarantees that your IT setup meets the highest industry standards.
|
## License ⚖️
|
||||||
|
|
||||||
Discover how CyMaIS can transform your IT landscape.
|
Infinito.Nexus is distributed under the **Infinito.Nexus NonCommercial License**. Please see [LICENSE.md](LICENSE.md) for full terms.
|
||||||
|
|
||||||
Contact me for more details:
|
---
|
||||||
|
|
||||||
🌍 Website: [www.CyberMaster.Space](https://cybermaster.space)<br />
|
## Professional Setup & Support 💼
|
||||||
📧 Email: [kevin@veen.world](mailto:kevin@veen.world)<br />
|
|
||||||
☎️ Phone: [+ 49 178 179 80 23](tel:00491781798023)
|
|
||||||
|
|
||||||
## Showcases
|
For expert installation and configuration visit [cybermaster.space](https://cybermaster.space/) or write to us at **[contact@cymais.cloud](mailto:contact@cymais.cloud)**.
|
||||||
The following list showcases the extensive range of solutions that CyMaIS incorporates, each playing a vital role in providing a comprehensive, efficient, and secure IT infrastructure setup:
|
|
||||||
|
|
||||||
[ELK Stack](./roles/docker-elk), [Intel Driver](./roles/driver-intel), [Nginx Docker Reverse Proxy](./roles/nginx-docker-reverse-proxy), [Sudo](./roles/sudo), [Funkwhale](./roles/docker-funkwhale), [MSI Keyboard Color Driver](./roles/driver-msi-keyboard-color), [Nginx Domain Redirect](./roles/nginx-domain-redirect), [GnuCash](./roles/pc-gnucash), [Backup Data to USB](./roles/backup-data-to-usb), [Gitea](./roles/docker-gitea), [Non-Free Driver](./roles/driver-non-free), [Nginx Homepage](./roles/nginx-homepage), [Jrnl](./roles/pc-jrnl), [Systemd Notifier](./roles/systemd-notifier), [Backup Docker to Local](./roles/backup-docker-to-local), [Jenkins](./roles/docker-jenkins), [Git](./roles/git), [Nginx HTTPS](./roles/nginx-https), [Latex](./roles/pc-latex), [Email Notifier](./roles/systemd-notifier-email), [Remote to Local Backup Solution](./roles/backup-remote-to-local), [Joomla](./roles/docker-joomla), [Heal Defect Docker Installations](./roles/heal-docker), [Nginx Matomo Tracking](./roles/nginx-matomo-tracking), [LibreOffice](./roles/pc-libreoffice), [Telegram Notifier](./roles/systemd-notifier-telegram), [Listmonk](./roles/docker-listmonk), [Btrfs Health Check](./roles/health-btrfs), [Nginx WWW Redirect](./roles/nginx-www-redirect), [Network Analyze Tools](./roles/pc-network-analyze-tools), [System Security](./roles/system-security), [Mailu](./roles/docker-mailu), [Disc Space Health Check](./roles/health-disc-space), [Administrator Tools](./roles/pc-administrator-tools), [Nextcloud Client](./roles/pc-nextcloud), [Swapfile Setup](./roles/system-swapfile), [Backups Cleanup](./roles/cleanup-backups-service), [Mastodon](./roles/docker-mastodon), [Docker Container Health Checker](./roles/health-docker-container), [Blu-ray Player Tools](./roles/pc-bluray-player-tools), [Office](./roles/pc-office), [Update Solutions](./roles/update), [Matomo](./roles/docker-matomo), [Docker Volumes Health Checker](./roles/health-docker-volumes), [Caffeine](./roles/pc-caffeine), 
[Qbittorrent](./roles/pc-qbittorrent), [Update Apt](./roles/update-apt), [Disc Space Cleanup](./roles/cleanup-disc-space), [Matrix](./roles/docker-matrix), [Health Journalctl](./roles/health-journalctl), [Designer Tools](./roles/pc-designer-tools), [Security Tools](./roles/pc-security-tools), [Update Docker](./roles/update-docker), [Failed Docker Backups Cleanup](./roles/cleanup-failed-docker-backups), [MediaWiki](./roles/docker-mediawiki), [Nginx Health Checker](./roles/health-nginx), [Developer Tools](./roles/pc-developer-tools), [Spotify](./roles/pc-spotify), [Update Pacman](./roles/update-pacman), [Client Wireguard](./roles/client-wireguard), [MyBB](./roles/docker-mybb), [Developer Tools for Arduino](./roles/pc-developer-tools-arduino), [SSH](./roles/pc-ssh), [Update Yay](./roles/update-yay), [Client Setup for Wireguard Behind Firewall](./roles/client-wireguard-behind-firewall), [Nextcloud Server](./roles/docker-nextcloud), [Hunspell](./roles/hunspell), [Developer Tools for Bash](./roles/pc-developer-tools-bash), [Streaming Tools](./roles/pc-streaming-tools), [Administrator](./roles/user-administrator), [Docker](./roles/docker), [Peertube](./roles/docker-peertube), [Java](./roles/java), [Developer Tools for Java](./roles/pc-developer-tools-java), [Tor Browser](./roles/pc-torbrowser), [Video Conference](./roles/pc-video-conference), [Wireguard](./roles/wireguard), [Akaunting](./roles/docker-akaunting), [Pixelfed](./roles/docker-pixelfed), [Journalctl](./roles/journalctl), [Developer Tools for PHP](./roles/pc-developer-tools-php), [Virtual Box](./roles/pc-virtual-box), [Postfix](./roles/postfix), [Attendize](./roles/docker-attendize), [Wordpress](./roles/docker-wordpress), [Locales](./roles/locales), [Docker for End Users](./roles/pc-docker), [Games](./roles/pc-games), [Python Pip](./roles/python-pip), [Discourse](./roles/docker-discourse), [Epson Multiprinter Driver](./roles/driver-epson-multiprinter), [Nginx Certbot](./roles/nginx-certbot), 
[Git](./roles/pc-git), [SSHD](./roles/sshd), [YOURLS](./roles/docker-yourls), [BigBlueButton](./roles/docker-bigbluebutton),[System Maintenance Lock](./roles/system-maintenance-lock),[Open Project](./roles/docker-openproject)...
|
|
||||||
|
|
||||||
## License
|
|
||||||
|
|
||||||
This project is licensed from Kevin Veen-Birkenbach. The full license is available in the [LICENSE.md](./LICENSE.md) of this repository.
|
|
||||||
|
@@ -1,96 +0,0 @@
|
|||||||
# Server Applications
|
|
||||||
Server applications encompass a wide array of functionalities designed to enhance the performance, reliability, and usability of server infrastructures. These applications are essential for maintaining server health, managing web services, facilitating containerization, and providing various tools for specific server needs.
|
|
||||||
|
|
||||||
## Common Applications
|
|
||||||
For a detailed overview of the broad spectrum of server applications, including base setup, administration tools, update mechanisms, driver installations, security enhancements, VPN configurations, notifier services, backup solutions, and other essential tools and systems, please refer to the **[COMMON_APPLICATIONS.md](./COMMON_APPLICATIONS.md)**. This document provides insights into categories and specific roles catered to both server and end-user environments, ensuring comprehensive server management and optimization.
|
|
||||||
|
|
||||||
## Server Health
|
|
||||||
Addresses server maintenance and health monitoring, ensuring optimal performance and reliability of the server infrastructure.
|
|
||||||
- **[Health Btrfs](./roles/health-btrfs/)**: Monitors the health of Btrfs filesystems.
|
|
||||||
- **[Health Disc Space](./roles/health-disc-space/)**: Checks for available disk space.
|
|
||||||
- **[Health Docker Container](./roles/health-docker-container/)**: Monitors the health of Docker containers.
|
|
||||||
- **[Health Docker Volumes](./roles/health-docker-volumes/)**: Checks the status of Docker volumes.
|
|
||||||
- **[Health Journalctl](./roles/health-journalctl/)**: Monitors and manages the system journal.
|
|
||||||
- **[Health Nginx](./roles/health-nginx/)**: Ensures the Nginx server is running smoothly.
|
|
||||||
- **[Heal Docker](./roles/heal-docker/)**: Automated healing and maintenance tasks for Docker.
|
|
||||||
|
|
||||||
## Webserver
|
|
||||||
Focuses on web server roles and applications, covering SSL certificates, Nginx configurations, reverse proxies, and email services.
|
|
||||||
- **[Letsencrypt](./roles/letsencrypt/)**: Configures Let's Encrypt for SSL certificates.
|
|
||||||
- **[Nginx](./roles/nginx/)**: Installs and configures Nginx web server.
|
|
||||||
- **[Nginx-Docker-Reverse-Proxy](./roles/nginx-docker-reverse-proxy/)**: Sets up a reverse proxy for Docker containers.
|
|
||||||
- **[Nginx-Homepage](./roles/nginx-homepage/)**: Configures a homepage for Nginx.
|
|
||||||
- **[Nginx-Https](./roles/nginx-https/)**: Enables HTTPS configuration for Nginx.
|
|
||||||
- **[Nginx-Matomo-Tracking](./roles/nginx-matomo-tracking/)**: Integrates Matomo tracking with Nginx.
|
|
||||||
- **[Nginx-Domain-Redirect](./roles/nginx-domain-redirect/)**: Manages URL redirects in Nginx.
|
|
||||||
- **[Nginx-WWW-Redirect](./roles/nginx-www-redirect/)**: Redirects all domains with the prefix www. from www.domain.tld to domain.tld
|
|
||||||
- **[Nginx-Certbot](./roles/nginx-certbot/)**: Integrates Certbot with Nginx for SSL certificates.
|
|
||||||
- **[Postfix](./roles/postfix/)**: Setup for the Postfix mail transfer agent.
|
|
||||||
|
|
||||||
## Docker and Containerization
|
|
||||||
Dedicated to Docker container setups and application management, offering a wide array of software deployment options.
|
|
||||||
- **[Docker](./roles/docker/)**: Basic Docker and Docker Compose setup.
|
|
||||||
|
|
||||||
### Finance and Project Management
|
|
||||||
Facilitating the deployment of finance-related and project management applications.
|
|
||||||
- **[Docker Akaunting](./roles/docker-akaunting/)**: Deployment of the Akaunting finance software.
|
|
||||||
- **[Open Project](./roles/docker-openproject)**: Project Management Software
|
|
||||||
- **[Taiga](./roles/docker-taiga)**: Scrum and Kanban Software
|
|
||||||
|
|
||||||
### Continues Integration and Continues Delivery
|
|
||||||
Setups for development platforms and version control systems.
|
|
||||||
- **[Gitea](./roles/docker-gitea/)**: Setup for the Gitea git server.
|
|
||||||
- **[Jenkins](./roles/docker-jenkins/)**: Jenkins automation server setup.
|
|
||||||
- **[ELK](./roles/docker-elk/)**: Elasticsearch, Logstash, and Kibana (ELK) stack setup.
|
|
||||||
|
|
||||||
### Content Management
|
|
||||||
Deployment of various content management systems for web platforms.
|
|
||||||
- **[Wordpress](./roles/docker-wordpress/)**: Wordpress blog and website platform setup.
|
|
||||||
- **[Joomla](./roles/docker-joomla/)**: Joomla content management system setup.
|
|
||||||
|
|
||||||
### Fediverse Networks
|
|
||||||
Implementing federated and decentralized social platforms.
|
|
||||||
- **[Funkwhale](./roles/docker-funkwhale/)**: Deployment of Funkwhale, a federated music streaming server.
|
|
||||||
- **[Mastodon](./roles/docker-mastodon/)**: Deployment of the Mastodon social network server.
|
|
||||||
- **[Peertube](./roles/docker-peertube/)**: Deployment of the PeerTube video platform.
|
|
||||||
- **[Pixelfed](./roles/docker-pixelfed/)**: Pixelfed, a federated image sharing platform, setup.
|
|
||||||
|
|
||||||
### Analytics Solutions
|
|
||||||
Tools for web and data analytics.
|
|
||||||
- **[Matomo](./roles/docker-matomo/)**: Setup for Matomo, an open-source analytics platform.
|
|
||||||
|
|
||||||
### Forum Software
|
|
||||||
Deployments for community-driven forum platforms.
|
|
||||||
- **[MyBB](./roles/docker-mybb/)**: Setup for MyBB forum software.
|
|
||||||
- **[Discourse](./roles/docker-discourse/)**: Setup of Discouse a forum and community platform.
|
|
||||||
|
|
||||||
### Wiki and Documentation
|
|
||||||
Setting up platforms for collaborative information sharing.
|
|
||||||
- **[MediaWiki](./roles/docker-mediawiki/)**: MediaWiki setup for creating wikis.
|
|
||||||
|
|
||||||
### Event and Shop Management
|
|
||||||
Tools for managing events and online retail.
|
|
||||||
- **[Attendize](./roles/docker-attendize/)**: Setup for the Attendize event management tool.
|
|
||||||
|
|
||||||
### Data and Cloud Storage
|
|
||||||
Solutions for data management and cloud-based storage.
|
|
||||||
- **[Baserow](./roles/docker-baserow/)**: Deployment of Baserow, an open-source no-code database tool.
|
|
||||||
- **[Nextcloud](./roles/docker-nextcloud/)**: Cloud storage solution setup.
|
|
||||||
|
|
||||||
### Communication and Collaboration
|
|
||||||
Platffor enhancing communication and collaborative efforts.
|
|
||||||
- **[BigBlueButton](./roles/docker-bigbluebutton/)**: Setup for the BigBlueButton video conferencing tool.
|
|
||||||
- **[Mailu](./roles/docker-mailu/)**: Complete mail server solution.
|
|
||||||
- **[Matrix](./roles/docker-matrix/)**: Setup and deployment of the Matrix server for secure, decentralized communication.
|
|
||||||
|
|
||||||
### Marketing and Communication Tools
|
|
||||||
Focusing on tools that assist in communication, marketing, and outreach efforts.
|
|
||||||
- **[Listmonk](./roles/docker-listmonk/)**: Setup for Listmonk, a self-hosted newsletter and mailing list manager.
|
|
||||||
|
|
||||||
### Web Utilities and Services
|
|
||||||
Encompassing tools that enhance web functionality or provide essential web services.
|
|
||||||
- **[YOURLS](./roles/docker-yourls/)**: Setup for YOURLS, a URL shortening service.
|
|
||||||
|
|
||||||
### Miscellaneous
|
|
||||||
Diverse tools for specific needs and utilities.
|
|
||||||
- **[Roulette Wheel](./roles/docker-roulette-wheel/)**: Setup for a custom roulette wheel application.
|
|
5
TODO.md
Normal file
5
TODO.md
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
# Todos
|
||||||
|
- Implement multi language
|
||||||
|
- Implement rbac administration interface
|
||||||
|
- Implement ``MASK_CREDENTIALS_IN_LOGS`` for all sensible tasks
|
||||||
|
- [Enable IP6 for docker](https://chatgpt.com/share/68a0acb8-db20-800f-9d2c-b34e38b5cdee).
|
@@ -1,17 +0,0 @@
|
|||||||
# Vision Statement
|
|
||||||
|
|
||||||
At the heart of our endeavor lies the creation of an unparalleled tool, designed to revolutionize the way IT infrastructure is deployed and managed in businesses of all scales and across various industries. Our vision is to develop a fully automated solution capable of establishing a 100% secure and infinitely scalable corporate IT infrastructure.
|
|
||||||
|
|
||||||
This tool, grounded firmly in Open Source principles, will not only champion transparency and innovation but also ensure adaptability and accessibility for every business, regardless of its size or industry. We aim to make the complex process of IT setup not just simpler but also faster – achieving full deployment within an audacious timeframe of 24 hours.
|
|
||||||
|
|
||||||
We envision a future where businesses are no longer constrained by the complexities of IT infrastructure setup. Instead, they will be empowered with a tool that seamlessly integrates into their operational fabric, offering a robust, secure, and scalable digital backbone. This tool will not only cater to the immediate IT needs of a company but also be agile enough to evolve with their growing demands and the ever-changing technological landscape.
|
|
||||||
|
|
||||||
Our commitment is to break down barriers to advanced IT infrastructure, democratizing access to high-level technology solutions. By harnessing the power of Open Source, our solution will not only uphold the highest standards of security and scalability but also foster a community-driven approach to continuous improvement and innovation.
|
|
||||||
|
|
||||||
In essence, our vision is to redefine the paradigm of IT infrastructure deployment, making it a swift, secure, and scalable journey for every business, and setting a new benchmark in the industry for efficiency and reliability.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
Kevin Veen-Birkenbach
|
|
||||||
Berlin
|
|
||||||
2023-12-13
|
|
0
__init__.py
Normal file
0
__init__.py
Normal file
33
ansible.cfg
Normal file
33
ansible.cfg
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
[defaults]
|
||||||
|
# --- Performance & Behavior ---
|
||||||
|
forks = 25
|
||||||
|
strategy = linear
|
||||||
|
gathering = smart
|
||||||
|
timeout = 120
|
||||||
|
retry_files_enabled = False
|
||||||
|
host_key_checking = True
|
||||||
|
deprecation_warnings = True
|
||||||
|
interpreter_python = auto_silent
|
||||||
|
|
||||||
|
# --- Output & Profiling ---
|
||||||
|
stdout_callback = yaml
|
||||||
|
callbacks_enabled = profile_tasks,timer
|
||||||
|
|
||||||
|
# --- Plugin paths ---
|
||||||
|
filter_plugins = ./filter_plugins
|
||||||
|
lookup_plugins = ./lookup_plugins
|
||||||
|
module_utils = ./module_utils
|
||||||
|
|
||||||
|
[ssh_connection]
|
||||||
|
# Multiplexing: safer socket path in HOME instead of /tmp
|
||||||
|
ssh_args = -o ControlMaster=auto -o ControlPersist=20s -o ControlPath=~/.ssh/ansible-%h-%p-%r \
|
||||||
|
-o ServerAliveInterval=15 -o ServerAliveCountMax=3 -o StrictHostKeyChecking=accept-new \
|
||||||
|
-o PreferredAuthentications=publickey,password,keyboard-interactive
|
||||||
|
|
||||||
|
# Pipelining boosts speed; works fine if sudoers does not enforce "requiretty"
|
||||||
|
pipelining = True
|
||||||
|
scp_if_ssh = smart
|
||||||
|
|
||||||
|
[persistent_connection]
|
||||||
|
connect_timeout = 30
|
||||||
|
command_timeout = 60
|
BIN
assets/img/favicon.ico
Normal file
BIN
assets/img/favicon.ico
Normal file
Binary file not shown.
After Width: | Height: | Size: 157 KiB |
BIN
assets/img/logo.png
Normal file
BIN
assets/img/logo.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 1015 KiB |
3
cli/TODO.md
Normal file
3
cli/TODO.md
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
# Todo
|
||||||
|
- Test this script. It's just a draft. Checkout https://chatgpt.com/c/681d9e2b-7b28-800f-aef8-4f1427e9021d
|
||||||
|
- Solve bugs in show_vault_variables.py
|
0
cli/__init__.py
Normal file
0
cli/__init__.py
Normal file
0
cli/build/__init__.py
Normal file
0
cli/build/__init__.py
Normal file
0
cli/build/defaults/__init__.py
Normal file
0
cli/build/defaults/__init__.py
Normal file
110
cli/build/defaults/applications.py
Normal file
110
cli/build/defaults/applications.py
Normal file
@@ -0,0 +1,110 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
import argparse
|
||||||
|
import yaml
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
# Ensure project root on PYTHONPATH so module_utils is importable
|
||||||
|
repo_root = Path(__file__).resolve().parent.parent.parent.parent
|
||||||
|
sys.path.insert(0, str(repo_root))
|
||||||
|
|
||||||
|
# Add lookup_plugins for application_gid
|
||||||
|
plugin_path = repo_root / "lookup_plugins"
|
||||||
|
sys.path.insert(0, str(plugin_path))
|
||||||
|
|
||||||
|
from module_utils.dict_renderer import DictRenderer
|
||||||
|
from application_gid import LookupModule
|
||||||
|
|
||||||
|
def load_yaml_file(path: Path) -> dict:
|
||||||
|
if not path.exists():
|
||||||
|
return {}
|
||||||
|
with path.open("r", encoding="utf-8") as f:
|
||||||
|
return yaml.safe_load(f) or {}
|
||||||
|
|
||||||
|
class DefaultsGenerator:
|
||||||
|
def __init__(self, roles_dir: Path, output_file: Path, verbose: bool, timeout: float):
|
||||||
|
self.roles_dir = roles_dir
|
||||||
|
self.output_file = output_file
|
||||||
|
self.verbose = verbose
|
||||||
|
self.renderer = DictRenderer(verbose=verbose, timeout=timeout)
|
||||||
|
self.gid_lookup = LookupModule()
|
||||||
|
|
||||||
|
def log(self, message: str):
|
||||||
|
if self.verbose:
|
||||||
|
print(f"[DefaultsGenerator] {message}")
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
result = {"defaults_applications": {}}
|
||||||
|
|
||||||
|
for role_dir in sorted(self.roles_dir.iterdir()):
|
||||||
|
role_name = role_dir.name
|
||||||
|
vars_main = role_dir / "vars" / "main.yml"
|
||||||
|
config_file = role_dir / "config" / "main.yml"
|
||||||
|
|
||||||
|
if not vars_main.exists():
|
||||||
|
self.log(f"Skipping {role_name}: vars/main.yml missing")
|
||||||
|
continue
|
||||||
|
|
||||||
|
vars_data = load_yaml_file(vars_main)
|
||||||
|
application_id = vars_data.get("application_id")
|
||||||
|
if not application_id:
|
||||||
|
self.log(f"Skipping {role_name}: application_id not defined")
|
||||||
|
continue
|
||||||
|
|
||||||
|
if not config_file.exists():
|
||||||
|
self.log(f"Config missing for {role_name}, adding empty defaults for '{application_id}'")
|
||||||
|
result["defaults_applications"][application_id] = {}
|
||||||
|
continue
|
||||||
|
|
||||||
|
config_data = load_yaml_file(config_file)
|
||||||
|
if config_data:
|
||||||
|
try:
|
||||||
|
gid_number = self.gid_lookup.run([application_id], roles_dir=str(self.roles_dir))[0]
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Warning: failed to determine gid for '{application_id}': {e}", file=sys.stderr)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
config_data["group_id"] = gid_number
|
||||||
|
result["defaults_applications"][application_id] = config_data
|
||||||
|
|
||||||
|
# Inject users mapping as Jinja2 references
|
||||||
|
users_meta = load_yaml_file(role_dir / "users" / "main.yml")
|
||||||
|
users_data = users_meta.get("users", {})
|
||||||
|
transformed = {user: f"{{{{ users[\"{user}\"] }}}}" for user in users_data}
|
||||||
|
if transformed:
|
||||||
|
result["defaults_applications"][application_id]["users"] = transformed
|
||||||
|
|
||||||
|
# Render placeholders in entire result context
|
||||||
|
self.log("Starting placeholder rendering...")
|
||||||
|
try:
|
||||||
|
result = self.renderer.render(result)
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Error during rendering: {e}", file=sys.stderr)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
# Write output
|
||||||
|
self.output_file.parent.mkdir(parents=True, exist_ok=True)
|
||||||
|
with self.output_file.open("w", encoding="utf-8") as f:
|
||||||
|
yaml.dump(result, f, sort_keys=False)
|
||||||
|
|
||||||
|
# Print location of generated file (absolute if not under cwd)
|
||||||
|
try:
|
||||||
|
rel = self.output_file.relative_to(Path.cwd())
|
||||||
|
except ValueError:
|
||||||
|
rel = self.output_file
|
||||||
|
print(f"✅ Generated: {rel}")
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
parser = argparse.ArgumentParser(description="Generate defaults_applications YAML...")
|
||||||
|
parser.add_argument("--roles-dir", default="roles", help="Path to the roles directory")
|
||||||
|
parser.add_argument("--output-file", required=True, help="Path to output YAML file")
|
||||||
|
parser.add_argument("--verbose", action="store_true", help="Enable verbose logging")
|
||||||
|
parser.add_argument("--timeout", type=float, default=10.0, help="Timeout for rendering")
|
||||||
|
|
||||||
|
args = parser.parse_args()
|
||||||
|
cwd = Path.cwd()
|
||||||
|
roles_dir = (cwd / args.roles_dir).resolve()
|
||||||
|
output_file = (cwd / args.output_file).resolve()
|
||||||
|
|
||||||
|
DefaultsGenerator(roles_dir, output_file, args.verbose, args.timeout).run()
|
241
cli/build/defaults/users.py
Normal file
241
cli/build/defaults/users.py
Normal file
@@ -0,0 +1,241 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import argparse
|
||||||
|
import yaml
|
||||||
|
import glob
|
||||||
|
from collections import OrderedDict
|
||||||
|
|
||||||
|
|
||||||
|
def represent_str(dumper, data):
    """
    Custom YAML string representer.

    Strings containing a Jinja2 placeholder ({{ ... }}) are forced into
    double-quoted style so the template expression survives YAML round-trips;
    every other string keeps the default scalar style.
    """
    quote_style = '"' if isinstance(data, str) and '{{' in data else None
    return dumper.represent_scalar('tag:yaml.org,2002:str', data, style=quote_style)
|
||||||
|
|
||||||
|
|
||||||
|
def build_users(defs, primary_domain, start_id, become_pwd):
    """
    Construct user entries with auto-incremented UID/GID, default username/email,
    and optional description.

    Args:
        defs (OrderedDict): Mapping of user keys to their override settings.
        primary_domain (str): The primary domain for email addresses (e.g. 'example.com').
        start_id (int): Starting number for UID/GID allocation (e.g. 1001).
        become_pwd (str): Default password string for users without an override.

    Returns:
        OrderedDict: Complete user definitions with all required fields filled in.

    Raises:
        ValueError: If there are duplicate UIDs, usernames, or emails.
    """
    users = OrderedDict()
    used_uids = set()

    # Collect any preset UIDs to avoid collisions.  This pass must run before
    # any auto-allocation so that allocate_uid() never hands out a UID that a
    # later entry has explicitly reserved.
    for key, overrides in defs.items():
        if 'uid' in overrides:
            uid = overrides['uid']
            if uid in used_uids:
                raise ValueError(f"Duplicate uid {uid} for user '{key}'")
            used_uids.add(uid)

    next_uid = start_id
    def allocate_uid():
        # Closure over next_uid/used_uids: hands out the lowest free UID >= start_id.
        nonlocal next_uid
        # Find the next free UID not already used
        while next_uid in used_uids:
            next_uid += 1
        free_uid = next_uid
        used_uids.add(free_uid)
        next_uid += 1
        return free_uid

    # Build each user entry
    for key, overrides in defs.items():
        username = overrides.get('username', key)
        email = overrides.get('email', f"{username}@{primary_domain}")
        description = overrides.get('description')
        roles = overrides.get('roles', [])
        password = overrides.get('password', become_pwd)

        # Determine UID and GID: explicit uid wins, otherwise auto-allocate;
        # gid defaults to the user's own uid.
        if 'uid' in overrides:
            uid = overrides['uid']
        else:
            uid = allocate_uid()
        gid = overrides.get('gid', uid)

        entry = {
            'username': username,
            'email': email,
            'password': password,
            'uid': uid,
            'gid': gid,
            'roles': roles
        }
        # 'description' is optional and only emitted when provided.
        if description is not None:
            entry['description'] = description

        users[key] = entry

    # Ensure uniqueness of usernames and emails
    seen_usernames = set()
    seen_emails = set()

    for key, entry in users.items():
        un = entry['username']
        em = entry['email']
        if un in seen_usernames:
            raise ValueError(f"Duplicate username '{un}' in merged users")
        if em in seen_emails:
            raise ValueError(f"Duplicate email '{em}' in merged users")
        seen_usernames.add(un)
        seen_emails.add(em)

    return users
|
||||||
|
|
||||||
|
|
||||||
|
def load_user_defs(roles_directory):
    """
    Scan all roles/*/users/main.yml files and merge any 'users:' sections.

    Args:
        roles_directory (str): Path to the directory containing role subdirectories.

    Returns:
        OrderedDict: Merged user definitions from all roles.

    Raises:
        ValueError: On invalid format or conflicting override values.
    """
    pattern = os.path.join(roles_directory, '*/users/main.yml')
    # sorted() makes the merge order deterministic across filesystems.
    files = sorted(glob.glob(pattern))
    merged = OrderedDict()

    for filepath in files:
        with open(filepath, 'r') as f:
            data = yaml.safe_load(f) or {}
        users = data.get('users', {})
        # A role may omit 'users' or define it as something other than a
        # mapping; such files are silently skipped.
        if not isinstance(users, dict):
            continue

        for key, overrides in users.items():
            if not isinstance(overrides, dict):
                raise ValueError(f"Invalid definition for user '{key}' in {filepath}")

            if key not in merged:
                merged[key] = overrides.copy()
            else:
                # Same user defined by several roles: fields may only be
                # repeated with identical values, never contradicted.
                existing = merged[key]
                for field, value in overrides.items():
                    if field in existing and existing[field] != value:
                        raise ValueError(
                            f"Conflict for user '{key}': field '{field}' has existing value '{existing[field]}', tried to set '{value}' in {filepath}"
                        )
                existing.update(overrides)

    return merged
|
||||||
|
|
||||||
|
|
||||||
|
def dictify(data):
    """
    Recursively convert mappings (including OrderedDict) to plain dicts and
    recurse into lists so the structure dumps as clean YAML.

    Scalars are returned unchanged.
    """
    # OrderedDict is a dict subclass, so a single isinstance(dict) check
    # covers both branches the original spelled out separately.
    if isinstance(data, dict):
        return {key: dictify(value) for key, value in data.items()}
    if isinstance(data, list):
        return [dictify(item) for item in data]
    return data
|
||||||
|
|
||||||
|
|
||||||
|
def parse_args():
    """Define and parse the command-line interface for the users.yml generator."""
    parser = argparse.ArgumentParser(
        description='Generate a users.yml by merging all roles/*/users/main.yml definitions.'
    )
    # Option table: (flags, keyword arguments) pairs, registered in order.
    option_specs = [
        (('--roles-dir', '-r'),
         dict(required=True,
              help='Directory containing roles (e.g., roles/*/users/main.yml).')),
        (('--output', '-o'),
         dict(required=True,
              help='Path to the output YAML file (e.g., users.yml).')),
        (('--start-id', '-s'),
         dict(type=int, default=1001,
              help='Starting UID/GID number (default: 1001).')),
        (('--extra-users', '-e'),
         dict(default=None,
              help='Comma-separated list of additional usernames to include.')),
    ]
    for flags, kwargs in option_specs:
        parser.add_argument(*flags, **kwargs)
    return parser.parse_args()
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Merge all per-role user definitions and write the aggregated users.yml."""
    args = parse_args()
    # These are Jinja2 placeholders resolved later by Ansible, not by this script.
    primary_domain = '{{ SYSTEM_EMAIL.DOMAIN }}'
    become_pwd = '{{ lookup("password", "/dev/null length=42 chars=ascii_letters,digits") }}'

    try:
        definitions = load_user_defs(args.roles_dir)
    except ValueError as e:
        print(f"Error merging user definitions: {e}", file=sys.stderr)
        sys.exit(1)

    # Add extra users if specified (bare keys with no overrides; all fields
    # are filled in with defaults by build_users).
    if args.extra_users:
        for name in args.extra_users.split(','):
            user_key = name.strip()
            if not user_key:
                continue
            if user_key in definitions:
                print(f"Warning: extra user '{user_key}' already defined; skipping.", file=sys.stderr)
            else:
                definitions[user_key] = {}

    try:
        users = build_users(
            definitions,
            primary_domain,
            args.start_id,
            become_pwd
        )
    except ValueError as e:
        print(f"Error building user entries: {e}", file=sys.stderr)
        sys.exit(1)

    # Convert OrderedDict into plain dict for YAML
    default_users = {'default_users': users}
    plain_data = dictify(default_users)

    # Register custom string representer so Jinja2 placeholders stay quoted.
    yaml.SafeDumper.add_representer(str, represent_str)

    # Dump the YAML file
    with open(args.output, 'w') as f:
        yaml.safe_dump(
            plain_data,
            f,
            default_flow_style=False,
            sort_keys=False,
            width=120
        )


if __name__ == '__main__':
    main()
|
173
cli/build/graph.py
Normal file
173
cli/build/graph.py
Normal file
@@ -0,0 +1,173 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
import os
|
||||||
|
import argparse
|
||||||
|
import yaml
|
||||||
|
import json
|
||||||
|
import re
|
||||||
|
from typing import List, Dict, Any, Set
|
||||||
|
|
||||||
|
|
||||||
|
# Matches any Jinja2 expression; task entries whose role name is templated
# are skipped during scanning because the target cannot be resolved statically.
JINJA_PATTERN = re.compile(r'{{.*}}')
# Relation types extracted from role metadata (run_after, dependencies) and
# from task files (include_/import_ tasks and roles).
ALL_DEP_TYPES = ['run_after', 'dependencies', 'include_tasks', 'import_tasks', 'include_role', 'import_role']
# Traversal directions: 'to' follows outgoing edges, 'from' finds incoming ones.
ALL_DIRECTIONS = ['to', 'from']
# Every (dep_type, direction) combination, e.g. 'run_after_to'.
# NOTE(review): loop variable 'dir' shadows the builtin; harmless here.
ALL_KEYS = [f"{dep}_{dir}" for dep in ALL_DEP_TYPES for dir in ALL_DIRECTIONS]
|
||||||
|
|
||||||
|
|
||||||
|
def find_role_meta(roles_dir: str, role: str) -> str:
    """Return the path to a role's meta/main.yml, raising if it does not exist."""
    meta_path = os.path.join(roles_dir, role, 'meta', 'main.yml')
    if os.path.isfile(meta_path):
        return meta_path
    raise FileNotFoundError(f"Metadata not found for role: {role}")
|
||||||
|
|
||||||
|
|
||||||
|
def find_role_tasks(roles_dir: str, role: str) -> str:
    """Return the path to a role's tasks/main.yml, raising if it does not exist."""
    tasks_path = os.path.join(roles_dir, role, 'tasks', 'main.yml')
    if os.path.isfile(tasks_path):
        return tasks_path
    raise FileNotFoundError(f"Tasks not found for role: {role}")
|
||||||
|
|
||||||
|
|
||||||
|
def load_meta(path: str) -> Dict[str, Any]:
    """
    Read a role's meta/main.yml and normalize the fields this tool needs.

    Returns a dict with 'galaxy_info' (mapping), 'run_after' (list), and
    'dependencies' (list); missing or null fields become empty containers.
    """
    with open(path, 'r') as fh:
        raw = yaml.safe_load(fh) or {}

    galaxy_info = raw.get('galaxy_info', {}) or {}
    return {
        'galaxy_info': galaxy_info,
        'run_after': galaxy_info.get('run_after', []) or [],
        'dependencies': raw.get('dependencies', []) or []
    }
|
||||||
|
|
||||||
|
|
||||||
|
def load_tasks(path: str, dep_type: str) -> List[str]:
    """
    Collect statically referenced names of *dep_type* entries from a tasks file.

    Dict-form entries contribute their 'name' field; empty names and
    Jinja2-templated (dynamic) references are skipped.
    """
    with open(path, 'r') as fh:
        tasks = yaml.safe_load(fh) or []

    referenced: List[str] = []
    for task in tasks:
        if dep_type not in task:
            continue
        target = task[dep_type]
        if isinstance(target, dict):
            target = target.get('name', '')
        # Only keep concrete names — templated targets cannot be resolved here.
        if target and not JINJA_PATTERN.search(target):
            referenced.append(target)

    return referenced
|
||||||
|
|
||||||
|
|
||||||
|
def build_single_graph(
    start_role: str,
    dep_type: str,
    direction: str,
    roles_dir: str,
    max_depth: int
) -> Dict[str, Any]:
    """
    Build one dependency graph for *start_role* along a single relation type.

    Args:
        start_role: role to start the traversal from.
        dep_type: one of ALL_DEP_TYPES ('run_after'/'dependencies' come from
            meta/main.yml, the include_/import_ types from tasks/main.yml).
        direction: 'to' follows outgoing references; 'from' scans every role
            in roles_dir for references back to the current role.
        roles_dir: directory containing all roles.
        max_depth: recursion limit; <= 0 means unlimited (cycles are still
            cut off via the *path* set).

    Returns:
        {'nodes': [node dicts with galaxy_info + doc/source URLs], 'links': [...]}.
    """
    nodes: Dict[str, Dict[str, Any]] = {}
    links: List[Dict[str, str]] = []

    def traverse(role: str, depth: int, path: Set[str]):
        # Register the node on first visit; galaxy_info fields are merged in.
        if role not in nodes:
            meta = load_meta(find_role_meta(roles_dir, role))
            node = {'id': role}
            node.update(meta['galaxy_info'])
            node['doc_url'] = f"https://docs.infinito.nexus/roles/{role}/README.html"
            node['source_url'] = f"https://s.infinito.nexus/code/tree/master/roles/{role}"
            nodes[role] = node

        if max_depth > 0 and depth >= max_depth:
            return

        # NOTE(review): meta is loaded again here even when the node block
        # above just loaded it — redundant I/O, but harmless.
        neighbors = []
        if dep_type in ['run_after', 'dependencies']:
            meta = load_meta(find_role_meta(roles_dir, role))
            neighbors = meta.get(dep_type, [])
        else:
            try:
                neighbors = load_tasks(find_role_tasks(roles_dir, role), dep_type)
            except FileNotFoundError:
                neighbors = []

        if direction == 'to':
            for tgt in neighbors:
                # Links are recorded even for back-edges; only recursion is cut.
                links.append({'source': role, 'target': tgt, 'type': dep_type})
                if tgt in path:
                    continue
                traverse(tgt, depth + 1, path | {tgt})

        else:  # direction == 'from'
            # Reverse lookup: scan every role for references to *role*.
            # NOTE(review): this rescans the whole roles dir at every depth
            # level — O(R * depth) file reads.
            for other in os.listdir(roles_dir):
                try:
                    other_neighbors = []
                    if dep_type in ['run_after', 'dependencies']:
                        meta_o = load_meta(find_role_meta(roles_dir, other))
                        other_neighbors = meta_o.get(dep_type, [])
                    else:
                        other_neighbors = load_tasks(find_role_tasks(roles_dir, other), dep_type)

                    if role in other_neighbors:
                        links.append({'source': other, 'target': role, 'type': dep_type})
                        if other in path:
                            continue
                        traverse(other, depth + 1, path | {other})

                except FileNotFoundError:
                    # Entries without meta/tasks files are not roles; skip them.
                    continue

    traverse(start_role, depth=0, path={start_role})
    return {'nodes': list(nodes.values()), 'links': links}
|
||||||
|
|
||||||
|
|
||||||
|
def build_mappings(
    start_role: str,
    roles_dir: str,
    max_depth: int
) -> Dict[str, Any]:
    """
    Build one graph per (dep_type, direction) combination for *start_role*.

    A failure for any single combination (missing metadata, unreadable files)
    yields an empty graph for that key instead of aborting the whole run.
    """
    graphs: Dict[str, Any] = {}
    for key in ALL_KEYS:
        dep_type, direction = key.rsplit('_', 1)
        try:
            graphs[key] = build_single_graph(start_role, dep_type, direction, roles_dir, max_depth)
        except Exception:
            graphs[key] = {'nodes': [], 'links': []}
    return graphs
|
||||||
|
|
||||||
|
|
||||||
|
def output_graph(graph_data: Any, fmt: str, start: str, key: str):
    """
    Emit one graph either to the console or to a file named <start>_<key>.<fmt>.

    Raises:
        ValueError: if *fmt* is not 'console', 'yaml', or 'json'.
    """
    base = f"{start}_{key}"

    if fmt == 'console':
        print(f"--- {base} ---")
        print(yaml.safe_dump(graph_data, sort_keys=False))
        return

    if fmt not in ('yaml', 'json'):
        raise ValueError(f"Unknown format: {fmt}")

    path = f"{base}.{fmt}"
    with open(path, 'w') as fh:
        if fmt == 'yaml':
            yaml.safe_dump(graph_data, fh, sort_keys=False)
        else:
            json.dump(graph_data, fh, indent=2)
    print(f"Wrote {path}")
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """CLI entry point: build every (dep_type, direction) graph for one role."""
    # Default roles dir is resolved relative to this script: ../../roles.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    default_roles_dir = os.path.abspath(os.path.join(script_dir, '..', '..', 'roles'))

    parser = argparse.ArgumentParser(description="Generate dependency graphs")
    parser.add_argument('-r', '--role', required=True, help="Starting role name")
    parser.add_argument('-D', '--depth', type=int, default=0, help="Max recursion depth")
    parser.add_argument('-o', '--output', choices=['yaml', 'json', 'console'], default='console')
    parser.add_argument('--roles-dir', default=default_roles_dir, help="Roles directory")

    args = parser.parse_args()

    graphs = build_mappings(args.role, args.roles_dir, args.depth)

    # Emit every combination, falling back to an empty graph if one is missing.
    for key in ALL_KEYS:
        graph_data = graphs.get(key, {'nodes': [], 'links': []})
        output_graph(graph_data, args.output, args.role, key)


if __name__ == '__main__':
    main()
|
0
cli/build/inventory/__init__.py
Normal file
0
cli/build/inventory/__init__.py
Normal file
127
cli/build/inventory/full.py
Normal file
127
cli/build/inventory/full.py
Normal file
@@ -0,0 +1,127 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
# cli/build/inventory/full.py
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
|
||||||
|
try:
|
||||||
|
from filter_plugins.get_all_invokable_apps import get_all_invokable_apps
|
||||||
|
except ImportError:
|
||||||
|
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..')))
|
||||||
|
from filter_plugins.get_all_invokable_apps import get_all_invokable_apps
|
||||||
|
|
||||||
|
import yaml
|
||||||
|
import json
|
||||||
|
|
||||||
|
def build_group_inventory(apps, host):
    """
    Build an Ansible inventory in which each application is a group containing the given host.
    """
    inventory = {
        "all": {
            "hosts": [host],
            "children": {app: {} for app in apps},
        },
    }
    # One top-level group per application, each holding just this host.
    for app in apps:
        inventory[app] = {"hosts": [host]}
    return inventory
|
||||||
|
|
||||||
|
def build_hostvar_inventory(apps, host):
    """
    Alternative: Build an inventory where all invokable apps are set as a host variable (as a list).
    """
    hostvars = {host: {"invokable_applications": apps}}
    return {
        "all": {
            "hosts": [host],
        },
        "_meta": {
            "hostvars": hostvars,
        },
    }
|
||||||
|
|
||||||
|
def main():
    """CLI entry point: build and emit the inventory in the selected style and format."""
    parser = argparse.ArgumentParser(
        description='Build a dynamic Ansible inventory for a given host with all invokable applications.'
    )
    parser.add_argument(
        '--host',
        required=True,
        help='Hostname to assign to all invokable application groups'
    )
    parser.add_argument(
        '-f', '--format',
        choices=['json', 'yaml'],
        default='yaml',
        help='Output format (yaml [default], json)'
    )
    parser.add_argument(
        '--inventory-style',
        choices=['group', 'hostvars'],
        default='group',
        help='Inventory style: group (default, one group per app) or hostvars (list as hostvar)'
    )
    # Defaults resolve relative to this script: ../../../roles at project root.
    parser.add_argument(
        '-c', '--categories-file',
        default=os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'roles', 'categories.yml')),
        help='Path to roles/categories.yml (default: roles/categories.yml at project root)'
    )
    parser.add_argument(
        '-r', '--roles-dir',
        default=os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'roles')),
        help='Path to roles/ directory (default: roles/ at project root)'
    )
    parser.add_argument(
        '-o', '--output',
        help='Write output to file instead of stdout'
    )
    parser.add_argument(
        '-i', '--ignore',
        action='append',
        default=[],
        help='Application ID(s) to ignore (can be specified multiple times or comma-separated)'
    )
    args = parser.parse_args()

    try:
        apps = get_all_invokable_apps(
            categories_file=args.categories_file,
            roles_dir=args.roles_dir
        )
    except Exception as e:
        sys.stderr.write(f"Error: {e}\n")
        sys.exit(1)

    # Combine all ignore arguments into a flat set (supports both repeated
    # flags and comma-separated values).
    ignore_ids = set()
    for entry in args.ignore:
        ignore_ids.update(i.strip() for i in entry.split(',') if i.strip())

    if ignore_ids:
        apps = [app for app in apps if app not in ignore_ids]

    # Build the requested inventory style
    if args.inventory_style == 'group':
        inventory = build_group_inventory(apps, args.host)
    else:
        inventory = build_hostvar_inventory(apps, args.host)

    # Output in the chosen format
    if args.format == 'json':
        output = json.dumps(inventory, indent=2)
    else:
        output = yaml.safe_dump(inventory, default_flow_style=False)

    if args.output:
        with open(args.output, 'w') as f:
            f.write(output)
    else:
        print(output)


if __name__ == '__main__':
    main()
|
224
cli/build/role_include.py
Normal file
224
cli/build/role_include.py
Normal file
@@ -0,0 +1,224 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import yaml
|
||||||
|
import argparse
|
||||||
|
from collections import defaultdict, deque
|
||||||
|
|
||||||
|
def find_roles(roles_dir, prefixes=None):
    """
    Yield (role_path, meta_file) for every role under *roles_dir* whose name
    starts with one of *prefixes*; with no prefixes, every role qualifies.

    Only directories that contain meta/main.yml are considered roles.
    """
    for entry in os.listdir(roles_dir):
        if prefixes and not any(entry.startswith(p) for p in prefixes):
            continue
        role_path = os.path.join(roles_dir, entry)
        meta_path = os.path.join(role_path, 'meta', 'main.yml')
        if os.path.isdir(role_path) and os.path.isfile(meta_path):
            yield role_path, meta_path
|
||||||
|
|
||||||
|
def load_run_after(meta_file):
    """Return galaxy_info.run_after from a role's meta/main.yml (or [] if absent)."""
    with open(meta_file, 'r') as fh:
        meta = yaml.safe_load(fh) or {}
    return meta.get('galaxy_info', {}).get('run_after', [])
|
||||||
|
|
||||||
|
def load_application_id(role_path):
    """Return application_id from the role's vars/main.yml, or None if unset or missing."""
    vars_file = os.path.join(role_path, 'vars', 'main.yml')
    if not os.path.exists(vars_file):
        return None
    with open(vars_file, 'r') as fh:
        data = yaml.safe_load(fh) or {}
    return data.get('application_id')
|
||||||
|
|
||||||
|
def build_dependency_graph(roles_dir, prefixes=None):
    """
    Build a dependency graph where each key is a role name and
    its value is a list of roles that depend on it.
    Also return in_degree counts and the roles metadata map.
    """
    # Edges point dependency -> dependents, so a topological sort of this
    # graph yields a valid execution order.
    graph = defaultdict(list)
    in_degree = defaultdict(int)
    roles = {}

    for role_path, meta_file in find_roles(roles_dir, prefixes):
        run_after = load_run_after(meta_file)
        application_id = load_application_id(role_path)
        role_name = os.path.basename(role_path)

        roles[role_name] = {
            'role_name': role_name,
            'run_after': run_after,
            'application_id': application_id,
            'path': role_path
        }

        for dependency in run_after:
            graph[dependency].append(role_name)
            in_degree[role_name] += 1

        # Roles with no dependencies get an explicit 0 so Kahn's algorithm
        # can seed its queue with them.
        if role_name not in in_degree:
            in_degree[role_name] = 0

    return graph, in_degree, roles
|
||||||
|
|
||||||
|
def find_cycle(roles):
    """
    Detect a cycle in the run_after relations:
    roles: dict mapping role_name -> { 'run_after': [...], ... }
    Returns a list of role_names forming the cycle (with the start repeated at end), or None.
    """
    seen = set()
    on_stack = set()

    def walk(node, trail):
        seen.add(node)
        on_stack.add(node)
        trail.append(node)
        for dep in roles.get(node, {}).get('run_after', []):
            if dep in on_stack:
                # Back-edge found: the cycle is the trail from dep onward,
                # closed by repeating dep at the end.
                start = trail.index(dep)
                return trail[start:] + [dep]
            if dep not in seen:
                found = walk(dep, trail)
                if found:
                    return found
        on_stack.discard(node)
        trail.pop()
        return None

    for role_name in roles:
        if role_name not in seen:
            cycle = walk(role_name, [])
            if cycle:
                return cycle
    return None
|
||||||
|
|
||||||
|
def topological_sort(graph, in_degree, roles=None):
    """
    Perform a topological sort (Kahn's algorithm) on the dependency graph.

    Args:
        graph: mapping role -> list of roles that depend on it.
        in_degree: mapping role -> number of unmet dependencies.
        roles: optional role metadata map, used only for cycle diagnostics.

    Returns:
        List of role names in dependency-safe order.

    Raises:
        Exception: if a cycle (or missing role) prevents a total order,
            with detailed debug information attached.
    """
    # deque is already imported at module level; the original re-imported it here.
    queue = deque([r for r, d in in_degree.items() if d == 0])
    sorted_roles = []
    local_in = dict(in_degree)  # work on a copy; never mutate the caller's counts

    while queue:
        role = queue.popleft()
        sorted_roles.append(role)
        for nbr in graph.get(role, []):
            local_in[nbr] -= 1
            if local_in[nbr] == 0:
                queue.append(nbr)

    if len(sorted_roles) != len(in_degree):
        # Not every role was emitted: there must be a cycle or a missing role.
        cycle = find_cycle(roles or {})
        emitted = set(sorted_roles)  # set membership: avoids O(n^2) list scans
        unsorted = [r for r in in_degree if r not in emitted]

        header = "❌ Dependency resolution failed"
        if cycle:
            reason = f"Circular dependency detected: {' -> '.join(cycle)}"
        else:
            reason = "Unresolved dependencies among roles (possible cycle or missing role)."

        details = []
        if unsorted:
            details.append("Unsorted roles and their declared run_after dependencies:")
            for r in unsorted:
                # Guard roles=None so diagnostics never crash the error path.
                deps = (roles or {}).get(r, {}).get('run_after', [])
                details.append(f"  - {r} depends on {deps!r}")

        graph_repr = f"Full dependency graph: {dict(graph)!r}"

        raise Exception("\n".join([header, reason] + details + [graph_repr]))

    return sorted_roles
|
||||||
|
|
||||||
|
def print_dependency_tree(graph):
    """Print the dependency tree visually on the console."""
    def render(node, depth=0):
        print("  " * depth + node)
        for child in graph.get(node, []):
            render(child, depth + 1)

    # Roots are roles that no other role lists as a dependent.
    dependents = {child for children in graph.values() for child in children}
    for root in set(graph.keys()) - dependents:
        render(root)
|
||||||
|
|
||||||
|
def gen_condi_role_incl(roles_dir, prefixes=None):
    """
    Generate playbook entries based on the sorted order.
    Raises a ValueError if application_id is missing.
    """
    graph, in_degree, roles = build_dependency_graph(roles_dir, prefixes)
    sorted_names = topological_sort(graph, in_degree, roles)

    entries = []
    for role_name in sorted_names:
        role = roles[role_name]

        # Every role must declare an application_id; it drives the gating
        # condition below.
        if role.get('application_id') is None:
            vars_file = os.path.join(role['path'], 'vars', 'main.yml')
            raise ValueError(f"'application_id' missing in {vars_file}")

        app_id = role['application_id']
        # Conditional include: the role runs only when its application is allowed.
        entries.append(
            f"- name: setup {app_id}\n"
            f"  when: ('{app_id}' | application_allowed(group_names, allowed_applications))\n"
            f"  include_role:\n"
            f"    name: {role_name}\n"
        )
        # Flush handlers after each app so notifications fire before the next one.
        entries.append(
            f"- name: flush handlers after {app_id}\n"
            f"  meta: flush_handlers\n"
        )

    return entries
|
||||||
|
|
||||||
|
def main():
    """CLI entry point: emit ordered, conditional role includes (or the dep tree)."""
    parser = argparse.ArgumentParser(
        description='Generate an Ansible playbook include file from Docker roles, sorted by run_after order.'
    )
    parser.add_argument('roles_dir', help='Path to directory containing role folders')
    parser.add_argument(
        '-p', '--prefix',
        action='append',
        help='Only include roles whose names start with any of these prefixes; can be specified multiple times'
    )
    parser.add_argument('-o', '--output', default=None,
                        help='Output file path (default: stdout)')
    parser.add_argument('-t', '--tree', action='store_true',
                        help='Display the dependency tree of roles and exit')

    args = parser.parse_args()
    prefixes = args.prefix or []

    # --tree is a diagnostic mode: print the graph and exit without generating.
    if args.tree:
        graph, _, _ = build_dependency_graph(args.roles_dir, prefixes)
        print_dependency_tree(graph)
        sys.exit(0)

    entries = gen_condi_role_incl(args.roles_dir, prefixes)
    output = ''.join(entries)

    if args.output:
        # NOTE(review): os.makedirs('') raises if --output has no directory
        # component — confirm callers always pass a path with a parent dir.
        os.makedirs(os.path.dirname(args.output), exist_ok=True)
        with open(args.output, 'w') as f:
            f.write(output)
        print(f"Playbook entries written to {args.output}")
    else:
        print(output)


if __name__ == '__main__':
    main()
|
65
cli/build/roles_list.py
Normal file
65
cli/build/roles_list.py
Normal file
@@ -0,0 +1,65 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Generate a JSON file listing all Ansible role directories.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
python roles_list.py [--roles-dir path/to/roles] [--output path/to/roles/list.json | console]
|
||||||
|
"""
|
||||||
|
import os
|
||||||
|
import json
|
||||||
|
import argparse
|
||||||
|
|
||||||
|
|
||||||
|
def find_roles(roles_dir: str):
    """Return sorted list of role names under roles_dir."""
    names = [
        name for name in os.listdir(roles_dir)
        if os.path.isdir(os.path.join(roles_dir, name))
    ]
    names.sort()
    return names
|
||||||
|
|
||||||
|
|
||||||
|
def write_roles_list(roles, out_file):
    """Write the list of roles to out_file as JSON.

    Creates the parent directory if needed. A bare filename (no directory
    component) is written to the current working directory.
    """
    parent = os.path.dirname(out_file)
    # os.makedirs('') raises FileNotFoundError, so guard the bare-filename case.
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(out_file, 'w', encoding='utf-8') as f:
        json.dump(roles, f, indent=2)
    print(f"Wrote roles list to {out_file}")
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """CLI entry point: discover role directories and write (or print) the JSON list."""
    # Determine default roles_dir relative to this script: ../../.. -> roles
    script_dir = os.path.dirname(os.path.abspath(__file__))
    default_roles_dir = os.path.abspath(
        os.path.join(script_dir, '..', '..', 'roles')
    )
    default_output = os.path.join(default_roles_dir, 'list.json')

    parser = argparse.ArgumentParser(description='Generate roles/list.json')
    parser.add_argument(
        '--roles-dir', '-r',
        default=default_roles_dir,
        help=f'Directory containing role subfolders (default: {default_roles_dir})'
    )
    parser.add_argument(
        '--output', '-o',
        default=default_output,
        help=(
            'Output path for roles list JSON '
            '(or "console" to print to stdout, default: %(default)s)'
        )
    )
    args = parser.parse_args()

    if not os.path.isdir(args.roles_dir):
        parser.error(f"Roles directory not found: {args.roles_dir}")

    roles = find_roles(args.roles_dir)

    # The literal value "console" (case-insensitive) selects stdout output.
    if args.output.lower() == 'console':
        # Print JSON to stdout
        print(json.dumps(roles, indent=2))
    else:
        write_roles_list(roles, args.output)


if __name__ == '__main__':
    main()
|
104
cli/build/tree.py
Normal file
104
cli/build/tree.py
Normal file
@@ -0,0 +1,104 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
import os
|
||||||
|
import argparse
|
||||||
|
import json
|
||||||
|
from typing import Dict, Any
|
||||||
|
|
||||||
|
from cli.build.graph import build_mappings, output_graph
|
||||||
|
from module_utils.role_dependency_resolver import RoleDependencyResolver
|
||||||
|
|
||||||
|
|
||||||
|
def find_roles(roles_dir: str):
    """Yield (role_name, role_path) for every subdirectory of *roles_dir*."""
    for name in os.listdir(roles_dir):
        candidate = os.path.join(roles_dir, name)
        if not os.path.isdir(candidate):
            continue
        yield name, candidate
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """CLI entry point: build dependency graphs for every role and write meta/tree.json.

    For each role directory, builds the full graph mappings, augments them with
    the direct (depth-1) dependency buckets (include_role, import_role,
    meta dependencies, run_after), then either previews the graphs on the
    console or writes them to the role's meta/tree.json (or a shadow folder).
    """
    script_dir = os.path.dirname(os.path.abspath(__file__))
    default_roles_dir = os.path.abspath(os.path.join(script_dir, "..", "..", "roles"))

    parser = argparse.ArgumentParser(
        description="Generate all graphs for each role and write meta/tree.json"
    )
    parser.add_argument("-d", "--role_dir", default=default_roles_dir,
                        help=f"Path to roles directory (default: {default_roles_dir})")
    parser.add_argument("-D", "--depth", type=int, default=0,
                        help="Max recursion depth (>0) or <=0 to stop on cycle")
    parser.add_argument("-o", "--output", choices=["yaml", "json", "console"],
                        default="json", help="Output format")
    parser.add_argument("-p", "--preview", action="store_true",
                        help="Preview graphs to console instead of writing files")
    parser.add_argument("-s", "--shadow-folder", type=str, default=None,
                        help="If set, writes tree.json to this shadow folder instead of the role's actual meta/ folder")
    parser.add_argument("-v", "--verbose", action="store_true", help="Enable verbose logging")

    # Toggles to exclude individual dependency buckets from the output
    parser.add_argument("--no-include-role", action="store_true", help="Do not scan include_role")
    parser.add_argument("--no-import-role", action="store_true", help="Do not scan import_role")
    parser.add_argument("--no-dependencies", action="store_true", help="Do not read meta/main.yml dependencies")
    parser.add_argument("--no-run-after", action="store_true",
                        help="Do not read galaxy_info.run_after from meta/main.yml")

    args = parser.parse_args()

    if args.verbose:
        print(f"Roles directory: {args.role_dir}")
        print(f"Max depth: {args.depth}")
        print(f"Output format: {args.output}")
        print(f"Preview mode: {args.preview}")
        print(f"Shadow folder: {args.shadow_folder}")

    resolver = RoleDependencyResolver(args.role_dir)

    for role_name, role_path in find_roles(args.role_dir):
        if args.verbose:
            print(f"Processing role: {role_name}")

        graphs: Dict[str, Any] = build_mappings(
            start_role=role_name,
            roles_dir=args.role_dir,
            max_depth=args.depth
        )

        # Direct deps (depth=1) – collected separately to fill the individual buckets
        # NOTE(review): these call private resolver methods; consider exposing a public API
        inc_roles, imp_roles = resolver._scan_tasks(role_path)
        meta_deps = resolver._extract_meta_dependencies(role_path)
        run_after = set()
        if not args.no_run_after:
            run_after = resolver._extract_meta_run_after(role_path)

        if any([not args.no_include_role and inc_roles,
                not args.no_import_role and imp_roles,
                not args.no_dependencies and meta_deps,
                not args.no_run_after and run_after]):
            # setdefault both reads and (when missing) inserts the bucket dict, so
            # the former trailing re-assignment `graphs["dependencies"] = deps_root`
            # was a no-op and has been removed.
            deps_root = graphs.setdefault("dependencies", {})
            if not args.no_include_role and inc_roles:
                deps_root["include_role"] = sorted(inc_roles)
            if not args.no_import_role and imp_roles:
                deps_root["import_role"] = sorted(imp_roles)
            if not args.no_dependencies and meta_deps:
                deps_root["dependencies"] = sorted(meta_deps)
            if not args.no_run_after and run_after:
                deps_root["run_after"] = sorted(run_after)

        if args.preview:
            for key, data in graphs.items():
                if args.verbose:
                    print(f"Previewing graph '{key}' for role '{role_name}'")
            # Preview always goes to the console regardless of --output
                output_graph(data, "console", role_name, key)
        else:
            if args.shadow_folder:
                tree_file = os.path.join(args.shadow_folder, role_name, "meta", "tree.json")
            else:
                tree_file = os.path.join(role_path, "meta", "tree.json")
            os.makedirs(os.path.dirname(tree_file), exist_ok=True)
            with open(tree_file, "w", encoding="utf-8") as f:
                json.dump(graphs, f, indent=2)
            print(f"Wrote {tree_file}")
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
0
cli/create/__init__.py
Normal file
0
cli/create/__init__.py
Normal file
240
cli/create/credentials.py
Normal file
240
cli/create/credentials.py
Normal file
@@ -0,0 +1,240 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Selectively add & vault NEW credentials in your inventory, preserving comments
|
||||||
|
and formatting. Existing values are left untouched unless --force is used.
|
||||||
|
|
||||||
|
Usage example:
|
||||||
|
infinito create credentials \
|
||||||
|
--role-path roles/web-app-akaunting \
|
||||||
|
--inventory-file host_vars/echoserver.yml \
|
||||||
|
--vault-password-file .pass/echoserver.txt \
|
||||||
|
--set credentials.database_password=mysecret
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Dict, Any, Union
|
||||||
|
|
||||||
|
from ruamel.yaml import YAML
|
||||||
|
from ruamel.yaml.comments import CommentedMap
|
||||||
|
|
||||||
|
from module_utils.manager.inventory import InventoryManager
|
||||||
|
from module_utils.handler.vault import VaultHandler # uses your existing handler
|
||||||
|
|
||||||
|
|
||||||
|
# ---------- helpers ----------
|
||||||
|
|
||||||
|
def ask_for_confirmation(key: str) -> bool:
    """Ask the user whether the existing value for *key* may be overwritten."""
    answer = input(
        f"Are you sure you want to overwrite the value for '{key}'? (y/n): "
    )
    return answer.strip().lower() == 'y'
|
||||||
|
|
||||||
|
|
||||||
|
def ensure_map(node: CommentedMap, key: str) -> CommentedMap:
    """Return ``node[key]`` as a CommentedMap, creating/replacing it when absent
    or of the wrong type (keeps the tree round-trip safe for ruamel)."""
    current = node.get(key)
    if not isinstance(current, CommentedMap):
        node[key] = CommentedMap()
    return node[key]
|
||||||
|
|
||||||
|
|
||||||
|
def _is_ruamel_vault(val: Any) -> bool:
|
||||||
|
"""Detect if a ruamel scalar already carries the !vault tag."""
|
||||||
|
try:
|
||||||
|
return getattr(val, 'tag', None) == '!vault'
|
||||||
|
except Exception:
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def _is_vault_encrypted(val: Any) -> bool:
    """True for values that are already vault-protected.

    Accepts ruamel '!vault' scalars as well as plain strings containing
    either the '$ANSIBLE_VAULT' header or a '!vault' marker.
    """
    if _is_ruamel_vault(val):
        return True
    return isinstance(val, str) and ("$ANSIBLE_VAULT" in val or "!vault" in val)
|
||||||
|
|
||||||
|
|
||||||
|
def _vault_body(text: str) -> str:
|
||||||
|
"""
|
||||||
|
Return only the vault body starting from the first line that contains
|
||||||
|
'$ANSIBLE_VAULT'. If not found, return the original text.
|
||||||
|
Also strips any leading '!vault |' header if present.
|
||||||
|
"""
|
||||||
|
lines = text.splitlines()
|
||||||
|
for i, ln in enumerate(lines):
|
||||||
|
if "$ANSIBLE_VAULT" in ln:
|
||||||
|
return "\n".join(lines[i:])
|
||||||
|
return text
|
||||||
|
|
||||||
|
|
||||||
|
def _make_vault_scalar_from_text(text: str) -> Any:
    """Build a ruamel object for a '!vault'-tagged literal block scalar.

    Works by round-trip-parsing a tiny YAML snippet instead of relying on
    yaml_set_tag(), which keeps the construction version-independent.
    """
    body = _vault_body(text)
    block = body.replace("\n", "\n  ")  # indent every line for the literal block
    doc = f"v: !vault |\n  {block}\n"
    return YAML(typ="rt").load(doc)["v"]
|
||||||
|
|
||||||
|
|
||||||
|
def to_vault_block(vault_handler: VaultHandler, value: Union[str, Any], label: str) -> Any:
    """Normalize *value* into a ruamel '!vault' scalar.

    Already-tagged scalars are returned untouched; strings that already carry
    vault markers are re-wrapped; any other value is encrypted with
    ansible-vault under *label* first.
    """
    # Already a ruamel !vault scalar → reuse as-is
    if _is_ruamel_vault(value):
        return value

    # Already encrypted text (with or without a '!vault |' header) → rewrap
    already_encrypted = isinstance(value, str) and (
        "$ANSIBLE_VAULT" in value or "!vault" in value
    )
    if already_encrypted:
        return _make_vault_scalar_from_text(value)

    # Plaintext → encrypt now, then wrap
    encrypted = vault_handler.encrypt_string(str(value), label)
    return _make_vault_scalar_from_text(encrypted)
|
||||||
|
|
||||||
|
|
||||||
|
def parse_overrides(pairs: list[str]) -> Dict[str, str]:
    """
    Parse --set key=value pairs into a dict.

    Supports both 'credentials.key=val' and 'key=val' (short) forms; keys and
    values are stripped of surrounding whitespace. Only the first '=' splits,
    so values may themselves contain '='.

    Raises:
        ValueError: if an entry has no '=' separator (previously this raised
            an opaque tuple-unpacking error).
    """
    out: Dict[str, str] = {}
    for pair in pairs:
        key, sep, value = pair.partition("=")
        if not sep:
            raise ValueError(f"Invalid --set entry (expected key=value): {pair!r}")
        out[key.strip()] = value.strip()
    return out
|
||||||
|
|
||||||
|
|
||||||
|
# ---------- main ----------
|
||||||
|
|
||||||
|
def main() -> int:
    """
    Merge schema-defined credentials into an inventory file, non-destructively.

    Only keys missing from the inventory are added (vault-encrypted); existing
    values — including their comments and formatting — are preserved unless
    --force (plus per-key confirmation or --yes) and a matching --set override
    are given. Returns a process exit code (0 on success).
    """
    parser = argparse.ArgumentParser(
        description="Selectively add & vault NEW credentials in your inventory, preserving comments/formatting."
    )
    parser.add_argument("--role-path", required=True, help="Path to your role")
    parser.add_argument("--inventory-file", required=True, help="Host vars file to update")
    parser.add_argument("--vault-password-file", required=True, help="Vault password file")
    parser.add_argument(
        "--set", nargs="*", default=[],
        help="Override values key[.subkey]=VALUE (applied to NEW keys; with --force also to existing)"
    )
    parser.add_argument(
        "-f", "--force", action="store_true",
        help="Allow overrides to replace existing values (will ask per key unless combined with --yes)"
    )
    parser.add_argument(
        "-y", "--yes", action="store_true",
        help="Non-interactive: assume 'yes' for all overwrite confirmations when --force is used"
    )
    args = parser.parse_args()

    overrides = parse_overrides(args.set)

    # Initialize inventory manager (provides schema + app_id + vault)
    manager = InventoryManager(
        role_path=Path(args.role_path),
        inventory_path=Path(args.inventory_file),
        vault_pw=args.vault_password_file,
        overrides=overrides
    )

    # 1) Load existing inventory with ruamel (round-trip, keeps comments/quotes)
    yaml_rt = YAML(typ="rt")
    yaml_rt.preserve_quotes = True

    with open(args.inventory_file, "r", encoding="utf-8") as f:
        data = yaml_rt.load(f)  # CommentedMap or None (empty file)
    if data is None:
        data = CommentedMap()

    # 2) Get schema-applied structure (defaults etc.) for *non-destructive* merge
    schema_inventory: Dict[str, Any] = manager.apply_schema()

    # 3) Ensure structural path applications.<app_id>.credentials exists
    apps = ensure_map(data, "applications")
    app_block = ensure_map(apps, manager.app_id)
    creds = ensure_map(app_block, "credentials")

    # 4) Determine defaults we could add
    schema_apps = schema_inventory.get("applications", {})
    schema_app_block = schema_apps.get(manager.app_id, {})
    schema_creds = schema_app_block.get("credentials", {}) if isinstance(schema_app_block, dict) else {}

    # 5) Add ONLY missing credential keys
    newly_added_keys = set()
    for key, default_val in schema_creds.items():
        if key in creds:
            # existing → do not touch (preserve plaintext/vault/formatting/comments)
            continue

        # Value to use for the new key
        # Priority: --set exact key → default from schema → empty string
        ov = overrides.get(f"credentials.{key}", None)
        if ov is None:
            ov = overrides.get(key, None)

        if ov is not None:
            value_for_new_key: Union[str, Any] = ov
        else:
            if _is_vault_encrypted(default_val):
                # Schema already provides a vault value → take it as-is
                creds[key] = to_vault_block(manager.vault_handler, default_val, key)
                newly_added_keys.add(key)
                continue
            value_for_new_key = "" if default_val is None else str(default_val)

        # Insert as !vault literal (encrypt if needed)
        creds[key] = to_vault_block(manager.vault_handler, value_for_new_key, key)
        newly_added_keys.add(key)

    # 6) ansible_become_password: only add if missing;
    #    never rewrite an existing one unless --force (+ confirm/--yes) and override provided.
    if "ansible_become_password" not in data:
        val = overrides.get("ansible_become_password", None)
        if val is not None:
            data["ansible_become_password"] = to_vault_block(
                manager.vault_handler, val, "ansible_become_password"
            )
    else:
        if args.force and "ansible_become_password" in overrides:
            do_overwrite = args.yes or ask_for_confirmation("ansible_become_password")
            if do_overwrite:
                data["ansible_become_password"] = to_vault_block(
                    manager.vault_handler, overrides["ansible_become_password"], "ansible_become_password"
                )

    # 7) Overrides for existing credential keys (only with --force)
    if args.force:
        for ov_key, ov_val in overrides.items():
            # Accept both 'credentials.key' and bare 'key'
            key = ov_key.split(".", 1)[1] if ov_key.startswith("credentials.") else ov_key
            if key in creds:
                # If we just added it in this run, don't ask again or rewrap
                if key in newly_added_keys:
                    continue
                if args.yes or ask_for_confirmation(key):
                    creds[key] = to_vault_block(manager.vault_handler, ov_val, key)

    # 8) Write back with ruamel (preserve formatting & comments)
    with open(args.inventory_file, "w", encoding="utf-8") as f:
        yaml_rt.dump(data, f)

    print(f"✅ Added new credentials without touching existing formatting/comments → {args.inventory_file}")
    return 0
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
sys.exit(main())
|
166
cli/create/role.py
Normal file
166
cli/create/role.py
Normal file
@@ -0,0 +1,166 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
import argparse
|
||||||
|
import shutil
|
||||||
|
import ipaddress
|
||||||
|
import difflib
|
||||||
|
from jinja2 import Environment, FileSystemLoader
|
||||||
|
from ruamel.yaml import YAML
|
||||||
|
|
||||||
|
import sys, os
|
||||||
|
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
||||||
|
from module_utils.entity_name_utils import get_entity_name
|
||||||
|
|
||||||
|
# Paths to the group-vars files
|
||||||
|
PORTS_FILE = './group_vars/all/10_ports.yml'
|
||||||
|
NETWORKS_FILE = './group_vars/all/09_networks.yml'
|
||||||
|
ROLE_TEMPLATE_DIR = './templates/roles/web-app'
|
||||||
|
ROLES_DIR = './roles'
|
||||||
|
|
||||||
|
yaml = YAML()
|
||||||
|
yaml.preserve_quotes = True
|
||||||
|
|
||||||
|
|
||||||
|
def load_yaml_with_comments(path):
    """Round-trip-load the YAML file at *path*, keeping comments intact."""
    with open(path) as handle:
        return yaml.load(handle)
|
||||||
|
|
||||||
|
|
||||||
|
def dump_yaml_with_comments(data, path):
    """Write *data* back to *path* via the round-trip dumper, preserving comments."""
    with open(path, 'w') as handle:
        yaml.dump(data, handle)
|
||||||
|
|
||||||
|
|
||||||
|
def get_next_network(networks_dict, prefixlen):
    """Return the subnet that directly follows the highest existing /prefixlen subnet.

    Scans defaults_networks.local for subnets with the requested prefix length
    and allocates the next contiguous block after the highest one.

    Raises:
        RuntimeError: when no subnet with that prefix length exists yet.
    """
    local = networks_dict['defaults_networks']['local']
    parsed = (ipaddress.ip_network(entry['subnet']) for entry in local.values())
    matching = [net for net in parsed if net.prefixlen == prefixlen]
    if not matching:
        raise RuntimeError(f"No existing /{prefixlen} subnets to base allocation on.")
    highest = max(matching, key=lambda net: int(net.network_address))
    next_base = int(highest.network_address) + highest.num_addresses
    return ipaddress.ip_network((next_base, prefixlen))
|
||||||
|
|
||||||
|
|
||||||
|
def get_next_port(ports_dict, category):
    """Return one above the highest port registered under *category* (1 when empty)."""
    registered = ports_dict['ports']['localhost'][category]
    return max((int(port) for port in registered.values()), default=0) + 1
|
||||||
|
|
||||||
|
|
||||||
|
def prompt_conflict(dst_file):
    """Ask how to resolve an existing destination file; returns '1' (overwrite),
    '2' (skip) or '3' (merge), reprompting until a valid answer is given."""
    print(f"Conflict detected: {dst_file}")
    print("[1] overwrite, [2] skip, [3] merge")
    while True:
        answer = input("Enter 1, 2, or 3: ").strip()
        if answer in ('1', '2', '3'):
            return answer
|
||||||
|
|
||||||
|
|
||||||
|
def render_templates(src_dir, dst_dir, context):
    """
    Render every Jinja2 template under *src_dir* into *dst_dir*.

    The directory tree is mirrored; files ending in '.j2' lose that suffix.
    When a destination file already exists, the user is prompted per file to
    overwrite, skip, or merge (merge appends only lines not already present).

    Args:
        src_dir: Template root directory.
        dst_dir: Destination role directory (created as needed).
        context: Variables passed to each template's render().
    """
    env = Environment(loader=FileSystemLoader(src_dir), keep_trailing_newline=True, autoescape=False)
    # Custom filters available to the role templates
    env.filters['bool'] = lambda x: bool(x)
    env.filters['get_entity_name'] = get_entity_name

    for root, _, files in os.walk(src_dir):
        rel = os.path.relpath(root, src_dir)
        target = os.path.join(dst_dir, rel)
        os.makedirs(target, exist_ok=True)
        for fn in files:
            # NOTE(review): Jinja2 template names use '/' separators; os.path.join
            # would yield '\\' on Windows — presumably POSIX-only usage, confirm.
            tpl = env.get_template(os.path.join(rel, fn))
            rendered = tpl.render(**context)
            out = fn[:-3] if fn.endswith('.j2') else fn
            dst_file = os.path.join(target, out)

            if os.path.exists(dst_file):
                choice = prompt_conflict(dst_file)
                if choice == '2':
                    print(f"Skipping {dst_file}")
                    continue
                if choice == '3':
                    # Merge: append only rendered lines absent from the old file
                    # (line-set comparison — ordering/duplicates are not considered)
                    with open(dst_file) as f_old:
                        old_lines = f_old.readlines()
                    new_lines = rendered.splitlines(keepends=True)
                    additions = [l for l in new_lines if l not in old_lines]
                    if additions:
                        with open(dst_file, 'a') as f:
                            f.writelines(additions)
                        print(f"Merged {len(additions)} lines into {dst_file}")
                    else:
                        print(f"No new lines to merge into {dst_file}")
                    continue
                # overwrite
                print(f"Overwriting {dst_file}")
                with open(dst_file, 'w') as f:
                    f.write(rendered)
            else:
                # create new file
                with open(dst_file, 'w') as f:
                    f.write(rendered)
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """
    Scaffold a new web-app role and reserve a subnet plus ports for it.

    Renders the role templates (with per-file conflict handling), then updates
    the global networks and ports group_vars files in place, preserving YAML
    comments and keeping a .bak copy of each modified file.
    """
    # Load dynamic port categories (the allowed values for --ports)
    ports_data = load_yaml_with_comments(PORTS_FILE)
    categories = list(ports_data['ports']['localhost'].keys())

    parser = argparse.ArgumentParser(
        description="Create or update a Docker Ansible role, and globally assign network and ports with comments preserved"
    )
    parser.add_argument('-a', '--application-id', required=True, help="Unique application ID")
    parser.add_argument('-n', '--network', choices=['24', '28'], required=True, help="Network prefix length (/24 or /28)")
    parser.add_argument('-p', '--ports', nargs='+', choices=categories, required=True, help=f"Port categories to assign (allowed: {', '.join(categories)})")
    args = parser.parse_args()

    app = args.application_id
    role = f"web-app-{app}"
    role_dir = os.path.join(ROLES_DIR, role)

    # Refuse to touch an existing role without explicit consent
    if os.path.exists(role_dir):
        if input(f"Role {role} exists. Continue? [y/N]: ").strip().lower() != 'y':
            print("Aborting.")
            sys.exit(1)
    else:
        os.makedirs(role_dir)

    # 1) Render all templates with conflict handling
    # NOTE(review): meaning of 'database_type': 0 is not visible here — confirm in templates
    render_templates(ROLE_TEMPLATE_DIR, role_dir, {'application_id': app, 'role_name': role, 'database_type': 0})
    print(f"→ Templates applied to {role_dir}")

    # 2) Update global networks file, preserving comments (a .bak copy is kept)
    networks = load_yaml_with_comments(NETWORKS_FILE)
    prefix = int(args.network)
    new_net = get_next_network(networks, prefix)
    networks['defaults_networks']['local'][app] = {'subnet': str(new_net)}
    shutil.copy(NETWORKS_FILE, NETWORKS_FILE + '.bak')
    dump_yaml_with_comments(networks, NETWORKS_FILE)
    print(f"→ Assigned network {new_net} in {NETWORKS_FILE}")

    # 3) Update global ports file, preserving comments
    ports_data = load_yaml_with_comments(PORTS_FILE)
    assigned = {}
    for cat in args.ports:
        loc = ports_data['ports']['localhost'].setdefault(cat, {})
        if app in loc:
            # Idempotent: keep the port already assigned to this app
            print(f"→ Existing port for {cat} and {app}: {loc[app]}, skipping.")
        else:
            pnum = get_next_port(ports_data, cat)
            loc[app] = pnum
            assigned[cat] = pnum

    # Only rewrite the ports file when something actually changed
    if assigned:
        shutil.copy(PORTS_FILE, PORTS_FILE + '.bak')
        dump_yaml_with_comments(ports_data, PORTS_FILE)
        print(f"→ Assigned ports {assigned} in {PORTS_FILE}")
    else:
        print("→ No new ports assigned.")
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
221
cli/deploy.py
Normal file
221
cli/deploy.py
Normal file
@@ -0,0 +1,221 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import subprocess
|
||||||
|
import os
|
||||||
|
import datetime
|
||||||
|
import sys
|
||||||
|
|
||||||
|
def run_ansible_playbook(
    inventory,
    modes,
    limit=None,
    allowed_applications=None,
    password_file=None,
    verbose=0,
    skip_tests=False,
    skip_validation=False,
    skip_build=False,
    cleanup=False,
    logs=False
):
    """
    Drive the deployment pipeline and finally invoke ansible-playbook.

    Steps (each toggleable): optional project cleanup, build, inventory
    validation, tests, then the playbook run itself.

    Args:
        inventory: Path to the Ansible inventory file.
        modes: Mapping of extra-var names to values, passed via -e.
        limit: Optional --limit host/group restriction.
        allowed_applications: Optional list of application IDs, joined into
            the 'allowed_applications' extra var.
        password_file: Vault password file; prompts interactively when absent.
        verbose: Verbosity level (number of -v flags for ansible-playbook).
        skip_tests: Skip 'make messy-test'.
        skip_validation: Skip the inventory validation step.
        skip_build: Skip 'make messy-build'.
        cleanup: Run 'make clean' first ('make clean-keep-logs' with logs=True).
        logs: Keep CLI logs during cleanup.

    Raises:
        subprocess.CalledProcessError: if any invoked command fails.
        SystemExit: when inventory validation fails.
    """
    start_time = datetime.datetime.now()
    print(f"\n▶️ Script started at: {start_time.isoformat()}\n")

    if cleanup:
        cleanup_command = ["make", "clean-keep-logs"] if logs else ["make", "clean"]
        print("\n🧹 Cleaning up project (" + " ".join(cleanup_command) + ")...\n")
        subprocess.run(cleanup_command, check=True)
    else:
        # BUGFIX: this branch skips *cleanup*; the old message wrongly said "build".
        print("\n⚠️ Skipping cleanup as requested.\n")

    if not skip_build:
        print("\n🛠️ Building project (make messy-build)...\n")
        subprocess.run(["make", "messy-build"], check=True)
    else:
        print("\n⚠️ Skipping build as requested.\n")

    script_dir = os.path.dirname(os.path.realpath(__file__))
    playbook = os.path.join(os.path.dirname(script_dir), "playbook.yml")

    # Inventory validation step
    if not skip_validation:
        print("\n🔍 Validating inventory before deployment...\n")
        try:
            subprocess.run(
                [sys.executable,
                 # BUGFIX: build the path portably instead of embedding '/'
                 os.path.join(script_dir, "validate", "inventory.py"),
                 os.path.dirname(inventory)
                 ],
                check=True
            )
        except subprocess.CalledProcessError:
            print(
                "\n❌ Inventory validation failed. Deployment aborted.\n",
                file=sys.stderr
            )
            sys.exit(1)
    else:
        print("\n⚠️ Skipping inventory validation as requested.\n")

    if not skip_tests:
        print("\n🧪 Running tests (make messy-test)...\n")
        subprocess.run(["make", "messy-test"], check=True)

    # Build ansible-playbook command
    cmd = ["ansible-playbook", "-i", inventory, playbook]

    if limit:
        cmd.extend(["--limit", limit])

    if allowed_applications:
        joined = ",".join(allowed_applications)
        cmd.extend(["-e", f"allowed_applications={joined}"])

    for key, value in modes.items():
        # Booleans become lowercase strings so Ansible parses them as YAML bools
        val = str(value).lower() if isinstance(value, bool) else str(value)
        cmd.extend(["-e", f"{key}={val}"])

    if password_file:
        cmd.extend(["--vault-password-file", password_file])
    else:
        cmd.extend(["--ask-vault-pass"])

    if verbose:
        cmd.append("-" + "v" * verbose)

    print("\n🚀 Launching Ansible Playbook...\n")
    subprocess.run(cmd, check=True)

    end_time = datetime.datetime.now()
    print(f"\n✅ Script ended at: {end_time.isoformat()}\n")

    duration = end_time - start_time
    print(f"⏱️ Total execution time: {duration}\n")
|
||||||
|
|
||||||
|
def validate_application_ids(inventory, app_ids):
    """
    Abort the script if any application IDs are invalid, with detailed reasons.

    Exits with status 1 after printing one line per invalid ID, stating
    whether it is missing from the roles and/or the inventory file.
    """
    from module_utils.valid_deploy_id import ValidDeployId
    invalid = ValidDeployId().validate(inventory, app_ids)
    if not invalid:
        return
    print("\n❌ Detected invalid application_id(s):\n")
    for app_id, status in invalid.items():
        checks = (
            (status['in_roles'], "not defined in roles (infinito)"),
            (status['in_inventory'], "not found in inventory file"),
        )
        reasons = [message for ok, message in checks if not ok]
        print(f" - {app_id}: " + ", ".join(reasons))
    sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Parse CLI options, validate the requested application IDs, then run the deployment pipeline."""
    parser = argparse.ArgumentParser(
        description="Run the central Ansible deployment script to manage infrastructure, updates, and tests."
    )

    parser.add_argument(
        "inventory",
        help="Path to the inventory file (INI or YAML) containing hosts and variables."
    )
    parser.add_argument(
        "-l", "--limit",
        help="Restrict execution to a specific host or host group from the inventory."
    )
    parser.add_argument(
        "-T", "--host-type",
        choices=["server", "desktop"],
        default="server",
        help="Specify whether the target is a server or a personal computer. Affects role selection and variables."
    )
    parser.add_argument(
        "-r", "--reset", action="store_true",
        help="Reset all Infinito.Nexus files and configurations, and run the entire playbook (not just individual roles)."
    )
    parser.add_argument(
        "-t", "--test", action="store_true",
        help="Run test routines instead of production tasks. Useful for local testing and CI pipelines."
    )
    parser.add_argument(
        "-u", "--update", action="store_true",
        help="Enable the update procedure to bring software and roles up to date."
    )
    parser.add_argument(
        "-b", "--backup", action="store_true",
        help="Perform a full backup of critical data and configurations before the update process."
    )
    parser.add_argument(
        "-c", "--cleanup", action="store_true",
        help="Clean up unused files and outdated configurations after all tasks are complete. Also cleans up the repository before the deployment procedure."
    )
    parser.add_argument(
        "-d", "--debug", action="store_true",
        help="Enable detailed debug output for Ansible and this script."
    )
    parser.add_argument(
        "-p", "--password-file",
        help="Path to the file containing the Vault password. If not provided, prompts for the password interactively."
    )
    parser.add_argument(
        "-s", "--skip-tests", action="store_true",
        help="Skip running 'make test' even if tests are normally enabled."
    )
    parser.add_argument(
        "-V", "--skip-validation", action="store_true",
        help="Skip inventory validation before deployment."
    )
    parser.add_argument(
        "-B", "--skip-build", action="store_true",
        help="Skip running 'make build' before deployment."
    )
    parser.add_argument(
        "-i", "--id",
        nargs="+",
        default=[],
        dest="id",
        help="List of application_id's for partial deploy. If not set, all application IDs defined in the inventory will be executed."
    )
    parser.add_argument(
        "-v", "--verbose", action="count", default=0,
        help="Increase verbosity level. Multiple -v flags increase detail (e.g., -vvv for maximum log output)."
    )
    parser.add_argument(
        "--logs", action="store_true",
        help="Keep the CLI logs during cleanup command"
    )

    args = parser.parse_args()
    # Fail fast before any build/cleanup when a requested application_id is unknown
    validate_application_ids(args.inventory, args.id)

    # Extra vars forwarded to the playbook via -e (booleans are lowercased there)
    modes = {
        "MODE_RESET": args.reset,
        "MODE_TEST": args.test,
        "MODE_UPDATE": args.update,
        "MODE_BACKUP": args.backup,
        "MODE_CLEANUP": args.cleanup,
        "MODE_LOGS": args.logs,
        "MODE_DEBUG": args.debug,
        "MODE_ASSERT": not args.skip_validation,
        "host_type": args.host_type
    }

    run_ansible_playbook(
        inventory=args.inventory,
        modes=modes,
        limit=args.limit,
        allowed_applications=args.id,
        password_file=args.password_file,
        verbose=args.verbose,
        skip_tests=args.skip_tests,
        skip_validation=args.skip_validation,
        skip_build=args.skip_build,
        cleanup=args.cleanup,
        logs=args.logs
    )
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
0
cli/encrypt/__init__.py
Normal file
0
cli/encrypt/__init__.py
Normal file
66
cli/encrypt/inventory.py
Normal file
66
cli/encrypt/inventory.py
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
import argparse
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
import yaml
|
||||||
|
from typing import Dict, Any
|
||||||
|
from module_utils.handler.vault import VaultHandler, VaultScalar
|
||||||
|
from module_utils.handler.yaml import YamlHandler
|
||||||
|
from yaml.dumper import SafeDumper
|
||||||
|
|
||||||
|
def ask_for_confirmation(key: str) -> bool:
    """Return True when the user agrees to encrypt the value stored under *key*."""
    answer = input(f"Do you want to encrypt the value for '{key}'? (y/n): ")
    return answer.strip().lower() == 'y'
|
||||||
|
|
||||||
|
|
||||||
|
def encrypt_recursively(data: Any, vault_handler: VaultHandler, ask_confirmation: bool = True, prefix: str = "") -> Any:
|
||||||
|
"""Recursively encrypt values in the data."""
|
||||||
|
if isinstance(data, dict):
|
||||||
|
for key, value in data.items():
|
||||||
|
new_prefix = f"{prefix}.{key}" if prefix else key
|
||||||
|
data[key] = encrypt_recursively(value, vault_handler, ask_confirmation, new_prefix)
|
||||||
|
elif isinstance(data, list):
|
||||||
|
for i, item in enumerate(data):
|
||||||
|
data[i] = encrypt_recursively(item, vault_handler, ask_confirmation, prefix)
|
||||||
|
elif isinstance(data, str):
|
||||||
|
# Only encrypt if it's not already vaulted
|
||||||
|
if not data.lstrip().startswith("$ANSIBLE_VAULT"):
|
||||||
|
if ask_confirmation:
|
||||||
|
# Ask for confirmation before encrypting if not `--all`
|
||||||
|
if not ask_for_confirmation(prefix):
|
||||||
|
print(f"Skipping encryption for '{prefix}'.")
|
||||||
|
return data
|
||||||
|
encrypted_value = vault_handler.encrypt_string(data, prefix)
|
||||||
|
lines = encrypted_value.splitlines()
|
||||||
|
indent = len(lines[1]) - len(lines[1].lstrip())
|
||||||
|
body = "\n".join(line[indent:] for line in lines[1:])
|
||||||
|
return VaultScalar(body) # Store encrypted value as VaultScalar
|
||||||
|
return data
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
description="Encrypt all fields, ask for confirmation unless --all is specified."
|
||||||
|
)
|
||||||
|
parser.add_argument("--inventory-file", required=True, help="Host vars file to update")
|
||||||
|
parser.add_argument("--vault-password-file", required=True, help="Vault password file")
|
||||||
|
parser.add_argument("--all", action="store_true", help="Encrypt all fields without confirmation")
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
# Initialize the VaultHandler and load the inventory
|
||||||
|
vault_handler = VaultHandler(vault_password_file=args.vault_password_file)
|
||||||
|
updated_inventory = YamlHandler.load_yaml(Path(args.inventory_file))
|
||||||
|
|
||||||
|
# 1) Encrypt all fields recursively
|
||||||
|
updated_inventory = encrypt_recursively(updated_inventory, vault_handler, ask_confirmation=not args.all)
|
||||||
|
|
||||||
|
# 2) Save the updated inventory to file
|
||||||
|
with open(args.inventory_file, "w", encoding="utf-8") as f:
|
||||||
|
yaml.dump(updated_inventory, f, sort_keys=False, Dumper=SafeDumper)
|
||||||
|
|
||||||
|
print(f"✅ Inventory selectively vaulted → {args.inventory_file}")
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
0
cli/fix/__init__.py
Normal file
0
cli/fix/__init__.py
Normal file
47
cli/fix/ini_py.py
Normal file
47
cli/fix/ini_py.py
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
"""
|
||||||
|
This script creates __init__.py files in every subdirectory under the specified
|
||||||
|
folder relative to the project root.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import argparse
|
||||||
|
|
||||||
|
|
||||||
|
def create_init_files(root_folder):
|
||||||
|
"""
|
||||||
|
Walk through all subdirectories of root_folder and create an __init__.py file
|
||||||
|
in each directory if it doesn't already exist.
|
||||||
|
"""
|
||||||
|
for dirpath, dirnames, filenames in os.walk(root_folder):
|
||||||
|
init_file = os.path.join(dirpath, '__init__.py')
|
||||||
|
if not os.path.exists(init_file):
|
||||||
|
open(init_file, 'w').close()
|
||||||
|
print(f"Created: {init_file}")
|
||||||
|
else:
|
||||||
|
print(f"Skipped (already exists): {init_file}")
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
description='Create __init__.py files in every subdirectory.'
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
'folder',
|
||||||
|
help='Relative path to the target folder (e.g., cli/fix)'
|
||||||
|
)
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
# Determine the absolute path based on the current working directory
|
||||||
|
root_folder = os.path.abspath(args.folder)
|
||||||
|
|
||||||
|
if not os.path.isdir(root_folder):
|
||||||
|
print(f"Error: The folder '{args.folder}' does not exist or is not a directory.")
|
||||||
|
exit(1)
|
||||||
|
|
||||||
|
create_init_files(root_folder)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
480
cli/fix/move_unnecessary_dependencies.py
Normal file
480
cli/fix/move_unnecessary_dependencies.py
Normal file
@@ -0,0 +1,480 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
"""
|
||||||
|
Move unnecessary meta dependencies to guarded include_role/import_role
|
||||||
|
for better performance, while preserving YAML comments, quotes, and layout.
|
||||||
|
|
||||||
|
Heuristic (matches tests/integration/test_unnecessary_role_dependencies.py):
|
||||||
|
- A dependency is considered UNNECESSARY if:
|
||||||
|
* The consumer does NOT use provider variables in defaults/vars/handlers
|
||||||
|
(no early-var need), AND
|
||||||
|
* In tasks, any usage of provider vars or provider-handler notifications
|
||||||
|
occurs only AFTER an include/import of the provider in the same file,
|
||||||
|
OR there is no usage at all.
|
||||||
|
|
||||||
|
Action:
|
||||||
|
- Remove such dependencies from roles/<role>/meta/main.yml.
|
||||||
|
- Prepend a guarded include block to roles/<role>/tasks/01_core.yml (preferred)
|
||||||
|
or roles/<role>/tasks/main.yml if 01_core.yml is absent.
|
||||||
|
- If multiple dependencies are moved for a role, use a loop over include_role.
|
||||||
|
|
||||||
|
Notes:
|
||||||
|
- Creates .bak backups for modified YAML files.
|
||||||
|
- Requires ruamel.yaml to preserve comments/quotes everywhere.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import glob
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import shutil
|
||||||
|
import sys
|
||||||
|
from typing import Dict, Set, List, Tuple, Optional
|
||||||
|
|
||||||
|
# --- Require ruamel.yaml for full round-trip preservation ---
|
||||||
|
try:
|
||||||
|
from ruamel.yaml import YAML
|
||||||
|
from ruamel.yaml.comments import CommentedMap, CommentedSeq
|
||||||
|
from ruamel.yaml.scalarstring import SingleQuotedScalarString
|
||||||
|
_HAVE_RUAMEL = True
|
||||||
|
except Exception:
|
||||||
|
_HAVE_RUAMEL = False
|
||||||
|
|
||||||
|
if not _HAVE_RUAMEL:
|
||||||
|
print("[ERR] ruamel.yaml is required to preserve comments/quotes. Install with: pip install ruamel.yaml", file=sys.stderr)
|
||||||
|
sys.exit(3)
|
||||||
|
|
||||||
|
yaml_rt = YAML()
|
||||||
|
yaml_rt.preserve_quotes = True
|
||||||
|
yaml_rt.width = 10**9 # prevent line wrapping
|
||||||
|
|
||||||
|
# ---------------- Utilities ----------------
|
||||||
|
|
||||||
|
def _backup(path: str):
|
||||||
|
if os.path.exists(path):
|
||||||
|
shutil.copy2(path, path + ".bak")
|
||||||
|
|
||||||
|
def read_text(path: str) -> str:
|
||||||
|
try:
|
||||||
|
with open(path, "r", encoding="utf-8") as f:
|
||||||
|
return f.read()
|
||||||
|
except Exception:
|
||||||
|
return ""
|
||||||
|
|
||||||
|
def load_yaml_rt(path: str):
|
||||||
|
try:
|
||||||
|
with open(path, "r", encoding="utf-8") as f:
|
||||||
|
data = yaml_rt.load(f)
|
||||||
|
return data if data is not None else CommentedMap()
|
||||||
|
except FileNotFoundError:
|
||||||
|
return CommentedMap()
|
||||||
|
except Exception as e:
|
||||||
|
print(f"[WARN] Failed to parse YAML: {path}: {e}", file=sys.stderr)
|
||||||
|
return CommentedMap()
|
||||||
|
|
||||||
|
def dump_yaml_rt(data, path: str):
|
||||||
|
_backup(path)
|
||||||
|
with open(path, "w", encoding="utf-8") as f:
|
||||||
|
yaml_rt.dump(data, f)
|
||||||
|
|
||||||
|
def roles_root(project_root: str) -> str:
|
||||||
|
return os.path.join(project_root, "roles")
|
||||||
|
|
||||||
|
def iter_role_dirs(project_root: str) -> List[str]:
|
||||||
|
root = roles_root(project_root)
|
||||||
|
return [d for d in glob.glob(os.path.join(root, "*")) if os.path.isdir(d)]
|
||||||
|
|
||||||
|
def role_name_from_dir(role_dir: str) -> str:
|
||||||
|
return os.path.basename(role_dir.rstrip(os.sep))
|
||||||
|
|
||||||
|
def path_if_exists(*parts) -> Optional[str]:
|
||||||
|
p = os.path.join(*parts)
|
||||||
|
return p if os.path.exists(p) else None
|
||||||
|
|
||||||
|
def gather_yaml_files(base: str, patterns: List[str]) -> List[str]:
|
||||||
|
files: List[str] = []
|
||||||
|
for pat in patterns:
|
||||||
|
files.extend(glob.glob(os.path.join(base, pat), recursive=True))
|
||||||
|
return [f for f in files if os.path.isfile(f)]
|
||||||
|
|
||||||
|
def sq(v: str):
|
||||||
|
"""Return a single-quoted scalar (ruamel) for consistent quoting."""
|
||||||
|
return SingleQuotedScalarString(v)
|
||||||
|
|
||||||
|
# ---------------- Providers: vars & handlers ----------------
|
||||||
|
|
||||||
|
def flatten_keys(data) -> Set[str]:
|
||||||
|
out: Set[str] = set()
|
||||||
|
if isinstance(data, dict):
|
||||||
|
for k, v in data.items():
|
||||||
|
if isinstance(k, str):
|
||||||
|
out.add(k)
|
||||||
|
out |= flatten_keys(v)
|
||||||
|
elif isinstance(data, list):
|
||||||
|
for item in data:
|
||||||
|
out |= flatten_keys(item)
|
||||||
|
return out
|
||||||
|
|
||||||
|
def collect_role_defined_vars(role_dir: str) -> Set[str]:
|
||||||
|
"""Vars a role 'provides': defaults/vars keys + set_fact keys in tasks."""
|
||||||
|
provided: Set[str] = set()
|
||||||
|
|
||||||
|
for rel in ("defaults/main.yml", "vars/main.yml"):
|
||||||
|
p = path_if_exists(role_dir, rel)
|
||||||
|
if p:
|
||||||
|
data = load_yaml_rt(p)
|
||||||
|
provided |= flatten_keys(data)
|
||||||
|
|
||||||
|
# set_fact keys
|
||||||
|
task_files = gather_yaml_files(os.path.join(role_dir, "tasks"), ["**/*.yml", "*.yml"])
|
||||||
|
for tf in task_files:
|
||||||
|
data = load_yaml_rt(tf)
|
||||||
|
if isinstance(data, list):
|
||||||
|
for task in data:
|
||||||
|
if isinstance(task, dict) and "set_fact" in task and isinstance(task["set_fact"], dict):
|
||||||
|
provided |= set(task["set_fact"].keys())
|
||||||
|
|
||||||
|
noisy = {"when", "name", "vars", "tags", "register"}
|
||||||
|
return {v for v in provided if isinstance(v, str) and v and v not in noisy}
|
||||||
|
|
||||||
|
def collect_role_handler_names(role_dir: str) -> Set[str]:
|
||||||
|
"""Handler names defined by a role (for notify detection)."""
|
||||||
|
handler_file = path_if_exists(role_dir, "handlers/main.yml")
|
||||||
|
if not handler_file:
|
||||||
|
return set()
|
||||||
|
data = load_yaml_rt(handler_file)
|
||||||
|
names: Set[str] = set()
|
||||||
|
if isinstance(data, list):
|
||||||
|
for task in data:
|
||||||
|
if isinstance(task, dict):
|
||||||
|
nm = task.get("name")
|
||||||
|
if isinstance(nm, str) and nm.strip():
|
||||||
|
names.add(nm.strip())
|
||||||
|
return names
|
||||||
|
|
||||||
|
# ---------------- Consumers: usage scanning ----------------
|
||||||
|
|
||||||
|
def find_var_positions(text: str, varname: str) -> List[int]:
|
||||||
|
"""Return byte offsets for occurrences of varname (word-ish boundary)."""
|
||||||
|
positions: List[int] = []
|
||||||
|
if not varname:
|
||||||
|
return positions
|
||||||
|
pattern = re.compile(rf"(?<!\w){re.escape(varname)}(?!\w)")
|
||||||
|
for m in pattern.finditer(text):
|
||||||
|
positions.append(m.start())
|
||||||
|
return positions
|
||||||
|
|
||||||
|
def first_var_use_offset_in_text(text: str, provided_vars: Set[str]) -> Optional[int]:
|
||||||
|
first: Optional[int] = None
|
||||||
|
for v in provided_vars:
|
||||||
|
for off in find_var_positions(text, v):
|
||||||
|
if first is None or off < first:
|
||||||
|
first = off
|
||||||
|
return first
|
||||||
|
|
||||||
|
def first_include_offset_for_role(text: str, producer_role: str) -> Optional[int]:
|
||||||
|
"""
|
||||||
|
Find earliest include/import of a given role in this YAML text.
|
||||||
|
Handles compact dict and block styles.
|
||||||
|
"""
|
||||||
|
pattern = re.compile(
|
||||||
|
r"(include_role|import_role)\s*:\s*\{[^}]*\bname\s*:\s*['\"]?"
|
||||||
|
+ re.escape(producer_role) + r"['\"]?[^}]*\}"
|
||||||
|
r"|"
|
||||||
|
r"(include_role|import_role)\s*:\s*\n(?:\s+[a-z_]+\s*:\s*.*\n)*\s*name\s*:\s*['\"]?"
|
||||||
|
+ re.escape(producer_role) + r"['\"]?",
|
||||||
|
re.IGNORECASE,
|
||||||
|
)
|
||||||
|
m = pattern.search(text)
|
||||||
|
return m.start() if m else None
|
||||||
|
|
||||||
|
def find_notify_offsets_for_handlers(text: str, handler_names: Set[str]) -> List[int]:
|
||||||
|
"""
|
||||||
|
Heuristic: for each handler name, find occurrences where 'notify' appears within
|
||||||
|
the preceding ~200 chars. Works for single string or list-style notify blocks.
|
||||||
|
"""
|
||||||
|
if not handler_names:
|
||||||
|
return []
|
||||||
|
offsets: List[int] = []
|
||||||
|
for h in handler_names:
|
||||||
|
for m in re.finditer(re.escape(h), text):
|
||||||
|
start = m.start()
|
||||||
|
back = max(0, start - 200)
|
||||||
|
context = text[back:start]
|
||||||
|
if re.search(r"notify\s*:", context):
|
||||||
|
offsets.append(start)
|
||||||
|
return sorted(offsets)
|
||||||
|
|
||||||
|
def parse_meta_dependencies(role_dir: str) -> List[str]:
|
||||||
|
meta = path_if_exists(role_dir, "meta/main.yml")
|
||||||
|
if not meta:
|
||||||
|
return []
|
||||||
|
data = load_yaml_rt(meta)
|
||||||
|
dd = data.get("dependencies")
|
||||||
|
deps: List[str] = []
|
||||||
|
if isinstance(dd, list):
|
||||||
|
for item in dd:
|
||||||
|
if isinstance(item, str):
|
||||||
|
deps.append(item)
|
||||||
|
elif isinstance(item, dict) and "role" in item:
|
||||||
|
deps.append(str(item["role"]))
|
||||||
|
elif isinstance(item, dict) and "name" in item:
|
||||||
|
deps.append(str(item["name"]))
|
||||||
|
return deps
|
||||||
|
|
||||||
|
# ---------------- Fix application ----------------
|
||||||
|
|
||||||
|
def sanitize_run_once_var(role_name: str) -> str:
|
||||||
|
"""
|
||||||
|
Generate run_once variable name from role name.
|
||||||
|
Example: 'sys-front-inj-logout' -> 'run_once_sys_front_inj_logout'
|
||||||
|
"""
|
||||||
|
return "run_once_" + role_name.replace("-", "_")
|
||||||
|
|
||||||
|
def build_include_block_yaml(consumer_role: str, moved_deps: List[str]) -> List[dict]:
|
||||||
|
"""
|
||||||
|
Build a guarded block that includes one or many roles.
|
||||||
|
This block will be prepended to tasks/01_core.yml or tasks/main.yml.
|
||||||
|
"""
|
||||||
|
guard_var = sanitize_run_once_var(consumer_role)
|
||||||
|
|
||||||
|
if len(moved_deps) == 1:
|
||||||
|
inner_tasks = [
|
||||||
|
{
|
||||||
|
"name": f"Include dependency '{moved_deps[0]}'",
|
||||||
|
"include_role": {"name": moved_deps[0]},
|
||||||
|
}
|
||||||
|
]
|
||||||
|
else:
|
||||||
|
inner_tasks = [
|
||||||
|
{
|
||||||
|
"name": "Include dependencies",
|
||||||
|
"include_role": {"name": "{{ item }}"},
|
||||||
|
"loop": moved_deps,
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
|
# Always set the run_once fact at the end
|
||||||
|
inner_tasks.append({"set_fact": {guard_var: True}})
|
||||||
|
|
||||||
|
# Correct Ansible block structure
|
||||||
|
block_task = {
|
||||||
|
"name": "Load former meta dependencies once",
|
||||||
|
"block": inner_tasks,
|
||||||
|
"when": f"{guard_var} is not defined",
|
||||||
|
}
|
||||||
|
|
||||||
|
return [block_task]
|
||||||
|
|
||||||
|
def prepend_tasks(tasks_path: str, new_tasks, dry_run: bool):
|
||||||
|
"""
|
||||||
|
Prepend new_tasks (CommentedSeq) to an existing tasks YAML list while preserving comments.
|
||||||
|
If the file does not exist, create it with new_tasks.
|
||||||
|
"""
|
||||||
|
if os.path.exists(tasks_path):
|
||||||
|
existing = load_yaml_rt(tasks_path)
|
||||||
|
if isinstance(existing, list):
|
||||||
|
combined = CommentedSeq()
|
||||||
|
for item in new_tasks:
|
||||||
|
combined.append(item)
|
||||||
|
for item in existing:
|
||||||
|
combined.append(item)
|
||||||
|
elif isinstance(existing, dict):
|
||||||
|
# Rare case: tasks file with a single mapping; coerce to list
|
||||||
|
combined = CommentedSeq()
|
||||||
|
for item in new_tasks:
|
||||||
|
combined.append(item)
|
||||||
|
combined.append(existing)
|
||||||
|
else:
|
||||||
|
combined = new_tasks
|
||||||
|
else:
|
||||||
|
os.makedirs(os.path.dirname(tasks_path), exist_ok=True)
|
||||||
|
combined = new_tasks
|
||||||
|
|
||||||
|
if dry_run:
|
||||||
|
print(f"[DRY-RUN] Would write {tasks_path} with {len(new_tasks)} prepended task(s).")
|
||||||
|
return
|
||||||
|
|
||||||
|
dump_yaml_rt(combined, tasks_path)
|
||||||
|
print(f"[OK] Updated {tasks_path} (prepended {len(new_tasks)} task(s)).")
|
||||||
|
|
||||||
|
def update_meta_remove_deps(meta_path: str, remove: List[str], dry_run: bool):
|
||||||
|
"""
|
||||||
|
Remove entries from meta.dependencies while leaving the rest of the file intact.
|
||||||
|
Quotes, comments, key order, and line breaks are preserved.
|
||||||
|
Returns True if a change would be made (or was made when not in dry-run).
|
||||||
|
"""
|
||||||
|
if not os.path.exists(meta_path):
|
||||||
|
return False
|
||||||
|
|
||||||
|
doc = load_yaml_rt(meta_path)
|
||||||
|
deps = doc.get("dependencies")
|
||||||
|
if not isinstance(deps, list):
|
||||||
|
return False
|
||||||
|
|
||||||
|
def dep_name(item):
|
||||||
|
if isinstance(item, dict):
|
||||||
|
return item.get("role") or item.get("name")
|
||||||
|
return item
|
||||||
|
|
||||||
|
keep = CommentedSeq()
|
||||||
|
removed = []
|
||||||
|
for item in deps:
|
||||||
|
name = dep_name(item)
|
||||||
|
if name in remove:
|
||||||
|
removed.append(name)
|
||||||
|
else:
|
||||||
|
keep.append(item)
|
||||||
|
|
||||||
|
if not removed:
|
||||||
|
return False
|
||||||
|
|
||||||
|
if keep:
|
||||||
|
doc["dependencies"] = keep
|
||||||
|
else:
|
||||||
|
if "dependencies" in doc:
|
||||||
|
del doc["dependencies"]
|
||||||
|
|
||||||
|
if dry_run:
|
||||||
|
print(f"[DRY-RUN] Would rewrite {meta_path}; removed: {', '.join(removed)}")
|
||||||
|
return True
|
||||||
|
|
||||||
|
dump_yaml_rt(doc, meta_path)
|
||||||
|
print(f"[OK] Rewrote {meta_path}; removed: {', '.join(removed)}")
|
||||||
|
return True
|
||||||
|
|
||||||
|
def dependency_is_unnecessary(consumer_dir: str,
|
||||||
|
consumer_name: str,
|
||||||
|
producer_name: str,
|
||||||
|
provider_vars: Set[str],
|
||||||
|
provider_handlers: Set[str]) -> bool:
|
||||||
|
"""Apply heuristic to decide if we can move this dependency."""
|
||||||
|
# 1) Early usage in defaults/vars/handlers? If yes -> necessary
|
||||||
|
defaults_files = [p for p in [
|
||||||
|
path_if_exists(consumer_dir, "defaults/main.yml"),
|
||||||
|
path_if_exists(consumer_dir, "vars/main.yml"),
|
||||||
|
path_if_exists(consumer_dir, "handlers/main.yml"),
|
||||||
|
] if p]
|
||||||
|
for p in defaults_files:
|
||||||
|
text = read_text(p)
|
||||||
|
if first_var_use_offset_in_text(text, provider_vars) is not None:
|
||||||
|
return False # needs meta dep
|
||||||
|
|
||||||
|
# 2) Tasks: any usage before include/import? If yes -> keep meta dep
|
||||||
|
task_files = gather_yaml_files(os.path.join(consumer_dir, "tasks"), ["**/*.yml", "*.yml"])
|
||||||
|
for p in task_files:
|
||||||
|
text = read_text(p)
|
||||||
|
if not text:
|
||||||
|
continue
|
||||||
|
include_off = first_include_offset_for_role(text, producer_name)
|
||||||
|
var_use_off = first_var_use_offset_in_text(text, provider_vars)
|
||||||
|
notify_offs = find_notify_offsets_for_handlers(text, provider_handlers)
|
||||||
|
|
||||||
|
if var_use_off is not None:
|
||||||
|
if include_off is None or include_off > var_use_off:
|
||||||
|
return False # used before include
|
||||||
|
|
||||||
|
for noff in notify_offs:
|
||||||
|
if include_off is None or include_off > noff:
|
||||||
|
return False # notify before include
|
||||||
|
|
||||||
|
# If we get here: no early use, and either no usage at all or usage after include
|
||||||
|
return True
|
||||||
|
|
||||||
|
def process_role(role_dir: str,
|
||||||
|
providers_index: Dict[str, Tuple[Set[str], Set[str]]],
|
||||||
|
only_role: Optional[str],
|
||||||
|
dry_run: bool) -> bool:
|
||||||
|
"""
|
||||||
|
Returns True if any change suggested/made for this role.
|
||||||
|
"""
|
||||||
|
consumer_name = role_name_from_dir(role_dir)
|
||||||
|
if only_role and only_role != consumer_name:
|
||||||
|
return False
|
||||||
|
|
||||||
|
meta_deps = parse_meta_dependencies(role_dir)
|
||||||
|
if not meta_deps:
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Build provider vars/handlers accessors
|
||||||
|
moved: List[str] = []
|
||||||
|
for producer in meta_deps:
|
||||||
|
# Only consider local roles we can analyze
|
||||||
|
producer_dir = path_if_exists(os.path.dirname(role_dir), producer) or path_if_exists(os.path.dirname(roles_root(os.path.dirname(role_dir))), "roles", producer)
|
||||||
|
if producer not in providers_index:
|
||||||
|
# Unknown/external role → skip (we cannot verify safety)
|
||||||
|
continue
|
||||||
|
pvars, phandlers = providers_index[producer]
|
||||||
|
if dependency_is_unnecessary(role_dir, consumer_name, producer, pvars, phandlers):
|
||||||
|
moved.append(producer)
|
||||||
|
|
||||||
|
if not moved:
|
||||||
|
return False
|
||||||
|
|
||||||
|
# 1) Remove from meta
|
||||||
|
meta_path = os.path.join(role_dir, "meta", "main.yml")
|
||||||
|
update_meta_remove_deps(meta_path, moved, dry_run=dry_run)
|
||||||
|
|
||||||
|
# 2) Prepend include block to tasks/01_core.yml or tasks/main.yml
|
||||||
|
target_tasks = path_if_exists(role_dir, "tasks/01_core.yml")
|
||||||
|
if not target_tasks:
|
||||||
|
target_tasks = os.path.join(role_dir, "tasks", "main.yml")
|
||||||
|
include_block = build_include_block_yaml(consumer_name, moved)
|
||||||
|
prepend_tasks(target_tasks, include_block, dry_run=dry_run)
|
||||||
|
return True
|
||||||
|
|
||||||
|
def build_providers_index(all_roles: List[str]) -> Dict[str, Tuple[Set[str], Set[str]]]:
|
||||||
|
"""
|
||||||
|
Map role_name -> (provided_vars, handler_names)
|
||||||
|
"""
|
||||||
|
index: Dict[str, Tuple[Set[str], Set[str]]] = {}
|
||||||
|
for rd in all_roles:
|
||||||
|
rn = role_name_from_dir(rd)
|
||||||
|
index[rn] = (collect_role_defined_vars(rd), collect_role_handler_names(rd))
|
||||||
|
return index
|
||||||
|
|
||||||
|
def main():
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
description="Move unnecessary meta dependencies to guarded include_role for performance (preserve comments/quotes)."
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--project-root",
|
||||||
|
default=os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")),
|
||||||
|
help="Path to project root (default: two levels up from this script).",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--role",
|
||||||
|
dest="only_role",
|
||||||
|
default=None,
|
||||||
|
help="Only process a specific role name (e.g., 'docker-core').",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--dry-run",
|
||||||
|
action="store_true",
|
||||||
|
help="Analyze and print planned changes without modifying files.",
|
||||||
|
)
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
roles = iter_role_dirs(args.project_root)
|
||||||
|
if not roles:
|
||||||
|
print(f"[ERR] No roles found under {roles_root(args.project_root)}", file=sys.stderr)
|
||||||
|
sys.exit(2)
|
||||||
|
|
||||||
|
providers_index = build_providers_index(roles)
|
||||||
|
|
||||||
|
changed_any = False
|
||||||
|
for role_dir in roles:
|
||||||
|
changed = process_role(role_dir, providers_index, args.only_role, args.dry_run)
|
||||||
|
changed_any = changed_any or changed
|
||||||
|
|
||||||
|
if not changed_any:
|
||||||
|
print("[OK] No unnecessary meta dependencies to move (per heuristic).")
|
||||||
|
else:
|
||||||
|
if args.dry_run:
|
||||||
|
print("[DRY-RUN] Completed analysis. No files were changed.")
|
||||||
|
else:
|
||||||
|
print("[OK] Finished moving unnecessary dependencies.")
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
5
cli/fix/replace_by_get_app_config.sh
Executable file
5
cli/fix/replace_by_get_app_config.sh
Executable file
@@ -0,0 +1,5 @@
|
|||||||
|
# Just a little refactoring script, you can delete it later
|
||||||
|
ATTR="$1"
|
||||||
|
OLD="applications[application_id].$ATTR"
|
||||||
|
NEW="applications | get_app_conf(application_id, '$ATTR', True)"
|
||||||
|
bsr ./ "$OLD" -rFfc -n "$NEW"
|
57
cli/fix/tabs.py
Normal file
57
cli/fix/tabs.py
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
import os
|
||||||
|
import argparse
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
FILES_FIXED = []
|
||||||
|
|
||||||
|
def fix_tabs_in_file(file_path):
|
||||||
|
"""Replaces tab characters with two spaces in the specified file."""
|
||||||
|
with open(file_path, "r", encoding="utf-8") as f:
|
||||||
|
lines = f.readlines()
|
||||||
|
|
||||||
|
if any('\t' in line for line in lines):
|
||||||
|
fixed_lines = [line.replace('\t', ' ') for line in lines]
|
||||||
|
with open(file_path, "w", encoding="utf-8") as f:
|
||||||
|
f.writelines(fixed_lines)
|
||||||
|
FILES_FIXED.append(str(file_path))
|
||||||
|
|
||||||
|
def find_yml_files(path):
|
||||||
|
"""Yield all .yml files under a given path recursively."""
|
||||||
|
for file in path.rglob("*.yml"):
|
||||||
|
if file.is_file():
|
||||||
|
yield file
|
||||||
|
|
||||||
|
def main():
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
description="Fix tab characters in all .yml files under a given path (recursively)."
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"path",
|
||||||
|
nargs="?",
|
||||||
|
default="./",
|
||||||
|
help="Base path to search for .yml files (default: ./)"
|
||||||
|
)
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
base_path = Path(args.path).resolve()
|
||||||
|
|
||||||
|
if not base_path.exists():
|
||||||
|
print(f"❌ Path does not exist: {base_path}")
|
||||||
|
exit(1)
|
||||||
|
|
||||||
|
print(f"🔍 Searching for .yml files under: {base_path}\n")
|
||||||
|
|
||||||
|
for yml_file in find_yml_files(base_path):
|
||||||
|
fix_tabs_in_file(yml_file)
|
||||||
|
|
||||||
|
if FILES_FIXED:
|
||||||
|
print("✅ Fixed tab characters in the following files:")
|
||||||
|
for f in FILES_FIXED:
|
||||||
|
print(f" - {f}")
|
||||||
|
else:
|
||||||
|
print("✅ No tabs found in any .yml files.")
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
89
cli/fix/vars_main_files.py
Normal file
89
cli/fix/vars_main_files.py
Normal file
@@ -0,0 +1,89 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Script to ensure each Ansible role under ../roles/ with a given prefix has a vars/main.yml
|
||||||
|
containing the correct application_id. Can preview actions or overwrite mismatches.
|
||||||
|
"""
|
||||||
|
import argparse
|
||||||
|
import sys
|
||||||
|
import yaml
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
# Directory containing roles; can be overridden by tests
|
||||||
|
MODULE_DIR = Path(__file__).resolve().parent
|
||||||
|
ROLES_DIR = (MODULE_DIR.parent.parent / "roles").resolve()
|
||||||
|
|
||||||
|
def process_role(role_dir: Path, prefix: str, preview: bool, overwrite: bool):
|
||||||
|
name = role_dir.name
|
||||||
|
if not name.startswith(prefix):
|
||||||
|
return
|
||||||
|
# Expected application_id is role name minus prefix
|
||||||
|
expected_id = name[len(prefix):]
|
||||||
|
vars_dir = role_dir / "vars"
|
||||||
|
vars_file = vars_dir / "main.yml"
|
||||||
|
if vars_file.exists():
|
||||||
|
# Load existing variables
|
||||||
|
try:
|
||||||
|
existing = yaml.safe_load(vars_file.read_text()) or {}
|
||||||
|
except yaml.YAMLError as e:
|
||||||
|
print(f"Error parsing YAML in {vars_file}: {e}", file=sys.stderr)
|
||||||
|
return
|
||||||
|
actual_id = existing.get("application_id")
|
||||||
|
if actual_id == expected_id:
|
||||||
|
# Already correct
|
||||||
|
return
|
||||||
|
if overwrite:
|
||||||
|
# Update only application_id
|
||||||
|
existing["application_id"] = expected_id
|
||||||
|
if preview:
|
||||||
|
print(f"[PREVIEW] Would update {vars_file}: application_id -> {expected_id}")
|
||||||
|
else:
|
||||||
|
with open(vars_file, "w") as f:
|
||||||
|
yaml.safe_dump(existing, f, default_flow_style=False, sort_keys=False)
|
||||||
|
print(f"Updated {vars_file}: application_id -> {expected_id}")
|
||||||
|
else:
|
||||||
|
print(f"Mismatch in {vars_file}: application_id='{actual_id}', expected='{expected_id}'")
|
||||||
|
else:
|
||||||
|
# Create new vars/main.yml
|
||||||
|
if preview:
|
||||||
|
print(f"[PREVIEW] Would create {vars_file} with application_id: {expected_id}")
|
||||||
|
else:
|
||||||
|
vars_dir.mkdir(parents=True, exist_ok=True)
|
||||||
|
content = {"application_id": expected_id}
|
||||||
|
with open(vars_file, "w") as f:
|
||||||
|
yaml.safe_dump(content, f, default_flow_style=False, sort_keys=False)
|
||||||
|
print(f"Created {vars_file} with application_id: {expected_id}")
|
||||||
|
|
||||||
|
|
||||||
|
def run(prefix: str, preview: bool = False, overwrite: bool = False):
|
||||||
|
"""
|
||||||
|
Ensure vars/main.yml for roles under ROLES_DIR with the given prefix has correct application_id.
|
||||||
|
"""
|
||||||
|
for role in sorted(Path(ROLES_DIR).iterdir()):
|
||||||
|
if role.is_dir():
|
||||||
|
process_role(role, prefix, preview, overwrite)
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
description="Ensure vars/main.yml for roles with a given prefix has correct application_id"
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--prefix", required=True,
|
||||||
|
help="Role name prefix to filter (e.g. 'web-', 'svc-', 'desk-')"
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--preview", action="store_true",
|
||||||
|
help="Show what would be done without making changes"
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--overwrite", action="store_true",
|
||||||
|
help="If vars/main.yml exists but application_id mismatches, overwrite only that key"
|
||||||
|
)
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
# Run processing
|
||||||
|
run(prefix=args.prefix, preview=args.preview, overwrite=args.overwrite)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
126
cli/integration/deploy_localhost.py
Normal file
126
cli/integration/deploy_localhost.py
Normal file
@@ -0,0 +1,126 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Run the full localhost integration flow entirely inside the infinito Docker container,
|
||||||
|
without writing any artifacts to the host filesystem.
|
||||||
|
Catches missing schema/config errors during credential vaulting and skips those apps.
|
||||||
|
"""
|
||||||
|
import subprocess
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
|
||||||
|
def main():
|
||||||
|
repo = os.path.abspath(os.getcwd())
|
||||||
|
|
||||||
|
bash_script = '''
|
||||||
|
set -e
|
||||||
|
|
||||||
|
ART=/integration-artifacts
|
||||||
|
mkdir -p "$ART"
|
||||||
|
echo testpassword > "$ART/vaultpw.txt"
|
||||||
|
|
||||||
|
# 1) Generate inventory
|
||||||
|
python3 -m cli.build.inventory.full \
|
||||||
|
--host localhost \
|
||||||
|
--inventory-style hostvars \
|
||||||
|
--format yaml \
|
||||||
|
--output "$ART/inventory.yml"
|
||||||
|
|
||||||
|
# 2) Credentials per-app
|
||||||
|
apps=$(python3 <<EOF
|
||||||
|
import yaml
|
||||||
|
inv = yaml.safe_load(open('/integration-artifacts/inventory.yml'))
|
||||||
|
print(' '.join(inv['_meta']['hostvars']['localhost']['invokable_applications']))
|
||||||
|
EOF
|
||||||
|
)
|
||||||
|
for app in $apps; do
|
||||||
|
echo "⏳ Vaulting credentials for $app..."
|
||||||
|
output=$(python3 -m cli.create.credentials \
|
||||||
|
--role-path "/repo/roles/$app" \
|
||||||
|
--inventory-file "$ART/inventory.yml" \
|
||||||
|
--vault-password-file "$ART/vaultpw.txt" \
|
||||||
|
--force 2>&1) || rc=$?; rc=${rc:-0}
|
||||||
|
|
||||||
|
if [ "$rc" -eq 0 ]; then
|
||||||
|
echo "✅ Credentials generated for $app"
|
||||||
|
elif echo "$output" | grep -q "No such file or directory"; then
|
||||||
|
echo "⚠️ Skipping $app (no schema/config)"
|
||||||
|
elif echo "$output" | grep -q "Plain algorithm for"; then
|
||||||
|
# Collect all plain-algo keys
|
||||||
|
keys=( $(echo "$output" | grep -oP "Plain algorithm for '\K[^']+") )
|
||||||
|
overrides=()
|
||||||
|
for key in "${keys[@]}"; do
|
||||||
|
if [[ "$key" == *api_key ]]; then
|
||||||
|
val=$(python3 - << 'PY'
|
||||||
|
import random, string
|
||||||
|
print(''.join(random.choices(string.ascii_letters+string.digits, k=32)))
|
||||||
|
PY
|
||||||
|
)
|
||||||
|
elif [[ "$key" == *password ]]; then
|
||||||
|
val=$(python3 - << 'PY'
|
||||||
|
import random, string
|
||||||
|
print(''.join(random.choices(string.ascii_letters+string.digits, k=12)))
|
||||||
|
PY
|
||||||
|
)
|
||||||
|
else
|
||||||
|
val=$(python3 - << 'PY'
|
||||||
|
import random, string
|
||||||
|
print(''.join(random.choices(string.ascii_letters+string.digits, k=16)))
|
||||||
|
PY
|
||||||
|
)
|
||||||
|
fi
|
||||||
|
echo " → Overriding $key=$val"
|
||||||
|
overrides+=("--set" "$key=$val")
|
||||||
|
done
|
||||||
|
# Retry with overrides
|
||||||
|
echo "🔄 Retrying with overrides..."
|
||||||
|
retry_out=$(python3 -m cli.create.credentials \
|
||||||
|
--role-path "/repo/roles/$app" \
|
||||||
|
--inventory-file "$ART/inventory.yml" \
|
||||||
|
--vault-password-file "$ART/vaultpw.txt" \
|
||||||
|
"${overrides[@]}" \
|
||||||
|
--force 2>&1) || retry_rc=$?; retry_rc=${retry_rc:-0}
|
||||||
|
if [ "$retry_rc" -eq 0 ]; then
|
||||||
|
echo "✅ Credentials generated for $app (with overrides)"
|
||||||
|
else
|
||||||
|
echo "❌ Override failed for $app:"
|
||||||
|
echo "$retry_out"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo "❌ Credential error for $app:"
|
||||||
|
echo "$output"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
# 3) Show generated files
|
||||||
|
ls -R "$ART" 2>/dev/null
|
||||||
|
|
||||||
|
echo "
|
||||||
|
===== inventory.yml ====="
|
||||||
|
cat "$ART/inventory.yml"
|
||||||
|
|
||||||
|
echo "
|
||||||
|
===== vaultpw.txt ====="
|
||||||
|
cat "$ART/vaultpw.txt"
|
||||||
|
|
||||||
|
# 4) Deploy
|
||||||
|
python3 -m cli.deploy \
|
||||||
|
"$ART/inventory.yml" \
|
||||||
|
--limit localhost \
|
||||||
|
--vault-password-file "$ART/vaultpw.txt" \
|
||||||
|
--verbose
|
||||||
|
'''
|
||||||
|
|
||||||
|
cmd = [
|
||||||
|
"docker", "run", "--rm",
|
||||||
|
"-v", f"{repo}:/repo",
|
||||||
|
"-w", "/repo",
|
||||||
|
"--entrypoint", "bash",
|
||||||
|
"infinito:latest",
|
||||||
|
"-c", bash_script
|
||||||
|
]
|
||||||
|
print(f"\033[96m> {' '.join(cmd)}\033[0m")
|
||||||
|
rc = subprocess.call(cmd)
|
||||||
|
sys.exit(rc)
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
50
cli/make.py
Normal file
50
cli/make.py
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
CLI wrapper for Makefile targets within Infinito.Nexus.
|
||||||
|
Invokes `make` commands in the project root directory.
|
||||||
|
"""
|
||||||
|
import argparse
|
||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
prog='infinito make',
|
||||||
|
description='Run Makefile targets for Infinito.Nexus project'
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
'targets',
|
||||||
|
nargs=argparse.REMAINDER,
|
||||||
|
help='Make targets and options to pass to `make`'
|
||||||
|
)
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
# Default to 'build' if no target is specified
|
||||||
|
make_args = args.targets or ['build']
|
||||||
|
|
||||||
|
# Determine repository root (one level up from cli/)
|
||||||
|
script_dir = os.path.dirname(os.path.realpath(__file__))
|
||||||
|
repo_root = os.path.abspath(os.path.join(script_dir, os.pardir))
|
||||||
|
|
||||||
|
# Check for Makefile
|
||||||
|
makefile_path = os.path.join(repo_root, 'Makefile')
|
||||||
|
if not os.path.isfile(makefile_path):
|
||||||
|
print(f"Error: Makefile not found in {repo_root}", file=sys.stderr)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
# Invoke make in repo root
|
||||||
|
cmd = ['make'] + make_args
|
||||||
|
try:
|
||||||
|
result = subprocess.run(cmd, cwd=repo_root)
|
||||||
|
sys.exit(result.returncode)
|
||||||
|
except FileNotFoundError:
|
||||||
|
print("Error: 'make' command not found. Please install make.", file=sys.stderr)
|
||||||
|
sys.exit(1)
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
0
cli/meta/__init__.py
Normal file
0
cli/meta/__init__.py
Normal file
0
cli/meta/applications/__init__.py
Normal file
0
cli/meta/applications/__init__.py
Normal file
40
cli/meta/applications/all.py
Normal file
40
cli/meta/applications/all.py
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
# cli/meta/applications/all.py
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import sys
|
||||||
|
|
||||||
|
# Import the Ansible filter implementation
|
||||||
|
try:
|
||||||
|
from filter_plugins.get_all_application_ids import get_all_application_ids
|
||||||
|
except ImportError:
|
||||||
|
sys.stderr.write("Filter plugin `get_all_application_ids` not found. Ensure `filter_plugins/get_all_application_ids.py` is in your PYTHONPATH.\n")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
def find_application_ids():
|
||||||
|
"""
|
||||||
|
Legacy function retained for reference.
|
||||||
|
Delegates to the `get_all_application_ids` filter plugin.
|
||||||
|
"""
|
||||||
|
return get_all_application_ids()
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
description='Output a list of all application_id values defined in roles/*/vars/main.yml'
|
||||||
|
)
|
||||||
|
parser.parse_args()
|
||||||
|
|
||||||
|
try:
|
||||||
|
ids = find_application_ids()
|
||||||
|
except Exception as e:
|
||||||
|
sys.stderr.write(f"Error retrieving application IDs: {e}\n")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
for app_id in ids:
|
||||||
|
print(app_id)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
107
cli/meta/applications/in_group_deps.py
Normal file
107
cli/meta/applications/in_group_deps.py
Normal file
@@ -0,0 +1,107 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
CLI wrapper for applications_if_group_and_deps filter.
|
||||||
|
"""
|
||||||
|
import argparse
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import yaml
|
||||||
|
from filter_plugins.applications_if_group_and_deps import FilterModule
|
||||||
|
|
||||||
|
|
||||||
|
def find_role_dirs_by_app_id(app_ids, roles_dir):
|
||||||
|
"""
|
||||||
|
Map application_ids to role directory names based on vars/main.yml in each role.
|
||||||
|
"""
|
||||||
|
mapping = {}
|
||||||
|
for role in os.listdir(roles_dir):
|
||||||
|
role_path = os.path.join(roles_dir, role)
|
||||||
|
vars_file = os.path.join(role_path, 'vars', 'main.yml')
|
||||||
|
if not os.path.isfile(vars_file):
|
||||||
|
continue
|
||||||
|
try:
|
||||||
|
with open(vars_file) as f:
|
||||||
|
data = yaml.safe_load(f) or {}
|
||||||
|
except Exception:
|
||||||
|
continue
|
||||||
|
app_id = data.get('application_id')
|
||||||
|
if isinstance(app_id, str) and app_id:
|
||||||
|
mapping[app_id] = role
|
||||||
|
# Translate each requested app_id to role dir if exists
|
||||||
|
dirs = []
|
||||||
|
for gid in app_ids:
|
||||||
|
if gid in mapping:
|
||||||
|
dirs.append(mapping[gid])
|
||||||
|
else:
|
||||||
|
# keep original if it matches a directory
|
||||||
|
if os.path.isdir(os.path.join(roles_dir, gid)):
|
||||||
|
dirs.append(gid)
|
||||||
|
return dirs
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
description="Filter applications by group names (role dirs or application_ids) and their recursive role dependencies."
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"-a", "--applications",
|
||||||
|
type=str,
|
||||||
|
required=True,
|
||||||
|
help="Path to YAML file defining the applications dict."
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"-g", "--groups",
|
||||||
|
nargs='+',
|
||||||
|
required=True,
|
||||||
|
help="List of group names to filter by (role directory names or application_ids)."
|
||||||
|
)
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
# Load applications
|
||||||
|
try:
|
||||||
|
with open(args.applications) as f:
|
||||||
|
data = yaml.safe_load(f)
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Error loading applications file: {e}", file=sys.stderr)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
# Unwrap under 'applications' key if present
|
||||||
|
if isinstance(data, dict) and 'applications' in data and isinstance(data['applications'], dict):
|
||||||
|
applications = data['applications']
|
||||||
|
else:
|
||||||
|
applications = data
|
||||||
|
|
||||||
|
if not isinstance(applications, dict):
|
||||||
|
print(
|
||||||
|
f"Expected applications YAML to contain a mapping (or 'applications' mapping), got {type(applications).__name__}",
|
||||||
|
file=sys.stderr
|
||||||
|
)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
# Determine roles_dir relative to project root
|
||||||
|
script_dir = os.path.dirname(__file__)
|
||||||
|
project_root = os.path.abspath(os.path.join(script_dir, '..', '..', '..'))
|
||||||
|
roles_dir = os.path.join(project_root, 'roles')
|
||||||
|
|
||||||
|
# Map user-provided groups (which may be application_ids) to role directory names
|
||||||
|
group_dirs = find_role_dirs_by_app_id(args.groups, roles_dir)
|
||||||
|
if not group_dirs:
|
||||||
|
print(f"No matching role directories found for groups: {args.groups}", file=sys.stderr)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
# Run filter using role directory names
|
||||||
|
try:
|
||||||
|
filtered = FilterModule().applications_if_group_and_deps(
|
||||||
|
applications,
|
||||||
|
group_dirs
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Error running filter: {e}", file=sys.stderr)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
# Output result as YAML
|
||||||
|
print(yaml.safe_dump(filtered, default_flow_style=False))
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
49
cli/meta/applications/invokable.py
Normal file
49
cli/meta/applications/invokable.py
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
# cli/meta/applications/invokable.py
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
|
||||||
|
# Import filter plugin for get_all_invokable_apps
|
||||||
|
try:
|
||||||
|
from filter_plugins.get_all_invokable_apps import get_all_invokable_apps
|
||||||
|
except ImportError:
|
||||||
|
# Try to adjust sys.path if running outside Ansible
|
||||||
|
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..')))
|
||||||
|
try:
|
||||||
|
from filter_plugins.get_all_invokable_apps import get_all_invokable_apps
|
||||||
|
except ImportError:
|
||||||
|
sys.stderr.write("Could not import filter_plugins.get_all_invokable_apps. Check your PYTHONPATH.\n")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
def main():
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
description='List all invokable applications (application_ids) based on invokable paths from categories.yml and available roles.'
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
'-c', '--categories-file',
|
||||||
|
default=os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'roles', 'categories.yml')),
|
||||||
|
help='Path to roles/categories.yml (default: roles/categories.yml at project root)'
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
'-r', '--roles-dir',
|
||||||
|
default=os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'roles')),
|
||||||
|
help='Path to roles/ directory (default: roles/ at project root)'
|
||||||
|
)
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
try:
|
||||||
|
result = get_all_invokable_apps(
|
||||||
|
categories_file=args.categories_file,
|
||||||
|
roles_dir=args.roles_dir
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
sys.stderr.write(f"Error: {e}\n")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
for app_id in result:
|
||||||
|
print(app_id)
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
74
cli/meta/applications/role_name.py
Normal file
74
cli/meta/applications/role_name.py
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
CLI Script: get_role_folder_cli.py
|
||||||
|
|
||||||
|
This script determines the appropriate Ansible role folder based on the provided application_id
|
||||||
|
by inspecting each role's vars/main.yml within the roles directory. By default, it assumes the
|
||||||
|
roles directory is located at the project root, relative to this script's location.
|
||||||
|
|
||||||
|
"""
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import argparse
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
|
||||||
|
def get_role(application_id, roles_path):
|
||||||
|
"""
|
||||||
|
Find the role directory under `roles_path` whose vars/main.yml contains the specified application_id.
|
||||||
|
|
||||||
|
:param application_id: The application_id to match.
|
||||||
|
:param roles_path: Path to the roles directory.
|
||||||
|
:return: The name of the matching role directory.
|
||||||
|
:raises RuntimeError: If no match is found or if an error occurs while reading files.
|
||||||
|
"""
|
||||||
|
if not os.path.isdir(roles_path):
|
||||||
|
raise RuntimeError(f"Roles path not found: {roles_path}")
|
||||||
|
|
||||||
|
for role in sorted(os.listdir(roles_path)):
|
||||||
|
role_dir = os.path.join(roles_path, role)
|
||||||
|
vars_file = os.path.join(role_dir, 'vars', 'main.yml')
|
||||||
|
if os.path.isfile(vars_file):
|
||||||
|
try:
|
||||||
|
with open(vars_file, 'r') as f:
|
||||||
|
data = yaml.safe_load(f) or {}
|
||||||
|
except Exception as e:
|
||||||
|
raise RuntimeError(f"Failed to load {vars_file}: {e}")
|
||||||
|
|
||||||
|
if data.get('application_id') == application_id:
|
||||||
|
return role
|
||||||
|
|
||||||
|
raise RuntimeError(f"No role found with application_id '{application_id}' in {roles_path}")
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
description='Determine the Ansible role folder by application_id'
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
'application_id',
|
||||||
|
help='The application_id defined in vars/main.yml to search for'
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
'-r', '--roles-path',
|
||||||
|
default=os.path.join(
|
||||||
|
os.path.dirname(os.path.realpath(__file__)),
|
||||||
|
os.pardir, os.pardir, os.pardir,
|
||||||
|
'roles'
|
||||||
|
),
|
||||||
|
help='Path to the roles directory (default: roles/ at project root)'
|
||||||
|
)
|
||||||
|
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
try:
|
||||||
|
folder = get_role(args.application_id, args.roles_path)
|
||||||
|
print(folder)
|
||||||
|
sys.exit(0)
|
||||||
|
except RuntimeError as err:
|
||||||
|
print(f"Error: {err}", file=sys.stderr)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
78
cli/meta/categories/invokable.py
Executable file
78
cli/meta/categories/invokable.py
Executable file
@@ -0,0 +1,78 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
CLI for extracting invokable or non-invokable role paths from a nested roles YAML file using argparse.
|
||||||
|
Assumes a default roles file at the project root if none is provided.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
|
||||||
|
# ─── Determine project root ───
|
||||||
|
if "__file__" in globals():
|
||||||
|
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||||
|
else:
|
||||||
|
project_root = os.getcwd()
|
||||||
|
|
||||||
|
# Ensure project root on PYTHONPATH so 'filter_plugins' can be imported
|
||||||
|
sys.path.insert(0, project_root)
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import yaml
|
||||||
|
from filter_plugins.invokable_paths import get_invokable_paths, get_non_invokable_paths
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
description="Extract invokable or non-invokable role paths from a nested roles YAML file."
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"roles_file",
|
||||||
|
nargs='?',
|
||||||
|
default=None,
|
||||||
|
help="Path to the roles YAML file (default: roles/categories.yml at project root)"
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--suffix", "-s",
|
||||||
|
help="Optional suffix to append to each path.",
|
||||||
|
default=None
|
||||||
|
)
|
||||||
|
|
||||||
|
mode_group = parser.add_mutually_exclusive_group()
|
||||||
|
mode_group.add_argument(
|
||||||
|
"--non-invokable", "-n",
|
||||||
|
action='store_true',
|
||||||
|
help="List paths where 'invokable' is False or not set."
|
||||||
|
)
|
||||||
|
mode_group.add_argument(
|
||||||
|
"--invokable", "-i",
|
||||||
|
action='store_true',
|
||||||
|
help="List paths where 'invokable' is True. (default behavior)"
|
||||||
|
)
|
||||||
|
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
# Default to invokable if neither flag is provided
|
||||||
|
list_non = args.non_invokable
|
||||||
|
list_inv = args.invokable or not (args.non_invokable or args.invokable)
|
||||||
|
|
||||||
|
try:
|
||||||
|
if list_non:
|
||||||
|
paths = get_non_invokable_paths(args.roles_file, args.suffix)
|
||||||
|
else:
|
||||||
|
paths = get_invokable_paths(args.roles_file, args.suffix)
|
||||||
|
except FileNotFoundError as e:
|
||||||
|
print(f"Error: {e}", file=sys.stderr)
|
||||||
|
sys.exit(1)
|
||||||
|
except yaml.YAMLError as e:
|
||||||
|
print(f"Error parsing YAML: {e}", file=sys.stderr)
|
||||||
|
sys.exit(1)
|
||||||
|
except ValueError as e:
|
||||||
|
print(f"Error: {e}", file=sys.stderr)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
for p in paths:
|
||||||
|
print(p)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
0
cli/meta/j2/__init__.py
Normal file
0
cli/meta/j2/__init__.py
Normal file
76
cli/meta/j2/compiler.py
Executable file
76
cli/meta/j2/compiler.py
Executable file
@@ -0,0 +1,76 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
import argparse
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
|
||||||
|
# Projekt-Root: vier Ebenen über diesem File
|
||||||
|
PROJECT_ROOT = os.path.dirname(
|
||||||
|
os.path.dirname(
|
||||||
|
os.path.dirname(
|
||||||
|
os.path.dirname(__file__)
|
||||||
|
)
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
INCLUDE_RE = re.compile(r"^(\s*)\{%\s*include\s*['\"]([^'\"]+)['\"]\s*%\}")
|
||||||
|
|
||||||
|
def expand_includes(rel_path, seen=None):
|
||||||
|
"""
|
||||||
|
Liest die Datei rel_path (relative zum PROJECT_ROOT),
|
||||||
|
ersetzt rekursiv alle "{% include 'path' %}"-Zeilen durch den
|
||||||
|
Inhalt der jeweiligen Datei (mit gleicher Einrückung).
|
||||||
|
"""
|
||||||
|
if seen is None:
|
||||||
|
seen = set()
|
||||||
|
rp = rel_path.replace("\\", "/")
|
||||||
|
if rp in seen:
|
||||||
|
raise RuntimeError(f"Circular include detected: {rp}")
|
||||||
|
seen.add(rp)
|
||||||
|
|
||||||
|
abs_path = os.path.join(PROJECT_ROOT, rp)
|
||||||
|
if not os.path.isfile(abs_path):
|
||||||
|
raise FileNotFoundError(f"Template not found: {rp}")
|
||||||
|
|
||||||
|
output_lines = []
|
||||||
|
for line in open(abs_path, encoding="utf-8"):
|
||||||
|
m = INCLUDE_RE.match(line)
|
||||||
|
if not m:
|
||||||
|
output_lines.append(line.rstrip("\n"))
|
||||||
|
else:
|
||||||
|
indent, inc_rel = m.group(1), m.group(2)
|
||||||
|
# rekursiver Aufruf
|
||||||
|
for inc_line in expand_includes(inc_rel, seen):
|
||||||
|
output_lines.append(indent + inc_line)
|
||||||
|
seen.remove(rp)
|
||||||
|
return output_lines
|
||||||
|
|
||||||
|
def parse_args():
|
||||||
|
p = argparse.ArgumentParser(
|
||||||
|
description="Expand all {% include '...' %} directives in a Jinja2 template (no variable rendering)."
|
||||||
|
)
|
||||||
|
p.add_argument("template", help="Template path relative to project root")
|
||||||
|
p.add_argument(
|
||||||
|
"--out",
|
||||||
|
help="If given, write output to this file instead of stdout",
|
||||||
|
default=None
|
||||||
|
)
|
||||||
|
return p.parse_args()
|
||||||
|
|
||||||
|
def main():
|
||||||
|
args = parse_args()
|
||||||
|
|
||||||
|
try:
|
||||||
|
lines = expand_includes(args.template)
|
||||||
|
text = "\n".join(lines)
|
||||||
|
if args.out:
|
||||||
|
with open(args.out, "w", encoding="utf-8") as f:
|
||||||
|
f.write(text + "\n")
|
||||||
|
else:
|
||||||
|
print(text)
|
||||||
|
except Exception as e:
|
||||||
|
sys.stderr.write(f"Error: {e}\n")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
0
cli/validate/__init__.py
Normal file
0
cli/validate/__init__.py
Normal file
154
cli/validate/inventory.py
Normal file
154
cli/validate/inventory.py
Normal file
@@ -0,0 +1,154 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
import argparse
|
||||||
|
import sys
|
||||||
|
import yaml
|
||||||
|
import re
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
# Ensure imports work when run directly
|
||||||
|
script_dir = Path(__file__).resolve().parent
|
||||||
|
repo_root = script_dir.parent.parent
|
||||||
|
sys.path.insert(0, str(repo_root))
|
||||||
|
|
||||||
|
from cli.meta.applications.all import find_application_ids
|
||||||
|
|
||||||
|
def load_yaml_file(path):
|
||||||
|
try:
|
||||||
|
with open(path, 'r', encoding='utf-8') as f:
|
||||||
|
content = f.read()
|
||||||
|
content = re.sub(r'(?m)^([ \t]*[^\s:]+):\s*!vault[\s\S]+?(?=^\S|\Z)', r"\1: \"<vaulted>\"\n", content)
|
||||||
|
return yaml.safe_load(content)
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Warning: Could not parse {path}: {e}", file=sys.stderr)
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def recursive_keys(d, prefix=''):
|
||||||
|
keys = set()
|
||||||
|
if isinstance(d, dict):
|
||||||
|
for k, v in d.items():
|
||||||
|
full = f"{prefix}.{k}" if prefix else k
|
||||||
|
keys.add(full)
|
||||||
|
keys.update(recursive_keys(v, full))
|
||||||
|
return keys
|
||||||
|
|
||||||
|
|
||||||
|
def compare_application_keys(applications, defaults, source):
|
||||||
|
errs = []
|
||||||
|
for app_id, conf in applications.items():
|
||||||
|
if app_id not in defaults:
|
||||||
|
errs.append(f"{source}: Unknown application '{app_id}' (not in defaults_applications)")
|
||||||
|
continue
|
||||||
|
default = defaults[app_id]
|
||||||
|
app_keys = recursive_keys(conf)
|
||||||
|
def_keys = recursive_keys(default)
|
||||||
|
for key in app_keys:
|
||||||
|
if key.startswith('credentials'):
|
||||||
|
continue
|
||||||
|
if key not in def_keys:
|
||||||
|
errs.append(f"{source}: Missing default for {app_id}: {key}")
|
||||||
|
return errs
|
||||||
|
|
||||||
|
|
||||||
|
def compare_user_keys(users, default_users, source):
|
||||||
|
errs = []
|
||||||
|
for user, conf in users.items():
|
||||||
|
if user not in default_users:
|
||||||
|
print(f"Warning: {source}: Unknown user '{user}' (not in default_users)", file=sys.stderr)
|
||||||
|
continue
|
||||||
|
def_conf = default_users[user]
|
||||||
|
for key in conf:
|
||||||
|
if key in ('password','credentials','mailu_token'):
|
||||||
|
continue
|
||||||
|
if key not in def_conf:
|
||||||
|
errs.append(f"Missing default for user '{user}': key '{key}'")
|
||||||
|
return errs
|
||||||
|
|
||||||
|
|
||||||
|
def load_inventory_files(inv_dir):
|
||||||
|
all_data = {}
|
||||||
|
p = Path(inv_dir)
|
||||||
|
for f in p.glob('*.yml'):
|
||||||
|
data = load_yaml_file(f)
|
||||||
|
if isinstance(data, dict):
|
||||||
|
apps = data.get('applications') or data.get('defaults_applications')
|
||||||
|
if apps:
|
||||||
|
all_data[str(f)] = apps
|
||||||
|
for d in p.glob('*_vars'):
|
||||||
|
if d.is_dir():
|
||||||
|
for f in d.rglob('*.yml'):
|
||||||
|
data = load_yaml_file(f)
|
||||||
|
if isinstance(data, dict):
|
||||||
|
apps = data.get('applications') or data.get('defaults_applications')
|
||||||
|
if apps:
|
||||||
|
all_data[str(f)] = apps
|
||||||
|
return all_data
|
||||||
|
|
||||||
|
|
||||||
|
def validate_host_keys(app_ids, inv_dir):
|
||||||
|
errs = []
|
||||||
|
p = Path(inv_dir)
|
||||||
|
# Scan all top-level YAMLs for 'all.children'
|
||||||
|
for f in p.glob('*.yml'):
|
||||||
|
data = load_yaml_file(f)
|
||||||
|
if not isinstance(data, dict):
|
||||||
|
continue
|
||||||
|
all_node = data.get('all', {})
|
||||||
|
children = all_node.get('children')
|
||||||
|
if not isinstance(children, dict):
|
||||||
|
continue
|
||||||
|
for grp in children.keys():
|
||||||
|
if grp not in app_ids:
|
||||||
|
errs.append(f"{f}: Invalid group '{grp}' (not in application_ids)")
|
||||||
|
return errs
|
||||||
|
|
||||||
|
|
||||||
|
def find_single_file(pattern):
|
||||||
|
c = list(Path('group_vars/all').glob(pattern))
|
||||||
|
if len(c)!=1:
|
||||||
|
raise RuntimeError(f"Expected exactly one {pattern} in group_vars/all, found {len(c)}")
|
||||||
|
return c[0]
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
p = argparse.ArgumentParser()
|
||||||
|
p.add_argument('inventory_dir')
|
||||||
|
args = p.parse_args()
|
||||||
|
# defaults
|
||||||
|
dfile = find_single_file('*_applications.yml')
|
||||||
|
ufile = find_single_file('*users.yml')
|
||||||
|
ddata = load_yaml_file(dfile) or {}
|
||||||
|
udata = load_yaml_file(ufile) or {}
|
||||||
|
defaults = ddata.get('defaults_applications',{})
|
||||||
|
default_users = udata.get('default_users',{})
|
||||||
|
if not defaults:
|
||||||
|
print(f"Error: No 'defaults_applications' found in {dfile}", file=sys.stderr)
|
||||||
|
sys.exit(1)
|
||||||
|
if not default_users:
|
||||||
|
print(f"Error: No 'default_users' found in {ufile}", file=sys.stderr)
|
||||||
|
sys.exit(1)
|
||||||
|
app_errs = []
|
||||||
|
inv_files = load_inventory_files(args.inventory_dir)
|
||||||
|
for src, apps in inv_files.items():
|
||||||
|
app_errs.extend(compare_application_keys(apps, defaults, src))
|
||||||
|
user_errs = []
|
||||||
|
for fpath in Path(args.inventory_dir).rglob('*.yml'):
|
||||||
|
data = load_yaml_file(fpath)
|
||||||
|
if isinstance(data, dict) and 'users' in data:
|
||||||
|
errs = compare_user_keys(data['users'], default_users, str(fpath))
|
||||||
|
for e in errs:
|
||||||
|
print(e, file=sys.stderr)
|
||||||
|
user_errs.extend(errs)
|
||||||
|
host_errs = validate_host_keys(find_application_ids(), args.inventory_dir)
|
||||||
|
app_errs.extend(host_errs)
|
||||||
|
if app_errs or user_errs:
|
||||||
|
if app_errs:
|
||||||
|
print('Validation failed with the following issues:')
|
||||||
|
for e in app_errs:
|
||||||
|
print(f"- {e}")
|
||||||
|
sys.exit(1)
|
||||||
|
print('Inventory directory is valid against defaults and hosts.')
|
||||||
|
sys.exit(0)
|
||||||
|
|
||||||
|
if __name__=='__main__':
|
||||||
|
main()
|
20
cli/vault.py
Normal file
20
cli/vault.py
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import subprocess
|
||||||
|
|
||||||
|
def run_ansible_vault(action, filename, password_file):
|
||||||
|
cmd = ["ansible-vault", action, filename, "--vault-password-file", password_file]
|
||||||
|
subprocess.run(cmd, check=True)
|
||||||
|
|
||||||
|
def main():
|
||||||
|
parser = argparse.ArgumentParser(description="Manage Ansible Vault")
|
||||||
|
parser.add_argument("action", choices=["edit", "decrypt", "encrypt"], help="Vault action")
|
||||||
|
parser.add_argument("filename", help="File to process")
|
||||||
|
parser.add_argument("--password-file", required=True, help="Path to the Vault password file")
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
run_ansible_vault(args.action, args.filename, args.password_file)
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
58
docs/ARCHITECTURE.md
Normal file
58
docs/ARCHITECTURE.md
Normal file
@@ -0,0 +1,58 @@
|
|||||||
|
# Infinito.Nexus Architecture
|
||||||
|
|
||||||
|
## Introduction
|
||||||
|
|
||||||
|
[Infinito.Nexus](https://infinito.nexus) is a modular, open-source IT infrastructure automation platform designed to simplify the deployment, management, and security of self-hosted environments.
|
||||||
|
|
||||||
|
It provides a flexible, scalable, and secure architecture based on modern [DevOps](https://en.wikipedia.org/wiki/DevOps) principles, leveraging technologies like [Ansible](https://en.wikipedia.org/wiki/Ansible_(software)), [Docker](https://en.wikipedia.org/wiki/Docker_(software)), and [Infrastructure as Code (IaC)](https://en.wikipedia.org/wiki/Infrastructure_as_code).
|
||||||
|
|
||||||
|
An additional optional security layer allows full server encryption during installation using [LUKS](https://en.wikipedia.org/wiki/Linux_Unified_Key_Setup) based on this solution:
|
||||||
|
https://github.com/kevinveenbirkenbach/hetzner-arch-luks
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Key Points
|
||||||
|
|
||||||
|
- Modular role-based architecture
|
||||||
|
- Infrastructure-as-Code (IaC)
|
||||||
|
- Docker-based containerization
|
||||||
|
- Centralized Identity & Access Management (IAM)
|
||||||
|
- Security by Design
|
||||||
|
- Integration instead of forced migration
|
||||||
|
- Optional [full disk encryption](https://github.com/kevinveenbirkenbach/hetzner-arch-luks) layer for servers
|
||||||
|
|
||||||
|
## Architecture Layers
|
||||||
|
|
||||||
|
### 1. Automation Layer
|
||||||
|
- Ansible Playbooks & Roles
|
||||||
|
- Git-managed configuration repository
|
||||||
|
- Inventory-driven infrastructure definition
|
||||||
|
|
||||||
|
### 2. Container Orchestration Layer
|
||||||
|
- Docker Compose service deployment
|
||||||
|
- Per-role service templates
|
||||||
|
- Automated health checks & updates
|
||||||
|
|
||||||
|
### 3. Security & Identity Layer
|
||||||
|
- Centralized user management via LDAP
|
||||||
|
- Single Sign-On (SSO) with Keycloak
|
||||||
|
- Secrets management via Ansible Vault
|
||||||
|
|
||||||
|
### 4. Networking Layer
|
||||||
|
- Secure VPN via WireGuard & OpenVPN
|
||||||
|
- Nginx Reverse Proxy with automated TLS via Let's Encrypt
|
||||||
|
- Encrypted server setup using [hetzner-arch-luks](https://github.com/kevinveenbirkenbach/hetzner-arch-luks)
|
||||||
|
|
||||||
|
### 5. Application Layer
|
||||||
|
- Modular application roles (Nextcloud, Gitea, Matrix, etc.)
|
||||||
|
- Dynamic domain configuration
|
||||||
|
- Integration of external/legacy services into the platform
|
||||||
|
|
||||||
|
### 6. Monitoring & Maintenance Layer
|
||||||
|
- System health monitoring (BTRFS, Docker, Nginx)
|
||||||
|
- Automated backup roles (local/remote)
|
||||||
|
- Maintenance automation (cleanup, update, restart tasks)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
> *Infinito.Nexus — Modular. Secure. Automated. Decentralized.*
|
124
docs/Docker.md
Normal file
124
docs/Docker.md
Normal file
@@ -0,0 +1,124 @@
|
|||||||
|
# Docker Build Guide 🚢
|
||||||
|
|
||||||
|
This guide explains how to build the **Infinito.Nexus** Docker image with advanced options to avoid common issues (e.g. mirror timeouts) and control build caching.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1. Enable BuildKit (Optional but Recommended)
|
||||||
|
|
||||||
|
Modern versions of Docker support **BuildKit**, which speeds up build processes and offers better caching.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# On your host, enable BuildKit for the current shell session:
|
||||||
|
export DOCKER_BUILDKIT=1
|
||||||
|
```
|
||||||
|
|
||||||
|
> **Note:** You only need to set this once per terminal session.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2. Build Arguments Explained
|
||||||
|
|
||||||
|
When you encounter errors like:
|
||||||
|
|
||||||
|
```text
|
||||||
|
:: Synchronizing package databases...
|
||||||
|
error: failed retrieving file 'core.db' from geo.mirror.pkgbuild.com : Connection timed out after 10002 milliseconds
|
||||||
|
error: failed to synchronize all databases (failed to retrieve some files)
|
||||||
|
```
|
||||||
|
|
||||||
|
it usually means the default container network cannot reach certain Arch Linux mirrors. To work around this, use:
|
||||||
|
|
||||||
|
* `--network=host`
|
||||||
|
Routes all build-time network traffic through your host’s network stack.
|
||||||
|
|
||||||
|
* `--no-cache`
|
||||||
|
Forces a fresh build of every layer by ignoring Docker’s layer cache. Useful if you suspect stale cache entries.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3. Recommended Build Command
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. (Optional) Enable BuildKit
|
||||||
|
export DOCKER_BUILDKIT=1
|
||||||
|
|
||||||
|
# 2. Build with host networking and no cache
|
||||||
|
docker build \
|
||||||
|
--network=host \
|
||||||
|
--no-cache \
|
||||||
|
-t infinito:latest \
|
||||||
|
.
|
||||||
|
```
|
||||||
|
|
||||||
|
**Flags:**
|
||||||
|
|
||||||
|
* `--network=host`
|
||||||
|
Ensures all `pacman -Syu` and other network calls hit your host network directly—eliminating mirror connection timeouts.
|
||||||
|
|
||||||
|
* `--no-cache`
|
||||||
|
Guarantees that changes to package lists or dependencies are picked up immediately by rebuilding every layer.
|
||||||
|
|
||||||
|
* `-t infinito:latest`
|
||||||
|
Tags the resulting image as `infinito:latest`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4. Running the Container
|
||||||
|
|
||||||
|
Once built, you can run Infinito.Nexus as usual:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker run --rm -it \
|
||||||
|
-v "$(pwd)":/opt/infinito \
|
||||||
|
-w /opt/infinito \
|
||||||
|
infinito:latest --help
|
||||||
|
```
|
||||||
|
|
||||||
|
Mount any host directory into `/opt/infinito/logs` to persist logs across runs.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5. Further Troubleshooting
|
||||||
|
|
||||||
|
* **Mirror selection:** If you still see slow or unreachable mirrors, consider customizing `/etc/pacman.d/mirrorlist` in a local Docker stage or on your host to prioritize faster mirrors.
|
||||||
|
|
||||||
|
* **Firewall or VPN:** Ensure your host’s firewall or VPN allows outgoing connections on port 443/80 to Arch mirror servers.
|
||||||
|
|
||||||
|
* **Docker daemon config:** On some networks, you may need to configure Docker’s daemon proxy settings under `/etc/docker/daemon.json`.
|
||||||
|
|
||||||
|
## 6. Live Development via Volume Mount
|
||||||
|
|
||||||
|
The Infinito.Nexus installation inside the container always resides at:
|
||||||
|
|
||||||
|
```
|
||||||
|
/root/Repositories/github.com/kevinveenbirkenbach/infinito
|
||||||
|
```
|
||||||
|
|
||||||
|
To apply code changes without rebuilding the image, mount your local installation directory into that static path:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Determine the Infinito.Nexus install path on your host
|
||||||
|
INFINITO_PATH=$(pkgmgr path infinito)
|
||||||
|
|
||||||
|
# 2. Launch the container with a bind mount:
|
||||||
|
docker run --rm -it \
|
||||||
|
-v "${INFINITO_PATH}:/root/Repositories/github.com/kevinveenbirkenbach/infinito" \
|
||||||
|
-w "/root/Repositories/github.com/kevinveenbirkenbach/infinito" \
|
||||||
|
infinito:latest make build
|
||||||
|
```
|
||||||
|
|
||||||
|
Or, to test the CLI help interactively:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker run --rm -it \
|
||||||
|
-v "${INFINITO_PATH}:/root/Repositories/github.com/kevinveenbirkenbach/infinito" \
|
||||||
|
-w "/root/Repositories/github.com/kevinveenbirkenbach/infinito" \
|
||||||
|
infinito:latest --help
|
||||||
|
```
|
||||||
|
|
||||||
|
Any edits you make in `${INFINITO_PATH}` on your host are immediately reflected inside the container, eliminating the need for repeated `docker build` cycles.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
With these options, your Docker builds should complete reliably, even in restrictive network environments. Happy building! 🚀
|
2
docs/TODO.md
Normal file
2
docs/TODO.md
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
# TODO
|
||||||
|
- Move these files to https://hub.cymais.cloud
|
38
docs/guides/administrator/Configuration.md
Normal file
38
docs/guides/administrator/Configuration.md
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
# Configuration
|
||||||
|
|
||||||
|
## Ansible Vault Basics
|
||||||
|
|
||||||
|
Infinito.Nexus uses Ansible Vault to protect sensitive data (e.g. passwords). Use these common commands:
|
||||||
|
|
||||||
|
### Edit an Encrypted File
|
||||||
|
```bash
|
||||||
|
ansible-vault edit <filename.yml> --vault-password-file <your-vault-pass-file>
|
||||||
|
```
|
||||||
|
|
||||||
|
### Decrypt a File
|
||||||
|
```bash
|
||||||
|
ansible-vault decrypt <filename.yml> --vault-password-file <your-vault-pass-file>
|
||||||
|
```
|
||||||
|
|
||||||
|
### Encrypt a File
|
||||||
|
```bash
|
||||||
|
ansible-vault encrypt <filename.yml> --vault-password-file <your-vault-pass-file>
|
||||||
|
```
|
||||||
|
|
||||||
|
### Encrypt a String
|
||||||
|
```bash
|
||||||
|
ansible-vault encrypt_string --vault-password-file <your-vault-pass-file> 'example' --name 'test'
|
||||||
|
```
|
||||||
|
|
||||||
|
## Password Generation
|
||||||
|
|
||||||
|
You can generate a secure random password and encrypt it with Ansible Vault. For example:
|
||||||
|
```bash
|
||||||
|
ansible-vault encrypt_string "$(cat /dev/urandom | tr -dc 'A-Za-z0-9' | head -c 32)" --vault-password-file /path/to/your/vault_pass.txt | xclip -selection clipboard
|
||||||
|
```
|
||||||
|
This command generates a 32-character alphanumeric password, encrypts it, and copies the result to your clipboard.
|
||||||
|
|
||||||
|
## Final Notes
|
||||||
|
|
||||||
|
- **Customizing Paths and Variables:**
|
||||||
|
All file paths and configuration variables are defined in group variables (e.g., `group_vars/all/*.yml`) and role variable files. Adjust these to suit your deployment environment.
|
100
docs/guides/administrator/Deploy.md
Normal file
100
docs/guides/administrator/Deploy.md
Normal file
@@ -0,0 +1,100 @@
|
|||||||
|
# 🚀 Deployment Guide
|
||||||
|
|
||||||
|
This section explains how to deploy and manage **[Infinito.Nexus](https://infinito.nexus)** using Ansible. Infinito.Nexus uses a collection of Ansible tasks, which are controlled via different **"modes"** — such as **updates**, **backups**, **resets**, and **cleanup** operations.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## ✅ Prerequisites
|
||||||
|
|
||||||
|
Before deploying, ensure the following are in place:
|
||||||
|
|
||||||
|
- **🧭 Inventory File:** A valid Ansible inventory file that defines your target systems (servers, personal computers, etc.). Adjust example paths to your environment.
|
||||||
|
- **📦 Infinito.Nexus Installed:** Install via [Kevin's Package-Manager](https://github.com/kevinveenbirkenbach/package-manager).
|
||||||
|
- **🔐 (Optional) Vault Password File:** If you don't want to enter your vault password interactively, create a password file.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 📘 Show Infinito.Nexus Help
|
||||||
|
|
||||||
|
To get a full overview of available options and usage instructions, run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
infinito --help
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 💡 Example Deploy Command
|
||||||
|
|
||||||
|
To deploy Infinito.Nexus on a personal computer (e.g., a laptop), you can run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
infinito playbook \
|
||||||
|
--limit hp-spectre-x360 \
|
||||||
|
--host-type personal-computer \
|
||||||
|
--update \
|
||||||
|
--password-file ~/Repositories/git.veen.world/kevinveenbirkenbach/computer-inventory/.pass/general.txt \
|
||||||
|
~/Repositories/git.veen.world/kevinveenbirkenbach/computer-inventory/pcs.yml
|
||||||
|
```
|
||||||
|
|
||||||
|
### 🧠 What does this command do?
|
||||||
|
|
||||||
|
| Parameter | Description |
|
||||||
|
|----------|-------------|
|
||||||
|
| `playbook` | Executes the playbook subcommand of Infinito.Nexus. |
|
||||||
|
| `--limit hp-spectre-x360` | Limits execution to a specific host (`hp-spectre-x360`). |
|
||||||
|
| `--host-type personal-computer` | Defines the host type. Default is `server`; here it is set to `personal-computer`. |
|
||||||
|
| `--update` | Enables update mode to apply software or configuration updates. |
|
||||||
|
| `--password-file` | Specifies the vault password file path for decrypting sensitive values. |
|
||||||
|
| `pcs.yml` | The path to the inventory file containing host definitions. |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 🔐 Using a Vault Password File
|
||||||
|
|
||||||
|
To avoid typing your vault password interactively, you can provide a file:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
--password-file /path/to/your/vault_pass.txt
|
||||||
|
```
|
||||||
|
|
||||||
|
> ⚠️ **Security Tip:** Ensure the password file is properly protected (e.g., `chmod 600 vault_pass.txt`).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 🔍 Full Command-Line Reference
|
||||||
|
|
||||||
|
Here’s a breakdown of all available parameters from `infinito playbook --help`:
|
||||||
|
|
||||||
|
| Argument | Description |
|
||||||
|
|----------|-------------|
|
||||||
|
| `inventory` *(positional)* | Path to the Ansible inventory file. |
|
||||||
|
| `--limit <HOST>` | Run the playbook only on the specified host. |
|
||||||
|
| `--host-type {server, personal-computer}` | Define the target system type (default is `server`). |
|
||||||
|
| `--reset` | Enables reset mode (restores or resets specific configurations). |
|
||||||
|
| `--test` | Enables test mode (dry-run style). No actual changes are applied. |
|
||||||
|
| `--update` | Enables update mode to upgrade packages or configs. |
|
||||||
|
| `--backup` | Triggers backup routines for data or configurations. |
|
||||||
|
| `--cleanup` | Cleans up temporary files, old data, etc. |
|
||||||
|
| `--debug` | Enables debug logging in the playbook. |
|
||||||
|
| `--password-file <PATH>` | Uses a vault password file instead of interactive prompt. |
|
||||||
|
| `-v, -vv, -vvv` | Increases output verbosity. More `v`s = more detail. |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 🔧 Combine Multiple Modes
|
||||||
|
|
||||||
|
You can mix and match modes like this:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
infinito playbook --update --backup --cleanup pcs.yml
|
||||||
|
```
|
||||||
|
|
||||||
|
This will update the system, create a backup, and clean up unnecessary files in one run.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 📝 Footnote
|
||||||
|
|
||||||
|
> 📄 *This documentation page was generated with the help of AI.*
|
||||||
|
> 🤖 [View the original conversation (ChatGPT)](https://chatgpt.com/share/67ecfe25-3fb8-800f-923d-8cd3fc4efd2f)
|
22
docs/guides/administrator/README.md
Normal file
22
docs/guides/administrator/README.md
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
# Administrator Guide
|
||||||
|
|
||||||
|
This guide is for **system administrators** who are deploying and managing Infinito.Nexus infrastructure.
|
||||||
|
|
||||||
|
## Setting Up Infinito.Nexus 🏗️
|
||||||
|
Follow these guides to install and configure Infinito.Nexus:
|
||||||
|
- [Setup Guide](Setup_Guide.md)
|
||||||
|
- [Configuration Guide](Configuration.md)
|
||||||
|
- [Deployment Guide](Deploy.md)
|
||||||
|
|
||||||
|
## Key Responsibilities 🔧
|
||||||
|
- **User Management** - Configure LDAP, Keycloak, and user permissions.
|
||||||
|
- **Security & Backups** - Set up `sys-bkp-rmt-2-loc`, `svc-bkp-loc-2-usb`, and `core-security` roles.
|
||||||
|
- **Application Hosting** - Deploy services like `Nextcloud`, `Matrix`, `Gitea`, and more.
|
||||||
|
- **Networking & VPN** - Configure `WireGuard`, `OpenVPN`, and `Nginx Reverse Proxy`.
|
||||||
|
|
||||||
|
## Managing & Updating Infinito.Nexus 🔄
|
||||||
|
- Regularly update services using `update-pacman`, or `update-apt`.
|
||||||
|
- Monitor system health with `sys-ctl-hlth-btrfs`, `sys-ctl-hlth-webserver`, and `sys-ctl-hlth-docker-container`.
|
||||||
|
- Automate system maintenance with `sys-lock`, `sys-ctl-cln-bkps`, and `sys-ctl-rpr-docker-hard`.
|
||||||
|
|
||||||
|
For more details, refer to the specific guides above.
|
29
docs/guides/administrator/Security_Guidelines.md
Normal file
29
docs/guides/administrator/Security_Guidelines.md
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
# Security Guidelines
|
||||||
|
|
||||||
|
Infinito.Nexus is designed with security in mind. However, while following our guidelines can greatly improve your system’s security, no IT system can be 100% secure. Please report any vulnerabilities as soon as possible.
|
||||||
|
|
||||||
|
In addition to the user security guidelines, administrators have additional responsibilities to secure the entire system:
|
||||||
|
|
||||||
|
- **Deploy on an Encrypted Server**
|
||||||
|
It is recommended to install Infinito.Nexus on an encrypted server to prevent hosting providers from accessing end-user data. For a practical guide on setting up an encrypted server, refer to the [Hetzner Arch LUKS repository](https://github.com/kevinveenbirkenbach/hetzner-arch-luks) 🔐. (Learn more about [disk encryption](https://en.wikipedia.org/wiki/Disk_encryption) on Wikipedia.)
|
||||||
|
|
||||||
|
- **Centralized User Management & SSO**
|
||||||
|
For robust authentication and central user management, set up Infinito.Nexus using Keycloak and LDAP.
|
||||||
|
  This configuration enables centralized [Single Sign-On (SSO)](https://en.wikipedia.org/wiki/Single_sign-on), simplifying user management and boosting security.
|
||||||
|
|
||||||
|
- **Enforce 2FA and Use a Password Manager**
|
||||||
|
Administrators should also enforce [2FA](https://en.wikipedia.org/wiki/Multi-factor_authentication) and use a password manager with auto-generated passwords. We again recommend [KeePass](https://keepass.info/). The KeePass database can be stored securely in your Nextcloud instance and synchronized between devices.
|
||||||
|
|
||||||
|
- **Avoid Root Logins & Plaintext Passwords**
|
||||||
|
Infinito.Nexus forbids logging in via the root user or using simple passwords. Instead, an SSH key must be generated and transferred during system initialization. When executing commands as root, always use `sudo` (or, if necessary, `sudo su`—but only if you understand the risks). (More information on [SSH](https://en.wikipedia.org/wiki/Secure_Shell) and [sudo](https://en.wikipedia.org/wiki/Sudo) is available on Wikipedia.)
|
||||||
|
|
||||||
|
- **Manage Inventories Securely**
|
||||||
|
Your inventories for running Infinito.Nexus should be managed in a separate repository and secured with tools such as [Ansible Vault](https://en.wikipedia.org/wiki/Encryption) 🔒. Sensitive credentials must never be stored in plaintext; use a password file to secure these details.
|
||||||
|
|
||||||
|
- **Reporting Vulnerabilities**
|
||||||
|
If you discover a security vulnerability in Infinito.Nexus, please report it immediately. We encourage proactive vulnerability reporting so that issues can be addressed as quickly as possible. Contact our security team at [security@infinito.nexus](mailto:security@infinito.nexus)
|
||||||
|
**DO NOT OPEN AN ISSUE.**
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
By following these guidelines, both end users and administrators can achieve a high degree of security. Stay vigilant, keep your systems updated, and report any suspicious activity. Remember: while we strive for maximum security, no system is completely infallible.
|
26
docs/guides/administrator/Setup_Guide.md
Normal file
26
docs/guides/administrator/Setup_Guide.md
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
# Setup Guide
|
||||||
|
|
||||||
|
To set up Infinito.Nexus, follow these steps:
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
Before you set up Infinito.Nexus, you need to install [Kevin's Package Manager](https://github.com/kevinveenbirkenbach/package-manager).
|
||||||
|
Follow the installation instructions described [here](https://github.com/kevinveenbirkenbach/package-manager).
|
||||||
|
|
||||||
|
## Setup Infinito.Nexus
|
||||||
|
|
||||||
|
To set up Infinito.Nexus, execute:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pkgmgr install infinito
|
||||||
|
```
|
||||||
|
|
||||||
|
This command will set up Infinito.Nexus on your system with the alias **infinito**.
|
||||||
|
|
||||||
|
## Get Help
|
||||||
|
|
||||||
|
After you have set up Infinito.Nexus, you can get more help by executing:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
infinito --help
|
||||||
|
```
|
111
docs/guides/developer/Ansible_Directory_Guide.md
Normal file
111
docs/guides/developer/Ansible_Directory_Guide.md
Normal file
@@ -0,0 +1,111 @@
|
|||||||
|
## 📖 Infinito.Nexus Ansible & Python Directory Guide
|
||||||
|
|
||||||
|
This document provides a **decision matrix** for when to use each default Ansible plugin and module directory in the context of **Infinito.Nexus development** with Ansible and Python. It links to official docs, explains use-cases, and points back to our conversation.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 🔗 Links & References
|
||||||
|
|
||||||
|
* Official Ansible Plugin Guide: [https://docs.ansible.com/ansible/latest/dev\_guide/developing\_plugins.html](https://docs.ansible.com/ansible/latest/dev_guide/developing_plugins.html)
|
||||||
|
* Official Ansible Module Guide: [https://docs.ansible.com/ansible/latest/dev\_guide/developing\_modules.html](https://docs.ansible.com/ansible/latest/dev_guide/developing_modules.html)
|
||||||
|
* This conversation: [Link to this conversation](https://chat.openai.com/)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 🛠️ Repo Layout & Default Directories
|
||||||
|
|
||||||
|
```plaintext
|
||||||
|
ansible-repo/
|
||||||
|
├── library/ # 📦 Custom Ansible modules
|
||||||
|
├── filter_plugins/ # 🔍 Custom Jinja2 filters
|
||||||
|
├── lookup_plugins/ # 👉 Custom lookup plugins
|
||||||
|
├── module_utils/ # 🛠️ Shared Python helpers for modules
|
||||||
|
├── action_plugins/ # ⚙️ Task-level orchestration logic
|
||||||
|
├── callback_plugins/ # 📣 Event callbacks (logging, notifications)
|
||||||
|
├── inventory_plugins/ # 🌐 Dynamic inventory sources
|
||||||
|
├── strategy_plugins/ # 🧠 Task execution strategies
|
||||||
|
└── ... # Other plugin dirs (connection, cache, etc.)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 🎯 Decision Matrix: Which Folder for What?
|
||||||
|
|
||||||
|
| Folder | Type | Use-Case | Example (Infinito.Nexus) | Emoji |
|
||||||
|
| -------------------- | -------------------- | ---------------------------------------- | ----------------------------------------------------- | ----- |
|
||||||
|
| `library/` | **Module** | Write idempotent actions | `cloud_network.py`: manage VPCs, subnets | 📦 |
|
||||||
|
| `filter_plugins/` | **Filter plugin** | Jinja2 data transforms in templates/vars | `to_camel_case.py`: convert keys for API calls | 🔍 |
|
||||||
|
| `lookup_plugins/` | **Lookup plugin** | Fetch external/secure data at runtime | `vault_lookup.py`: pull secrets from Infinito.Nexus Vault | 👉 |
|
||||||
|
| `module_utils/` | **Utility library** | Shared Python code for modules | `infinito_client.py`: common API client base class | 🛠️ |
|
||||||
|
| `action_plugins/` | **Action plugin** | Complex task orchestration wrappers | `deploy_stack.py`: sequence Terraform + Ansible steps | ⚙️ |
|
||||||
|
| `callback_plugins/` | **Callback plugin** | Customize log/report behavior | `notify_slack.py`: send playbook status to Slack | 📣 |
|
||||||
|
| `inventory_plugins/` | **Inventory plugin** | Dynamic host/group sources | `azure_inventory.py`: list hosts from Azure tags | 🌐 |
|
||||||
|
| `strategy_plugins/` | **Strategy plugin** | Control task execution order/parallelism | `rolling_batch.py`: phased rollout of VMs | 🧠 |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 📝 Detailed Guidance
|
||||||
|
|
||||||
|
1. **library/** 📦
|
||||||
|
|
||||||
|
* **When?** Implement **one-off, idempotent actions** (create/delete cloud resources).
|
||||||
|
* **Why?** Modules under `library/` are first in search path for `ansible` modules.
|
||||||
|
* **Docs:** [https://docs.ansible.com/ansible/latest/dev\_guide/developing\_modules.html](https://docs.ansible.com/ansible/latest/dev_guide/developing_modules.html)
|
||||||
|
|
||||||
|
2. **filter\_plugins/** 🔍
|
||||||
|
|
||||||
|
* **When?** You need **data manipulation** (lists, strings, dicts) inside Jinja2.
|
||||||
|
* **Why?** Extends `|` filters in templates and variable declarations.
|
||||||
|
* **Docs:** [https://docs.ansible.com/ansible/latest/dev\_guide/developing\_plugins.html#filter-plugins](https://docs.ansible.com/ansible/latest/dev_guide/developing_plugins.html#filter-plugins)
|
||||||
|
|
||||||
|
3. **lookup\_plugins/** 👉
|
||||||
|
|
||||||
|
* **When?** You must **retrieve secret/external data** during playbook compile/runtime.
|
||||||
|
* **Why?** Lookup plugins run before tasks, enabling dynamic variable resolution.
|
||||||
|
* **Docs:** [https://docs.ansible.com/ansible/latest/dev\_guide/developing\_plugins.html#lookup-plugins](https://docs.ansible.com/ansible/latest/dev_guide/developing_plugins.html#lookup-plugins)
|
||||||
|
|
||||||
|
4. **module\_utils/** 🛠️
|
||||||
|
|
||||||
|
* **When?** Multiple modules share **common Python code** (HTTP clients, validation).
|
||||||
|
* **Why?** Avoid code duplication; modules import these utilities.
|
||||||
|
* **Docs:** [https://docs.ansible.com/ansible/latest/dev\_guide/developing\_modules.html#module-utils](https://docs.ansible.com/ansible/latest/dev_guide/developing_modules.html#module-utils)
|
||||||
|
|
||||||
|
5. **action\_plugins/** ⚙️
|
||||||
|
|
||||||
|
* **When?** You need to **wrap or extend** module behavior at task invocation time.
|
||||||
|
* **Why?** Provides hooks before/after module execution.
|
||||||
|
* **Docs:** [https://docs.ansible.com/ansible/latest/dev\_guide/developing\_plugins.html#action-plugins](https://docs.ansible.com/ansible/latest/dev_guide/developing_plugins.html#action-plugins)
|
||||||
|
|
||||||
|
6. **callback\_plugins/** 📣
|
||||||
|
|
||||||
|
* **When?** You want **custom event handlers** (logging, progress, notifications).
|
||||||
|
* **Why?** Receive play/task events for custom output.
|
||||||
|
* **Docs:** [https://docs.ansible.com/ansible/latest/dev\_guide/developing\_plugins.html#callback-plugins](https://docs.ansible.com/ansible/latest/dev_guide/developing_plugins.html#callback-plugins)
|
||||||
|
|
||||||
|
7. **inventory\_plugins/** 🌐
|
||||||
|
|
||||||
|
* **When?** Hosts/groups come from **dynamic sources** (cloud APIs, databases).
|
||||||
|
* **Why?** Replace static `inventory.ini` with code-driven inventories.
|
||||||
|
* **Docs:** [https://docs.ansible.com/ansible/latest/dev\_guide/developing\_plugins.html#inventory-plugins](https://docs.ansible.com/ansible/latest/dev_guide/developing_plugins.html#inventory-plugins)
|
||||||
|
|
||||||
|
8. **strategy\_plugins/** 🧠
|
||||||
|
|
||||||
|
* **When?** You need to **customize execution strategy** (parallelism, ordering).
|
||||||
|
* **Why?** Override default `linear` strategy (e.g., `free`, custom batches).
|
||||||
|
* **Docs:** [https://docs.ansible.com/ansible/latest/dev\_guide/developing\_plugins.html#strategy-plugins](https://docs.ansible.com/ansible/latest/dev_guide/developing_plugins.html#strategy-plugins)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 🚀 Infinito.Nexus Best Practices
|
||||||
|
|
||||||
|
* **Organize modules** by service under `library/cloud/` (e.g., `vm`, `network`, `storage`).
|
||||||
|
* **Shared client code** in `module_utils/infinito/` for authentication, request handling.
|
||||||
|
* **Secrets lookup** via `lookup_plugins/vault_lookup.py` pointing to Infinito.Nexus Vault.
|
||||||
|
* **Filters** to normalize data formats from cloud APIs (e.g., `snake_to_camel`).
|
||||||
|
* **Callbacks** to stream playbook results into Infinito.Nexus Monitoring.
|
||||||
|
|
||||||
|
Use this matrix as your **single source of truth** when extending Ansible for Infinito.Nexus! 👍
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
This matrix was created with the help of ChatGPT 🤖—see our conversation [here](https://chatgpt.com/canvas/shared/682b1a62d6dc819184ecdc696c51290a).
|
53
docs/guides/developer/index.rst
Normal file
53
docs/guides/developer/index.rst
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
Developer Guide
|
||||||
|
===============
|
||||||
|
|
||||||
|
Welcome to the **Infinito.Nexus Developer Guide**! This guide provides essential information for developers who want to contribute to the Infinito.Nexus open-source project.
|
||||||
|
|
||||||
|
Explore Infinito.Nexus Solutions
|
||||||
|
--------------------------------
|
||||||
|
Infinito.Nexus offers various solutions for IT infrastructure automation. Learn more about the available applications:
|
||||||
|
|
||||||
|
- :doc:`../../../roles/application_glosar`
|
||||||
|
- :doc:`../../../roles/application_categories`
|
||||||
|
|
||||||
|
For Developers
|
||||||
|
--------------
|
||||||
|
|
||||||
|
Understanding Ansible Roles
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Infinito.Nexus is powered by **Ansible** roles to automate deployments. Developers can explore the technical details of our roles here:
|
||||||
|
|
||||||
|
- :doc:`../../../roles/ansible_role_glosar`
|
||||||
|
|
||||||
|
Contributing to Infinito.Nexus
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Want to contribute to the project or explore the source code? Check out our **GitHub repository**:
|
||||||
|
|
||||||
|
- `Infinito.Nexus GitHub Repository <https://s.infinito.nexus/code/tree/master/roles>`_
|
||||||
|
|
||||||
|
Contribution Guidelines
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
1. **Fork the Repository** – Start by forking the Infinito.Nexus repository.
|
||||||
|
2. **Create a New Branch** – Make changes in a dedicated branch.
|
||||||
|
3. **Follow Coding Standards** – Ensure your code is well-documented and follows best practices.
|
||||||
|
4. **Submit a Pull Request** – Once your changes are tested, submit a PR for review.
|
||||||
|
|
||||||
|
For detailed guidelines, refer to:
|
||||||
|
|
||||||
|
- :doc:`../../../CONTRIBUTING`
|
||||||
|
- :doc:`../../../CODE_OF_CONDUCT`
|
||||||
|
|
||||||
|
Community & Support
|
||||||
|
-------------------
|
||||||
|
If you have questions or need help, visit the **Infinito.Nexus Information Hub**:
|
||||||
|
|
||||||
|
- `hub.infinito.nexus <https://hub.infinito.nexus>`_
|
||||||
|
|
||||||
|
This is the best place to ask questions, get support, and collaborate with other contributors.
|
||||||
|
|
||||||
|
Stay connected, collaborate, and help improve Infinito.Nexus together!
|
||||||
|
|
||||||
|
Happy coding! 🚀
|
17
docs/guides/enterprise/README.md
Normal file
17
docs/guides/enterprise/README.md
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
# Enterprise Guide
|
||||||
|
|
||||||
|
Are you looking for a **reliable IT infrastructure** for your business or organization? **Infinito.Nexus** is here to help!
|
||||||
|
|
||||||
|
## Who Can Benefit? 🎯
|
||||||
|
✅ **Small & Medium Businesses** - IT infrastructure that includes everything you need, e.g. data clouds, mail servers, VPNs, homepages, documentation tools, etc.
|
||||||
|
✅ **Enterprises** - Scale the Small & Medium Business solutions up to an unlimited number of users
|
||||||
|
✅ **NGOs & Organizations** - Secure, cost-effective infrastructure solutions built on an open-source base
|
||||||
|
✅ **Journalists & Content Creators** - Host your content on your own servers, share it via the Fediverse, and avoid censorship
|
||||||
|
|
||||||
|
## Why Choose Infinito.Nexus? 🚀
|
||||||
|
- **Fast Deployment** - Get your IT setup running in minutes
|
||||||
|
- **Security First** - Encrypted backups, 2FA, and secure logins
|
||||||
|
- **Scalable & Customizable** - Adapts to your specific needs
|
||||||
|
- **Cost-Effective** - Open-source, no licensing fees
|
||||||
|
|
||||||
|
For enterprise solutions, check [Enterprise Solutions](10_ENTERPRISE_SOLUTIONS.md) or contact [Kevin Veen-Birkenbach](mailto:kevin@veen.world).
|
66
docs/guides/user/README.md
Normal file
66
docs/guides/user/README.md
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
# User Guide
|
||||||
|
|
||||||
|
Welcome to **Infinito.Nexus**! This guide is designed for **end-users** who want to use cloud services, email, and collaboration tools securely and efficiently. Whether you're an **enterprise user** or an **individual**, Infinito.Nexus provides a wide range of services tailored to your needs.
|
||||||
|
|
||||||
|
## What Can Infinito.Nexus Do for You? 💡
|
||||||
|
Infinito.Nexus enables you to securely and efficiently use a variety of **cloud-based applications**, including:
|
||||||
|
|
||||||
|
### 📂 Cloud Storage & File Sharing
|
||||||
|
- **Nextcloud** – Securely store, sync, and share files across devices.
|
||||||
|
- **OnlyOffice** – Work on documents, spreadsheets, and presentations directly within Nextcloud.
|
||||||
|
- **LibreOffice** – A powerful office suite alternative to Microsoft Office.
|
||||||
|
|
||||||
|
### 💬 Secure Communication & Collaboration
|
||||||
|
- **Matrix (Element)** – Encrypted messaging for teams and individuals.
|
||||||
|
- **XMPP** – Secure instant messaging with various supported clients.
|
||||||
|
- **Mailu** – A private, self-hosted email solution.
|
||||||
|
- **Etherpad** – Real-time collaborative document editing.
|
||||||
|
- **BigBlueButton** – Web conferencing with screen sharing and presentations.
|
||||||
|
- **Jitsi** – Secure video conferencing without account requirements.
|
||||||
|
|
||||||
|
### 🎵 Social Media & Content Sharing
|
||||||
|
- **Mastodon** – Decentralized microblogging platform (alternative to Twitter/X).
|
||||||
|
- **Pixelfed** – Decentralized image sharing (alternative to Instagram).
|
||||||
|
- **Friendica** – Social network supporting federation with Mastodon and others.
|
||||||
|
- **Peertube** – Decentralized video streaming platform (alternative to YouTube).
|
||||||
|
- **Funkwhale** – Self-hosted music streaming for individuals and communities.
|
||||||
|
|
||||||
|
### 🎮 Entertainment & Media
|
||||||
|
- **Jellyfin** – Open-source media server for movies, TV, and music.
|
||||||
|
- **Kodi** – Media center application with extensive plugin support.
|
||||||
|
- **qBittorrent** – Open-source torrent client with secure remote access.
|
||||||
|
|
||||||
|
### 🔒 Privacy & Security
|
||||||
|
- **WireGuard** – Secure and fast VPN solution.
|
||||||
|
- **Tor Browser** – Browse the web anonymously and bypass censorship.
|
||||||
|
- **Bitwarden** – Open-source password manager for secure credential storage.
|
||||||
|
- **2FA Authentication** – Securely log in to your services with Two-Factor Authentication.
|
||||||
|
|
||||||
|
### 🔧 Developer & Productivity Tools
|
||||||
|
- **Gitea** – Self-hosted Git repository management (alternative to GitHub/GitLab).
|
||||||
|
- **Jenkins** – Automate software development pipelines.
|
||||||
|
- **Discourse** – Community discussion forums for support and engagement.
|
||||||
|
- **MediaWiki** – Create and manage knowledge bases and wikis.
|
||||||
|
|
||||||
|
## 🏢 Enterprise Users
|
||||||
|
### How to Get Started 🏁
|
||||||
|
If your organization provides Infinito.Nexus services, follow these steps:
|
||||||
|
- Your **administrator** will provide login credentials.
|
||||||
|
- Access **cloud services** via a web browser or mobile apps.
|
||||||
|
- For support, contact your **system administrator**.
|
||||||
|
|
||||||
|
## 🏠 Private Users
|
||||||
|
### How to Get Started 🏁
|
||||||
|
If you're an **individual user**, you can sign up for Infinito.Nexus services:
|
||||||
|
- **Register an account** at [infinito.nexus](https://infinito.nexus).
|
||||||
|
- Choose the applications and services you need.
|
||||||
|
- Follow the setup guide and start using Infinito.Nexus services immediately.
|
||||||
|
|
||||||
|
## 📚 Learn More
|
||||||
|
Discover more about Infinito.Nexus applications:
|
||||||
|
- :doc:`roles/application_glosar`
|
||||||
|
- :doc:`roles/application_categories`
|
||||||
|
|
||||||
|
For further information, visit our **[Information Hub](https://hub.infinito.nexus)** for tutorials, FAQs, and community support.
|
||||||
|
|
||||||
|
You can also register for updates and support from our community.
|
23
docs/guides/user/Security_Guidelines.md
Normal file
23
docs/guides/user/Security_Guidelines.md
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
# Security Guidelines
|
||||||
|
|
||||||
|
Infinito.Nexus is designed with security in mind. However, while following our guidelines can greatly improve your system’s security, no IT system can be 100% secure. Please report any vulnerabilities as soon as possible.
|
||||||
|
|
||||||
|
For optimal personal security, we **strongly recommend** the following:
|
||||||
|
|
||||||
|
- **Use a Password Manager**
|
||||||
|
Use a reliable password manager such as [KeePass](https://keepass.info/) 🔐. (Learn more about [password managers](https://en.wikipedia.org/wiki/Password_manager) on Wikipedia.) KeePass is available for both smartphones and PCs, and it can automatically generate strong, random passwords.
|
||||||
|
|
||||||
|
- **Enable Two-Factor Authentication (2FA)**
|
||||||
|
Always enable 2FA whenever possible. Many password managers (like KeePass) can generate [TOTP](https://en.wikipedia.org/wiki/Time-based_One-Time_Password) tokens, adding an extra layer of security even if your password is compromised.
|
||||||
|
Synchronize your password database across devices using the [Nextcloud Client](https://nextcloud.com/) 📱💻.
|
||||||
|
|
||||||
|
- **Use Encrypted Systems**
|
||||||
|
We recommend running Infinito.Nexus only on systems with full disk encryption. For example, Linux distributions such as [Manjaro](https://manjaro.org/) (based on ArchLinux) with desktop environments like [GNOME](https://en.wikipedia.org/wiki/GNOME) provide excellent security. (Learn more about [disk encryption](https://en.wikipedia.org/wiki/Disk_encryption) on Wikipedia.)
|
||||||
|
|
||||||
|
- **Beware of Phishing and Social Engineering**
|
||||||
|
Always verify email senders, avoid clicking on unknown links, and never share your passwords or 2FA codes with anyone. (Learn more about [Phishing](https://en.wikipedia.org/wiki/Phishing) and [Social Engineering](https://en.wikipedia.org/wiki/Social_engineering_(security)) on Wikipedia.)
|
||||||
|
|
||||||
|
Following these guidelines will significantly enhance your personal security—but remember, no system is completely immune to risk.
|
||||||
|
|
||||||
|
A tutorial on how to set up secure password management can be found [here](https://blog.veen.world/blog/2025/04/04/%f0%9f%9b%a1%ef%b8%8f-keepassxc-infinito-cloud-the-ultimate-guide-to-cross-device-password-security/)
|
||||||
|
---
|
27
filter_plugins/README.md
Normal file
27
filter_plugins/README.md
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
# Custom Filter Plugins for Infinito.Nexus
|
||||||
|
|
||||||
|
This directory contains custom **Ansible filter plugins** used within the Infinito.Nexus project.
|
||||||
|
|
||||||
|
## When to Use a Filter Plugin
|
||||||
|
|
||||||
|
- **Transform values:** Use filters to transform, extract, reformat, or compute values from existing variables or facts.
|
||||||
|
- **Inline data manipulation:** Filters are designed for inline use in Jinja2 expressions (in templates, tasks, vars, etc.).
|
||||||
|
- **No external lookups:** Filters only operate on data you explicitly pass to them and cannot access external files, the Ansible inventory, or runtime context.
|
||||||
|
|
||||||
|
### Examples
|
||||||
|
|
||||||
|
```jinja2
|
||||||
|
{{ role_name | get_entity_name }}
|
||||||
|
{{ my_list | unique }}
|
||||||
|
{{ user_email | regex_replace('^(.+)@.*$', '\\1') }}
|
||||||
|
```
|
||||||
|
|
||||||
|
## When *not* to Use a Filter Plugin
|
||||||
|
|
||||||
|
* If you need to **load data from an external source** (e.g., file, environment, API), use a lookup plugin instead.
|
||||||
|
* If your logic requires **access to inventory, facts, or host-level information** that is not passed as a parameter.
|
||||||
|
|
||||||
|
## Further Reading
|
||||||
|
|
||||||
|
* [Ansible Filter Plugins Documentation](https://docs.ansible.com/ansible/latest/plugins/filter.html)
|
||||||
|
* [Developing Ansible Filter Plugins](https://docs.ansible.com/ansible/latest/dev_guide/developing_plugins.html#developing-filter-plugins)
|
44
filter_plugins/application_allowed.py
Normal file
44
filter_plugins/application_allowed.py
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
# Provides a filter to control which applications (roles) should be deployed
|
||||||
|
|
||||||
|
from ansible.errors import AnsibleFilterError
|
||||||
|
|
||||||
|
|
||||||
|
def application_allowed(application_id: str, group_names: list, allowed_applications: list = []):
|
||||||
|
"""
|
||||||
|
Return True if:
|
||||||
|
- application_id exists in group_names, AND
|
||||||
|
- either allowed_applications is not provided (or empty), OR application_id is in allowed_applications.
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
application_id (str): Name of the application/role to check.
|
||||||
|
group_names (list): List of groups the current host belongs to.
|
||||||
|
allowed_applications (list, optional): List of application IDs to allow.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if this application is allowed to deploy, False otherwise.
|
||||||
|
"""
|
||||||
|
# Ensure group_names is iterable
|
||||||
|
if not isinstance(group_names, (list, tuple)):
|
||||||
|
raise AnsibleFilterError(f"Expected group_names to be a list, str or tuple, got {type(group_names)}")
|
||||||
|
|
||||||
|
# Must be part of the host's groups
|
||||||
|
if application_id not in group_names:
|
||||||
|
return False
|
||||||
|
|
||||||
|
# If allowed_applications provided, only allow if ID is in that list
|
||||||
|
if allowed_applications:
|
||||||
|
if not isinstance(allowed_applications, (list, tuple, str)):
|
||||||
|
raise AnsibleFilterError(f"allowed_applications must be a list or tuple if provided, got {type(allowed_applications)}")
|
||||||
|
return application_id in allowed_applications
|
||||||
|
|
||||||
|
# No filter provided → allow all in group_names
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
class FilterModule(object):
    """Expose the `application_allowed` filter to Ansible."""

    def filters(self):
        # Map the Jinja2 filter name to its implementation.
        return {'application_allowed': application_allowed}
|
102
filter_plugins/applications_if_group_and_deps.py
Normal file
102
filter_plugins/applications_if_group_and_deps.py
Normal file
@@ -0,0 +1,102 @@
|
|||||||
|
from ansible.errors import AnsibleFilterError
|
||||||
|
import os
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
|
||||||
|
class FilterModule(object):
    """Filter applications by host-group membership and role dependencies."""

    def filters(self):
        return {'applications_if_group_and_deps': self.applications_if_group_and_deps}

    def applications_if_group_and_deps(self, applications, group_names):
        """
        Keep only the applications whose key is either:

          1) listed directly in group_names, or
          2) the application_id of a role reachable (transitively) from any
             group in group_names via meta/main.yml dependencies.
        """
        self._validate_inputs(applications, group_names)

        roles_dir = self._get_roles_directory()
        reachable_roles = self._collect_reachable_roles(group_names, roles_dir)
        reachable_app_ids = self._gather_application_ids(reachable_roles, roles_dir)

        return self._filter_applications(applications, group_names, reachable_app_ids)

    def _validate_inputs(self, applications, group_names):
        """Raise AnsibleFilterError unless inputs have the expected types."""
        if not isinstance(applications, dict):
            raise AnsibleFilterError(f"Expected applications as dict, got {type(applications).__name__}")
        if not isinstance(group_names, (list, tuple)):
            raise AnsibleFilterError(f"Expected group_names as list/tuple, got {type(group_names).__name__}")

    def _get_roles_directory(self):
        """Return the absolute path of the project's roles/ directory."""
        project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
        return os.path.join(project_root, 'roles')

    def _collect_reachable_roles(self, group_names, roles_dir):
        """Collect every role reachable from the given groups via meta/dependencies."""
        reachable = set()
        for group in group_names:
            self._collect_roles_from_group(group, reachable, roles_dir)
        return reachable

    def _collect_roles_from_group(self, group, seen, roles_dir):
        """Depth-first walk over meta/main.yml dependencies, accumulating into *seen*."""
        if group in seen:
            return
        seen.add(group)

        meta_file = os.path.join(roles_dir, group, 'meta', 'main.yml')
        if not os.path.isfile(meta_file):
            return

        try:
            with open(meta_file) as handle:
                meta = yaml.safe_load(handle) or {}
        except Exception:
            # Unreadable/broken meta files are treated as having no dependencies.
            return

        for dep in meta.get('dependencies', []):
            dep_name = self._get_dependency_name(dep)
            if dep_name:
                self._collect_roles_from_group(dep_name, seen, roles_dir)

    def _get_dependency_name(self, dependency):
        """Return the role name from a meta dependency entry (str or dict), else None."""
        if isinstance(dependency, str):
            return dependency
        if isinstance(dependency, dict):
            return dependency.get('role') or dependency.get('name')
        return None

    def _gather_application_ids(self, included_roles, roles_dir):
        """Read vars/main.yml of each role and collect non-empty application_id strings."""
        app_ids = set()
        for role in included_roles:
            vars_file = os.path.join(roles_dir, role, 'vars', 'main.yml')
            if not os.path.isfile(vars_file):
                continue
            try:
                with open(vars_file) as handle:
                    role_vars = yaml.safe_load(handle) or {}
            except Exception:
                continue

            app_id = role_vars.get('application_id')
            if isinstance(app_id, str) and app_id:
                app_ids.add(app_id)
        return app_ids

    def _filter_applications(self, applications, group_names, included_app_ids):
        """Return the subset of applications matching groups or reachable app ids."""
        return {
            app_key: cfg
            for app_key, cfg in applications.items()
            if app_key in group_names or app_key in included_app_ids
        }
|
116
filter_plugins/canonical_domains_map.py
Normal file
116
filter_plugins/canonical_domains_map.py
Normal file
@@ -0,0 +1,116 @@
|
|||||||
|
from ansible.errors import AnsibleFilterError
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
|
||||||
|
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
||||||
|
from module_utils.entity_name_utils import get_entity_name
|
||||||
|
from module_utils.role_dependency_resolver import RoleDependencyResolver
|
||||||
|
from typing import Iterable
|
||||||
|
|
||||||
|
|
||||||
|
class FilterModule(object):
|
||||||
|
def filters(self):
|
||||||
|
return {'canonical_domains_map': self.canonical_domains_map}
|
||||||
|
|
||||||
|
def canonical_domains_map(
|
||||||
|
self,
|
||||||
|
apps,
|
||||||
|
PRIMARY_DOMAIN,
|
||||||
|
*,
|
||||||
|
recursive: bool = False,
|
||||||
|
roles_base_dir: str | None = None,
|
||||||
|
seed: Iterable[str] | None = None,
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Build { app_id: [canonical domains...] }.
|
||||||
|
|
||||||
|
Rekursiv werden nur include_role, import_role und meta/main.yml:dependencies verfolgt.
|
||||||
|
'run_after' wird hier absichtlich ignoriert.
|
||||||
|
"""
|
||||||
|
if not isinstance(apps, dict):
|
||||||
|
raise AnsibleFilterError(f"'apps' must be a dict, got {type(apps).__name__}")
|
||||||
|
|
||||||
|
app_keys = set(apps.keys())
|
||||||
|
seed_keys = set(seed) if seed is not None else app_keys
|
||||||
|
|
||||||
|
if recursive:
|
||||||
|
roles_base_dir = roles_base_dir or os.path.join(os.getcwd(), "roles")
|
||||||
|
if not os.path.isdir(roles_base_dir):
|
||||||
|
raise AnsibleFilterError(
|
||||||
|
f"roles_base_dir '{roles_base_dir}' not found or not a directory."
|
||||||
|
)
|
||||||
|
|
||||||
|
resolver = RoleDependencyResolver(roles_base_dir)
|
||||||
|
discovered_roles = resolver.resolve_transitively(
|
||||||
|
start_roles=seed_keys,
|
||||||
|
resolve_include_role=True,
|
||||||
|
resolve_import_role=True,
|
||||||
|
resolve_dependencies=True,
|
||||||
|
resolve_run_after=False,
|
||||||
|
max_depth=None,
|
||||||
|
)
|
||||||
|
# all discovered roles that actually have config entries in `apps`
|
||||||
|
target_apps = discovered_roles & app_keys
|
||||||
|
else:
|
||||||
|
target_apps = seed_keys
|
||||||
|
|
||||||
|
result = {}
|
||||||
|
seen_domains = {}
|
||||||
|
|
||||||
|
for app_id in sorted(target_apps):
|
||||||
|
cfg = apps.get(app_id)
|
||||||
|
if cfg is None:
|
||||||
|
continue
|
||||||
|
if not str(app_id).startswith(("web-", "svc-db-")):
|
||||||
|
continue
|
||||||
|
if not isinstance(cfg, dict):
|
||||||
|
raise AnsibleFilterError(
|
||||||
|
f"Invalid configuration for application '{app_id}': expected dict, got {cfg!r}"
|
||||||
|
)
|
||||||
|
|
||||||
|
domains_cfg = cfg.get('server', {}).get('domains', {})
|
||||||
|
if not domains_cfg or 'canonical' not in domains_cfg:
|
||||||
|
self._add_default_domain(app_id, PRIMARY_DOMAIN, seen_domains, result)
|
||||||
|
continue
|
||||||
|
|
||||||
|
canonical_domains = domains_cfg['canonical']
|
||||||
|
self._process_canonical_domains(app_id, canonical_domains, seen_domains, result)
|
||||||
|
|
||||||
|
return result
|
||||||
|
|
||||||
|
def _add_default_domain(self, app_id, PRIMARY_DOMAIN, seen_domains, result):
|
||||||
|
entity_name = get_entity_name(app_id)
|
||||||
|
default_domain = f"{entity_name}.{PRIMARY_DOMAIN}"
|
||||||
|
if default_domain in seen_domains:
|
||||||
|
raise AnsibleFilterError(
|
||||||
|
f"Domain '{default_domain}' is already configured for "
|
||||||
|
f"'{seen_domains[default_domain]}' and '{app_id}'"
|
||||||
|
)
|
||||||
|
seen_domains[default_domain] = app_id
|
||||||
|
result[app_id] = [default_domain]
|
||||||
|
|
||||||
|
def _process_canonical_domains(self, app_id, canonical_domains, seen_domains, result):
|
||||||
|
if isinstance(canonical_domains, dict):
|
||||||
|
for _, domain in canonical_domains.items():
|
||||||
|
self._validate_and_check_domain(app_id, domain, seen_domains)
|
||||||
|
result[app_id] = canonical_domains.copy()
|
||||||
|
elif isinstance(canonical_domains, list):
|
||||||
|
for domain in canonical_domains:
|
||||||
|
self._validate_and_check_domain(app_id, domain, seen_domains)
|
||||||
|
result[app_id] = list(canonical_domains)
|
||||||
|
else:
|
||||||
|
raise AnsibleFilterError(
|
||||||
|
f"Unexpected type for 'server.domains.canonical' in application '{app_id}': "
|
||||||
|
f"{type(canonical_domains).__name__}"
|
||||||
|
)
|
||||||
|
|
||||||
|
def _validate_and_check_domain(self, app_id, domain, seen_domains):
|
||||||
|
if not isinstance(domain, str) or not domain.strip():
|
||||||
|
raise AnsibleFilterError(
|
||||||
|
f"Invalid domain entry in 'canonical' for application '{app_id}': {domain!r}"
|
||||||
|
)
|
||||||
|
if domain in seen_domains:
|
||||||
|
raise AnsibleFilterError(
|
||||||
|
f"Domain '{domain}' is already configured for '{seen_domains[domain]}' and '{app_id}'"
|
||||||
|
)
|
||||||
|
seen_domains[domain] = app_id
|
210
filter_plugins/csp_filters.py
Normal file
210
filter_plugins/csp_filters.py
Normal file
@@ -0,0 +1,210 @@
|
|||||||
|
from ansible.errors import AnsibleFilterError
|
||||||
|
import hashlib
|
||||||
|
import base64
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
|
||||||
|
# Ensure module_utils is importable when this filter runs from Ansible
|
||||||
|
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
||||||
|
from module_utils.config_utils import get_app_conf
|
||||||
|
from module_utils.get_url import get_url
|
||||||
|
|
||||||
|
|
||||||
|
class FilterModule(object):
    """
    Custom filters for Content Security Policy generation and CSP-related utilities.

    Only `build_csp_header` is exposed as a Jinja2 filter; the static methods
    below are internal helpers that read per-application CSP settings through
    `get_app_conf`.
    """

    def filters(self):
        return {
            'build_csp_header': self.build_csp_header,
        }

    # -------------------------------
    # Helpers
    # -------------------------------

    @staticmethod
    def is_feature_enabled(applications: dict, feature: str, application_id: str) -> bool:
        """
        Returns True if applications[application_id].features[feature] is truthy.
        """
        # NOTE(review): the trailing `False, False` arguments are forwarded to
        # get_app_conf — presumably (strict, default); confirm against
        # module_utils.config_utils.get_app_conf.
        return get_app_conf(
            applications,
            application_id,
            'features.' + feature,
            False,
            False
        )

    @staticmethod
    def get_csp_whitelist(applications, application_id, directive):
        """
        Returns a list of additional whitelist entries for a given directive.
        Accepts both scalar and list in config; always returns a list.
        """
        wl = get_app_conf(
            applications,
            application_id,
            'server.csp.whitelist.' + directive,
            False,
            []
        )
        if isinstance(wl, list):
            return wl
        if wl:
            # A single scalar entry was configured -> normalize to a list.
            return [wl]
        return []

    @staticmethod
    def get_csp_flags(applications, application_id, directive):
        """
        Returns CSP flag tokens (e.g., "'unsafe-eval'", "'unsafe-inline'") for a directive,
        merging sane defaults with app config.
        Default: 'unsafe-inline' is enabled for style-src and style-src-elem.
        """
        # Defaults that apply to all apps
        default_flags = {}
        if directive in ('style-src', 'style-src-elem'):
            default_flags = {'unsafe-inline': True}

        configured = get_app_conf(
            applications,
            application_id,
            'server.csp.flags.' + directive,
            False,
            {}
        )

        # Merge defaults with configured flags (configured overrides defaults)
        merged = {**default_flags, **configured}

        # Emit only enabled flags, quoted as CSP keyword tokens (e.g. "'unsafe-inline'").
        tokens = []
        for flag_name, enabled in merged.items():
            if enabled:
                tokens.append(f"'{flag_name}'")
        return tokens

    @staticmethod
    def get_csp_inline_content(applications, application_id, directive):
        """
        Returns inline script/style snippets to hash for a given directive.
        Accepts both scalar and list in config; always returns a list.
        """
        snippets = get_app_conf(
            applications,
            application_id,
            'server.csp.hashes.' + directive,
            False,
            []
        )
        if isinstance(snippets, list):
            return snippets
        if snippets:
            # A single scalar snippet was configured -> normalize to a list.
            return [snippets]
        return []

    @staticmethod
    def get_csp_hash(content):
        """
        Computes the SHA256 hash of the given inline content and returns
        a CSP token like "'sha256-<base64>'".

        Raises:
            AnsibleFilterError: if hashing/encoding fails (e.g. non-string input).
        """
        try:
            digest = hashlib.sha256(content.encode('utf-8')).digest()
            b64 = base64.b64encode(digest).decode('utf-8')
            return f"'sha256-{b64}'"
        except Exception as exc:
            raise AnsibleFilterError(f"get_csp_hash failed: {exc}")

    # -------------------------------
    # Main builder
    # -------------------------------

    def build_csp_header(
        self,
        applications,
        application_id,
        domains,
        web_protocol='https',
        matomo_feature_name='matomo'
    ):
        """
        Builds the Content-Security-Policy header value dynamically based on application settings.

        - Flags (e.g., 'unsafe-eval', 'unsafe-inline') are read from server.csp.flags.<directive>,
          with sane defaults applied in get_csp_flags (always 'unsafe-inline' for style-src and style-src-elem).
        - Inline hashes are read from server.csp.hashes.<directive>.
        - Whitelists are read from server.csp.whitelist.<directive>.
        - Inline hashes are added only if the final tokens do NOT include 'unsafe-inline'.

        Raises:
            AnsibleFilterError: wrapping any underlying failure.
        """
        try:
            directives = [
                'default-src',  # Fallback source list for content types not explicitly listed
                'connect-src',  # Allowed URLs for XHR, WebSockets, EventSource, fetch()
                'frame-ancestors',  # Who may embed this page
                'frame-src',  # Sources for nested browsing contexts (e.g., <iframe>)
                'script-src',  # Sources for script execution
                'script-src-elem',  # Sources for <script> elements
                'style-src',  # Sources for inline styles and <style>/<link> elements
                'style-src-elem',  # Sources for <style> and <link rel="stylesheet">
                'font-src',  # Sources for fonts
                'worker-src',  # Sources for workers
                'manifest-src',  # Sources for web app manifests
                'media-src',  # Sources for audio and video
            ]

            parts = []

            for directive in directives:
                tokens = ["'self'"]

                # 1) Load flags (includes defaults from get_csp_flags)
                flags = self.get_csp_flags(applications, application_id, directive)
                tokens += flags

                # 2) Allow fetching from internal CDN by default for selected directives
                if directive in ['script-src-elem', 'connect-src', 'style-src-elem']:
                    tokens.append(get_url(domains, 'web-svc-cdn', web_protocol))

                # 3) Matomo integration if feature is enabled
                if directive in ['script-src-elem', 'connect-src']:
                    if self.is_feature_enabled(applications, matomo_feature_name, application_id):
                        tokens.append(get_url(domains, 'web-app-matomo', web_protocol))

                # 4) ReCaptcha integration (scripts + frames) if feature is enabled
                if self.is_feature_enabled(applications, 'recaptcha', application_id):
                    if directive in ['script-src-elem', 'frame-src']:
                        tokens.append('https://www.gstatic.com')
                        tokens.append('https://www.google.com')

                # 5) Frame ancestors handling (desktop + logout support)
                if directive == 'frame-ancestors':
                    if self.is_feature_enabled(applications, 'desktop', application_id):
                        # Allow being embedded by the desktop app domain (and potentially its parent)
                        # NOTE(review): raises TypeError (caught by the wrapper below) when
                        # 'web-app-desktop' is missing from domains — confirm callers always
                        # provide it when the 'desktop' feature is enabled.
                        domain = domains.get('web-app-desktop')[0]
                        sld_tld = ".".join(domain.split(".")[-2:])  # e.g., example.com
                        tokens.append(f"{sld_tld}")
                    if self.is_feature_enabled(applications, 'logout', application_id):
                        # Allow embedding via logout proxy and Keycloak app
                        tokens.append(get_url(domains, 'web-svc-logout', web_protocol))
                        tokens.append(get_url(domains, 'web-app-keycloak', web_protocol))

                # 6) Custom whitelist entries
                tokens += self.get_csp_whitelist(applications, application_id, directive)

                # 7) Add inline content hashes ONLY if final tokens do NOT include 'unsafe-inline'
                #    (Check tokens, not flags, to include defaults and later modifications.)
                if "'unsafe-inline'" not in tokens:
                    for snippet in self.get_csp_inline_content(applications, application_id, directive):
                        tokens.append(self.get_csp_hash(snippet))

                # Append directive
                parts.append(f"{directive} {' '.join(tokens)};")

            # 8) Static img-src directive (kept permissive for data/blob and any host)
            parts.append("img-src * data: blob:;")

            return ' '.join(parts)

        except Exception as exc:
            raise AnsibleFilterError(f"build_csp_header failed: {exc}")
|
31
filter_plugins/csp_hashes.py
Normal file
31
filter_plugins/csp_hashes.py
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
from ansible.errors import AnsibleFilterError
|
||||||
|
import copy
|
||||||
|
|
||||||
|
def append_csp_hash(applications, application_id, code_one_liner):
    """
    Return a deep copy of *applications* in which
    ``applications[application_id].server.csp.hashes['script-src-elem']``
    exists and contains *code_one_liner* (appended once, duplicates skipped).

    The input mapping is never mutated.
    """
    if not isinstance(applications, dict):
        raise AnsibleFilterError("`applications` must be a dict")
    if application_id not in applications:
        raise AnsibleFilterError(f"Unknown application_id: {application_id}")

    # Work on a copy so the caller's structure stays untouched.
    updated = copy.deepcopy(applications)
    hashes = (
        updated[application_id]
        .setdefault('server', {})
        .setdefault('csp', {})
        .setdefault('hashes', {})
    )

    snippets = hashes.get('script-src-elem', [])
    if code_one_liner not in snippets:
        snippets.append(code_one_liner)
        hashes['script-src-elem'] = snippets

    return updated
|
||||||
|
|
||||||
|
class FilterModule(object):
    """Expose the `append_csp_hash` filter to Ansible."""

    def filters(self):
        # Map the Jinja2 filter name to its implementation.
        return {'append_csp_hash': append_csp_hash}
|
25
filter_plugins/docker_service_enabled.py
Normal file
25
filter_plugins/docker_service_enabled.py
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
class FilterModule(object):
    """Safely check whether a docker service is enabled for an application_id."""

    def filters(self):
        return {'is_docker_service_enabled': self.is_docker_service_enabled}

    @staticmethod
    def is_docker_service_enabled(applications, application_id, service_name):
        """
        Return True only when
        ``applications[application_id].docker.services[service_name].enabled``
        is truthy; any missing key or malformed structure yields False.
        """
        try:
            app_cfg = applications[application_id]
            service_cfg = app_cfg.get('docker', {}).get('services', {}).get(service_name, {})
            return bool(service_cfg.get('enabled', False))
        except Exception:
            # Missing keys / wrong types are treated as "not enabled".
            return False
|
97
filter_plugins/domain_redirect_mappings.py
Normal file
97
filter_plugins/domain_redirect_mappings.py
Normal file
@@ -0,0 +1,97 @@
|
|||||||
|
from ansible.errors import AnsibleFilterError
|
||||||
|
import sys, os
|
||||||
|
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
||||||
|
from module_utils.entity_name_utils import get_entity_name
|
||||||
|
|
||||||
|
class FilterModule(object):
    """Expose the `domain_mappings` redirect-mapping filter."""

    def filters(self):
        return {'domain_mappings': self.domain_mappings}

    def domain_mappings(self, apps, PRIMARY_DOMAIN):
        """
        Build a flat list of redirect mappings for all apps:
          - source: each alias domain
          - target: the first canonical domain
        Skip mappings where source == target, since they make no sense.

        Parameters:
            apps (dict): application configs keyed by app_id.
            PRIMARY_DOMAIN (str): base domain used for default `<entity>.<domain>` names.

        Returns:
            list[dict]: entries of the form {'source': ..., 'target': ...}.

        Raises:
            AnsibleFilterError: on malformed domain configuration entries.
        """
        def parse_entry(domains_cfg, key, app_id):
            # Normalize domains_cfg[key] (dict or list) into a validated list
            # of domain strings; returns None when the key is absent.
            if key not in domains_cfg:
                return None
            entry = domains_cfg[key]
            if isinstance(entry, dict):
                values = list(entry.values())
            elif isinstance(entry, list):
                # Copy so later alias manipulation never mutates the caller's
                # config list (the dict branch already returns a fresh list).
                values = list(entry)
            else:
                raise AnsibleFilterError(
                    f"Unexpected type for 'domains.{key}' in application '{app_id}': {type(entry).__name__}"
                )
            for d in values:
                if not isinstance(d, str) or not d.strip():
                    raise AnsibleFilterError(
                        f"Invalid domain entry in '{key}' for application '{app_id}': {d!r}"
                    )
            return values

        def default_domain(app_id: str, primary: str):
            # Default domain is `<entity>.<primary>`, e.g. 'wiki.example.org'.
            subdomain = get_entity_name(app_id)
            return f"{subdomain}.{primary}"

        # 1) Compute canonical domains per app (always as a list)
        canonical_map = {}
        for app_id, cfg in apps.items():
            domains_cfg = cfg.get('server', {}).get('domains', {})
            entry = domains_cfg.get('canonical')
            if entry is None:
                canonical_map[app_id] = [default_domain(app_id, PRIMARY_DOMAIN)]
            elif isinstance(entry, dict):
                canonical_map[app_id] = list(entry.values())
            elif isinstance(entry, list):
                canonical_map[app_id] = list(entry)
            else:
                raise AnsibleFilterError(
                    f"Unexpected type for 'server.domains.canonical' in application '{app_id}': {type(entry).__name__}"
                )

        # 2) Compute alias domains per app
        alias_map = {}
        for app_id, cfg in apps.items():
            domains_cfg = cfg.get('server', {}).get('domains', {})
            if domains_cfg is None:
                # Explicit null config: no aliases at all.
                alias_map[app_id] = []
                continue
            if isinstance(domains_cfg, dict) and not domains_cfg:
                # Empty config: the default domain acts as the only alias.
                alias_map[app_id] = [default_domain(app_id, PRIMARY_DOMAIN)]
                continue

            aliases = parse_entry(domains_cfg, 'aliases', app_id) or []
            default = default_domain(app_id, PRIMARY_DOMAIN)
            has_aliases = 'aliases' in domains_cfg
            has_canonical = 'canonical' in domains_cfg

            if has_aliases:
                # Ensure the default domain always redirects as well.
                if default not in aliases:
                    aliases.append(default)
            elif has_canonical:
                # Only add the default as alias if it is not already canonical.
                canon = canonical_map.get(app_id, [])
                if default not in canon and default not in aliases:
                    aliases.append(default)

            alias_map[app_id] = aliases

        # 3) Build flat list of {source, target} entries,
        #    skipping self-mappings
        mappings = []
        for app_id, sources in alias_map.items():
            canon_list = canonical_map.get(app_id, [])
            target = canon_list[0] if canon_list else default_domain(app_id, PRIMARY_DOMAIN)
            for src in sources:
                if src == target:
                    # skip self-redirects
                    continue
                mappings.append({
                    'source': src,
                    'target': target
                })

        return mappings
|
19
filter_plugins/domain_tools.py
Normal file
19
filter_plugins/domain_tools.py
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
# filter_plugins/domain_tools.py
|
||||||
|
# Returns the DNS zone (SLD.TLD) from a hostname.
|
||||||
|
# Pure-Python, no external deps; handles simple cases. For exotic TLDs use tldextract (see note).
|
||||||
|
from ansible.errors import AnsibleFilterError
|
||||||
|
|
||||||
|
def to_zone(hostname: str) -> str:
    """
    Return the DNS zone (SLD.TLD) of *hostname*, e.g. 'a.b.example.com' -> 'example.com'.

    Naive implementation: joins the last two labels, so multi-label public
    suffixes (e.g. '.co.uk') are not handled; use tldextract for those.

    Raises AnsibleFilterError for non-strings, empty strings, or names
    without at least two labels.
    """
    if not isinstance(hostname, str) or not hostname.strip():
        raise AnsibleFilterError("to_zone: hostname must be a non-empty string")

    labels = hostname.strip(".").split(".")
    if len(labels) < 2:
        raise AnsibleFilterError(f"to_zone: '{hostname}' has no TLD part")

    # The last two labels form the zone.
    sld, tld = labels[-2], labels[-1]
    return f"{sld}.{tld}"
|
||||||
|
|
||||||
|
class FilterModule(object):
    """Expose the `to_zone` filter to Ansible."""

    def filters(self):
        # Map the Jinja2 filter name to its implementation.
        return {"to_zone": to_zone}
|
31
filter_plugins/generate_all_domains.py
Normal file
31
filter_plugins/generate_all_domains.py
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
from ansible.errors import AnsibleFilterError
|
||||||
|
|
||||||
|
class FilterModule(object):
    """Filter that flattens a domains mapping into a sorted, unique list."""

    def filters(self):
        return {'generate_all_domains': self.generate_all_domains}

    def generate_all_domains(self, domains_dict, include_www=True):
        """
        Transform a dict of domains (values: str, list, dict) into a flat list,
        optionally add 'www.' prefixes, dedupe and sort alphabetically.
        """
        def _collect(mapping):
            # Flatten str / list / dict values into a single list of names.
            names = []
            for value in (mapping or {}).values():
                if isinstance(value, str):
                    names.append(value)
                elif isinstance(value, list):
                    names.extend(value)
                elif isinstance(value, dict):
                    names.extend(value.values())
            return names

        try:
            names = _collect(domains_dict)
            if include_www:
                # Iterate over a snapshot so the www-variants are not re-prefixed.
                names += [f"www.{domain}" for domain in list(names)]
            return sorted(set(names))
        except Exception as exc:
            raise AnsibleFilterError(f"generate_all_domains failed: {exc}")
|
44
filter_plugins/generate_base_sld_domains.py
Normal file
44
filter_plugins/generate_base_sld_domains.py
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
import re
|
||||||
|
from ansible.errors import AnsibleFilterError
|
||||||
|
|
||||||
|
class FilterModule(object):
    """Filter that reduces hostnames to their base SLD.TLD zones."""

    def filters(self):
        return {'generate_base_sld_domains': self.generate_base_sld_domains}

    def generate_base_sld_domains(self, domains_list):
        """
        Given a list of hostnames, extract the second-level domain (SLD.TLD) for any hostname
        with two or more labels, return single-label hostnames as-is, and reject IPs,
        empty or malformed strings, and non-strings. Deduplicate and sort.
        """
        if not isinstance(domains_list, list):
            raise AnsibleFilterError(
                f"generate_base_sld_domains expected a list, got {type(domains_list).__name__}"
            )

        ipv4_re = re.compile(r'^\d{1,3}(?:\.\d{1,3}){3}$')
        zones = set()

        for name in domains_list:
            if not isinstance(name, str):
                raise AnsibleFilterError(f"Invalid domain entry (not a string): {name!r}")
            # Reject empty strings, leading/trailing dots and empty labels.
            if not name or name.startswith('.') or name.endswith('.') or '..' in name:
                raise AnsibleFilterError(f"Invalid domain entry (malformed): {name!r}")
            if ipv4_re.match(name):
                raise AnsibleFilterError(f"IP addresses not allowed: {name!r}")

            labels = name.split('.')
            # Single-label names pass through unchanged; otherwise keep SLD.TLD.
            zones.add(name if len(labels) == 1 else ".".join(labels[-2:]))

        return sorted(zones)
|
40
filter_plugins/get_all_application_ids.py
Normal file
40
filter_plugins/get_all_application_ids.py
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
# filter_plugins/get_all_application_ids.py
|
||||||
|
|
||||||
|
import glob
|
||||||
|
import os
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
|
||||||
|
def get_all_application_ids(roles_dir='roles'):
    """
    Ansible filter to retrieve all unique application_id values
    defined in roles/*/vars/main.yml files.

    :param roles_dir: Base directory for Ansible roles (default: 'roles')
    :return: Sorted list of unique application_id strings
    """
    found = set()
    vars_glob = os.path.join(roles_dir, '*', 'vars', 'main.yml')

    for vars_path in glob.glob(vars_glob):
        try:
            with open(vars_path, 'r', encoding='utf-8') as handle:
                content = yaml.safe_load(handle)
        except Exception:
            # Best effort: unreadable or invalid vars files are skipped.
            continue
        if isinstance(content, dict) and 'application_id' in content:
            found.add(content['application_id'])

    return sorted(found)


class FilterModule(object):
    """
    Ansible filter plugin for retrieving application IDs.
    """
    def filters(self):
        return {
            'get_all_application_ids': get_all_application_ids
        }
|
54
filter_plugins/get_all_invokable_apps.py
Normal file
54
filter_plugins/get_all_invokable_apps.py
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
import os
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
def get_all_invokable_apps(
    categories_file=None,
    roles_dir=None
):
    """
    Return all application_ids (or role names) for roles whose directory names match invokable paths from categories.yml.
    :param categories_file: Path to categories.yml (default: roles/categories.yml at project root)
    :param roles_dir: Path to roles directory (default: roles/ at project root)
    :return: List of application_ids (or role names)
    """
    # Resolve defaults relative to the project root (parent of filter_plugins/).
    plugin_dir = os.path.dirname(os.path.abspath(__file__))
    repo_root = os.path.abspath(os.path.join(plugin_dir, '..'))
    categories_file = categories_file or os.path.join(repo_root, 'roles', 'categories.yml')
    roles_dir = roles_dir or os.path.join(repo_root, 'roles')

    # Imported lazily so plugin loading does not depend on this module.
    from filter_plugins.invokable_paths import get_invokable_paths
    prefixes = get_invokable_paths(categories_file)
    if not prefixes or not os.path.isdir(roles_dir):
        return []

    app_ids = []
    for role_name in sorted(os.listdir(roles_dir)):
        role_dir = os.path.join(roles_dir, role_name)
        if not os.path.isdir(role_dir):
            continue
        # A role matches when it equals an invokable path or extends it with '-'.
        if not any(role_name == p or role_name.startswith(p + '-') for p in prefixes):
            continue
        app_id = role_name  # fall back to the directory name
        vars_path = os.path.join(role_dir, 'vars', 'main.yml')
        if os.path.isfile(vars_path):
            try:
                with open(vars_path, 'r', encoding='utf-8') as handle:
                    app_id = (yaml.safe_load(handle) or {}).get('application_id', role_name)
            except Exception:
                app_id = role_name
        app_ids.append(app_id)
    return sorted(app_ids)


class FilterModule(object):
    def filters(self):
        return {
            'get_all_invokable_apps': get_all_invokable_apps
        }
|
10
filter_plugins/get_app_conf.py
Normal file
10
filter_plugins/get_app_conf.py
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
import sys, os
|
||||||
|
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
||||||
|
from module_utils.config_utils import get_app_conf, AppConfigKeyError,ConfigEntryNotSetError
|
||||||
|
|
||||||
|
class FilterModule(object):
    """Infinito.Nexus application config extraction filters."""

    def filters(self):
        # get_app_conf is provided by module_utils.config_utils (imported above).
        return {'get_app_conf': get_app_conf}
|
31
filter_plugins/get_category_entries.py
Normal file
31
filter_plugins/get_category_entries.py
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
# Custom Ansible filter to get all role names under "roles/" with a given prefix.
|
||||||
|
|
||||||
|
import os
|
||||||
|
|
||||||
|
def get_category_entries(prefix, roles_path="roles"):
    """
    Returns a list of role names under the given roles_path
    that start with the specified prefix.

    :param prefix: String prefix to match role names.
    :param roles_path: Path to the roles directory (default: 'roles').
    :return: List of matching role names.
    """
    if not os.path.isdir(roles_path):
        return []

    matches = [
        entry
        for entry in os.listdir(roles_path)
        if entry.startswith(prefix)
        and os.path.isdir(os.path.join(roles_path, entry))
    ]
    return sorted(matches)


class FilterModule(object):
    """ Custom filters for Ansible """

    def filters(self):
        return {
            "get_category_entries": get_category_entries
        }
|
19
filter_plugins/get_docker_image.py
Normal file
19
filter_plugins/get_docker_image.py
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
def get_docker_image(applications, application_id, image_key: str = None):
    """Return 'image:version' for an application's docker service.

    :param applications: mapping of application_id -> application config.
    :param image_key: key inside docker.images / docker.versions;
        defaults to the application_id itself.
    :raises ValueError: if the image or version entry is missing.
    """
    image_key = image_key or application_id
    docker_cfg = applications.get(application_id, {}).get("docker", {})
    image = docker_cfg.get("images", {}).get(image_key)
    version = docker_cfg.get("versions", {}).get(image_key)

    if not image:
        raise ValueError(f"Missing image for {application_id}:{image_key}")
    if not version:
        raise ValueError(f"Missing version for {application_id}:{image_key}")

    return f"{image}:{version}"


class FilterModule(object):
    def filters(self):
        return {
            'get_docker_image': get_docker_image,
        }
|
33
filter_plugins/get_docker_paths.py
Normal file
33
filter_plugins/get_docker_paths.py
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
import sys, os
|
||||||
|
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
||||||
|
from module_utils.entity_name_utils import get_entity_name
|
||||||
|
|
||||||
|
def get_docker_paths(application_id: str, path_docker_compose_instances: str) -> dict:
    """
    Build the docker_compose dict based on
    path_docker_compose_instances and application_id.
    Uses get_entity_name to extract the entity name from application_id.
    """
    instance_dir = f"{path_docker_compose_instances}{get_entity_name(application_id)}/"

    directories = {
        'instance': instance_dir,
        'env': f"{instance_dir}.env/",
        'services': f"{instance_dir}services/",
        'volumes': f"{instance_dir}volumes/",
        'config': f"{instance_dir}config/",
    }
    files = {
        'env': f"{instance_dir}.env/env",
        'docker_compose': f"{instance_dir}docker-compose.yml",
        'dockerfile': f"{instance_dir}Dockerfile",
    }
    return {'directories': directories, 'files': files}


class FilterModule(object):
    def filters(self):
        return {
            'get_docker_paths': get_docker_paths,
        }
|
19
filter_plugins/get_domain.py
Normal file
19
filter_plugins/get_domain.py
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
from ansible.errors import AnsibleFilterError
|
||||||
|
|
||||||
|
class FilterModule(object):
    def filters(self):
        # Make the project's module_utils importable, then resolve get_domain
        # lazily so a broken path surfaces as a filter error, not a load crash.
        plugin_dir = os.path.dirname(__file__)
        module_utils = os.path.join(os.path.dirname(plugin_dir), 'module_utils')
        if module_utils not in sys.path:
            sys.path.append(module_utils)

        try:
            from domain_utils import get_domain
        except ImportError as e:
            raise AnsibleFilterError(f"could not import domain_utils: {e}")

        return {'get_domain': get_domain}
|
9
filter_plugins/get_entity_name.py
Normal file
9
filter_plugins/get_entity_name.py
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
import sys, os
|
||||||
|
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
||||||
|
from module_utils.entity_name_utils import get_entity_name
|
||||||
|
|
||||||
|
class FilterModule(object):
    """Expose get_entity_name from module_utils as an Ansible filter."""

    def filters(self):
        return {'get_entity_name': get_entity_name}
|
48
filter_plugins/get_role.py
Normal file
48
filter_plugins/get_role.py
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
'''
|
||||||
|
Ansible filter plugin: get_role
|
||||||
|
|
||||||
|
This filter inspects each role under the given roles directory, loads its vars/main.yml,
|
||||||
|
and returns the role folder name whose application_id matches the provided value.
|
||||||
|
'''
|
||||||
|
|
||||||
|
from ansible.errors import AnsibleFilterError
|
||||||
|
import os
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
|
||||||
|
def get_role(application_id, roles_path='roles'):
    """
    Find the role directory under `roles_path` whose vars/main.yml contains the given application_id.

    :param application_id: The application_id to match.
    :param roles_path: Path to the roles directory (default: 'roles').
    :return: The name of the matching role directory.
    :raises AnsibleFilterError: If vars file is unreadable or no match is found.
    """
    if not os.path.isdir(roles_path):
        raise AnsibleFilterError(f"Roles path not found: {roles_path}")

    for role_name in os.listdir(roles_path):
        vars_file = os.path.join(roles_path, role_name, 'vars', 'main.yml')
        if not os.path.isfile(vars_file):
            continue
        try:
            with open(vars_file, 'r') as handle:
                role_vars = yaml.safe_load(handle) or {}
        except Exception as e:
            raise AnsibleFilterError(f"Failed to load {vars_file}: {e}")
        if role_vars.get('application_id') == application_id:
            return role_name

    raise AnsibleFilterError(f"No role found with application_id '{application_id}' in {roles_path}")


class FilterModule(object):
    """
    Register the get_role filter
    """
    def filters(self):
        return {
            'get_role': get_role,
        }
|
37
filter_plugins/get_service_name.py
Normal file
37
filter_plugins/get_service_name.py
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
"""
|
||||||
|
Custom Ansible filter to build a systemctl unit name (always lowercase).
|
||||||
|
|
||||||
|
Rules:
|
||||||
|
- If `systemctl_id` ends with '@': drop the '@' and return
|
||||||
|
"{systemctl_id_without_at}.{software_name}@{suffix_handling}".
|
||||||
|
- Else: return "{systemctl_id}.{software_name}{suffix_handling}".
|
||||||
|
|
||||||
|
Suffix handling:
|
||||||
|
- Default "" → automatically pick:
|
||||||
|
- ".service" if no '@' in systemctl_id
|
||||||
|
- ".timer" if '@' in systemctl_id
|
||||||
|
- Explicit False → no suffix at all
|
||||||
|
- Any string → ".{suffix}" (lowercased)
|
||||||
|
"""
|
||||||
|
|
||||||
|
def get_service_name(systemctl_id, software_name, suffix=""):
    """Build a systemctl unit name (always lowercase).

    :param systemctl_id: unit id; a trailing '@' marks a template unit.
    :param software_name: software component inserted after the id.
    :param suffix: "" or None -> auto (".timer" when '@' is present in the
        id, ".service" otherwise); False -> no suffix at all;
        any string -> ".<suffix>" (lowercased, exactly one leading dot).
    :return: e.g. "backup.web.service" or "cleanup.app@.timer".
    """
    sid = str(systemctl_id).strip().lower()
    software_name = str(software_name).strip().lower()

    # Determine suffix per the module-level rules.
    if suffix is False:
        sfx = ""  # no suffix at all
    elif suffix == "" or suffix is None:
        # FIX: auto mode previously always produced ".service"; the documented
        # contract picks ".timer" for template ids containing '@'.
        sfx = ".timer" if "@" in sid else ".service"
    else:
        # FIX: documented as ".{suffix}" but the dot was never prepended.
        # Normalize to exactly one leading dot so ".timer" and "timer" agree.
        sfx = "." + str(suffix).strip().lower().lstrip(".")

    if sid.endswith("@"):
        base = sid[:-1]  # drop the trailing '@'
        return f"{base}.{software_name}@{sfx}"
    return f"{sid}.{software_name}{sfx}"


class FilterModule(object):
    def filters(self):
        return {"get_service_name": get_service_name}
|
24
filter_plugins/get_service_script_path.py
Normal file
24
filter_plugins/get_service_script_path.py
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
# filter_plugins/get_service_script_path.py
|
||||||
|
# Custom Ansible filter to generate service script paths.
|
||||||
|
|
||||||
|
def get_service_script_path(systemctl_id, script_type):
    """
    Build the path to a service script based on systemctl_id and type.

    :param systemctl_id: The identifier of the system service.
    :param script_type: The script type/extension (e.g., sh, py, yml).
    :return: The full path string.
    :raises ValueError: if either argument is falsy.
    """
    if not (systemctl_id and script_type):
        raise ValueError("Both systemctl_id and script_type are required")
    return f"/opt/scripts/systemctl/{systemctl_id}/script.{script_type}"


class FilterModule(object):
    """ Custom filters for Ansible """

    def filters(self):
        return {
            "get_service_script_path": get_service_script_path
        }
|
11
filter_plugins/get_url.py
Normal file
11
filter_plugins/get_url.py
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
import sys, os
|
||||||
|
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
||||||
|
from module_utils.get_url import get_url
|
||||||
|
|
||||||
|
class FilterModule(object):
    """Infinito.Nexus application config extraction filters."""

    def filters(self):
        # get_url is provided by module_utils.get_url (imported above).
        return {'get_url': get_url}
|
14
filter_plugins/has_env.py
Normal file
14
filter_plugins/has_env.py
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
import os
|
||||||
|
|
||||||
|
def has_env(application_id, base_dir='.'):
    """
    Check if env.j2 exists under roles/{{ application_id }}/templates/env.j2
    """
    return os.path.isfile(
        os.path.join(base_dir, 'roles', application_id, 'templates', 'env.j2')
    )


class FilterModule(object):
    def filters(self):
        return {
            'has_env': has_env,
        }
|
113
filter_plugins/invokable_paths.py
Normal file
113
filter_plugins/invokable_paths.py
Normal file
@@ -0,0 +1,113 @@
|
|||||||
|
import os
|
||||||
|
import yaml
|
||||||
|
from typing import Dict, List, Optional
|
||||||
|
|
||||||
|
# Keys that carry role metadata rather than nested sub-roles.
_METADATA_KEYS = {'title', 'description', 'icon', 'invokable'}


def _load_roles_tree(roles_file: Optional[str]) -> Dict[str, dict]:
    """Load categories.yml and return the nested roles mapping.

    Resolves the default path (roles/categories.yml at the project root),
    validates that the YAML root is a dict, and unwraps a lone top-level
    'roles' key if present.

    :raises FileNotFoundError: if the file is missing.
    :raises yaml.YAMLError: if the file is not valid YAML.
    :raises ValueError: if the YAML root is not a mapping.
    """
    if not roles_file:
        script_dir = os.path.dirname(os.path.abspath(__file__))
        project_root = os.path.dirname(script_dir)
        roles_file = os.path.join(project_root, 'roles', 'categories.yml')

    try:
        with open(roles_file, 'r') as f:
            data = yaml.safe_load(f) or {}
    except FileNotFoundError:
        raise FileNotFoundError(f"Roles file not found: {roles_file}")
    except yaml.YAMLError as e:
        raise yaml.YAMLError(f"Error parsing YAML {roles_file}: {e}")

    if not isinstance(data, dict):
        raise ValueError("YAML root is not a dictionary")

    if 'roles' in data and isinstance(data['roles'], dict) and len(data) == 1:
        return data['roles']
    return data


def _walk(subroles: Dict[str, dict], parent: List[str],
          want_invokable: bool, suffix: Optional[str]) -> List[str]:
    """Collect dash-joined paths whose truthy 'invokable' flag matches
    *want_invokable*; *suffix* (if given) is appended to each result."""
    found: List[str] = []
    for key, cfg in subroles.items():
        path = parent + [key]
        if bool(cfg.get('invokable', False)) == want_invokable:
            found.append('-'.join(path) + (suffix or ""))
        # Any non-metadata dict value is a nested sub-role.
        children = {
            ck: cv for ck, cv in cfg.items()
            if ck not in _METADATA_KEYS and isinstance(cv, dict)
        }
        if children:
            found.extend(_walk(children, path, want_invokable, suffix))
    return found


def get_invokable_paths(
    roles_file: Optional[str] = None,
    suffix: Optional[str] = None
) -> List[str]:
    """
    Load nested roles YAML and return dash-joined paths where 'invokable' is True. Appends suffix if provided.
    """
    return _walk(_load_roles_tree(roles_file), [], True, suffix)


def get_non_invokable_paths(
    roles_file: Optional[str] = None,
    suffix: Optional[str] = None
) -> List[str]:
    """
    Load nested roles YAML and return dash-joined paths where 'invokable' is False or missing.
    Appends suffix if provided.
    """
    return _walk(_load_roles_tree(roles_file), [], False, suffix)


class FilterModule:
    def filters(self):
        return {
            'invokable_paths': get_invokable_paths,
            'non_invokable_paths': get_non_invokable_paths
        }
|
42
filter_plugins/merge_mapping.py
Normal file
42
filter_plugins/merge_mapping.py
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
# filter_plugins/merge_mapping.py
|
||||||
|
|
||||||
|
from ansible.errors import AnsibleFilterError
|
||||||
|
|
||||||
|
def merge_mapping(list1, list2, key_name='source'):
    """
    Merge two lists of dicts on a given key.
    - list1, list2: each must be a List[Dict]
    - key_name: the field to match on
    If both lists contain an item with the same key_name value,
    their dictionaries are merged (fields from list2 overwrite or add to list1).
    """
    if not (isinstance(list1, list) and isinstance(list2, list)):
        raise AnsibleFilterError("merge_mapping expects two lists")

    def _key_of(item):
        # Every entry must carry the merge key.
        if key_name not in item:
            raise AnsibleFilterError(f"Item {item} is missing the key '{key_name}'")
        return item[key_name]

    # Seed with copies of list1 entries (later list1 duplicates replace earlier ones).
    combined = {_key_of(item): item.copy() for item in list1}

    # Overlay list2: matching keys are field-merged, new keys are appended.
    for item in list2:
        k = _key_of(item)
        if k in combined:
            combined[k].update(item)
        else:
            combined[k] = item.copy()

    return list(combined.values())


class FilterModule(object):
    def filters(self):
        return {
            'merge_mapping': merge_mapping,
        }
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user