Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2025-12-14 13:54:37 +03:00)

Compare commits: 582 commits
.ansible-lint
@@ -7,18 +7,6 @@ skip_list:

   # These rules are intentionally skipped:
   #
-  # [E204]: "Lines should be no longer than 160 chars"
-  # This could be re-enabled with a major rewrite in the future.
-  # For now, there's not enough value gain from strictly limiting line length.
-  # (Disabled in May 2019)
-  - '204'
-
-  # [E701]: "meta/main.yml should contain relevant info"
-  # Roles in Kubespray are not intended to be used/imported by Ansible Galaxy.
-  # While it can be useful to have these metadata available, they are also available in the existing documentation.
-  # (Disabled in May 2019)
-  - '701'
-
   # [role-name] "meta/main.yml" Role name role-name does not match ``^+$`` pattern
   # Meta roles in Kubespray don't need proper names
   # (Disabled in June 2021)
@@ -28,3 +16,23 @@ skip_list:
   # In Kubespray we use variables that use camelCase to match their k8s counterparts
   # (Disabled in June 2021)
   - 'var-naming'
+
+  # [fqcn-builtins]
+  # Roles in kubespray don't need fully qualified collection names
+  # (Disabled in Feb 2023)
+  - 'fqcn-builtins'
+
+  # We use template in names
+  - 'name[template]'
+
+  # No changed-when on commands
+  # (Disabled in June 2023 after ansible upgrade; FIXME)
+  - 'no-changed-when'
+
+  # Disable run-once check with free strategy
+  # (Disabled in June 2023 after ansible upgrade; FIXME)
+  - 'run-once[task]'
+
+exclude_paths:
+  # Generated files
+  - tests/files/custom_cni/cilium.yaml
+  - venv
.ansible-lint-ignore (new file, 8 lines)
@@ -0,0 +1,8 @@
+# This file contains ignores rule violations for ansible-lint
+inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml jinja[spacing]
+roles/kubernetes/control-plane/defaults/main/kube-proxy.yml jinja[spacing]
+roles/kubernetes/control-plane/defaults/main/main.yml jinja[spacing]
+roles/kubernetes/kubeadm/defaults/main.yml jinja[spacing]
+roles/kubernetes/node/defaults/main.yml jinja[spacing]
+roles/kubernetes/preinstall/defaults/main.yml jinja[spacing]
+roles/kubespray-defaults/defaults/main.yaml jinja[spacing]
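Both files are read automatically by ansible-lint from the repository root: the skip list above disables whole rules, while the new `.ansible-lint-ignore` suppresses individual rule/file pairs. A quick local check might look like this (a sketch assuming a reasonably recent ansible-lint; the ignore-file format and `--generate-ignore` arrived in the 6.x series):

```ShellSession
# Lint the roles; .ansible-lint and .ansible-lint-ignore are applied implicitly.
ansible-lint roles/

# Regenerate .ansible-lint-ignore from the currently failing checks.
ansible-lint --generate-ignore
```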
.gitignore (vendored, 7 changes)
@@ -11,7 +11,8 @@ contrib/offline/offline-files.tar.gz
 .cache
 *.bak
 *.tfstate
-*.tfstate.backup
+*.tfstate*backup
+*.lock.hcl
 .terraform/
 contrib/terraform/aws/credentials.tfvars
 .terraform.lock.hcl
@@ -113,3 +114,7 @@ roles/**/molecule/**/__pycache__/
 # Temp location used by our scripts
 scripts/tmp/
 tmp.md
+
+# Ansible collection files
+kubernetes_sigs-kubespray*tar.gz
+ansible_collections
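The `*.tfstate.backup` to `*.tfstate*backup` change widens the glob so names with extra segments between `tfstate` and `backup` are also ignored. `git check-ignore` can confirm which pattern matches a given path (the file name below is made up for illustration):

```ShellSession
# -v prints the .gitignore file, line number, and pattern that matched.
git check-ignore -v terraform.tfstate.1689859477.backup
```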
.gitlab-ci.yml
@@ -1,5 +1,6 @@
 ---
 stages:
+  - build
   - unit-tests
   - deploy-part1
   - moderator
@@ -8,12 +9,12 @@ stages:
   - deploy-special

 variables:
-  KUBESPRAY_VERSION: v2.19.1
+  KUBESPRAY_VERSION: v2.22.1
   FAILFASTCI_NAMESPACE: 'kargo-ci'
   GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
   ANSIBLE_FORCE_COLOR: "true"
   MAGIC: "ci check this"
-  TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
+  TEST_ID: "$CI_PIPELINE_ID-$CI_JOB_ID"
   CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
   CI_TEST_REGISTRY_MIRROR: "./tests/common/_docker_hub_registry_mirror.yml"
   CI_TEST_SETTING: "./tests/common/_kubespray_test_settings.yml"
@@ -32,21 +33,18 @@ variables:
   MITOGEN_ENABLE: "false"
   ANSIBLE_LOG_LEVEL: "-vv"
   RECOVER_CONTROL_PLANE_TEST: "false"
-  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
+  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"
-  TERRAFORM_VERSION: 1.0.8
+  TERRAFORM_VERSION: 1.3.7
-  ANSIBLE_MAJOR_VERSION: "2.11"
+  PIPELINE_IMAGE: "$CI_REGISTRY_IMAGE/pipeline:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}"

 before_script:
   - ./tests/scripts/rebase.sh
-  - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
-  - python -m pip uninstall -y ansible ansible-base ansible-core
-  - python -m pip install -r tests/requirements-${ANSIBLE_MAJOR_VERSION}.txt
   - mkdir -p /.ssh

 .job: &job
   tags:
     - packet
-  image: quay.io/kubespray/kubespray:$KUBESPRAY_VERSION
+  image: $PIPELINE_IMAGE
   artifacts:
     when: always
     paths:
@@ -55,6 +53,7 @@ before_script:
 .testcases: &testcases
   <<: *job
   retry: 1
+  interruptible: true
   before_script:
     - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
     - ./tests/scripts/rebase.sh
@@ -76,6 +75,7 @@ ci-authorized:
   only: []

 include:
+  - .gitlab-ci/build.yml
   - .gitlab-ci/lint.yml
   - .gitlab-ci/shellcheck.yml
   - .gitlab-ci/terraform.yml
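The `.job: &job` / `<<: *job` lines above combine GitLab CI's hidden-job convention (dot-prefixed jobs are never run) with plain YAML anchor merging. A minimal standalone sketch (the job names here are made up):

```yaml
# A hidden job holds shared settings; '&defaults' names the anchor
# and '<<: *defaults' merges those keys into a concrete job.
.defaults: &defaults
  tags:
    - packet
  image: $PIPELINE_IMAGE

smoke-test:
  <<: *defaults
  script:
    - echo "runs with the shared tags and image"
```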
.gitlab-ci/build.yml (new file, 40 lines)
@@ -0,0 +1,40 @@
+---
+.build:
+  stage: build
+  image:
+    name: moby/buildkit:rootless
+    entrypoint: [""]
+  variables:
+    BUILDKITD_FLAGS: --oci-worker-no-process-sandbox
+  before_script:
+    - mkdir ~/.docker
+    - echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > ~/.docker/config.json
+
+pipeline image:
+  extends: .build
+  script:
+    - |
+      buildctl-daemonless.sh build \
+        --frontend=dockerfile.v0 \
+        --local context=. \
+        --local dockerfile=. \
+        --opt filename=./pipeline.Dockerfile \
+        --output type=image,name=$PIPELINE_IMAGE,push=true \
+        --import-cache type=registry,ref=$CI_REGISTRY_IMAGE/pipeline:cache
+  rules:
+    - if: '$CI_COMMIT_REF_NAME != $CI_DEFAULT_BRANCH'
+
+pipeline image and build cache:
+  extends: .build
+  script:
+    - |
+      buildctl-daemonless.sh build \
+        --frontend=dockerfile.v0 \
+        --local context=. \
+        --local dockerfile=. \
+        --opt filename=./pipeline.Dockerfile \
+        --output type=image,name=$PIPELINE_IMAGE,push=true \
+        --import-cache type=registry,ref=$CI_REGISTRY_IMAGE/pipeline:cache \
+        --export-cache type=registry,ref=$CI_REGISTRY_IMAGE/pipeline:cache,mode=max
+  rules:
+    - if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH'
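The two jobs differ only in cache handling: PR builds import the shared registry cache, while builds on the default branch also refresh it with `--export-cache`. Roughly the same image can be produced locally with BuildKit-enabled Docker; this is a sketch, not the CI's exact invocation, and the registry tag is made up:

```ShellSession
# Build the CI pipeline image locally from pipeline.Dockerfile.
DOCKER_BUILDKIT=1 docker build \
  -f pipeline.Dockerfile \
  -t registry.example.com/kubespray/pipeline:local \
  .
```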
.gitlab-ci/lint.yml
@@ -14,7 +14,7 @@ vagrant-validate:
   stage: unit-tests
   tags: [light]
   variables:
-    VAGRANT_VERSION: 2.2.19
+    VAGRANT_VERSION: 2.3.7
   script:
     - ./tests/scripts/vagrant-validate.sh
   except: ['triggers', 'master']
@@ -39,21 +39,34 @@ syntax-check:
     ANSIBLE_VERBOSITY: "3"
   script:
     - ansible-playbook --syntax-check cluster.yml
+    - ansible-playbook --syntax-check playbooks/cluster.yml
     - ansible-playbook --syntax-check upgrade-cluster.yml
+    - ansible-playbook --syntax-check playbooks/upgrade_cluster.yml
     - ansible-playbook --syntax-check reset.yml
+    - ansible-playbook --syntax-check playbooks/reset.yml
     - ansible-playbook --syntax-check extra_playbooks/upgrade-only-k8s.yml
   except: ['triggers', 'master']

+collection-build-install-sanity-check:
+  extends: .job
+  stage: unit-tests
+  tags: [light]
+  variables:
+    ANSIBLE_COLLECTIONS_PATH: "./ansible_collections"
+  script:
+    - ansible-galaxy collection build
+    - ansible-galaxy collection install kubernetes_sigs-kubespray-$(grep "^version:" galaxy.yml | awk '{print $2}').tar.gz
+    - ansible-galaxy collection list $(egrep -i '(name:\s+|namespace:\s+)' galaxy.yml | awk '{print $2}' | tr '\n' '.' | sed 's|\.$||g') | grep "^kubernetes_sigs.kubespray"
+    - test -f ansible_collections/kubernetes_sigs/kubespray/playbooks/cluster.yml
+    - test -f ansible_collections/kubernetes_sigs/kubespray/playbooks/reset.yml
+  except: ['triggers', 'master']
+
 tox-inventory-builder:
   stage: unit-tests
   tags: [light]
   extends: .job
   before_script:
     - ./tests/scripts/rebase.sh
-    - apt-get update && apt-get install -y python3-pip
-    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
-    - python -m pip uninstall -y ansible ansible-base ansible-core
-    - python -m pip install -r tests/requirements.txt
   script:
     - pip3 install tox
     - cd contrib/inventory_builder && tox
@@ -75,6 +88,20 @@ check-readme-versions:
   script:
     - tests/scripts/check_readme_versions.sh

+check-galaxy-version:
+  stage: unit-tests
+  tags: [light]
+  image: python:3
+  script:
+    - tests/scripts/check_galaxy_version.sh
+
+check-typo:
+  stage: unit-tests
+  tags: [light]
+  image: python:3
+  script:
+    - tests/scripts/check_typo.sh
+
 ci-matrix:
   stage: unit-tests
   tags: [light]
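The new collection sanity job can be reproduced locally; this condensed sketch uses a tarball glob instead of parsing galaxy.yml for the version:

```ShellSession
# Build the collection tarball and install it next to the checkout.
export ANSIBLE_COLLECTIONS_PATH=./ansible_collections
ansible-galaxy collection build
ansible-galaxy collection install kubernetes_sigs-kubespray-*.tar.gz
# The playbooks should now be reachable inside the installed collection.
test -f ansible_collections/kubernetes_sigs/kubespray/playbooks/cluster.yml
```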
.gitlab-ci/molecule.yml
@@ -4,15 +4,11 @@
   tags: [c3.small.x86]
   only: [/^pr-.*$/]
   except: ['triggers']
-  image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
+  image: $PIPELINE_IMAGE
   services: []
   stage: deploy-part1
   before_script:
     - tests/scripts/rebase.sh
-    - apt-get update && apt-get install -y python3-pip
-    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
-    - python -m pip uninstall -y ansible ansible-base ansible-core
-    - python -m pip install -r tests/requirements.txt
     - ./tests/scripts/vagrant_clean.sh
   script:
     - ./tests/scripts/molecule_run.sh
@@ -58,6 +54,7 @@ molecule_cri-o:
   stage: deploy-part2
   script:
     - ./tests/scripts/molecule_run.sh -i container-engine/cri-o
+  allow_failure: true
   when: on_success

 # Stage 3 container engines don't get as much attention so allow them to fail
.gitlab-ci/packet.yml
@@ -23,45 +23,45 @@
   allow_failure: true
   extends: .packet

-# The ubuntu20-calico-aio jobs are meant as early stages to prevent running the full CI if something is horribly broken
-packet_ubuntu20-calico-aio:
+packet_cleanup_old:
   stage: deploy-part1
+  extends: .packet_periodic
+  script:
+    - cd tests
+    - make cleanup-packet
+  after_script: []
+
+# The ubuntu20-calico-all-in-one jobs are meant as early stages to prevent running the full CI if something is horribly broken
+packet_ubuntu20-calico-all-in-one:
+  stage: deploy-part1
   extends: .packet_pr
   when: on_success
   variables:
     RESET_CHECK: "true"

-packet_ubuntu20-calico-aio-ansible-2_11:
-  stage: deploy-part1
-  extends: .packet_periodic
-  when: on_success
-  variables:
-    ANSIBLE_MAJOR_VERSION: "2.11"
-    RESET_CHECK: "true"
-
 # ### PR JOBS PART2

-packet_ubuntu18-aio-docker:
+packet_ubuntu20-all-in-one-docker:
   stage: deploy-part2
   extends: .packet_pr
   when: on_success

-packet_ubuntu20-aio-docker:
+packet_ubuntu20-calico-all-in-one-hardening:
   stage: deploy-part2
   extends: .packet_pr
   when: on_success

-packet_ubuntu18-calico-aio:
+packet_ubuntu22-all-in-one-docker:
   stage: deploy-part2
   extends: .packet_pr
   when: on_success

-packet_ubuntu22-aio-docker:
+packet_ubuntu22-calico-all-in-one:
   stage: deploy-part2
   extends: .packet_pr
   when: on_success

-packet_ubuntu22-calico-aio:
+packet_ubuntu22-calico-etcd-datastore:
   stage: deploy-part2
   extends: .packet_pr
   when: on_success
@@ -75,28 +75,19 @@ packet_almalinux8-crio:
   extends: .packet_pr
   stage: deploy-part2
   when: on_success
+  allow_failure: true

-packet_ubuntu18-crio:
+packet_ubuntu20-crio:
   extends: .packet_pr
   stage: deploy-part2
   when: manual

-packet_fedora35-crio:
+packet_fedora37-crio:
   extends: .packet_pr
   stage: deploy-part2
   when: manual

-packet_ubuntu16-canal-ha:
-  stage: deploy-part2
-  extends: .packet_periodic
-  when: on_success
-
-packet_ubuntu16-canal-sep:
-  stage: deploy-special
-  extends: .packet_pr
-  when: manual
-
-packet_ubuntu16-flannel-ha:
+packet_ubuntu20-flannel-ha:
   stage: deploy-part2
   extends: .packet_pr
   when: manual
@@ -126,6 +117,21 @@ packet_debian11-docker:
   extends: .packet_pr
   when: on_success

+packet_debian12-calico:
+  stage: deploy-part2
+  extends: .packet_pr
+  when: on_success
+
+packet_debian12-docker:
+  stage: deploy-part2
+  extends: .packet_pr
+  when: on_success
+
+packet_debian12-cilium:
+  stage: deploy-part2
+  extends: .packet_periodic
+  when: on_success
+
 packet_centos7-calico-ha-once-localhost:
   stage: deploy-part2
   extends: .packet_pr
@@ -138,7 +144,7 @@ packet_centos7-calico-ha-once-localhost:

 packet_almalinux8-kube-ovn:
   stage: deploy-part2
-  extends: .packet_periodic
+  extends: .packet_pr
   when: on_success

 packet_almalinux8-calico:
@@ -156,20 +162,23 @@ packet_rockylinux9-calico:
   extends: .packet_pr
   when: on_success

+packet_rockylinux9-cilium:
+  stage: deploy-part2
+  extends: .packet_pr
+  when: on_success
+  variables:
+    RESET_CHECK: "true"
+
 packet_almalinux8-docker:
   stage: deploy-part2
   extends: .packet_pr
   when: on_success

-packet_fedora36-docker-weave:
+packet_fedora38-docker-weave:
   stage: deploy-part2
   extends: .packet_pr
   when: on_success
-
-packet_opensuse-canal:
-  stage: deploy-part2
-  extends: .packet_periodic
-  when: on_success
+  allow_failure: true

 packet_opensuse-docker-cilium:
   stage: deploy-part2
@@ -178,22 +187,17 @@ packet_opensuse-docker-cilium:

 # ### MANUAL JOBS

-packet_ubuntu16-docker-weave-sep:
+packet_ubuntu20-docker-weave-sep:
   stage: deploy-part2
   extends: .packet_pr
   when: manual

-packet_ubuntu18-cilium-sep:
+packet_ubuntu20-cilium-sep:
   stage: deploy-special
   extends: .packet_pr
   when: manual

-packet_ubuntu18-flannel-ha:
-  stage: deploy-part2
-  extends: .packet_pr
-  when: manual
-
-packet_ubuntu18-flannel-ha-once:
+packet_ubuntu20-flannel-ha-once:
   stage: deploy-part2
   extends: .packet_pr
   when: manual
@@ -204,7 +208,7 @@ packet_almalinux8-calico-ha-ebpf:
   extends: .packet_pr
   when: manual

-packet_debian9-macvlan:
+packet_debian10-macvlan:
   stage: deploy-part2
   extends: .packet_pr
   when: manual
@@ -219,29 +223,24 @@ packet_centos7-multus-calico:
   extends: .packet_pr
   when: manual

-packet_centos7-canal-ha:
-  stage: deploy-part2
-  extends: .packet_pr
-  when: manual
-
-packet_fedora36-docker-calico:
+packet_fedora38-docker-calico:
   stage: deploy-part2
   extends: .packet_periodic
   when: on_success
   variables:
     RESET_CHECK: "true"

-packet_fedora35-calico-selinux:
+packet_fedora37-calico-selinux:
   stage: deploy-part2
   extends: .packet_periodic
   when: on_success

-packet_fedora35-calico-swap-selinux:
+packet_fedora37-calico-swap-selinux:
   stage: deploy-part2
   extends: .packet_pr
   when: manual

-packet_amazon-linux-2-aio:
+packet_amazon-linux-2-all-in-one:
   stage: deploy-part2
   extends: .packet_pr
   when: manual
@@ -251,11 +250,21 @@ packet_almalinux8-calico-nodelocaldns-secondary:
   extends: .packet_pr
   when: manual

-packet_fedora36-kube-ovn:
+packet_fedora38-kube-ovn:
   stage: deploy-part2
   extends: .packet_periodic
   when: on_success

+packet_debian11-custom-cni:
+  stage: deploy-part2
+  extends: .packet_pr
+  when: manual
+
+packet_debian11-kubelet-csr-approver:
+  stage: deploy-part2
+  extends: .packet_pr
+  when: manual
+
 # ### PR JOBS PART3
 # Long jobs (45min+)

@@ -306,18 +315,18 @@ packet_debian11-calico-upgrade-once:
   variables:
     UPGRADE_TEST: graceful

-packet_ubuntu18-calico-ha-recover:
+packet_ubuntu20-calico-ha-recover:
   stage: deploy-part3
   extends: .packet_periodic
   when: on_success
   variables:
     RECOVER_CONTROL_PLANE_TEST: "true"
-    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
+    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"

-packet_ubuntu18-calico-ha-recover-noquorum:
+packet_ubuntu20-calico-ha-recover-noquorum:
   stage: deploy-part3
   extends: .packet_periodic
   when: on_success
   variables:
     RECOVER_CONTROL_PLANE_TEST: "true"
-    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube_control_plane[1:]"
+    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:]:kube_control_plane[1:]"
.gitlab-ci/terraform.yml
@@ -60,11 +60,11 @@ tf-validate-openstack:
     PROVIDER: openstack
     CLUSTER: $CI_COMMIT_REF_NAME

-tf-validate-metal:
+tf-validate-equinix:
   extends: .terraform_validate
   variables:
     TF_VERSION: $TERRAFORM_VERSION
-    PROVIDER: metal
+    PROVIDER: equinix
     CLUSTER: $CI_COMMIT_REF_NAME

 tf-validate-aws:
@@ -80,6 +80,12 @@ tf-validate-exoscale:
     TF_VERSION: $TERRAFORM_VERSION
     PROVIDER: exoscale

+tf-validate-hetzner:
+  extends: .terraform_validate
+  variables:
+    TF_VERSION: $TERRAFORM_VERSION
+    PROVIDER: hetzner
+
 tf-validate-vsphere:
   extends: .terraform_validate
   variables:
@@ -94,7 +100,13 @@ tf-validate-upcloud:
     PROVIDER: upcloud
     CLUSTER: $CI_COMMIT_REF_NAME

-# tf-packet-ubuntu16-default:
+tf-validate-nifcloud:
+  extends: .terraform_validate
+  variables:
+    TF_VERSION: $TERRAFORM_VERSION
+    PROVIDER: nifcloud
+
+# tf-packet-ubuntu20-default:
 #   extends: .terraform_apply
 #   variables:
 #     TF_VERSION: $TERRAFORM_VERSION
@@ -104,23 +116,9 @@ tf-validate-upcloud:
 #     TF_VAR_number_of_k8s_nodes: "1"
 #     TF_VAR_plan_k8s_masters: t1.small.x86
 #     TF_VAR_plan_k8s_nodes: t1.small.x86
-#     TF_VAR_facility: ewr1
+#     TF_VAR_metro: am
 #     TF_VAR_public_key_path: ""
-#     TF_VAR_operating_system: ubuntu_16_04
+#     TF_VAR_operating_system: ubuntu_20_04
-#
-# tf-packet-ubuntu18-default:
-#   extends: .terraform_apply
-#   variables:
-#     TF_VERSION: $TERRAFORM_VERSION
-#     PROVIDER: packet
-#     CLUSTER: $CI_COMMIT_REF_NAME
-#     TF_VAR_number_of_k8s_masters: "1"
-#     TF_VAR_number_of_k8s_nodes: "1"
-#     TF_VAR_plan_k8s_masters: t1.small.x86
-#     TF_VAR_plan_k8s_nodes: t1.small.x86
-#     TF_VAR_facility: ams1
-#     TF_VAR_public_key_path: ""
-#     TF_VAR_operating_system: ubuntu_18_04

 .ovh_variables: &ovh_variables
   OS_AUTH_URL: https://auth.cloud.ovh.net/v3
@@ -158,7 +156,7 @@ tf-elastx_cleanup:
   script:
     - ./scripts/openstack-cleanup/main.py

-tf-elastx_ubuntu18-calico:
+tf-elastx_ubuntu20-calico:
   extends: .terraform_apply
   stage: deploy-part3
   when: on_success
@@ -188,7 +186,7 @@ tf-elastx_ubuntu18-calico:
     TF_VAR_az_list_node: '["sto1"]'
     TF_VAR_flavor_k8s_master: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
     TF_VAR_flavor_k8s_node: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
-    TF_VAR_image: ubuntu-18.04-server-latest
+    TF_VAR_image: ubuntu-20.04-server-latest
     TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

 # OVH voucher expired, commenting job until things are sorted out
@@ -205,7 +203,7 @@ tf-elastx_ubuntu18-calico:
 #   script:
 #     - ./scripts/openstack-cleanup/main.py

-# tf-ovh_ubuntu18-calico:
+# tf-ovh_ubuntu20-calico:
 #   extends: .terraform_apply
 #   when: on_success
 #   environment: ovh
@@ -231,5 +229,5 @@ tf-elastx_ubuntu18-calico:
 #     TF_VAR_network_name: "Ext-Net"
 #     TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
 #     TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
-#     TF_VAR_image: "Ubuntu 18.04"
+#     TF_VAR_image: "Ubuntu 20.04"
 #     TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
.gitlab-ci/vagrant.yml
@@ -10,13 +10,9 @@
   tags: [c3.small.x86]
   only: [/^pr-.*$/]
   except: ['triggers']
-  image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
+  image: $PIPELINE_IMAGE
   services: []
   before_script:
-    - apt-get update && apt-get install -y python3-pip
-    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
-    - python -m pip uninstall -y ansible ansible-base ansible-core
-    - python -m pip install -r tests/requirements.txt
     - ./tests/scripts/vagrant_clean.sh
   script:
     - ./tests/scripts/testcases_run.sh
@@ -24,17 +20,12 @@
     - chronic ./tests/scripts/testcases_cleanup.sh
   allow_failure: true

-vagrant_ubuntu18-calico-dual-stack:
+vagrant_ubuntu20-calico-dual-stack:
   stage: deploy-part2
   extends: .vagrant
   when: on_success

-vagrant_ubuntu18-flannel:
-  stage: deploy-part2
-  extends: .vagrant
-  when: on_success
-
-vagrant_ubuntu18-weave-medium:
+vagrant_ubuntu20-weave-medium:
   stage: deploy-part2
   extends: .vagrant
   when: manual
@@ -43,19 +34,25 @@ vagrant_ubuntu20-flannel:
   stage: deploy-part2
   extends: .vagrant
   when: on_success
+  allow_failure: false

-vagrant_ubuntu16-kube-router-sep:
+vagrant_ubuntu20-flannel-collection:
+  stage: deploy-part2
+  extends: .vagrant
+  when: on_success
+
+vagrant_ubuntu20-kube-router-sep:
   stage: deploy-part2
   extends: .vagrant
   when: manual

 # Service proxy test fails connectivity testing
-vagrant_ubuntu16-kube-router-svc-proxy:
+vagrant_ubuntu20-kube-router-svc-proxy:
   stage: deploy-part2
   extends: .vagrant
   when: manual

-vagrant_fedora35-kube-router:
+vagrant_fedora37-kube-router:
   stage: deploy-part2
   extends: .vagrant
   when: on_success
.pre-commit-config.yaml
@@ -1,5 +1,20 @@
 ---
 repos:
+
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v3.4.0
+    hooks:
+      - id: check-added-large-files
+      - id: check-case-conflict
+      - id: check-executables-have-shebangs
+      - id: check-xml
+      - id: check-merge-conflict
+      - id: detect-private-key
+      - id: end-of-file-fixer
+      - id: forbid-new-submodules
+      - id: requirements-txt-fixer
+      - id: trailing-whitespace
+
   - repo: https://github.com/adrienverge/yamllint.git
     rev: v1.27.1
     hooks:
@@ -13,6 +28,14 @@ repos:
         args: [ -r, "~MD013,~MD029" ]
         exclude: "^.git"

+  - repo: https://github.com/jumanjihouse/pre-commit-hooks
+    rev: 3.0.0
+    hooks:
+      - id: shellcheck
+        args: [ --severity, "error" ]
+        exclude: "^.git"
+        files: "\\.sh$"
+
   - repo: local
     hooks:
       - id: ansible-lint
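With the new hooks in place, contributors run the whole suite through the standard pre-commit workflow; nothing here is repository-specific:

```ShellSession
# Install the git hook once, then run all hooks against the whole tree.
pip install pre-commit
pre-commit install
pre-commit run --all-files

# Or run a single hook, e.g. the newly added shellcheck hook.
pre-commit run shellcheck --all-files
```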
.yamllint
@@ -3,6 +3,8 @@ extends: default

 ignore: |
   .git/
+  # Generated file
+  tests/files/custom_cni/cilium.yaml

 rules:
   braces:
CHANGELOG.md (new file, 1 line)
@@ -0,0 +1 @@
+# See our release notes on [GitHub](https://github.com/kubernetes-sigs/kubespray/releases)
CONTRIBUTING.md
@@ -12,6 +12,7 @@ To install development dependencies you can set up a python virtual env with the
 virtualenv venv
 source venv/bin/activate
 pip install -r tests/requirements.txt
+ansible-galaxy install -r tests/requirements.yml
 ```

 #### Linting
Dockerfile (70 changes)
@@ -1,37 +1,45 @@
-# Use imutable image tags rather than mutable tags (like ubuntu:20.04)
-FROM ubuntu:focal-20220531
-
-ARG ARCH=amd64
-ARG TZ=Etc/UTC
-RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
-
-RUN apt update -y \
-    && apt install -y \
-    libssl-dev python3-dev sshpass apt-transport-https jq moreutils \
-    ca-certificates curl gnupg2 software-properties-common python3-pip unzip rsync git \
-    && rm -rf /var/lib/apt/lists/*
-RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
-    && add-apt-repository \
-    "deb [arch=$ARCH] https://download.docker.com/linux/ubuntu \
-    $(lsb_release -cs) \
-    stable" \
-    && apt update -y && apt-get install --no-install-recommends -y docker-ce \
-    && rm -rf /var/lib/apt/lists/*
+# Use imutable image tags rather than mutable tags (like ubuntu:22.04)
+FROM ubuntu:jammy-20230308

 # Some tools like yamllint need this
 # Pip needs this as well at the moment to install ansible
 # (and potentially other packages)
 # See: https://github.com/pypa/pip/issues/10219
-ENV LANG=C.UTF-8
+ENV LANG=C.UTF-8 \
+    DEBIAN_FRONTEND=noninteractive \
+    PYTHONDONTWRITEBYTECODE=1
 WORKDIR /kubespray
-COPY . .
-RUN /usr/bin/python3 -m pip install --no-cache-dir pip -U \
-    && /usr/bin/python3 -m pip install --no-cache-dir -r tests/requirements.txt \
-    && python3 -m pip install --no-cache-dir -r requirements.txt \
-    && update-alternatives --install /usr/bin/python python /usr/bin/python3 1
+COPY *.yml ./
+COPY *.cfg ./
+COPY roles ./roles
+COPY contrib ./contrib
+COPY inventory ./inventory
+COPY library ./library
+COPY extra_playbooks ./extra_playbooks
+COPY playbooks ./playbooks
+COPY plugins ./plugins

-RUN KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
-    && curl -LO https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$ARCH/kubectl \
-    && chmod a+x kubectl \
-    && mv kubectl /usr/local/bin/kubectl
+RUN apt update -q \
+    && apt install -yq --no-install-recommends \
+       curl \
+       python3 \
+       python3-pip \
+       sshpass \
+       vim \
+       rsync \
+       openssh-client \
+    && pip install --no-compile --no-cache-dir \
+       ansible==7.6.0 \
+       ansible-core==2.14.6 \
+       cryptography==41.0.1 \
+       jinja2==3.1.2 \
+       netaddr==0.8.0 \
+       jmespath==1.0.1 \
+       MarkupSafe==2.1.3 \
+       ruamel.yaml==0.17.21 \
+       passlib==1.7.4 \
+    && KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
+    && curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \
+    && echo $(curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \
+    && chmod a+x /usr/local/bin/kubectl \
+    && rm -rf /var/lib/apt/lists/* /var/log/* \
+    && find /usr -type d -name '*__pycache__' -prune -exec rm -rf {} \;
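Since the new Dockerfile pins its own Ansible toolchain and copies only the playbook-relevant directories, the image can be built and exercised directly; a sketch (the `kubespray:local` tag is illustrative):

```ShellSession
# Build from the repository root, then dry-run a playbook from inside
# the image, mounting the inventory as the README describes.
docker build -t kubespray:local .
docker run --rm -it \
  --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
  kubespray:local \
  ansible-playbook -i /inventory/inventory.ini cluster.yml --syntax-check
```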
LICENSE (2 changes, whitespace only)
@@ -187,7 +187,7 @@
    identification within third-party archives.

    Copyright 2016 Kubespray

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at
OWNERS (2 changes, whitespace only)
@@ -5,4 +5,4 @@ approvers:
 reviewers:
   - kubespray-reviewers
 emeritus_approvers:
   - kubespray-emeritus_approvers
OWNERS_ALIASES
@@ -9,6 +9,8 @@ aliases:
     - oomichi
     - cristicalin
     - liupeng0518
+    - yankay
+    - mzaian
   kubespray-reviewers:
     - holmsten
     - bozzo
@@ -18,6 +20,10 @@ aliases:
     - cristicalin
     - liupeng0518
     - yankay
+    - cyclinder
+    - mzaian
+    - mrfreezeex
+    - erikjiang
   kubespray-emeritus_approvers:
     - riverzhang
     - atoms
123
README.md
123
README.md
@@ -13,7 +13,7 @@ You can get your invite [here](http://slack.k8s.io/)
|
|||||||
|
|
||||||
## Quick Start
|
## Quick Start
|
||||||
|
|
||||||
To deploy the cluster you can use :
|
Below are several ways to use Kubespray to deploy a Kubernetes cluster.
|
||||||
|
|
||||||
### Ansible
|
### Ansible
|
||||||
|
|
||||||
@@ -34,6 +34,13 @@ CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inv
|
|||||||
cat inventory/mycluster/group_vars/all/all.yml
|
cat inventory/mycluster/group_vars/all/all.yml
|
||||||
cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
|
cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
|
||||||
|
|
||||||
|
# Clean up old Kubernetes cluster with Ansible Playbook - run the playbook as root
|
||||||
|
# The option `--become` is required, as for example cleaning up SSL keys in /etc/,
|
||||||
|
# uninstalling old packages and interacting with various systemd daemons.
|
||||||
|
# Without --become the playbook will fail to run!
|
||||||
|
# And be mind it will remove the current kubernetes cluster (if it's running)!
|
||||||
|
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root reset.yml
|
||||||
|
|
||||||
# Deploy Kubespray with Ansible Playbook - run the playbook as root
|
# Deploy Kubespray with Ansible Playbook - run the playbook as root
|
||||||
# The option `--become` is required, as for example writing SSL keys in /etc/,
|
# The option `--become` is required, as for example writing SSL keys in /etc/,
|
||||||
# installing packages and interacting with various systemd daemons.
|
# installing packages and interacting with various systemd daemons.
|
||||||
@@ -41,34 +48,50 @@ cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
|
|||||||
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
|
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
|
||||||
```
|
```
|
||||||
|
|
||||||
Note: When Ansible is already installed via system packages on the control machine, other python packages installed via `sudo pip install -r requirements.txt` will go to a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on Ubuntu).
|
Note: When Ansible is already installed via system packages on the control node,
|
||||||
As a consequence, `ansible-playbook` command will fail with:
|
Python packages installed via `sudo pip install -r requirements.txt` will go to
|
||||||
|
a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on
|
||||||
|
Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on
|
||||||
|
Ubuntu). As a consequence, the `ansible-playbook` command will fail with:
|
||||||
|
|
||||||
```raw
|
```raw
|
||||||
ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path.
|
ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path.
|
||||||
```
|
```
|
||||||
|
|
||||||
probably pointing on a task depending on a module present in requirements.txt.
|
This likely indicates that a task depends on a module present in ``requirements.txt``.
|
||||||
|
|
||||||
One way of solving this would be to uninstall the Ansible package and then, to install it via pip but it is not always possible.
|
One way of addressing this is to uninstall the system Ansible package then
|
||||||
A workaround consists of setting `ANSIBLE_LIBRARY` and `ANSIBLE_MODULE_UTILS` environment variables respectively to the `ansible/modules` and `ansible/module_utils` subdirectories of pip packages installation location, which can be found in the Location field of the output of `pip show [package]` before executing `ansible-playbook`.
|
reinstall Ansible via ``pip``, but this not always possible and one must
|
||||||
|
take care regarding package versions.
|
||||||
|
A workaround consists of setting the `ANSIBLE_LIBRARY`
|
||||||
|
and `ANSIBLE_MODULE_UTILS` environment variables respectively to
|
||||||
|
the `ansible/modules` and `ansible/module_utils` subdirectories of the ``pip``
|
||||||
|
installation location, which is the ``Location`` shown by running
|
||||||
|
`pip show [package]` before executing `ansible-playbook`.
|
||||||
|
|
||||||
-A simple way to ensure you get all the correct version of Ansible is to use the [pre-built docker image from Quay](https://quay.io/repository/kubespray/kubespray?tab=tags).
-You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this:
+A simple way to ensure you get all the correct versions of Ansible is to use
+the [pre-built docker image from Quay](https://quay.io/repository/kubespray/kubespray?tab=tags).
+You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/)
+to access the inventory and SSH key in the container, like this:
 
 ```ShellSession
-docker pull quay.io/kubespray/kubespray:v2.19.1
+git checkout v2.23.2
+docker pull quay.io/kubespray/kubespray:v2.23.2
 docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
   --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
-  quay.io/kubespray/kubespray:v2.19.1 bash
+  quay.io/kubespray/kubespray:v2.23.2 bash
 # Inside the container you may now run the kubespray playbooks:
 ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
 ```
 
+#### Collection
+
+See [here](docs/ansible_collection.md) if you wish to use this repository as an Ansible collection.
 
 ### Vagrant
 
-For Vagrant we need to install python dependencies for provisioning tasks.
-Check if Python and pip are installed:
+For Vagrant we need to install Python dependencies for provisioning tasks.
+Check that ``Python`` and ``pip`` are installed:
 
 ```ShellSession
 python -V && pip -V

@@ -113,15 +136,16 @@ vagrant up
 - [Air-Gap installation](docs/offline-environment.md)
 - [NTP](docs/ntp.md)
 - [Hardening](docs/hardening.md)
+- [Mirror](docs/mirror.md)
 - [Roadmap](docs/roadmap.md)
 
 ## Supported Linux Distributions
 
 - **Flatcar Container Linux by Kinvolk**
-- **Debian** Bullseye, Buster, Jessie, Stretch
+- **Debian** Bookworm, Bullseye, Buster
-- **Ubuntu** 16.04, 18.04, 20.04, 22.04
+- **Ubuntu** 20.04, 22.04
 - **CentOS/RHEL** 7, [8, 9](docs/centos.md#centos-8)
-- **Fedora** 35, 36
+- **Fedora** 37, 38
 - **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
 - **openSUSE** Leap 15.x/Tumbleweed
 - **Oracle Linux** 7, [8, 9](docs/centos.md#centos-8)

@@ -129,36 +153,37 @@ vagrant up
 - **Rocky Linux** [8, 9](docs/centos.md#centos-8)
 - **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/kylinlinux.md))
 - **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md))
+- **UOS Linux** (experimental: see [uos linux notes](docs/uoslinux.md))
+- **openEuler** (experimental: see [openEuler notes](docs/openeuler.md))
 
 Note: Upstart/SysV init based OS types are not supported.
 
 ## Supported Components
 
 - Core
-  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.24.6
+  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.27.10
-  - [etcd](https://github.com/etcd-io/etcd) v3.5.4
+  - [etcd](https://github.com/etcd-io/etcd) v3.5.10
   - [docker](https://www.docker.com/) v20.10 (see note)
-  - [containerd](https://containerd.io/) v1.6.8
+  - [containerd](https://containerd.io/) v1.7.13
-  - [cri-o](http://cri-o.io/) v1.24 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
+  - [cri-o](http://cri-o.io/) v1.27 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
 - Network Plugin
-  - [cni-plugins](https://github.com/containernetworking/plugins) v1.1.1
+  - [cni-plugins](https://github.com/containernetworking/plugins) v1.2.0
-  - [calico](https://github.com/projectcalico/calico) v3.23.3
+  - [calico](https://github.com/projectcalico/calico) v3.25.2
   - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
-  - [cilium](https://github.com/cilium/cilium) v1.12.1
+  - [cilium](https://github.com/cilium/cilium) v1.13.4
-  - [flannel](https://github.com/flannel-io/flannel) v0.19.2
+  - [flannel](https://github.com/flannel-io/flannel) v0.22.0
-  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.9.7
+  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.11.5
   - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1
-  - [multus](https://github.com/intel/multus-cni) v3.8
+  - [multus](https://github.com/k8snetworkplumbingwg/multus-cni) v3.8
   - [weave](https://github.com/weaveworks/weave) v2.8.1
-  - [kube-vip](https://github.com/kube-vip/kube-vip) v0.4.2
+  - [kube-vip](https://github.com/kube-vip/kube-vip) v0.5.12
 - Application
-  - [cert-manager](https://github.com/jetstack/cert-manager) v1.9.1
+  - [cert-manager](https://github.com/jetstack/cert-manager) v1.11.1
-  - [coredns](https://github.com/coredns/coredns) v1.8.6
+  - [coredns](https://github.com/coredns/coredns) v1.10.1
-  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.3.1
+  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.8.1
-  - [krew](https://github.com/kubernetes-sigs/krew) v0.4.3
+  - [krew](https://github.com/kubernetes-sigs/krew) v0.4.4
-  - [argocd](https://argoproj.github.io/) v2.4.12
+  - [argocd](https://argoproj.github.io/) v2.8.0
-  - [helm](https://helm.sh/) v3.9.4
+  - [helm](https://helm.sh/) v3.12.3
-  - [metallb](https://metallb.universe.tf/) v0.12.1
+  - [metallb](https://metallb.universe.tf/) v0.13.9
   - [registry](https://github.com/distribution/distribution) v2.8.1
 - Storage Plugin
   - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11

@@ -166,30 +191,30 @@ Note: Upstart/SysV init based OS types are not supported.
   - [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) v0.5.0
   - [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0
   - [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.22.0
-  - [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.4.0
+  - [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.9.2
-  - [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.22
+  - [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.24
-  - [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.4.0
+  - [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0
 
 ## Container Runtime Notes
 
-- The list of available docker version is 18.09, 19.03 and 20.10. The recommended docker version is 20.10. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
+- Supported Docker versions are 18.09, 19.03, 20.10, 23.0 and 24.0. The *recommended* Docker version is 20.10 (except on Debian Bookworm, which no longer supports 20.10 and below). `Kubelet` might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster, look into e.g. the YUM ``versionlock`` plugin or ``apt`` pinning (see the sketch after this list).
 - The cri-o version should be aligned with the respective kubernetes version (i.e. kube_version=1.20.x, crio_version=1.20)
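As a sketch of the version locking mentioned above (the package names are assumptions and vary by distro; adjust to what your package manager actually installed):

```ShellSession
# RHEL/CentOS: freeze the installed docker packages with the versionlock plugin
yum install -y yum-plugin-versionlock
yum versionlock add docker-ce docker-ce-cli

# Debian/Ubuntu: hold the packages so apt upgrades skip them
apt-mark hold docker-ce docker-ce-cli
```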
 ## Requirements
 
-- **Minimum required version of Kubernetes is v1.22**
+- **Minimum required version of Kubernetes is v1.25**
-- **Ansible v2.11+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
+- **Ansible v2.14+, Jinja 2.11+ and python-netaddr are installed on the machine that will run Ansible commands**
 - The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
 - The target servers are configured to allow **IPv4 forwarding**.
 - If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**.
 - The **firewalls are not managed**: you'll need to implement your own rules the way you used to.
   In order to avoid any issue during deployment, you should disable your firewall.
-- If kubespray is ran from non-root user account, correct privilege escalation method
-  should be configured in the target servers. Then the `ansible_become` flag
-  or command parameters `--become or -b` should be specified.
+- If kubespray is run from a non-root user account, the correct privilege escalation method
+  should be configured on the target servers. Then the `ansible_become` flag
+  or the command parameters `--become` or `-b` should be specified (see the example below).
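For example, a typical invocation from a non-root account (inventory path illustrative):

```ShellSession
ansible-playbook -i inventory/sample/inventory.ini --become --become-user=root cluster.yml
```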
 Hardware:
-These limits are safe guarded by Kubespray. Actual requirements for your workload can differ. For a sizing guide go to the [Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/#size-of-master-and-master-components) guide.
+These limits are safeguarded by Kubespray. Actual requirements for your workload can differ. For a sizing guide go to the [Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/#size-of-master-and-master-components) guide.
 
 - Master
   - Memory: 1500 MB

@@ -198,17 +223,15 @@ These limits are safe guarded by Kubespray. Actual requirements for your workloa
 ## Network Plugins
 
-You can choose between 10 network plugins. (default: `calico`, except Vagrant uses `flannel`)
+You can choose among ten network plugins. (default: `calico`, except Vagrant uses `flannel`)
 
 - [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.
 
-- [Calico](https://docs.projectcalico.org/latest/introduction/) is a networking and network policy provider. Calico supports a flexible set of networking options
+- [Calico](https://docs.tigera.io/calico/latest/about/) is a networking and network policy provider. Calico supports a flexible set of networking options
   designed to give you the most efficient networking across a range of situations, including non-overlay
   and overlay networks, with or without BGP. Calico uses the same engine to enforce network policy for hosts,
   pods, and (if using Istio and Envoy) applications at the service mesh layer.
 
-- [canal](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.
-
 - [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.
 
 - [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.

@@ -225,7 +248,10 @@ You can choose between 10 network plugins. (default: `calico`, except Vagrant us
 
 - [multus](docs/multus.md): Multus is a meta CNI plugin that provides multiple network interface support to pods. For each interface Multus delegates CNI calls to secondary CNI plugins such as Calico, macvlan, etc.
 
+- [custom_cni](roles/network-plugin/custom_cni/): You can specify manifests that will be applied to the cluster to bring your own CNI, including ones not supported by Kubespray.
+  See `tests/files/custom_cni/README.md` and `tests/files/custom_cni/values.yaml` for an example with a CNI provided by a Helm Chart.
+
-The choice is defined with the variable `kube_network_plugin`. There is also an
+The network plugin to use is defined by the variable `kube_network_plugin` (see the example below). There is also an
 option to leverage built-in cloud provider networking instead.
 See also [Network checker](docs/netcheck.md).
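For example, selecting a plugin for the whole cluster (paths illustrative; any plugin name listed above is a valid value):

```ShellSession
# either persist it in group_vars...
echo 'kube_network_plugin: cilium' >> inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml
# ...or pass it as an extra var for a single run
ansible-playbook -i inventory/sample/inventory.ini -e kube_network_plugin=cilium cluster.yml
```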
@@ -246,10 +272,11 @@ See also [Network checker](docs/netcheck.md).
 
 - [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/v4/doc/integrations/ansible.rst)
 - [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform)
+- [Kubean](https://github.com/kubean-io/kubean)
 
 ## CI Tests
 
-[](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/pipelines)
+[](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/-/pipelines)
 
 CI/end-to-end tests sponsored by: [CNCF](https://cncf.io), [Equinix Metal](https://metal.equinix.com/), [OVHcloud](https://www.ovhcloud.com/), [ELASTX](https://elastx.se/).
@@ -60,7 +60,7 @@ release-notes --start-sha <The start commit-id> --end-sha <The end commit-id> --
 ```
 
 If the release note file (/tmp/kubespray-release-note) contains "### Uncategorized" pull requests, those pull requests don't have a valid kind label (`kind/feature`, etc.).
-It is necessary to put a valid label on each pull request and run the above release-notes command again to get a better release note)
+It is necessary to put a valid label on each pull request and run the above release-notes command again to get a better release note.
 
 ## Container image creation
Vagrantfile (vendored): 40 changed lines

@@ -19,9 +19,8 @@ SUPPORTED_OS = {
   "flatcar-beta" => {box: "flatcar-beta", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["beta"]},
   "flatcar-alpha" => {box: "flatcar-alpha", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["alpha"]},
   "flatcar-edge" => {box: "flatcar-edge", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["edge"]},
-  "ubuntu1604" => {box: "generic/ubuntu1604", user: "vagrant"},
-  "ubuntu1804" => {box: "generic/ubuntu1804", user: "vagrant"},
   "ubuntu2004" => {box: "generic/ubuntu2004", user: "vagrant"},
+  "ubuntu2204" => {box: "generic/ubuntu2204", user: "vagrant"},
   "centos" => {box: "centos/7", user: "vagrant"},
   "centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
   "centos8" => {box: "centos/8", user: "vagrant"},
@@ -29,8 +28,8 @@ SUPPORTED_OS = {
   "almalinux8" => {box: "almalinux/8", user: "vagrant"},
   "almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
   "rockylinux8" => {box: "generic/rocky8", user: "vagrant"},
-  "fedora35" => {box: "fedora/35-cloud-base", user: "vagrant"},
-  "fedora36" => {box: "fedora/36-cloud-base", user: "vagrant"},
+  "fedora37" => {box: "fedora/37-cloud-base", user: "vagrant"},
+  "fedora38" => {box: "fedora/38-cloud-base", user: "vagrant"},
   "opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"},
   "opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
   "oraclelinux" => {box: "generic/oracle7", user: "vagrant"},

@@ -53,16 +52,16 @@ $shared_folders ||= {}
 $forwarded_ports ||= {}
 $subnet ||= "172.18.8"
 $subnet_ipv6 ||= "fd3c:b398:0698:0756"
-$os ||= "ubuntu1804"
+$os ||= "ubuntu2004"
 $network_plugin ||= "flannel"
-# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
+# Setting multi_networking to true will install Multus: https://github.com/k8snetworkplumbingwg/multus-cni
 $multi_networking ||= "False"
 $download_run_once ||= "True"
 $download_force_cache ||= "False"
 # The first three nodes are etcd servers
-$etcd_instances ||= $num_instances
+$etcd_instances ||= [$num_instances, 3].min
 # The first two nodes are kube masters
-$kube_master_instances ||= $num_instances == 1 ? $num_instances : ($num_instances - 1)
+$kube_master_instances ||= [$num_instances, 2].min
 # All nodes are kube nodes
 $kube_node_instances ||= $num_instances
 # The following only works when using the libvirt provider
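These `$...` defaults can be overridden without editing the Vagrantfile itself; a sketch, assuming the optional `vagrant/config.rb` override file that this Vagrantfile loads:

```ShellSession
mkdir -p vagrant
cat > vagrant/config.rb <<'EOF'
$os = "ubuntu2204"
$num_instances = 3
$network_plugin = "calico"
EOF
vagrant up
```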
@@ -82,6 +81,13 @@ $playbook ||= "cluster.yml"
 
 host_vars = {}
 
+# throw error if os is not supported
+if ! SUPPORTED_OS.key?($os)
+  puts "Unsupported OS: #{$os}"
+  puts "Supported OS are: #{SUPPORTED_OS.keys.join(', ')}"
+  exit 1
+end
+
 $box = SUPPORTED_OS[$os][:box]
 # if $inventory is not set, try to use example
 $inventory = "inventory/sample" if ! $inventory
@@ -201,7 +207,8 @@ Vagrant.configure("2") do |config|
       end
 
       ip = "#{$subnet}.#{i+100}"
-      node.vm.network :private_network, ip: ip,
+      node.vm.network :private_network,
+        :ip => ip,
         :libvirt__guest_ipv6 => 'yes',
         :libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
         :libvirt__ipv6_prefix => "64",
@@ -211,14 +218,22 @@ Vagrant.configure("2") do |config|
       # Disable swap for each vm
       node.vm.provision "shell", inline: "swapoff -a"
 
-      # ubuntu1804 and ubuntu2004 have IPv6 explicitly disabled. This undoes that.
-      if ["ubuntu1804", "ubuntu2004"].include? $os
+      # ubuntu2004 and ubuntu2204 have IPv6 explicitly disabled. This undoes that.
+      if ["ubuntu2004", "ubuntu2204"].include? $os
         node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf"
         node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf"
       end
+      # Hack for fedora37/38 to get the IP address of the second interface
+      if ["fedora37", "fedora38"].include? $os
+        config.vm.provision "shell", inline: <<-SHELL
+          nmcli conn modify 'Wired connection 2' ipv4.addresses $(cat /etc/sysconfig/network-scripts/ifcfg-eth1 | grep IPADDR | cut -d "=" -f2)
+          nmcli conn modify 'Wired connection 2' ipv4.method manual
+          service NetworkManager restart
+        SHELL
+      end
 
       # Disable firewalld on oraclelinux/redhat vms
-      if ["oraclelinux","oraclelinux8","rhel7","rhel8"].include? $os
+      if ["oraclelinux","oraclelinux8","rhel7","rhel8","rockylinux8"].include? $os
         node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld"
       end
@@ -248,6 +263,7 @@ Vagrant.configure("2") do |config|
       if i == $num_instances
         node.vm.provision "ansible" do |ansible|
           ansible.playbook = $playbook
+          ansible.compatibility_mode = "2.0"
           ansible.verbose = $ansible_verbosity
           $ansible_inventory_path = File.join( $inventory, "hosts.ini")
           if File.exist?($ansible_inventory_path)
cluster.yml: 132 changed lines

@@ -1,131 +1,3 @@
 ---
-- name: Check ansible version
-  import_playbook: ansible_version.yml
+- name: Install Kubernetes
+  ansible.builtin.import_playbook: playbooks/cluster.yml
-
-- name: Ensure compatibility with old groups
-  import_playbook: legacy_groups.yml
-
-- hosts: bastion[0]
-  gather_facts: False
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
-
-- hosts: k8s_cluster:etcd
-  strategy: linear
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  gather_facts: false
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: bootstrap-os, tags: bootstrap-os}
-
-- name: Gather facts
-  tags: always
-  import_playbook: facts.yml
-
-- hosts: k8s_cluster:etcd
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: kubernetes/preinstall, tags: preinstall }
-    - { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
-    - { role: download, tags: download, when: "not skip_downloads" }
-
-- hosts: etcd:kube_control_plane
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - role: etcd
-      tags: etcd
-      vars:
-        etcd_cluster_setup: true
-        etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
-      when: etcd_deployment_type != "kubeadm"
-
-- hosts: k8s_cluster
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - role: etcd
-      tags: etcd
-      vars:
-        etcd_cluster_setup: false
-        etcd_events_cluster_setup: false
-      when:
-        - etcd_deployment_type != "kubeadm"
-        - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
-        - kube_network_plugin != "calico" or calico_datastore == "etcd"
-
-- hosts: k8s_cluster
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: kubernetes/node, tags: node }
-
-- hosts: kube_control_plane
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: kubernetes/control-plane, tags: master }
-    - { role: kubernetes/client, tags: client }
-    - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
-
-- hosts: k8s_cluster
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: kubernetes/kubeadm, tags: kubeadm}
-    - { role: kubernetes/node-label, tags: node-label }
-    - { role: network_plugin, tags: network }
-
-- hosts: calico_rr
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }
-
-- hosts: kube_control_plane[0]
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
-
-- hosts: kube_control_plane
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
-    - { role: kubernetes-apps/network_plugin, tags: network }
-    - { role: kubernetes-apps/policy_controller, tags: policy-controller }
-    - { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
-    - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
-    - { role: kubernetes-apps, tags: apps }
-
-- name: Apply resolv.conf changes now that cluster DNS is up
-  hosts: k8s_cluster
-  gather_facts: False
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  environment: "{{ proxy_disable_env }}"
-  roles:
-    - { role: kubespray-defaults }
-    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
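The top-level entry point is now a thin shim importing `playbooks/cluster.yml`, so the usual invocation is unchanged (inventory path illustrative):

```ShellSession
ansible-playbook -i inventory/sample/inventory.ini --become cluster.yml
```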
@@ -39,7 +39,7 @@ class SearchEC2Tags(object):
       hosts[group] = []
       tag_key = "kubespray-role"
       tag_value = ["*"+group+"*"]
-      region = os.environ['REGION']
+      region = os.environ['AWS_REGION']
 
       ec2 = boto3.resource('ec2', region)
       filters = [{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}]
@@ -67,6 +67,11 @@ class SearchEC2Tags(object):
         if node_labels_tag:
           ansible_host['node_labels'] = dict([ label.strip().split('=') for label in node_labels_tag[0]['Value'].split(',') ])
 
+        ## Set when instance actually has node_taints
+        node_taints_tag = list(filter(lambda t: t['Key'] == 'kubespray-node-taints', instance.tags))
+        if node_taints_tag:
+          ansible_host['node_taints'] = list([ taint.strip() for taint in node_taints_tag[0]['Value'].split(',') ])
+
         hosts[group].append(dns_name)
         hosts['_meta']['hostvars'][dns_name] = ansible_host
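A sketch of tagging an instance so this inventory script picks up the taints (instance ID, region and values illustrative):

```ShellSession
export AWS_REGION=eu-west-1
aws ec2 create-tags --resources i-0123456789abcdef0 \
  --tags Key=kubespray-role,Value=kube_node \
         Key=kubespray-node-taints,Value=dedicated=gpu:NoSchedule
```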
@@ -1 +1 @@
 boto3  # Apache-2.0

contrib/azurerm/.gitignore (vendored): 2 changed lines

@@ -1,2 +1,2 @@
 .generated
 /inventory
@@ -1,5 +1,6 @@
 ---
-- hosts: localhost
+- name: Generate Azure inventory
+  hosts: localhost
   gather_facts: False
   roles:
     - generate-inventory

@@ -1,5 +1,6 @@
 ---
-- hosts: localhost
+- name: Generate Azure inventory
+  hosts: localhost
   gather_facts: False
   roles:
     - generate-inventory_2

@@ -1,5 +1,6 @@
 ---
-- hosts: localhost
+- name: Generate Azure templates
+  hosts: localhost
   gather_facts: False
   roles:
     - generate-templates
@@ -1,6 +1,6 @@
 ---
 
-- name: Query Azure VMs # noqa 301
+- name: Query Azure VMs
   command: azure vm list-ip-address --json {{ azure_resource_group }}
   register: vm_list_cmd

@@ -1,14 +1,14 @@
 ---
 
-- name: Query Azure VMs IPs # noqa 301
+- name: Query Azure VMs IPs
   command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
   register: vm_ip_list_cmd
 
-- name: Query Azure VMs Roles # noqa 301
+- name: Query Azure VMs Roles
   command: az vm list -o json --resource-group {{ azure_resource_group }}
   register: vm_list_cmd
 
-- name: Query Azure Load Balancer Public IP # noqa 301
+- name: Query Azure Load Balancer Public IP
   command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
   register: lb_pubip_cmd
@@ -31,4 +31,3 @@
 [k8s_cluster:children]
 kube_node
 kube_control_plane
-
@@ -24,14 +24,14 @@ bastionIPAddressName: bastion-pubip
 
 disablePasswordAuthentication: true
 
-sshKeyPath: "/home/{{admin_username}}/.ssh/authorized_keys"
+sshKeyPath: "/home/{{ admin_username }}/.ssh/authorized_keys"
 
 imageReference:
   publisher: "OpenLogic"
   offer: "CentOS"
   sku: "7.5"
   version: "latest"
-imageReferenceJson: "{{imageReference|to_json}}"
+imageReferenceJson: "{{ imageReference | to_json }}"
 
-storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
+storageAccountName: "sa{{ nameSuffix | replace('-', '') }}"
 storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}"
@@ -27,4 +27,4 @@
     }
   }
   ]
 }

@@ -103,4 +103,4 @@
   }
 {% endif %}
   ]
 }

@@ -5,4 +5,4 @@
   "variables": {},
   "resources": [],
   "outputs": {}
 }

@@ -16,4 +16,4 @@
   }
   }
   ]
 }
@@ -1,9 +1,11 @@
 ---
-- hosts: localhost
+- name: Create nodes as docker containers
+  hosts: localhost
   gather_facts: False
   roles:
     - { role: dind-host }
 
-- hosts: containers
+- name: Customize each node containers
+  hosts: containers
   roles:
     - { role: dind-cluster }
@@ -1,9 +1,9 @@
 ---
-- name: set_fact distro_setup
+- name: Set_fact distro_setup
   set_fact:
     distro_setup: "{{ distro_settings[node_distro] }}"
 
-- name: set_fact other distro settings
+- name: Set_fact other distro settings
   set_fact:
     distro_user: "{{ distro_setup['user'] }}"
     distro_ssh_service: "{{ distro_setup['ssh_service'] }}"

@@ -43,7 +43,7 @@
   package:
     name: "{{ item }}"
     state: present
-  with_items: "{{ distro_extra_packages }} + [ 'rsyslog', 'openssh-server' ]"
+  with_items: "{{ distro_extra_packages + ['rsyslog', 'openssh-server'] }}"
 
 - name: Start needed services
   service:

@@ -66,8 +66,8 @@
     dest: "/etc/sudoers.d/{{ distro_user }}"
     mode: 0640
 
-- name: Add my pubkey to "{{ distro_user }}" user authorized keys
-  authorized_key:
+- name: "Add my pubkey to {{ distro_user }} user authorized keys"
+  ansible.posix.authorized_key:
     user: "{{ distro_user }}"
     state: present
-    key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}"
+    key: "{{ lookup('file', lookup('env', 'HOME') + '/.ssh/id_rsa.pub') }}"
@@ -1,9 +1,9 @@
 ---
-- name: set_fact distro_setup
+- name: Set_fact distro_setup
   set_fact:
     distro_setup: "{{ distro_settings[node_distro] }}"
 
-- name: set_fact other distro settings
+- name: Set_fact other distro settings
   set_fact:
     distro_image: "{{ distro_setup['image'] }}"
     distro_init: "{{ distro_setup['init'] }}"

@@ -13,7 +13,7 @@
     distro_agetty_svc: "{{ distro_setup['agetty_svc'] }}"
 
 - name: Create dind node containers from "containers" inventory section
-  docker_container:
+  community.docker.docker_container:
     image: "{{ distro_image }}"
     name: "{{ item }}"
     state: started

@@ -53,7 +53,7 @@
     {{ distro_raw_setup_done }} && echo SKIPPED && exit 0
     until [ "$(readlink /proc/1/exe)" = "{{ distro_pid1_exe }}" ] ; do sleep 1; done
     {{ distro_raw_setup }}
-  delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
+  delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
   with_items: "{{ containers.results }}"
   register: result
   changed_when: result.stdout.find("SKIPPED") < 0

@@ -63,26 +63,25 @@
     until test -S /var/run/dbus/system_bus_socket; do sleep 1; done
     systemctl disable {{ distro_agetty_svc }}
     systemctl stop {{ distro_agetty_svc }}
-  delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
+  delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
   with_items: "{{ containers.results }}"
   changed_when: false
 
 # Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
 # handle manually
-- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave) # noqa 301
+- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)
   raw: |
     echo {{ item | hash('sha1') }} > /etc/machine-id.new
     mv -b /etc/machine-id.new /etc/machine-id
     cmp /etc/machine-id /etc/machine-id~ || true
     systemctl daemon-reload
-  delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
+  delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
   with_items: "{{ containers.results }}"
 
 - name: Early hack image install to adapt for DIND
-  # noqa 302 - this task uses the raw module intentionally
   raw: |
     rm -fv /usr/bin/udevadm /usr/sbin/udevadm
-  delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
+  delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
   with_items: "{{ containers.results }}"
   register: result
   changed_when: result.stdout.find("removed") >= 0
@@ -1,3 +1,3 @@
 configparser>=3.3.0
-ruamel.yaml>=0.15.88
 ipaddress
+ruamel.yaml>=0.15.88

@@ -1,3 +1,3 @@
 hacking>=0.10.2
-pytest>=2.8.0
 mock>=1.3.0
+pytest>=2.8.0
@@ -13,7 +13,7 @@
 # under the License.
 
 import inventory
-from test import support
+from io import StringIO
 import unittest
 from unittest import mock

@@ -41,7 +41,7 @@ class TestInventoryPrintHostnames(unittest.TestCase):
                          'access_ip': '10.90.0.3'}}}})
         with mock.patch('builtins.open', mock_io):
             with self.assertRaises(SystemExit) as cm:
-                with support.captured_stdout() as stdout:
+                with mock.patch('sys.stdout', new_callable=StringIO) as stdout:
                     inventory.KubesprayInventory(
                         changed_hosts=["print_hostnames"],
                         config_file="file")
@@ -1,21 +1,27 @@
 [tox]
 minversion = 1.6
 skipsdist = True
-envlist = pep8, py33
+envlist = pep8
 
 [testenv]
-whitelist_externals = py.test
+allowlist_externals = py.test
 usedevelop = True
 deps =
     -r{toxinidir}/requirements.txt
     -r{toxinidir}/test-requirements.txt
 setenv = VIRTUAL_ENV={envdir}
-passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
+passenv =
+    http_proxy
+    HTTP_PROXY
+    https_proxy
+    HTTPS_PROXY
+    no_proxy
+    NO_PROXY
 commands = pytest -vv #{posargs:./tests}
 
 [testenv:pep8]
 usedevelop = False
-whitelist_externals = bash
+allowlist_externals = bash
 commands =
   bash -c "find {toxinidir}/* -type f -name '*.py' -print0 | xargs -0 flake8"
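tox 4 renamed `whitelist_externals` to `allowlist_externals` and expects one `passenv` entry per line; running the lint env is unchanged:

```ShellSession
tox -e pep8
```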
@@ -1,3 +1,2 @@
 #k8s_deployment_user: kubespray
 #k8s_deployment_user_pkey_path: /tmp/ssh_rsa
-
@@ -1,8 +1,9 @@
 ---
-- hosts: localhost
+- name: Prepare Hypervisor to later install kubespray VMs
+  hosts: localhost
   gather_facts: False
   become: yes
   vars:
-    - bootstrap_os: none
+    bootstrap_os: none
   roles:
-    - kvm-setup
+    - { role: kvm-setup }

@@ -22,9 +22,9 @@
     - ntp
   when: ansible_os_family == "Debian"
 
-# Create deployment user if required
-- include: user.yml
+- name: Create deployment user if required
+  include_tasks: user.yml
   when: k8s_deployment_user is defined
 
-# Set proper sysctl values
-- include: sysctl.yml
+- name: Set proper sysctl values
+  import_tasks: sysctl.yml
@@ -1,6 +1,6 @@
 ---
 - name: Load br_netfilter module
-  modprobe:
+  community.general.modprobe:
     name: br_netfilter
     state: present
   register: br_netfilter

@@ -25,7 +25,7 @@
 
 - name: Enable net.ipv4.ip_forward in sysctl
-  sysctl:
+  ansible.posix.sysctl:
     name: net.ipv4.ip_forward
     value: 1
     sysctl_file: "{{ sysctl_file_path }}"

@@ -33,7 +33,7 @@
     reload: yes
 
 - name: Set bridge-nf-call-{arptables,iptables} to 0
-  sysctl:
+  ansible.posix.sysctl:
     name: "{{ item }}"
     state: present
     value: 0
@@ -1,8 +1,9 @@
 ---
 - name: Check ansible version
-  import_playbook: ansible_version.yml
+  import_playbook: kubernetes_sigs.kubespray.ansible_version
 
-- hosts: localhost
+- name: Install mitogen
+  hosts: localhost
   strategy: linear
   vars:
     mitogen_version: 0.3.2

@@ -19,24 +20,25 @@
       - "{{ playbook_dir }}/plugins/mitogen"
       - "{{ playbook_dir }}/dist"
 
-  - name: download mitogen release
+  - name: Download mitogen release
     get_url:
       url: "{{ mitogen_url }}"
       dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
       validate_certs: true
+      mode: 0644
 
-  - name: extract archive
+  - name: Extract archive
     unarchive:
       src: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
       dest: "{{ playbook_dir }}/dist/"
 
-  - name: copy plugin
-    synchronize:
+  - name: Copy plugin
+    ansible.posix.synchronize:
       src: "{{ playbook_dir }}/dist/mitogen-{{ mitogen_version }}/"
       dest: "{{ playbook_dir }}/plugins/mitogen"
 
-  - name: add strategy to ansible.cfg
-    ini_file:
+  - name: Add strategy to ansible.cfg
+    community.general.ini_file:
       path: ansible.cfg
       mode: 0644
       section: "{{ item.section | d('defaults') }}"
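This playbook is typically run locally from the repository root before a deployment; a sketch (verbosity flag illustrative):

```ShellSession
ansible-playbook -c local mitogen.yml -vv
```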
@@ -1,24 +1,29 @@
 ---
-- hosts: gfs-cluster
+- name: Bootstrap hosts
+  hosts: gfs-cluster
   gather_facts: false
   vars:
     ansible_ssh_pipelining: false
   roles:
     - { role: bootstrap-os, tags: bootstrap-os}
 
-- hosts: all
+- name: Gather facts
+  hosts: all
   gather_facts: true
 
-- hosts: gfs-cluster
+- name: Install glusterfs server
+  hosts: gfs-cluster
   vars:
     ansible_ssh_pipelining: true
   roles:
     - { role: glusterfs/server }
 
-- hosts: k8s_cluster
+- name: Install glusterfs clients
+  hosts: k8s_cluster
   roles:
     - { role: glusterfs/client }
 
-- hosts: kube_control_plane[0]
+- name: Configure Kubernetes to use glusterfs
+  hosts: kube_control_plane[0]
   roles:
     - { role: kubernetes-pv }
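A sketch of running this contrib playbook against an inventory that defines the `gfs-cluster` group (paths illustrative):

```ShellSession
ansible-playbook -b -i inventory/sample/hosts.ini contrib/network-storage/glusterfs/glusterfs.yml
```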
@@ -41,4 +41,3 @@
 
 # [network-storage:children]
 # gfs-cluster
-
@@ -6,12 +6,12 @@ galaxy_info:
   description: GlusterFS installation for Linux.
   company: "Midwestern Mac, LLC"
   license: "license (BSD, MIT)"
-  min_ansible_version: 2.0
+  min_ansible_version: "2.0"
   platforms:
     - name: EL
       versions:
-        - 6
-        - 7
+        - "6"
+        - "7"
     - name: Ubuntu
       versions:
         - precise
@@ -3,14 +3,19 @@
 # hyperkube and needs to be installed as part of the system.
 
 # Setup/install tasks.
-- include: setup-RedHat.yml
+- name: Setup RedHat distros for glusterfs
+  include_tasks: setup-RedHat.yml
   when: ansible_os_family == 'RedHat' and groups['gfs-cluster'] is defined
 
-- include: setup-Debian.yml
+- name: Setup Debian distros for glusterfs
+  include_tasks: setup-Debian.yml
   when: ansible_os_family == 'Debian' and groups['gfs-cluster'] is defined
 
 - name: Ensure Gluster mount directories exist.
-  file: "path={{ item }} state=directory mode=0775"
+  file:
+    path: "{{ item }}"
+    state: directory
+    mode: 0775
   with_items:
     - "{{ gluster_mount_dir }}"
   when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined
@@ -7,7 +7,7 @@
   register: glusterfs_ppa_added
   when: glusterfs_ppa_use
 
-- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa 503
+- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa no-handler
   apt:
     name: "{{ item }}"
     state: absent
@@ -1,10 +1,14 @@
 ---
 - name: Install Prerequisites
-  package: name={{ item }} state=present
+  package:
+    name: "{{ item }}"
+    state: present
   with_items:
     - "centos-release-gluster{{ glusterfs_default_release }}"
 
 - name: Install Packages
-  package: name={{ item }} state=present
+  package:
+    name: "{{ item }}"
+    state: present
   with_items:
     - glusterfs-client
@@ -6,12 +6,12 @@ galaxy_info:
   description: GlusterFS installation for Linux.
   company: "Midwestern Mac, LLC"
   license: "license (BSD, MIT)"
-  min_ansible_version: 2.0
+  min_ansible_version: "2.0"
   platforms:
     - name: EL
       versions:
-        - 6
-        - 7
+        - "6"
+        - "7"
     - name: Ubuntu
       versions:
         - precise
@@ -4,78 +4,97 @@
   include_vars: "{{ ansible_os_family }}.yml"
 
 # Install xfs package
-- name: install xfs Debian
-  apt: name=xfsprogs state=present
+- name: Install xfs Debian
+  apt:
+    name: xfsprogs
+    state: present
   when: ansible_os_family == "Debian"
 
-- name: install xfs RedHat
-  package: name=xfsprogs state=present
+- name: Install xfs RedHat
+  package:
+    name: xfsprogs
+    state: present
   when: ansible_os_family == "RedHat"
 
 # Format external volumes in xfs
 - name: Format volumes in xfs
-  filesystem: "fstype=xfs dev={{ disk_volume_device_1 }}"
+  community.general.filesystem:
+    fstype: xfs
+    dev: "{{ disk_volume_device_1 }}"
 
 # Mount external volumes
-- name: mounting new xfs filesystem
-  mount: "name={{ gluster_volume_node_mount_dir }} src={{ disk_volume_device_1 }} fstype=xfs state=mounted"
+- name: Mounting new xfs filesystem
+  ansible.posix.mount:
+    name: "{{ gluster_volume_node_mount_dir }}"
+    src: "{{ disk_volume_device_1 }}"
+    fstype: xfs
+    state: mounted
 
 # Setup/install tasks.
-- include: setup-RedHat.yml
+- name: Setup RedHat distros for glusterfs
+  include_tasks: setup-RedHat.yml
   when: ansible_os_family == 'RedHat'
 
-- include: setup-Debian.yml
+- name: Setup Debian distros for glusterfs
+  include_tasks: setup-Debian.yml
   when: ansible_os_family == 'Debian'
 
 - name: Ensure GlusterFS is started and enabled at boot.
-  service: "name={{ glusterfs_daemon }} state=started enabled=yes"
+  service:
+    name: "{{ glusterfs_daemon }}"
+    state: started
+    enabled: yes
 
 - name: Ensure Gluster brick and mount directories exist.
-  file: "path={{ item }} state=directory mode=0775"
+  file:
+    path: "{{ item }}"
+    state: directory
+    mode: 0775
   with_items:
     - "{{ gluster_brick_dir }}"
     - "{{ gluster_mount_dir }}"
 
 - name: Configure Gluster volume with replicas
-  gluster_volume:
+  gluster.gluster.gluster_volume:
     state: present
     name: "{{ gluster_brick_name }}"
     brick: "{{ gluster_brick_dir }}"
     replicas: "{{ groups['gfs-cluster'] | length }}"
-    cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
+    cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
     host: "{{ inventory_hostname }}"
     force: yes
   run_once: true
-  when: groups['gfs-cluster']|length > 1
+  when: groups['gfs-cluster'] | length > 1
 
 - name: Configure Gluster volume without replicas
-  gluster_volume:
+  gluster.gluster.gluster_volume:
     state: present
     name: "{{ gluster_brick_name }}"
     brick: "{{ gluster_brick_dir }}"
-    cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
+    cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
    host: "{{ inventory_hostname }}"
     force: yes
   run_once: true
-  when: groups['gfs-cluster']|length <= 1
+  when: groups['gfs-cluster'] | length <= 1
 
 - name: Mount glusterfs to retrieve disk size
-  mount:
+  ansible.posix.mount:
     name: "{{ gluster_mount_dir }}"
|
||||||
src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
|
src: "{{ ip | default(ansible_default_ipv4['address']) }}:/gluster"
|
||||||
fstype: glusterfs
|
fstype: glusterfs
|
||||||
opts: "defaults,_netdev"
|
opts: "defaults,_netdev"
|
||||||
state: mounted
|
state: mounted
|
||||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||||
|
|
||||||
- name: Get Gluster disk size
|
- name: Get Gluster disk size
|
||||||
setup: filter=ansible_mounts
|
setup:
|
||||||
|
filter: ansible_mounts
|
||||||
register: mounts_data
|
register: mounts_data
|
||||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||||
|
|
||||||
- name: Set Gluster disk size to variable
|
- name: Set Gluster disk size to variable
|
||||||
set_fact:
|
set_fact:
|
||||||
gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024*1024*1024)) | int }}"
|
gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024 * 1024 * 1024)) | int }}"
|
||||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||||
|
|
||||||
- name: Create file on GlusterFS
|
- name: Create file on GlusterFS
|
||||||
@@ -86,9 +105,9 @@
 when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

 - name: Unmount glusterfs
-mount:
+ansible.posix.mount:
 name: "{{ gluster_mount_dir }}"
 fstype: glusterfs
-src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
+src: "{{ ip | default(ansible_default_ipv4['address']) }}:/gluster"
 state: unmounted
 when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
@@ -7,7 +7,7 @@
 register: glusterfs_ppa_added
 when: glusterfs_ppa_use

-- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa 503
+- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa no-handler
 apt:
 name: "{{ item }}"
 state: absent
@@ -1,11 +1,15 @@
 ---
 - name: Install Prerequisites
-package: name={{ item }} state=present
+package:
+name: "{{ item }}"
+state: present
 with_items:
 - "centos-release-gluster{{ glusterfs_default_release }}"

 - name: Install Packages
-package: name={{ item }} state=present
+package:
+name: "{{ item }}"
+state: present
 with_items:
 - glusterfs-server
 - glusterfs-client
@@ -18,6 +18,6 @@
 kubectl: "{{ bin_dir }}/kubectl"
 resource: "{{ item.item.type }}"
 filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
-state: "{{ item.changed | ternary('latest','present') }}"
+state: "{{ item.changed | ternary('latest', 'present') }}"
 with_items: "{{ gluster_pv.results }}"
 when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined
@@ -21,4 +21,3 @@
 {% endfor %}
 ]
 }
-
@@ -1,9 +1,11 @@
 ---
-- hosts: kube_control_plane[0]
+- name: Tear down heketi
+hosts: kube_control_plane[0]
 roles:
 - { role: tear-down }

-- hosts: heketi-node
+- name: Teardown disks in heketi
+hosts: heketi-node
 become: yes
 roles:
 - { role: tear-down-disks }
@@ -1,9 +1,11 @@
 ---
-- hosts: heketi-node
+- name: Prepare heketi install
+hosts: heketi-node
 roles:
 - { role: prepare }

-- hosts: kube_control_plane[0]
+- name: Provision heketi
+hosts: kube_control_plane[0]
 tags:
 - "provision"
 roles:
@@ -5,7 +5,7 @@
 - "dm_snapshot"
 - "dm_mirror"
 - "dm_thin_pool"
-modprobe:
+community.general.modprobe:
 name: "{{ item }}"
 state: "present"
@@ -1,3 +1,3 @@
 ---
-- name: "stop port forwarding"
+- name: "Stop port forwarding"
 command: "killall "
@@ -7,9 +7,9 @@

 - name: "Bootstrap heketi."
 when:
-- "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Service']\"))|length == 0"
+- "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Service']\")) | length == 0"
-- "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Deployment']\"))|length == 0"
+- "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Deployment']\")) | length == 0"
-- "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod']\"))|length == 0"
+- "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Pod']\")) | length == 0"
 include_tasks: "bootstrap/deploy.yml"

 # Prepare heketi topology
@@ -20,11 +20,11 @@

 - name: "Ensure heketi bootstrap pod is up."
 assert:
-that: "(initial_heketi_pod.stdout|from_json|json_query('items[*]'))|length == 1"
+that: "(initial_heketi_pod.stdout | from_json | json_query('items[*]')) | length == 1"

 - name: Store the initial heketi pod name
 set_fact:
-initial_heketi_pod_name: "{{ initial_heketi_pod.stdout|from_json|json_query(\"items[*].metadata.name|[0]\") }}"
+initial_heketi_pod_name: "{{ initial_heketi_pod.stdout | from_json | json_query(\"items[*].metadata.name | [0]\") }}"

 - name: "Test heketi topology."
 changed_when: false
@@ -32,7 +32,7 @@
 command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"

 - name: "Load heketi topology."
-when: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*]\")|flatten|length == 0"
+when: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*]\") | flatten | length == 0"
 include_tasks: "bootstrap/topology.yml"

 # Provision heketi database volume
@@ -58,7 +58,7 @@
 service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
 job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
 when:
-- "heketi_storage_state.stdout|from_json|json_query(secret_query)|length == 0"
+- "heketi_storage_state.stdout | from_json | json_query(secret_query) | length == 0"
-- "heketi_storage_state.stdout|from_json|json_query(endpoints_query)|length == 0"
+- "heketi_storage_state.stdout | from_json | json_query(endpoints_query) | length == 0"
-- "heketi_storage_state.stdout|from_json|json_query(service_query)|length == 0"
+- "heketi_storage_state.stdout | from_json | json_query(service_query) | length == 0"
-- "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 0"
+- "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 0"
@@ -17,11 +17,11 @@
 register: "initial_heketi_state"
 vars:
 initial_heketi_state: { stdout: "{}" }
-pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]"
+pods_query: "items[?kind=='Pod'].status.conditions | [0][?type=='Ready'].status | [0]"
-deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
+deployments_query: "items[?kind=='Deployment'].status.conditions | [0][?type=='Available'].status | [0]"
 command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
 until:
-- "initial_heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
+- "initial_heketi_state.stdout | from_json | json_query(pods_query) == 'True'"
-- "initial_heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
+- "initial_heketi_state.stdout | from_json | json_query(deployments_query) == 'True'"
 retries: 60
 delay: 5
@@ -15,10 +15,10 @@
 service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
 job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
 when:
-- "heketi_storage_state.stdout|from_json|json_query(secret_query)|length == 0"
+- "heketi_storage_state.stdout | from_json | json_query(secret_query) | length == 0"
-- "heketi_storage_state.stdout|from_json|json_query(endpoints_query)|length == 0"
+- "heketi_storage_state.stdout | from_json | json_query(endpoints_query) | length == 0"
-- "heketi_storage_state.stdout|from_json|json_query(service_query)|length == 0"
+- "heketi_storage_state.stdout | from_json | json_query(service_query) | length == 0"
-- "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 0"
+- "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 0"
 register: "heketi_storage_result"
 - name: "Get state of heketi database copy job."
 command: "{{ bin_dir }}/kubectl get jobs --output=json"
@@ -28,6 +28,6 @@
 heketi_storage_state: { stdout: "{}" }
 job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job' && status.succeeded==1]"
 until:
-- "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 1"
+- "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 1"
 retries: 60
 delay: 5
@@ -5,10 +5,10 @@
 changed_when: false
 - name: "Delete bootstrap Heketi."
 command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
-when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0"
+when: "heketi_resources.stdout | from_json | json_query('items[*]') | length > 0"
-- name: "Ensure there is nothing left over." # noqa 301
+- name: "Ensure there is nothing left over."
 command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
 register: "heketi_result"
-until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
+until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
 retries: 60
 delay: 5
@@ -14,7 +14,7 @@
 - name: "Copy topology configuration into container."
 changed_when: false
 command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
-- name: "Load heketi topology." # noqa 503
+- name: "Load heketi topology." # noqa no-handler
 when: "render.changed"
 command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
 register: "load_heketi"
@@ -22,6 +22,6 @@
 changed_when: false
 register: "heketi_topology"
 command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
-until: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\")|flatten|length == groups['heketi-node']|length"
+until: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\") | flatten | length == groups['heketi-node'] | length"
 retries: 60
 delay: 5
@@ -6,19 +6,19 @@
 - name: "Get heketi volumes."
 changed_when: false
 command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
-with_items: "{{ heketi_volumes.stdout|from_json|json_query(\"volumes[*]\") }}"
+with_items: "{{ heketi_volumes.stdout | from_json | json_query(\"volumes[*]\") }}"
 loop_control: { loop_var: "volume_id" }
 register: "volumes_information"
 - name: "Test heketi database volume."
 set_fact: { heketi_database_volume_exists: true }
 with_items: "{{ volumes_information.results }}"
 loop_control: { loop_var: "volume_information" }
-vars: { volume: "{{ volume_information.stdout|from_json }}" }
+vars: { volume: "{{ volume_information.stdout | from_json }}" }
 when: "volume.name == 'heketidbstorage'"
 - name: "Provision database volume."
 command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
 when: "heketi_database_volume_exists is undefined"
-- name: "Copy configuration from pod." # noqa 301
+- name: "Copy configuration from pod."
 become: true
 command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"
 - name: "Get heketi volume ids."
@@ -28,14 +28,14 @@
 - name: "Get heketi volumes."
 changed_when: false
 command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
-with_items: "{{ heketi_volumes.stdout|from_json|json_query(\"volumes[*]\") }}"
+with_items: "{{ heketi_volumes.stdout | from_json | json_query(\"volumes[*]\") }}"
 loop_control: { loop_var: "volume_id" }
 register: "volumes_information"
 - name: "Test heketi database volume."
 set_fact: { heketi_database_volume_created: true }
 with_items: "{{ volumes_information.results }}"
 loop_control: { loop_var: "volume_information" }
-vars: { volume: "{{ volume_information.stdout|from_json }}" }
+vars: { volume: "{{ volume_information.stdout | from_json }}" }
 when: "volume.name == 'heketidbstorage'"
 - name: "Ensure heketi database volume exists."
 assert: { that: "heketi_database_volume_created is defined", msg: "Heketi database volume does not exist." }
@@ -23,8 +23,8 @@
 changed_when: false
 vars:
 daemonset_state: { stdout: "{}" }
-ready: "{{ daemonset_state.stdout|from_json|json_query(\"status.numberReady\") }}"
+ready: "{{ daemonset_state.stdout | from_json | json_query(\"status.numberReady\") }}"
-desired: "{{ daemonset_state.stdout|from_json|json_query(\"status.desiredNumberScheduled\") }}"
+desired: "{{ daemonset_state.stdout | from_json | json_query(\"status.desiredNumberScheduled\") }}"
 until: "ready | int >= 3"
 retries: 60
 delay: 5
@@ -5,7 +5,7 @@
 changed_when: false

 - name: "Assign storage label"
-when: "label_present.stdout_lines|length == 0"
+when: "label_present.stdout_lines | length == 0"
 command: "{{ bin_dir }}/kubectl label node {{ node }} storagenode=glusterfs"

 - name: Get storage nodes again
@@ -15,5 +15,5 @@

 - name: Ensure the label has been set
 assert:
-that: "label_present|length > 0"
+that: "label_present | length > 0"
 msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs."
@@ -24,11 +24,11 @@
 deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
 command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json"
 until:
-- "heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
+- "heketi_state.stdout | from_json | json_query(pods_query) == 'True'"
-- "heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
+- "heketi_state.stdout | from_json | json_query(deployments_query) == 'True'"
 retries: 60
 delay: 5

 - name: Set the Heketi pod name
 set_fact:
-heketi_pod_name: "{{ heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"
+heketi_pod_name: "{{ heketi_state.stdout | from_json | json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"
@@ -12,7 +12,7 @@
 - name: "Render storage class configuration."
 become: true
 vars:
-endpoint_address: "{{ (heketi_service.stdout|from_json).spec.clusterIP }}"
+endpoint_address: "{{ (heketi_service.stdout | from_json).spec.clusterIP }}"
 template:
 src: "storageclass.yml.j2"
 dest: "{{ kube_config_dir }}/storageclass.yml"
@@ -11,16 +11,16 @@
 src: "topology.json.j2"
 dest: "{{ kube_config_dir }}/topology.json"
 mode: 0644
-- name: "Copy topology configuration into container." # noqa 503
+- name: "Copy topology configuration into container." # noqa no-handler
 when: "rendering.changed"
 command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
-- name: "Load heketi topology." # noqa 503
+- name: "Load heketi topology." # noqa no-handler
 when: "rendering.changed"
 command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
 - name: "Get heketi topology."
 register: "heketi_topology"
 changed_when: false
 command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
-until: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\")|flatten|length == groups['heketi-node']|length"
+until: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\") | flatten | length == groups['heketi-node'] | length"
 retries: 60
 delay: 5
@@ -22,7 +22,7 @@
 ignore_errors: true # noqa ignore-errors
 changed_when: false

-- name: "Remove volume groups." # noqa 301
+- name: "Remove volume groups."
 environment:
 PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
 become: true
@@ -30,7 +30,7 @@
 with_items: "{{ volume_groups.stdout_lines }}"
 loop_control: { loop_var: "volume_group" }

-- name: "Remove physical volume from cluster disks." # noqa 301
+- name: "Remove physical volume from cluster disks."
 environment:
 PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
 become: true
@@ -1,43 +1,43 @@
 ---
-- name: Remove storage class. # noqa 301
+- name: Remove storage class.
 command: "{{ bin_dir }}/kubectl delete storageclass gluster"
 ignore_errors: true # noqa ignore-errors
-- name: Tear down heketi. # noqa 301
+- name: Tear down heketi.
 command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
 ignore_errors: true # noqa ignore-errors
-- name: Tear down heketi. # noqa 301
+- name: Tear down heketi.
 command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
 ignore_errors: true # noqa ignore-errors
 - name: Tear down bootstrap.
 include_tasks: "../../provision/tasks/bootstrap/tear-down.yml"
-- name: Ensure there is nothing left over. # noqa 301
+- name: Ensure there is nothing left over.
 command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
 register: "heketi_result"
-until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
+until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
 retries: 60
 delay: 5
-- name: Ensure there is nothing left over. # noqa 301
+- name: Ensure there is nothing left over.
 command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
 register: "heketi_result"
-until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
+until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
 retries: 60
 delay: 5
-- name: Tear down glusterfs. # noqa 301
+- name: Tear down glusterfs.
 command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
 ignore_errors: true # noqa ignore-errors
-- name: Remove heketi storage service. # noqa 301
+- name: Remove heketi storage service.
 command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
 ignore_errors: true # noqa ignore-errors
-- name: Remove heketi gluster role binding # noqa 301
+- name: Remove heketi gluster role binding
 command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
 ignore_errors: true # noqa ignore-errors
-- name: Remove heketi config secret # noqa 301
+- name: Remove heketi config secret
 command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
 ignore_errors: true # noqa ignore-errors
-- name: Remove heketi db backup # noqa 301
+- name: Remove heketi db backup
 command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
 ignore_errors: true # noqa ignore-errors
-- name: Remove heketi service account # noqa 301
+- name: Remove heketi service account
 command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
 ignore_errors: true # noqa ignore-errors
 - name: Get secrets
@@ -46,6 +46,6 @@
 changed_when: false
 - name: Remove heketi storage secret
 vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" }
-command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout|from_json|json_query(storage_query) }}"
+command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout | from_json | json_query(storage_query) }}"
 when: "storage_query is defined"
 ignore_errors: true # noqa ignore-errors
@@ -27,7 +27,7 @@ manage-offline-container-images.sh register

 ## generate_list.sh

-This script generates the list of downloaded files and the list of container images by `roles/download/defaults/main.yml` file.
+This script generates the list of downloaded files and the list of container images by `roles/download/defaults/main/main.yml` file.

 Run this script will execute `generate_list.yml` playbook in kubespray root directory and generate four files,
 all downloaded files url in files.list, all container images in images.list, jinja2 templates in *.template.
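For orientation, a typical invocation of this script might look like the sketch below; the listing of the four generated files under `contrib/offline/temp/` is an assumption based on the description above, not captured output.

```ShellSession
# run from the kubespray repository root (assumed checkout location)
cd kubespray
./contrib/offline/generate_list.sh
ls contrib/offline/temp/
# expected: files.list  files.list.template  images.list  images.list.template
```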
@@ -5,7 +5,7 @@ CURRENT_DIR=$(cd $(dirname $0); pwd)
 TEMP_DIR="${CURRENT_DIR}/temp"
 REPO_ROOT_DIR="${CURRENT_DIR%/contrib/offline}"

-: ${DOWNLOAD_YML:="roles/download/defaults/main.yml"}
+: ${DOWNLOAD_YML:="roles/download/defaults/main/main.yml"}

 mkdir -p ${TEMP_DIR}

@@ -19,7 +19,7 @@ sed -n '/^downloads:/,/download_defaults:/p' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
 | sed 'N;s#\n# #g' | tr ' ' ':' | sed 's/\"//g' > ${TEMP_DIR}/images.list.template

 # add kube-* images to images list template
-# Those container images are downloaded by kubeadm, then roles/download/defaults/main.yml
+# Those container images are downloaded by kubeadm, then roles/download/defaults/main/main.yml
 # doesn't contain those images. That is reason why here needs to put those images into the
 # list separately.
 KUBE_IMAGES="kube-apiserver kube-controller-manager kube-scheduler kube-proxy"
@@ -1,5 +1,6 @@
 ---
-- hosts: localhost
+- name: Collect container images for offline deployment
+hosts: localhost
 become: no

 roles:
@@ -11,9 +12,11 @@

 tasks:
 # Generate files.list and images.list files from templates.
-- template:
+- name: Collect container images for offline deployment
+template:
 src: ./contrib/offline/temp/{{ item }}.list.template
 dest: ./contrib/offline/temp/{{ item }}.list
+mode: 0644
 with_items:
 - files
 - images
@@ -39,6 +39,6 @@ if [ $? -ne 0 ]; then
 sudo "${runtime}" run \
 --restart=always -d -p ${NGINX_PORT}:80 \
 --volume "${OFFLINE_FILES_DIR}:/usr/share/nginx/html/download" \
---volume "$(pwd)"/nginx.conf:/etc/nginx/nginx.conf \
+--volume "${CURRENT_DIR}"/nginx.conf:/etc/nginx/nginx.conf \
 --name nginx nginx:alpine
 fi
@@ -1,4 +1,5 @@
 ---
-- hosts: all
+- name: Disable firewalld/ufw
+hosts: all
 roles:
 - { role: prepare }
@@ -1,5 +1,8 @@
 ---
-- block:
+- name: Disable firewalld and ufw
+when:
+- disable_service_firewall is defined and disable_service_firewall
+block:
 - name: List services
 service_facts:

@@ -9,7 +12,7 @@
 state: stopped
 enabled: no
 when:
-"'firewalld.service' in services"
+"'firewalld.service' in services and services['firewalld.service'].status != 'not-found'"

 - name: Disable service ufw
 systemd:
@@ -17,7 +20,4 @@
 state: stopped
 enabled: no
 when:
-"'ufw.service' in services"
+"'ufw.service' in services and services['ufw.service'].status != 'not-found'"

-when:
-- disable_service_firewall is defined and disable_service_firewall
@@ -12,7 +12,7 @@ This will install a Kubernetes cluster on Equinix Metal. It should work in all l
 The terraform configuration inspects variables found in
 [variables.tf](variables.tf) to create resources in your Equinix Metal project.
 There is a [python script](../terraform.py) that reads the generated`.tfstate`
-file to generate a dynamic inventory that is consumed by [cluster.yml](../../..//cluster.yml)
+file to generate a dynamic inventory that is consumed by [cluster.yml](../../../cluster.yml)
 to actually install Kubernetes with Kubespray.

 ### Kubernetes Nodes
@@ -60,16 +60,16 @@ Terraform will be used to provision all of the Equinix Metal resources with base
 Create an inventory directory for your cluster by copying the existing sample and linking the `hosts` script (used to build the inventory based on Terraform state):

 ```ShellSession
-cp -LRp contrib/terraform/metal/sample-inventory inventory/$CLUSTER
+cp -LRp contrib/terraform/equinix/sample-inventory inventory/$CLUSTER
 cd inventory/$CLUSTER
-ln -s ../../contrib/terraform/metal/hosts
+ln -s ../../contrib/terraform/equinix/hosts
 ```

 This will be the base for subsequent Terraform commands.

 #### Equinix Metal API access

-Your Equinix Metal API key must be available in the `PACKET_AUTH_TOKEN` environment variable.
+Your Equinix Metal API key must be available in the `METAL_AUTH_TOKEN` environment variable.
 This key is typically stored outside of the code repo since it is considered secret.
 If someone gets this key, they can startup/shutdown hosts in your project!

@@ -80,10 +80,12 @@ The Equinix Metal Project ID associated with the key will be set later in `clust

 For more information about the API, please see [Equinix Metal API](https://metal.equinix.com/developers/api/).

+For more information about terraform provider authentication, please see [the equinix provider documentation](https://registry.terraform.io/providers/equinix/equinix/latest/docs).
+
 Example:

 ```ShellSession
-export PACKET_AUTH_TOKEN="Example-API-Token"
+export METAL_AUTH_TOKEN="Example-API-Token"
 ```

 Note that to deploy several clusters within the same project you need to use [terraform workspace](https://www.terraform.io/docs/state/workspaces.html#using-workspaces).
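As an illustration of the workspace approach mentioned in that note, a minimal sketch follows; the workspace names are placeholders, while the `terraform workspace` subcommands themselves are standard Terraform CLI.

```ShellSession
terraform workspace new cluster-a      # create and switch to a workspace for the first cluster
terraform workspace new cluster-b      # a second cluster in the same Equinix Metal project
terraform workspace select cluster-a   # switch back before planning/applying cluster-a
terraform workspace list               # show all workspaces; '*' marks the active one
```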
@@ -101,7 +103,7 @@ This helps when identifying which hosts are associated with each cluster.
 While the defaults in variables.tf will successfully deploy a cluster, it is recommended to set the following values:

 - cluster_name = the name of the inventory directory created above as $CLUSTER
-- metal_project_id = the Equinix Metal Project ID associated with the Equinix Metal API token above
+- equinix_metal_project_id = the Equinix Metal Project ID associated with the Equinix Metal API token above

 #### Enable localhost access
@@ -119,12 +121,13 @@ Once the Kubespray playbooks are run, a Kubernetes configuration file will be wr

 In the cluster's inventory folder, the following files might be created (either by Terraform
 or manually), to prevent you from pushing them accidentally they are in a
-`.gitignore` file in the `terraform/metal` directory :
+`.gitignore` file in the `contrib/terraform/equinix` directory :

 - `.terraform`
 - `.tfvars`
 - `.tfstate`
 - `.tfstate.backup`
+- `.lock.hcl`

 You can still add them manually if you want to.
@@ -135,7 +138,7 @@ plugins. This is accomplished as follows:

 ```ShellSession
 cd inventory/$CLUSTER
-terraform init ../../contrib/terraform/metal
+terraform -chdir=../../contrib/terraform/metal init -var-file=cluster.tfvars
 ```

 This should finish fairly quickly telling you Terraform has successfully initialized and loaded necessary modules.
@@ -146,7 +149,7 @@ You can apply the Terraform configuration to your cluster with the following com
 issued from your cluster's inventory directory (`inventory/$CLUSTER`):

 ```ShellSession
-terraform apply -var-file=cluster.tfvars ../../contrib/terraform/metal
+terraform -chdir=../../contrib/terraform/equinix apply -var-file=cluster.tfvars
 export ANSIBLE_HOST_KEY_CHECKING=False
 ansible-playbook -i hosts ../../cluster.yml
 ```
@@ -156,7 +159,7 @@ ansible-playbook -i hosts ../../cluster.yml
 You can destroy your new cluster with the following command issued from the cluster's inventory directory:

 ```ShellSession
-terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/metal
+terraform -chdir=../../contrib/terraform/equinix destroy -var-file=cluster.tfvars
 ```

 If you've started the Ansible run, it may also be a good idea to do some manual cleanup:
@@ -1,62 +1,57 @@
-# Configure the Equinix Metal Provider
-provider "metal" {
-}
-
-resource "metal_ssh_key" "k8s" {
+resource "equinix_metal_ssh_key" "k8s" {
 count = var.public_key_path != "" ? 1 : 0
 name = "kubernetes-${var.cluster_name}"
 public_key = chomp(file(var.public_key_path))
 }

-resource "metal_device" "k8s_master" {
+resource "equinix_metal_device" "k8s_master" {
-depends_on = [metal_ssh_key.k8s]
+depends_on = [equinix_metal_ssh_key.k8s]

 count = var.number_of_k8s_masters
 hostname = "${var.cluster_name}-k8s-master-${count.index + 1}"
 plan = var.plan_k8s_masters
-facilities = [var.facility]
+metro = var.metro
 operating_system = var.operating_system
 billing_cycle = var.billing_cycle
-project_id = var.metal_project_id
+project_id = var.equinix_metal_project_id
 tags = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane", "etcd", "kube_node"]
 }

-resource "metal_device" "k8s_master_no_etcd" {
+resource "equinix_metal_device" "k8s_master_no_etcd" {
-depends_on = [metal_ssh_key.k8s]
+depends_on = [equinix_metal_ssh_key.k8s]

 count = var.number_of_k8s_masters_no_etcd
 hostname = "${var.cluster_name}-k8s-master-${count.index + 1}"
 plan = var.plan_k8s_masters_no_etcd
-facilities = [var.facility]
+metro = var.metro
 operating_system = var.operating_system
 billing_cycle = var.billing_cycle
-project_id = var.metal_project_id
+project_id = var.equinix_metal_project_id
 tags = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane"]
 }

-resource "metal_device" "k8s_etcd" {
+resource "equinix_metal_device" "k8s_etcd" {
-depends_on = [metal_ssh_key.k8s]
+depends_on = [equinix_metal_ssh_key.k8s]

 count = var.number_of_etcd
 hostname = "${var.cluster_name}-etcd-${count.index + 1}"
 plan = var.plan_etcd
-facilities = [var.facility]
+metro = var.metro
 operating_system = var.operating_system
 billing_cycle = var.billing_cycle
-project_id = var.metal_project_id
+project_id = var.equinix_metal_project_id
 tags = ["cluster-${var.cluster_name}", "etcd"]
 }

-resource "metal_device" "k8s_node" {
+resource "equinix_metal_device" "k8s_node" {
-depends_on = [metal_ssh_key.k8s]
+depends_on = [equinix_metal_ssh_key.k8s]

 count = var.number_of_k8s_nodes
 hostname = "${var.cluster_name}-k8s-node-${count.index + 1}"
 plan = var.plan_k8s_nodes
-facilities = [var.facility]
+metro = var.metro
 operating_system = var.operating_system
 billing_cycle = var.billing_cycle
-project_id = var.metal_project_id
+project_id = var.equinix_metal_project_id
 tags = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_node"]
 }
contrib/terraform/equinix/output.tf (new file, 15 lines)
@@ -0,0 +1,15 @@
+output "k8s_masters" {
+value = equinix_metal_device.k8s_master.*.access_public_ipv4
+}
+
+output "k8s_masters_no_etc" {
+value = equinix_metal_device.k8s_master_no_etcd.*.access_public_ipv4
+}
+
+output "k8s_etcds" {
+value = equinix_metal_device.k8s_etcd.*.access_public_ipv4
+}
+
+output "k8s_nodes" {
+value = equinix_metal_device.k8s_node.*.access_public_ipv4
+}
contrib/terraform/equinix/provider.tf (new file, 17 lines)
@@ -0,0 +1,17 @@
+terraform {
+required_version = ">= 1.0.0"
+
+provider_meta "equinix" {
+module_name = "kubespray"
+}
+required_providers {
+equinix = {
+source = "equinix/equinix"
+version = "~> 1.14"
+}
+}
+}
+
+# Configure the Equinix Metal Provider
+provider "equinix" {
+}
@@ -1,16 +1,19 @@
 # your Kubernetes cluster name here
 cluster_name = "mycluster"

-# Your Equinix Metal project ID. See hhttps://metal.equinix.com/developers/docs/accounts/
+# Your Equinix Metal project ID. See https://metal.equinix.com/developers/docs/accounts/
-metal_project_id = "Example-API-Token"
+equinix_metal_project_id = "Example-Project-Id"

 # The public SSH key to be uploaded into authorized_keys in bare metal Equinix Metal nodes provisioned
 # leave this value blank if the public key is already setup in the Equinix Metal project
 # Terraform will complain if the public key is setup in Equinix Metal
 public_key_path = "~/.ssh/id_rsa.pub"

-# cluster location
+# Equinix interconnected bare metal across our global metros.
-facility = "ewr1"
+metro = "da"

+# operating_system
+operating_system = "ubuntu_22_04"
+
 # standalone etcds
 number_of_etcd = 0
@@ -2,12 +2,12 @@ variable "cluster_name" {
 default = "kubespray"
 }

-variable "metal_project_id" {
+variable "equinix_metal_project_id" {
 description = "Your Equinix Metal project ID. See https://metal.equinix.com/developers/docs/accounts/"
 }

 variable "operating_system" {
-default = "ubuntu_20_04"
+default = "ubuntu_22_04"
 }

 variable "public_key_path" {
@@ -19,8 +19,8 @@ variable "billing_cycle" {
 default = "hourly"
 }

-variable "facility" {
+variable "metro" {
-default = "dfw2"
+default = "da"
 }

 variable "plan_k8s_masters" {
@@ -54,4 +54,3 @@ variable "number_of_etcd" {
 variable "number_of_k8s_nodes" {
 default = 1
 }
-
@@ -12,7 +12,7 @@ ssh_public_keys = [
 machines = {
   "master-0" : {
     "node_type" : "master",
-    "size" : "Medium",
+    "size" : "standard.medium",
     "boot_disk" : {
       "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
       "root_partition_size" : 50,
@@ -22,7 +22,7 @@ machines = {
   },
   "worker-0" : {
     "node_type" : "worker",
-    "size" : "Large",
+    "size" : "standard.large",
     "boot_disk" : {
       "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
       "root_partition_size" : 50,
@@ -32,7 +32,7 @@ machines = {
   },
   "worker-1" : {
     "node_type" : "worker",
-    "size" : "Large",
+    "size" : "standard.large",
     "boot_disk" : {
       "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
       "root_partition_size" : 50,
@@ -42,7 +42,7 @@ machines = {
   },
   "worker-2" : {
     "node_type" : "worker",
-    "size" : "Large",
+    "size" : "standard.large",
     "boot_disk" : {
       "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
       "root_partition_size" : 50,
@@ -3,8 +3,8 @@ provider "exoscale" {}
|
|||||||
module "kubernetes" {
|
module "kubernetes" {
|
||||||
source = "./modules/kubernetes-cluster"
|
source = "./modules/kubernetes-cluster"
|
||||||
|
|
||||||
prefix = var.prefix
|
prefix = var.prefix
|
||||||
|
zone = var.zone
|
||||||
machines = var.machines
|
machines = var.machines
|
||||||
|
|
||||||
ssh_public_keys = var.ssh_public_keys
|
ssh_public_keys = var.ssh_public_keys
|
||||||
|
|||||||
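The new zone argument threaded into the module presumably has a matching declaration in the root variables.tf, which this diff does not show. A plausible sketch (the default is a hypothetical example; Exoscale zone names follow the "ch-gva-2" pattern):

variable "zone" {
  description = "Exoscale zone to deploy the cluster into"
  type        = string
  default     = "ch-gva-2"  # hypothetical example zone
}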
@@ -1,29 +1,25 @@
-data "exoscale_compute_template" "os_image" {
+data "exoscale_template" "os_image" {
   for_each = var.machines
 
   zone = var.zone
   name = each.value.boot_disk.image_name
 }
 
-data "exoscale_compute" "master_nodes" {
-  for_each = exoscale_compute.master
+data "exoscale_compute_instance" "master_nodes" {
+  for_each = exoscale_compute_instance.master
 
   id = each.value.id
+  zone = var.zone
-
-  # Since private IP address is not assigned until the nics are created we need this
-  depends_on = [exoscale_nic.master_private_network_nic]
 }
 
-data "exoscale_compute" "worker_nodes" {
-  for_each = exoscale_compute.worker
+data "exoscale_compute_instance" "worker_nodes" {
+  for_each = exoscale_compute_instance.worker
 
   id = each.value.id
+  zone = var.zone
-
-  # Since private IP address is not assigned until the nics are created we need this
-  depends_on = [exoscale_nic.worker_private_network_nic]
 }
 
-resource "exoscale_network" "private_network" {
+resource "exoscale_private_network" "private_network" {
   zone = var.zone
   name = "${var.prefix}-network"
 
@@ -34,25 +30,29 @@ resource "exoscale_network" "private_network" {
|
|||||||
netmask = cidrnetmask(var.private_network_cidr)
|
netmask = cidrnetmask(var.private_network_cidr)
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "exoscale_compute" "master" {
|
resource "exoscale_compute_instance" "master" {
|
||||||
for_each = {
|
for_each = {
|
||||||
for name, machine in var.machines :
|
for name, machine in var.machines :
|
||||||
name => machine
|
name => machine
|
||||||
if machine.node_type == "master"
|
if machine.node_type == "master"
|
||||||
}
|
}
|
||||||
|
|
||||||
display_name = "${var.prefix}-${each.key}"
|
name = "${var.prefix}-${each.key}"
|
||||||
template_id = data.exoscale_compute_template.os_image[each.key].id
|
template_id = data.exoscale_template.os_image[each.key].id
|
||||||
size = each.value.size
|
type = each.value.size
|
||||||
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
|
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
|
||||||
state = "Running"
|
state = "Running"
|
||||||
zone = var.zone
|
zone = var.zone
|
||||||
security_groups = [exoscale_security_group.master_sg.name]
|
security_group_ids = [exoscale_security_group.master_sg.id]
|
||||||
|
network_interface {
|
||||||
|
network_id = exoscale_private_network.private_network.id
|
||||||
|
}
|
||||||
|
elastic_ip_ids = [exoscale_elastic_ip.control_plane_lb.id]
|
||||||
|
|
||||||
user_data = templatefile(
|
user_data = templatefile(
|
||||||
"${path.module}/templates/cloud-init.tmpl",
|
"${path.module}/templates/cloud-init.tmpl",
|
||||||
{
|
{
|
||||||
eip_ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
|
eip_ip_address = exoscale_elastic_ip.ingress_controller_lb.ip_address
|
||||||
node_local_partition_size = each.value.boot_disk.node_local_partition_size
|
node_local_partition_size = each.value.boot_disk.node_local_partition_size
|
||||||
ceph_partition_size = each.value.boot_disk.ceph_partition_size
|
ceph_partition_size = each.value.boot_disk.ceph_partition_size
|
||||||
root_partition_size = each.value.boot_disk.root_partition_size
|
root_partition_size = each.value.boot_disk.root_partition_size
|
||||||
@@ -62,25 +62,29 @@ resource "exoscale_compute" "master" {
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "exoscale_compute" "worker" {
|
resource "exoscale_compute_instance" "worker" {
|
||||||
for_each = {
|
for_each = {
|
||||||
for name, machine in var.machines :
|
for name, machine in var.machines :
|
||||||
name => machine
|
name => machine
|
||||||
if machine.node_type == "worker"
|
if machine.node_type == "worker"
|
||||||
}
|
}
|
||||||
|
|
||||||
display_name = "${var.prefix}-${each.key}"
|
name = "${var.prefix}-${each.key}"
|
||||||
template_id = data.exoscale_compute_template.os_image[each.key].id
|
template_id = data.exoscale_template.os_image[each.key].id
|
||||||
size = each.value.size
|
type = each.value.size
|
||||||
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
|
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
|
||||||
state = "Running"
|
state = "Running"
|
||||||
zone = var.zone
|
zone = var.zone
|
||||||
security_groups = [exoscale_security_group.worker_sg.name]
|
security_group_ids = [exoscale_security_group.worker_sg.id]
|
||||||
|
network_interface {
|
||||||
|
network_id = exoscale_private_network.private_network.id
|
||||||
|
}
|
||||||
|
elastic_ip_ids = [exoscale_elastic_ip.ingress_controller_lb.id]
|
||||||
|
|
||||||
user_data = templatefile(
|
user_data = templatefile(
|
||||||
"${path.module}/templates/cloud-init.tmpl",
|
"${path.module}/templates/cloud-init.tmpl",
|
||||||
{
|
{
|
||||||
eip_ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
|
eip_ip_address = exoscale_elastic_ip.ingress_controller_lb.ip_address
|
||||||
node_local_partition_size = each.value.boot_disk.node_local_partition_size
|
node_local_partition_size = each.value.boot_disk.node_local_partition_size
|
||||||
ceph_partition_size = each.value.boot_disk.ceph_partition_size
|
ceph_partition_size = each.value.boot_disk.ceph_partition_size
|
||||||
root_partition_size = each.value.boot_disk.root_partition_size
|
root_partition_size = each.value.boot_disk.root_partition_size
|
||||||
@@ -90,41 +94,33 @@ resource "exoscale_compute" "worker" {
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "exoscale_nic" "master_private_network_nic" {
|
|
||||||
for_each = exoscale_compute.master
|
|
||||||
|
|
||||||
compute_id = each.value.id
|
|
||||||
network_id = exoscale_network.private_network.id
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "exoscale_nic" "worker_private_network_nic" {
|
|
||||||
for_each = exoscale_compute.worker
|
|
||||||
|
|
||||||
compute_id = each.value.id
|
|
||||||
network_id = exoscale_network.private_network.id
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "exoscale_security_group" "master_sg" {
|
resource "exoscale_security_group" "master_sg" {
|
||||||
name = "${var.prefix}-master-sg"
|
name = "${var.prefix}-master-sg"
|
||||||
description = "Security group for Kubernetes masters"
|
description = "Security group for Kubernetes masters"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "exoscale_security_group_rules" "master_sg_rules" {
|
resource "exoscale_security_group_rule" "master_sg_rule_ssh" {
|
||||||
security_group_id = exoscale_security_group.master_sg.id
|
security_group_id = exoscale_security_group.master_sg.id
|
||||||
|
|
||||||
|
for_each = toset(var.ssh_whitelist)
|
||||||
# SSH
|
# SSH
|
||||||
ingress {
|
type = "INGRESS"
|
||||||
protocol = "TCP"
|
start_port = 22
|
||||||
cidr_list = var.ssh_whitelist
|
end_port = 22
|
||||||
ports = ["22"]
|
protocol = "TCP"
|
||||||
}
|
cidr = each.value
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "exoscale_security_group_rule" "master_sg_rule_k8s_api" {
|
||||||
|
security_group_id = exoscale_security_group.master_sg.id
|
||||||
|
|
||||||
|
for_each = toset(var.api_server_whitelist)
|
||||||
# Kubernetes API
|
# Kubernetes API
|
||||||
ingress {
|
type = "INGRESS"
|
||||||
protocol = "TCP"
|
start_port = 6443
|
||||||
cidr_list = var.api_server_whitelist
|
end_port = 6443
|
||||||
ports = ["6443"]
|
protocol = "TCP"
|
||||||
}
|
cidr = each.value
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "exoscale_security_group" "worker_sg" {
|
resource "exoscale_security_group" "worker_sg" {
|
||||||
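The refactor replaces one exoscale_security_group_rules resource holding several ingress {} stanzas with one exoscale_security_group_rule resource per rule, fanned out over each whitelist with for_each. A minimal sketch of what that expansion yields (the CIDR values are hypothetical; the addressing syntax is standard Terraform):

variable "ssh_whitelist" {
  type    = list(string)
  default = ["198.51.100.0/24", "203.0.113.7/32"]  # hypothetical values
}

# for_each = toset(var.ssh_whitelist) creates one rule instance per CIDR,
# addressable in state as, e.g.:
#   exoscale_security_group_rule.master_sg_rule_ssh["198.51.100.0/24"]
#   exoscale_security_group_rule.master_sg_rule_ssh["203.0.113.7/32"]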
@@ -132,62 +128,64 @@ resource "exoscale_security_group" "worker_sg" {
|
|||||||
description = "security group for kubernetes worker nodes"
|
description = "security group for kubernetes worker nodes"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "exoscale_security_group_rules" "worker_sg_rules" {
|
resource "exoscale_security_group_rule" "worker_sg_rule_ssh" {
|
||||||
security_group_id = exoscale_security_group.worker_sg.id
|
security_group_id = exoscale_security_group.worker_sg.id
|
||||||
|
|
||||||
# SSH
|
# SSH
|
||||||
ingress {
|
for_each = toset(var.ssh_whitelist)
|
||||||
protocol = "TCP"
|
type = "INGRESS"
|
||||||
cidr_list = var.ssh_whitelist
|
start_port = 22
|
||||||
ports = ["22"]
|
end_port = 22
|
||||||
}
|
protocol = "TCP"
|
||||||
|
cidr = each.value
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "exoscale_security_group_rule" "worker_sg_rule_http" {
|
||||||
|
security_group_id = exoscale_security_group.worker_sg.id
|
||||||
|
|
||||||
# HTTP(S)
|
# HTTP(S)
|
||||||
ingress {
|
for_each = toset(["80", "443"])
|
||||||
protocol = "TCP"
|
type = "INGRESS"
|
||||||
cidr_list = ["0.0.0.0/0"]
|
start_port = each.value
|
||||||
ports = ["80", "443"]
|
end_port = each.value
|
||||||
}
|
protocol = "TCP"
|
||||||
|
cidr = "0.0.0.0/0"
|
||||||
|
}
|
||||||
|
|
||||||
# Kubernetes Nodeport
|
|
||||||
ingress {
|
resource "exoscale_security_group_rule" "worker_sg_rule_nodeport" {
|
||||||
protocol = "TCP"
|
security_group_id = exoscale_security_group.worker_sg.id
|
||||||
cidr_list = var.nodeport_whitelist
|
|
||||||
ports = ["30000-32767"]
|
# HTTP(S)
|
||||||
|
for_each = toset(var.nodeport_whitelist)
|
||||||
|
type = "INGRESS"
|
||||||
|
start_port = 30000
|
||||||
|
end_port = 32767
|
||||||
|
protocol = "TCP"
|
||||||
|
cidr = each.value
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "exoscale_elastic_ip" "ingress_controller_lb" {
|
||||||
|
zone = var.zone
|
||||||
|
healthcheck {
|
||||||
|
mode = "http"
|
||||||
|
port = 80
|
||||||
|
uri = "/healthz"
|
||||||
|
interval = 10
|
||||||
|
timeout = 2
|
||||||
|
strikes_ok = 2
|
||||||
|
strikes_fail = 3
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "exoscale_ipaddress" "ingress_controller_lb" {
|
resource "exoscale_elastic_ip" "control_plane_lb" {
|
||||||
zone = var.zone
|
zone = var.zone
|
||||||
healthcheck_mode = "http"
|
healthcheck {
|
||||||
healthcheck_port = 80
|
mode = "tcp"
|
||||||
healthcheck_path = "/healthz"
|
port = 6443
|
||||||
healthcheck_interval = 10
|
interval = 10
|
||||||
healthcheck_timeout = 2
|
timeout = 2
|
||||||
healthcheck_strikes_ok = 2
|
strikes_ok = 2
|
||||||
healthcheck_strikes_fail = 3
|
strikes_fail = 3
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "exoscale_secondary_ipaddress" "ingress_controller_lb" {
|
|
||||||
for_each = exoscale_compute.worker
|
|
||||||
|
|
||||||
compute_id = each.value.id
|
|
||||||
ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "exoscale_ipaddress" "control_plane_lb" {
|
|
||||||
zone = var.zone
|
|
||||||
healthcheck_mode = "tcp"
|
|
||||||
healthcheck_port = 6443
|
|
||||||
healthcheck_interval = 10
|
|
||||||
healthcheck_timeout = 2
|
|
||||||
healthcheck_strikes_ok = 2
|
|
||||||
healthcheck_strikes_fail = 3
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "exoscale_secondary_ipaddress" "control_plane_lb" {
|
|
||||||
for_each = exoscale_compute.master
|
|
||||||
|
|
||||||
compute_id = each.value.id
|
|
||||||
ip_address = exoscale_ipaddress.control_plane_lb.ip_address
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,19 +1,19 @@
 output "master_ip_addresses" {
   value = {
-    for key, instance in exoscale_compute.master :
+    for key, instance in exoscale_compute_instance.master :
     instance.name => {
-      "private_ip" = contains(keys(data.exoscale_compute.master_nodes), key) ? data.exoscale_compute.master_nodes[key].private_network_ip_addresses[0] : ""
-      "public_ip" = exoscale_compute.master[key].ip_address
+      "private_ip" = contains(keys(data.exoscale_compute_instance.master_nodes), key) ? data.exoscale_compute_instance.master_nodes[key].private_network_ip_addresses[0] : ""
+      "public_ip" = exoscale_compute_instance.master[key].ip_address
     }
   }
 }
 
 output "worker_ip_addresses" {
   value = {
-    for key, instance in exoscale_compute.worker :
+    for key, instance in exoscale_compute_instance.worker :
     instance.name => {
-      "private_ip" = contains(keys(data.exoscale_compute.worker_nodes), key) ? data.exoscale_compute.worker_nodes[key].private_network_ip_addresses[0] : ""
-      "public_ip" = exoscale_compute.worker[key].ip_address
+      "private_ip" = contains(keys(data.exoscale_compute_instance.worker_nodes), key) ? data.exoscale_compute_instance.worker_nodes[key].private_network_ip_addresses[0] : ""
+      "public_ip" = exoscale_compute_instance.worker[key].ip_address
     }
   }
 }
@@ -23,9 +23,9 @@ output "cluster_private_network_cidr" {
|
|||||||
}
|
}
|
||||||
|
|
||||||
output "ingress_controller_lb_ip_address" {
|
output "ingress_controller_lb_ip_address" {
|
||||||
value = exoscale_ipaddress.ingress_controller_lb.ip_address
|
value = exoscale_elastic_ip.ingress_controller_lb.ip_address
|
||||||
}
|
}
|
||||||
|
|
||||||
output "control_plane_lb_ip_address" {
|
output "control_plane_lb_ip_address" {
|
||||||
value = exoscale_ipaddress.control_plane_lb.ip_address
|
value = exoscale_elastic_ip.control_plane_lb.ip_address
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
 terraform {
   required_providers {
     exoscale = {
       source = "exoscale/exoscale"
       version = ">= 0.21"
     }
   }
@@ -75,6 +75,11 @@ ansible-playbook -i contrib/terraform/gcs/inventory.ini cluster.yml -b -v
 * `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server
 * `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on port 30000-32767 (kubernetes nodeports)
 * `ingress_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to ingress on ports 80 and 443
+* `extra_ingress_firewalls`: Additional ingress firewall rules. The key will be used as the name of the rule
+  * `source_ranges`: List of IP ranges (CIDR). Example: `["8.8.8.8"]`
+  * `protocol`: Protocol. Example: `"tcp"`
+  * `ports`: List of ports, as strings. Example: `["53"]`
+  * `target_tags`: List of target tags (either a machine name, `control-plane`, or `worker`). Example: `["control-plane", "worker-0"]`
 
 ### Optional
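Putting those fields together, one map entry defines one rule; a minimal tfvars sketch (the rule name "dns" and its values are hypothetical, mirroring the examples above):

extra_ingress_firewalls = {
  "dns" : {
    "source_ranges" : ["8.8.8.8"],
    "protocol" : "tcp",
    "ports" : ["53"],
    "target_tags" : ["control-plane", "worker-0"]
  }
}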
@@ -34,4 +34,6 @@ module "kubernetes" {
|
|||||||
api_server_whitelist = var.api_server_whitelist
|
api_server_whitelist = var.api_server_whitelist
|
||||||
nodeport_whitelist = var.nodeport_whitelist
|
nodeport_whitelist = var.nodeport_whitelist
|
||||||
ingress_whitelist = var.ingress_whitelist
|
ingress_whitelist = var.ingress_whitelist
|
||||||
|
|
||||||
|
extra_ingress_firewalls = var.extra_ingress_firewalls
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -219,7 +219,7 @@ resource "google_compute_instance" "master" {
|
|||||||
machine_type = each.value.size
|
machine_type = each.value.size
|
||||||
zone = each.value.zone
|
zone = each.value.zone
|
||||||
|
|
||||||
tags = ["master"]
|
tags = ["control-plane", "master", each.key]
|
||||||
|
|
||||||
boot_disk {
|
boot_disk {
|
||||||
initialize_params {
|
initialize_params {
|
||||||
@@ -325,7 +325,7 @@ resource "google_compute_instance" "worker" {
|
|||||||
machine_type = each.value.size
|
machine_type = each.value.size
|
||||||
zone = each.value.zone
|
zone = each.value.zone
|
||||||
|
|
||||||
tags = ["worker"]
|
tags = ["worker", each.key]
|
||||||
|
|
||||||
boot_disk {
|
boot_disk {
|
||||||
initialize_params {
|
initialize_params {
|
||||||
@@ -398,3 +398,24 @@ resource "google_compute_target_pool" "worker_lb" {
|
|||||||
name = "${var.prefix}-worker-lb-pool"
|
name = "${var.prefix}-worker-lb-pool"
|
||||||
instances = local.worker_target_list
|
instances = local.worker_target_list
|
||||||
}
|
}
|
||||||
|
|
||||||
|
resource "google_compute_firewall" "extra_ingress_firewall" {
|
||||||
|
for_each = {
|
||||||
|
for name, firewall in var.extra_ingress_firewalls :
|
||||||
|
name => firewall
|
||||||
|
}
|
||||||
|
|
||||||
|
name = "${var.prefix}-${each.key}-ingress"
|
||||||
|
network = google_compute_network.main.name
|
||||||
|
|
||||||
|
priority = 100
|
||||||
|
|
||||||
|
source_ranges = each.value.source_ranges
|
||||||
|
|
||||||
|
target_tags = each.value.target_tags
|
||||||
|
|
||||||
|
allow {
|
||||||
|
protocol = each.value.protocol
|
||||||
|
ports = each.value.ports
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
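The resource above iterates over extra_ingress_firewalls, which implies a map-of-objects variable; a plausible declaration sketch (hypothetical, since the matching variables.tf hunk is not shown in this diff):

variable "extra_ingress_firewalls" {
  type = map(object({
    source_ranges = list(string)
    protocol      = string
    ports         = list(string)
    target_tags   = list(string)
  }))
  # Empty by default so existing deployments are unaffected.
  default = {}
}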
Some files were not shown because too many files have changed in this diff.