mirror of
https://github.com/kubernetes-sigs/kubespray.git
synced 2025-12-14 13:54:37 +03:00
Compare commits
525 Commits
v2.22.0
...
lean/pre-c
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
37d824fd2d | ||
|
|
ff48144607 | ||
|
|
0faa805525 | ||
|
|
bc21433a05 | ||
|
|
19851bb07c | ||
|
|
7f7b65d388 | ||
|
|
d50f61eae5 | ||
|
|
77bfb53455 | ||
|
|
0e449ca75e | ||
|
|
f6d9ff4196 | ||
|
|
21aba10e08 | ||
|
|
5616a4a3ee | ||
|
|
4b9349a052 | ||
|
|
7e0a407250 | ||
|
|
1173711acc | ||
|
|
998e04e5a7 | ||
|
|
40cbdceb3c | ||
|
|
e54e7c0e1d | ||
|
|
53ad8d9126 | ||
|
|
96bb0a3e12 | ||
|
|
76dae63c69 | ||
|
|
fae41172ed | ||
|
|
f85111f6d4 | ||
|
|
30d057a0a8 | ||
|
|
4123cf13ef | ||
|
|
5d01dfa179 | ||
|
|
4dbfd42f1d | ||
|
|
0b464b5239 | ||
|
|
dac4705ebe | ||
|
|
d5f6838fba | ||
|
|
354ffe7bd6 | ||
|
|
427f868718 | ||
|
|
d7756d85ef | ||
|
|
2c2b2ed96e | ||
|
|
361d2def09 | ||
|
|
f47ad82991 | ||
|
|
f488ecb6cc | ||
|
|
08293f2ef7 | ||
|
|
fe1a2d5dd9 | ||
|
|
73c2722d00 | ||
|
|
a5714a8c6b | ||
|
|
e410e30694 | ||
|
|
0b2533143f | ||
|
|
3e4ea1065a | ||
|
|
6dbb09435c | ||
|
|
d8a4aea9bc | ||
|
|
a8f58c244b | ||
|
|
169280ba64 | ||
|
|
fa03f4ffd0 | ||
|
|
7aa415e707 | ||
|
|
cd459a04f3 | ||
|
|
a00b0c48fe | ||
|
|
8a1ee990a2 | ||
|
|
523d016767 | ||
|
|
d321e42d9e | ||
|
|
a512b861e0 | ||
|
|
d870a3ee4e | ||
|
|
41036e3b53 | ||
|
|
975362249c | ||
|
|
ce2642f27b | ||
|
|
5dc12b2a15 | ||
|
|
edc33888a3 | ||
|
|
8c12dedf05 | ||
|
|
1697182e73 | ||
|
|
1c638bdb06 | ||
|
|
7eaf2bc4b8 | ||
|
|
0b0faf8f72 | ||
|
|
9bb38163c2 | ||
|
|
a09c73a356 | ||
|
|
d94f3ce965 | ||
|
|
966a8b95de | ||
|
|
a01d0c047a | ||
|
|
21e8809186 | ||
|
|
4cb688d5e4 | ||
|
|
e385ac7b40 | ||
|
|
5ce530c909 | ||
|
|
f82cf29a8a | ||
|
|
9f62f60814 | ||
|
|
315cfe1edd | ||
|
|
e01355834b | ||
|
|
001df231a6 | ||
|
|
def88b26a4 | ||
|
|
537891a380 | ||
|
|
85ae701b0f | ||
|
|
e57e958a39 | ||
|
|
91dea023ae | ||
|
|
245454855d | ||
|
|
3a112e834c | ||
|
|
cf0de0904c | ||
|
|
d772350b04 | ||
|
|
3351dc0925 | ||
|
|
f0e20705aa | ||
|
|
ff4e572e0c | ||
|
|
97e71da97b | ||
|
|
a7f98116ca | ||
|
|
088b1b0cec | ||
|
|
11f35e462c | ||
|
|
da3ff1cc11 | ||
|
|
663fcd104c | ||
|
|
a2019c1c24 | ||
|
|
3a43ac4506 | ||
|
|
f91e00a61b | ||
|
|
c6bdc38776 | ||
|
|
08a7010e80 | ||
|
|
538deff9ea | ||
|
|
cd7d11fea2 | ||
|
|
23b56e3f89 | ||
|
|
eee5b5890d | ||
|
|
ab0ef182fb | ||
|
|
4db3e2c3cf | ||
|
|
3d19e744f0 | ||
|
|
929c818b63 | ||
|
|
4baa2c8704 | ||
|
|
f3065cc5c4 | ||
|
|
ed2059395c | ||
|
|
8919901ed5 | ||
|
|
cc0c3d73dc | ||
|
|
dd0f42171f | ||
|
|
1b870a1862 | ||
|
|
8a423abd0f | ||
|
|
3ec2e497c6 | ||
|
|
7844b8dbac | ||
|
|
e87040d5ba | ||
|
|
d58343d201 | ||
|
|
b2cce8d6dc | ||
|
|
3067e565c0 | ||
|
|
c6fcbf6ee0 | ||
|
|
fdf5988ea8 | ||
|
|
a7d42824be | ||
|
|
9ef6678b7e | ||
|
|
70a54451b1 | ||
|
|
c6758fe544 | ||
|
|
10315590c7 | ||
|
|
03ac02afe4 | ||
|
|
fd83ec9d91 | ||
|
|
c58497cde9 | ||
|
|
baf4842774 | ||
|
|
01c86af77f | ||
|
|
e7d29715b4 | ||
|
|
30da721f82 | ||
|
|
a1cf8291a9 | ||
|
|
ef95eb078a | ||
|
|
7ddc175b70 | ||
|
|
3305383873 | ||
|
|
7f6ca804a1 | ||
|
|
7f785a5e4e | ||
|
|
eff331ad32 | ||
|
|
71fa66c08d | ||
|
|
26af6c7fda | ||
|
|
43c1e3b15e | ||
|
|
69bf6639f3 | ||
|
|
c275b3db37 | ||
|
|
66eaba3775 | ||
|
|
44950efc34 | ||
|
|
90b0151caf | ||
|
|
04e40f2e6f | ||
|
|
7a9def547e | ||
|
|
4317723d3c | ||
|
|
26034b296e | ||
|
|
e250bb65bb | ||
|
|
12c8d0456f | ||
|
|
667bb2c913 | ||
|
|
d40b073f97 | ||
|
|
5d822ad8cb | ||
|
|
4a259ee3f0 | ||
|
|
b34b7e0385 | ||
|
|
a0d2bda742 | ||
|
|
c13b21e830 | ||
|
|
9442f28c60 | ||
|
|
8fa5ae1865 | ||
|
|
65b0604db7 | ||
|
|
082ac10fbb | ||
|
|
8d5091a3f7 | ||
|
|
b60220c597 | ||
|
|
bf42ccee4e | ||
|
|
bfbb3f8d33 | ||
|
|
250b80ee7c | ||
|
|
ffda3656d1 | ||
|
|
f5474ec6cc | ||
|
|
ad9f194c24 | ||
|
|
ef7197f925 | ||
|
|
9648300994 | ||
|
|
4b0a134bc9 | ||
|
|
ad565ad922 | ||
|
|
65e22481c6 | ||
|
|
6f419aa18e | ||
|
|
c698790122 | ||
|
|
de4d6a69ee | ||
|
|
989ba207e9 | ||
|
|
f2bdd4bb2f | ||
|
|
200b630319 | ||
|
|
21289db181 | ||
|
|
c9a44e4089 | ||
|
|
0dbde7536f | ||
|
|
8d53c1723c | ||
|
|
dce68e6839 | ||
|
|
11c01ef600 | ||
|
|
785366c2de | ||
|
|
e3ea19307a | ||
|
|
ee8b909a67 | ||
|
|
1d119f1a3c | ||
|
|
4ea1a0132e | ||
|
|
0ddf872163 | ||
|
|
a487667b9d | ||
|
|
7863fde552 | ||
|
|
758d34a7d1 | ||
|
|
c80f2cd573 | ||
|
|
0e26f6f3e2 | ||
|
|
ab0163a3ad | ||
|
|
2eb588bed9 | ||
|
|
a88bad7947 | ||
|
|
89d42a7716 | ||
|
|
b4dd8b4313 | ||
|
|
4fc1fc729e | ||
|
|
13e1f33898 | ||
|
|
de2c4429a4 | ||
|
|
22bb0976d5 | ||
|
|
a2ed5fcd3d | ||
|
|
6497ecc767 | ||
|
|
54fb75f0e0 | ||
|
|
5a405336ae | ||
|
|
fd6bb0f7fd | ||
|
|
0e971a37aa | ||
|
|
4e52fb7a1f | ||
|
|
3e7b568d3e | ||
|
|
a45a40a398 | ||
|
|
4cb1f529d1 | ||
|
|
fe819a6ec3 | ||
|
|
df5a06dc70 | ||
|
|
64447e745e | ||
|
|
78eb74c252 | ||
|
|
669589f761 | ||
|
|
b7a83531e7 | ||
|
|
a9e29a9eb2 | ||
|
|
a0a2f40295 | ||
|
|
7b7c9f509e | ||
|
|
beb2660aa8 | ||
|
|
3f78bf9298 | ||
|
|
06a2a3ed6c | ||
|
|
eb40523388 | ||
|
|
50fbfa2a9a | ||
|
|
747d8bb4c2 | ||
|
|
e90cae9344 | ||
|
|
bb67d9524d | ||
|
|
a306f15a74 | ||
|
|
8c09c3fda2 | ||
|
|
a656b7ed9a | ||
|
|
2e8b72e278 | ||
|
|
ddf5c6ee12 | ||
|
|
eda7ea5695 | ||
|
|
08c0b34270 | ||
|
|
1a86b4cb6d | ||
|
|
aea150e5dc | ||
|
|
ee2dd4fd28 | ||
|
|
c3b674526d | ||
|
|
565eab901b | ||
|
|
c3315ac742 | ||
|
|
da9b34d1b0 | ||
|
|
243ca5d08f | ||
|
|
29ea790c30 | ||
|
|
ae780e6a9b | ||
|
|
471326f458 | ||
|
|
7395c27932 | ||
|
|
d435edefc4 | ||
|
|
eb73f1d27d | ||
|
|
9a31f3285a | ||
|
|
45a070f1ba | ||
|
|
ccb742c7ab | ||
|
|
cb848fa7cb | ||
|
|
8abf49ae13 | ||
|
|
8f2390a120 | ||
|
|
81a3f81aa1 | ||
|
|
0fb404c775 | ||
|
|
51069223f5 | ||
|
|
17b51240c9 | ||
|
|
306103ed05 | ||
|
|
eb628efbc4 | ||
|
|
2c3ea84e6f | ||
|
|
85f15900a4 | ||
|
|
af1f318852 | ||
|
|
b31afe235f | ||
|
|
a9321aaf86 | ||
|
|
d2944d2813 | ||
|
|
fe02d21d23 | ||
|
|
5160e7e20b | ||
|
|
c440106eff | ||
|
|
a1c47b1b20 | ||
|
|
93724ed29c | ||
|
|
75fecf1542 | ||
|
|
0d7bdc6cca | ||
|
|
c87d70b04b | ||
|
|
fa7a504fa5 | ||
|
|
612cfdceb1 | ||
|
|
70bb19dd23 | ||
|
|
94d3f65f09 | ||
|
|
cf3ac625da | ||
|
|
c2e3071a33 | ||
|
|
21e8b96e22 | ||
|
|
3acacc6150 | ||
|
|
d583d331b5 | ||
|
|
b321ca3e64 | ||
|
|
6b1188e3dc | ||
|
|
0d4f57aa22 | ||
|
|
bc5b38a771 | ||
|
|
f46910eac3 | ||
|
|
adb8ff14b9 | ||
|
|
7ba85710ad | ||
|
|
cbd3a83a06 | ||
|
|
eb015c0362 | ||
|
|
17681a7e31 | ||
|
|
cca7615456 | ||
|
|
a4b15690b8 | ||
|
|
32743868c7 | ||
|
|
7d221be408 | ||
|
|
2d75077d4a | ||
|
|
802da0bcb0 | ||
|
|
6305dd39e9 | ||
|
|
b3f6d05131 | ||
|
|
8ebeb88e57 | ||
|
|
c9d685833b | ||
|
|
f3332af3f2 | ||
|
|
870065517f | ||
|
|
267a8c6025 | ||
|
|
edff3f8afd | ||
|
|
cdc8d17d0b | ||
|
|
8f0e553e11 | ||
|
|
5f9a7b9d49 | ||
|
|
af7bc17c9a | ||
|
|
e2b62ba154 | ||
|
|
5da421c178 | ||
|
|
becb6267fb | ||
|
|
34754ccb38 | ||
|
|
dcd0edce40 | ||
|
|
7a0030b145 | ||
|
|
fa9e41047e | ||
|
|
f5f1f9478c | ||
|
|
6a70f02662 | ||
|
|
3bc0dfb354 | ||
|
|
418df29ff0 | ||
|
|
1f47d5b74f | ||
|
|
e52d70885e | ||
|
|
3f1409d87d | ||
|
|
0b2e5b2f82 | ||
|
|
228efcba0e | ||
|
|
401ea552c2 | ||
|
|
8cce6df80a | ||
|
|
3e522a9f59 | ||
|
|
ae45de3584 | ||
|
|
513b6dd6ad | ||
|
|
e65050d3f4 | ||
|
|
4a8a47d438 | ||
|
|
b2d8ec68a4 | ||
|
|
d3101d65aa | ||
|
|
abaddb4c9b | ||
|
|
acb86c23f9 | ||
|
|
bea5034ddf | ||
|
|
5194d8306e | ||
|
|
4846f33136 | ||
|
|
de8d1f1a3b | ||
|
|
ddd7aa844c | ||
|
|
1fd31ccc28 | ||
|
|
6f520eacf7 | ||
|
|
a0eb7c0d5c | ||
|
|
94322ef72e | ||
|
|
c6ab6406c2 | ||
|
|
2c132dccba | ||
|
|
7919a47165 | ||
|
|
7b2586943b | ||
|
|
f964b3438d | ||
|
|
09f3caedaa | ||
|
|
fe4b1f6dee | ||
|
|
bc5e33791f | ||
|
|
d669b93c4f | ||
|
|
a81c6d5448 | ||
|
|
6b34e3ef08 | ||
|
|
dbdc4d4123 | ||
|
|
c24c279df7 | ||
|
|
0f243d751f | ||
|
|
31f6d38cd2 | ||
|
|
c31bb9aca7 | ||
|
|
748b0b294d | ||
|
|
af8210dfea | ||
|
|
493969588e | ||
|
|
293573c665 | ||
|
|
5ffdb7355a | ||
|
|
c33e4d7bb7 | ||
|
|
24b82917d1 | ||
|
|
9696936b59 | ||
|
|
aeca9304f4 | ||
|
|
8fef156e8f | ||
|
|
8497528240 | ||
|
|
ebd71f6ad7 | ||
|
|
c677438189 | ||
|
|
d646053c0e | ||
|
|
c9a7ae1cae | ||
|
|
e84c1004df | ||
|
|
b19b727fe7 | ||
|
|
0932318b85 | ||
|
|
e573a2f6d4 | ||
|
|
52c1826423 | ||
|
|
e1881fae02 | ||
|
|
5ed85094c2 | ||
|
|
bf29ea55cf | ||
|
|
cafe4f1352 | ||
|
|
a9ee1c4167 | ||
|
|
a8c1bccdd5 | ||
|
|
71cf553aa8 | ||
|
|
a894a5e29b | ||
|
|
9bc7492ff2 | ||
|
|
77bda0df1c | ||
|
|
4c37399c75 | ||
|
|
cd69283184 | ||
|
|
cf3b3ca6fd | ||
|
|
1955943d4a | ||
|
|
3b68d63643 | ||
|
|
d21bfb84ad | ||
|
|
2a7c9d27b2 | ||
|
|
9c610ee11d | ||
|
|
7295d13d60 | ||
|
|
2fbbb70baa | ||
|
|
b5ce69cf3c | ||
|
|
1c5f657f97 | ||
|
|
9613ed8782 | ||
|
|
b142995808 | ||
|
|
36e5d742dc | ||
|
|
b9e3861385 | ||
|
|
f2bb3aba1e | ||
|
|
4243003c94 | ||
|
|
050bd0527f | ||
|
|
fe32de94b9 | ||
|
|
d2383d27a9 | ||
|
|
788190beca | ||
|
|
13aa32278a | ||
|
|
38ce02c610 | ||
|
|
9312ae7c6e | ||
|
|
1d86919883 | ||
|
|
78c1775661 | ||
|
|
5d00b851ce | ||
|
|
f8b93fa88a | ||
|
|
0405af1107 | ||
|
|
872e173887 | ||
|
|
b42757d330 | ||
|
|
a4d8d15a0e | ||
|
|
f8f197e26b | ||
|
|
4f85b75087 | ||
|
|
8895e38060 | ||
|
|
9a896957d9 | ||
|
|
37e004164b | ||
|
|
77069354cf | ||
|
|
2aafab6c19 | ||
|
|
35aaf97216 | ||
|
|
25cb90bc2d | ||
|
|
3311e0a296 | ||
|
|
eb31653d66 | ||
|
|
180df831ba | ||
|
|
2fa64f9fd6 | ||
|
|
a1521dc16e | ||
|
|
bf31a3a872 | ||
|
|
4a8fd94a5f | ||
|
|
e214bd0e1b | ||
|
|
4ad89ef8f1 | ||
|
|
7a66be8254 | ||
|
|
db696785d5 | ||
|
|
dfec133273 | ||
|
|
41605b4135 | ||
|
|
475abcc3a8 | ||
|
|
3a7d84e014 | ||
|
|
ad3f84df98 | ||
|
|
79e742c03b | ||
|
|
d79ada931d | ||
|
|
b2f6abe4ab | ||
|
|
c5dac1cdf6 | ||
|
|
89a0f515c7 | ||
|
|
d296adcd65 | ||
|
|
141064c443 | ||
|
|
54859cb814 | ||
|
|
0f0991b145 | ||
|
|
658d62be16 | ||
|
|
0139bfdb71 | ||
|
|
efeac70e40 | ||
|
|
b4db077e6a | ||
|
|
280e4e3b57 | ||
|
|
a962fa2357 | ||
|
|
775851b00c | ||
|
|
f8fadf53cd | ||
|
|
ce13699dfa | ||
|
|
fc5937e948 | ||
|
|
729e2c565b | ||
|
|
26ed50f04a | ||
|
|
2b80d053f3 | ||
|
|
f5ee8b71ff | ||
|
|
4c76feb574 | ||
|
|
18d84db41c | ||
|
|
08a571b4a1 | ||
|
|
5ebd305d17 | ||
|
|
edc73bc3c8 | ||
|
|
b7fa2d7b87 | ||
|
|
7771ac6074 | ||
|
|
f25b6fce1c | ||
|
|
d7b79395c7 | ||
|
|
ce18b0f22d | ||
|
|
2d8f60000c | ||
|
|
0b102287d1 | ||
|
|
d325fd6af7 | ||
|
|
e949b8a1e8 | ||
|
|
ab6e284180 | ||
|
|
7421b6e180 | ||
|
|
a2f03c559a | ||
|
|
3ced391fab | ||
|
|
ea7dcd46d7 | ||
|
|
94e33bdbbf | ||
|
|
29f833e9a4 | ||
|
|
8c32be5feb | ||
|
|
0ba2e655f4 | ||
|
|
78189186e5 | ||
|
|
96e875cd50 | ||
|
|
808524bed6 | ||
|
|
75e00420ec | ||
|
|
8be5604da4 | ||
|
|
02624554ae | ||
|
|
9d1e9a6a78 | ||
|
|
861d5b763d | ||
|
|
4013c48acb | ||
|
|
f264426646 | ||
|
|
862fd2c5c4 |
@@ -7,34 +7,33 @@ skip_list:
|
|||||||
|
|
||||||
# These rules are intentionally skipped:
|
# These rules are intentionally skipped:
|
||||||
#
|
#
|
||||||
# [E204]: "Lines should be no longer than 160 chars"
|
|
||||||
# This could be re-enabled with a major rewrite in the future.
|
|
||||||
# For now, there's not enough value gain from strictly limiting line length.
|
|
||||||
# (Disabled in May 2019)
|
|
||||||
- '204'
|
|
||||||
|
|
||||||
# [E701]: "meta/main.yml should contain relevant info"
|
|
||||||
# Roles in Kubespray are not intended to be used/imported by Ansible Galaxy.
|
|
||||||
# While it can be useful to have these metadata available, they are also available in the existing documentation.
|
|
||||||
# (Disabled in May 2019)
|
|
||||||
- '701'
|
|
||||||
|
|
||||||
# [role-name] "meta/main.yml" Role name role-name does not match ``^+$`` pattern
|
# [role-name] "meta/main.yml" Role name role-name does not match ``^+$`` pattern
|
||||||
# Meta roles in Kubespray don't need proper names
|
# Meta roles in Kubespray don't need proper names
|
||||||
# (Disabled in June 2021)
|
# (Disabled in June 2021)
|
||||||
- 'role-name'
|
- 'role-name'
|
||||||
|
|
||||||
- 'experimental'
|
|
||||||
# [var-naming] "defaults/main.yml" File defines variable 'apiVersion' that violates variable naming standards
|
# [var-naming] "defaults/main.yml" File defines variable 'apiVersion' that violates variable naming standards
|
||||||
# In Kubespray we use variables that use camelCase to match their k8s counterparts
|
# In Kubespray we use variables that use camelCase to match their k8s counterparts
|
||||||
# (Disabled in June 2021)
|
# (Disabled in June 2021)
|
||||||
- 'var-naming'
|
- 'var-naming'
|
||||||
- 'var-spacing'
|
|
||||||
|
|
||||||
# [fqcn-builtins]
|
# [fqcn-builtins]
|
||||||
# Roles in kubespray don't need fully qualified collection names
|
# Roles in kubespray don't need fully qualified collection names
|
||||||
# (Disabled in Feb 2023)
|
# (Disabled in Feb 2023)
|
||||||
- 'fqcn-builtins'
|
- 'fqcn-builtins'
|
||||||
|
|
||||||
|
# We use template in names
|
||||||
|
- 'name[template]'
|
||||||
|
|
||||||
|
# No changed-when on commands
|
||||||
|
# (Disabled in June 2023 after ansible upgrade; FIXME)
|
||||||
|
- 'no-changed-when'
|
||||||
|
|
||||||
|
# Disable run-once check with free strategy
|
||||||
|
# (Disabled in June 2023 after ansible upgrade; FIXME)
|
||||||
|
- 'run-once[task]'
|
||||||
exclude_paths:
|
exclude_paths:
|
||||||
# Generated files
|
# Generated files
|
||||||
- tests/files/custom_cni/cilium.yaml
|
- tests/files/custom_cni/cilium.yaml
|
||||||
|
- venv
|
||||||
|
- .github
|
||||||
|
|||||||
8
.ansible-lint-ignore
Normal file
8
.ansible-lint-ignore
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
# This file contains ignores rule violations for ansible-lint
|
||||||
|
inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml jinja[spacing]
|
||||||
|
roles/kubernetes/control-plane/defaults/main/kube-proxy.yml jinja[spacing]
|
||||||
|
roles/kubernetes/control-plane/defaults/main/main.yml jinja[spacing]
|
||||||
|
roles/kubernetes/kubeadm/defaults/main.yml jinja[spacing]
|
||||||
|
roles/kubernetes/node/defaults/main.yml jinja[spacing]
|
||||||
|
roles/kubernetes/preinstall/defaults/main.yml jinja[spacing]
|
||||||
|
roles/kubespray-defaults/defaults/main/main.yml jinja[spacing]
|
||||||
1
.gitattributes
vendored
Normal file
1
.gitattributes
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
docs/_sidebar.md linguist-generated=true
|
||||||
44
.github/ISSUE_TEMPLATE/bug-report.md
vendored
44
.github/ISSUE_TEMPLATE/bug-report.md
vendored
@@ -1,44 +0,0 @@
|
|||||||
---
|
|
||||||
name: Bug Report
|
|
||||||
about: Report a bug encountered while operating Kubernetes
|
|
||||||
labels: kind/bug
|
|
||||||
|
|
||||||
---
|
|
||||||
<!--
|
|
||||||
Please, be ready for followup questions, and please respond in a timely
|
|
||||||
manner. If we can't reproduce a bug or think a feature already exists, we
|
|
||||||
might close your issue. If we're wrong, PLEASE feel free to reopen it and
|
|
||||||
explain why.
|
|
||||||
-->
|
|
||||||
|
|
||||||
**Environment**:
|
|
||||||
- **Cloud provider or hardware configuration:**
|
|
||||||
|
|
||||||
- **OS (`printf "$(uname -srm)\n$(cat /etc/os-release)\n"`):**
|
|
||||||
|
|
||||||
- **Version of Ansible** (`ansible --version`):
|
|
||||||
|
|
||||||
- **Version of Python** (`python --version`):
|
|
||||||
|
|
||||||
|
|
||||||
**Kubespray version (commit) (`git rev-parse --short HEAD`):**
|
|
||||||
|
|
||||||
|
|
||||||
**Network plugin used**:
|
|
||||||
|
|
||||||
|
|
||||||
**Full inventory with variables (`ansible -i inventory/sample/inventory.ini all -m debug -a "var=hostvars[inventory_hostname]"`):**
|
|
||||||
<!-- We recommend using snippets services like https://gist.github.com/ etc. -->
|
|
||||||
|
|
||||||
**Command used to invoke ansible**:
|
|
||||||
|
|
||||||
|
|
||||||
**Output of ansible run**:
|
|
||||||
<!-- We recommend using snippets services like https://gist.github.com/ etc. -->
|
|
||||||
|
|
||||||
**Anything else do we need to know**:
|
|
||||||
<!-- By running scripts/collect-info.yaml you can get a lot of useful informations.
|
|
||||||
Script can be started by:
|
|
||||||
ansible-playbook -i <inventory_file_path> -u <ssh_user> -e ansible_ssh_user=<ssh_user> -b --become-user=root -e dir=`pwd` scripts/collect-info.yaml
|
|
||||||
(If you using CoreOS remember to add '-e ansible_python_interpreter=/opt/bin/python').
|
|
||||||
After running this command you can find logs in `pwd`/logs.tar.gz. You can even upload somewhere entire file and paste link here.-->
|
|
||||||
124
.github/ISSUE_TEMPLATE/bug-report.yaml
vendored
Normal file
124
.github/ISSUE_TEMPLATE/bug-report.yaml
vendored
Normal file
@@ -0,0 +1,124 @@
|
|||||||
|
---
|
||||||
|
name: Bug Report
|
||||||
|
description: Report a bug encountered while using Kubespray
|
||||||
|
labels: kind/bug
|
||||||
|
body:
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: |
|
||||||
|
Please, be ready for followup questions, and please respond in a timely
|
||||||
|
manner. If we can't reproduce a bug or think a feature already exists, we
|
||||||
|
might close your issue. If we're wrong, PLEASE feel free to reopen it and
|
||||||
|
explain why.
|
||||||
|
- type: textarea
|
||||||
|
id: problem
|
||||||
|
attributes:
|
||||||
|
label: What happened?
|
||||||
|
description: |
|
||||||
|
Please provide as much info as possible. Not doing so may result in your bug not being addressed in a timely manner.
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
id: expected
|
||||||
|
attributes:
|
||||||
|
label: What did you expect to happen?
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: repro
|
||||||
|
attributes:
|
||||||
|
label: How can we reproduce it (as minimally and precisely as possible)?
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: '### Environment'
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: os
|
||||||
|
attributes:
|
||||||
|
label: OS
|
||||||
|
placeholder: 'printf "$(uname -srm)\n$(cat /etc/os-release)\n"'
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: ansible_version
|
||||||
|
attributes:
|
||||||
|
label: Version of Ansible
|
||||||
|
placeholder: 'ansible --version'
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: input
|
||||||
|
id: python_version
|
||||||
|
attributes:
|
||||||
|
label: Version of Python
|
||||||
|
placeholder: 'python --version'
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: input
|
||||||
|
id: kubespray_version
|
||||||
|
attributes:
|
||||||
|
label: Version of Kubespray (commit)
|
||||||
|
placeholder: 'git rev-parse --short HEAD'
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: dropdown
|
||||||
|
id: network_plugin
|
||||||
|
attributes:
|
||||||
|
label: Network plugin used
|
||||||
|
options:
|
||||||
|
- calico
|
||||||
|
- cilium
|
||||||
|
- cni
|
||||||
|
- custom_cni
|
||||||
|
- flannel
|
||||||
|
- kube-ovn
|
||||||
|
- kube-router
|
||||||
|
- macvlan
|
||||||
|
- meta
|
||||||
|
- multus
|
||||||
|
- ovn4nfv
|
||||||
|
- weave
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: inventory
|
||||||
|
attributes:
|
||||||
|
label: Full inventory with variables
|
||||||
|
placeholder: 'ansible -i inventory/sample/inventory.ini all -m debug -a "var=hostvars[inventory_hostname]"'
|
||||||
|
description: We recommend using snippets services like https://gist.github.com/ etc.
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: input
|
||||||
|
id: ansible_command
|
||||||
|
attributes:
|
||||||
|
label: Command used to invoke ansible
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: ansible_output
|
||||||
|
attributes:
|
||||||
|
label: Output of ansible run
|
||||||
|
description: We recommend using snippets services like https://gist.github.com/ etc.
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: anything_else
|
||||||
|
attributes:
|
||||||
|
label: Anything else we need to know
|
||||||
|
description: |
|
||||||
|
By running scripts/collect-info.yaml you can get a lot of useful informations.
|
||||||
|
Script can be started by:
|
||||||
|
ansible-playbook -i <inventory_file_path> -u <ssh_user> -e ansible_ssh_user=<ssh_user> -b --become-user=root -e dir=`pwd` scripts/collect-info.yaml
|
||||||
|
(If you using CoreOS remember to add '-e ansible_python_interpreter=/opt/bin/python').
|
||||||
|
After running this command you can find logs in `pwd`/logs.tar.gz. You can even upload somewhere entire file and paste link here
|
||||||
5
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
5
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
---
|
||||||
|
contact_links:
|
||||||
|
- name: Support Request
|
||||||
|
url: https://kubernetes.slack.com/channels/kubespray
|
||||||
|
about: Support request or question relating to Kubernetes
|
||||||
11
.github/ISSUE_TEMPLATE/enhancement.md
vendored
11
.github/ISSUE_TEMPLATE/enhancement.md
vendored
@@ -1,11 +0,0 @@
|
|||||||
---
|
|
||||||
name: Enhancement Request
|
|
||||||
about: Suggest an enhancement to the Kubespray project
|
|
||||||
labels: kind/feature
|
|
||||||
|
|
||||||
---
|
|
||||||
<!-- Please only use this template for submitting enhancement requests -->
|
|
||||||
|
|
||||||
**What would you like to be added**:
|
|
||||||
|
|
||||||
**Why is this needed**:
|
|
||||||
20
.github/ISSUE_TEMPLATE/enhancement.yaml
vendored
Normal file
20
.github/ISSUE_TEMPLATE/enhancement.yaml
vendored
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
---
|
||||||
|
name: Enhancement Request
|
||||||
|
description: Suggest an enhancement to the Kubespray project
|
||||||
|
labels: kind/feature
|
||||||
|
body:
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: Please only use this template for submitting enhancement requests
|
||||||
|
- type: textarea
|
||||||
|
id: what
|
||||||
|
attributes:
|
||||||
|
label: What would you like to be added
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
id: why
|
||||||
|
attributes:
|
||||||
|
label: Why is this needed
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
20
.github/ISSUE_TEMPLATE/failing-test.md
vendored
20
.github/ISSUE_TEMPLATE/failing-test.md
vendored
@@ -1,20 +0,0 @@
|
|||||||
---
|
|
||||||
name: Failing Test
|
|
||||||
about: Report test failures in Kubespray CI jobs
|
|
||||||
labels: kind/failing-test
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<!-- Please only use this template for submitting reports about failing tests in Kubespray CI jobs -->
|
|
||||||
|
|
||||||
**Which jobs are failing**:
|
|
||||||
|
|
||||||
**Which test(s) are failing**:
|
|
||||||
|
|
||||||
**Since when has it been failing**:
|
|
||||||
|
|
||||||
**Testgrid link**:
|
|
||||||
|
|
||||||
**Reason for failure**:
|
|
||||||
|
|
||||||
**Anything else we need to know**:
|
|
||||||
41
.github/ISSUE_TEMPLATE/failing-test.yaml
vendored
Normal file
41
.github/ISSUE_TEMPLATE/failing-test.yaml
vendored
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
---
|
||||||
|
name: Failing Test
|
||||||
|
description: Report test failures in Kubespray CI jobs
|
||||||
|
labels: kind/failing-test
|
||||||
|
body:
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: Please only use this template for submitting reports about failing tests in Kubespray CI jobs
|
||||||
|
- type: textarea
|
||||||
|
id: failing_jobs
|
||||||
|
attributes:
|
||||||
|
label: Which jobs are failing ?
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: failing_tests
|
||||||
|
attributes:
|
||||||
|
label: Which tests are failing ?
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: input
|
||||||
|
id: since_when
|
||||||
|
attributes:
|
||||||
|
label: Since when has it been failing ?
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: failure_reason
|
||||||
|
attributes:
|
||||||
|
label: Reason for failure
|
||||||
|
description: If you don't know and have no guess, just put "Unknown"
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: anything_else
|
||||||
|
attributes:
|
||||||
|
label: Anything else we need to know
|
||||||
18
.github/ISSUE_TEMPLATE/support.md
vendored
18
.github/ISSUE_TEMPLATE/support.md
vendored
@@ -1,18 +0,0 @@
|
|||||||
---
|
|
||||||
name: Support Request
|
|
||||||
about: Support request or question relating to Kubespray
|
|
||||||
labels: kind/support
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<!--
|
|
||||||
STOP -- PLEASE READ!
|
|
||||||
|
|
||||||
GitHub is not the right place for support requests.
|
|
||||||
|
|
||||||
If you're looking for help, check [Stack Overflow](https://stackoverflow.com/questions/tagged/kubespray) and the [troubleshooting guide](https://kubernetes.io/docs/tasks/debug-application-cluster/troubleshooting/).
|
|
||||||
|
|
||||||
You can also post your question on the [Kubernetes Slack](http://slack.k8s.io/) or the [Discuss Kubernetes](https://discuss.kubernetes.io/) forum.
|
|
||||||
|
|
||||||
If the matter is security related, please disclose it privately via https://kubernetes.io/security/.
|
|
||||||
-->
|
|
||||||
7
.github/dependabot.yml
vendored
Normal file
7
.github/dependabot.yml
vendored
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
version: 2
|
||||||
|
updates:
|
||||||
|
- package-ecosystem: "pip"
|
||||||
|
directory: "/"
|
||||||
|
schedule:
|
||||||
|
interval: "weekly"
|
||||||
|
labels: [ "dependencies" ]
|
||||||
4
.gitignore
vendored
4
.gitignore
vendored
@@ -3,6 +3,8 @@
|
|||||||
**/vagrant_ansible_inventory
|
**/vagrant_ansible_inventory
|
||||||
*.iml
|
*.iml
|
||||||
temp
|
temp
|
||||||
|
contrib/offline/container-images
|
||||||
|
contrib/offline/container-images.tar.gz
|
||||||
contrib/offline/offline-files
|
contrib/offline/offline-files
|
||||||
contrib/offline/offline-files.tar.gz
|
contrib/offline/offline-files.tar.gz
|
||||||
.idea
|
.idea
|
||||||
@@ -11,7 +13,7 @@ contrib/offline/offline-files.tar.gz
|
|||||||
.cache
|
.cache
|
||||||
*.bak
|
*.bak
|
||||||
*.tfstate
|
*.tfstate
|
||||||
*.tfstate.backup
|
*.tfstate*backup
|
||||||
*.lock.hcl
|
*.lock.hcl
|
||||||
.terraform/
|
.terraform/
|
||||||
contrib/terraform/aws/credentials.tfvars
|
contrib/terraform/aws/credentials.tfvars
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ stages:
|
|||||||
- deploy-special
|
- deploy-special
|
||||||
|
|
||||||
variables:
|
variables:
|
||||||
KUBESPRAY_VERSION: v2.21.0
|
KUBESPRAY_VERSION: v2.25.0
|
||||||
FAILFASTCI_NAMESPACE: 'kargo-ci'
|
FAILFASTCI_NAMESPACE: 'kargo-ci'
|
||||||
GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
|
GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
|
||||||
ANSIBLE_FORCE_COLOR: "true"
|
ANSIBLE_FORCE_COLOR: "true"
|
||||||
@@ -33,16 +33,12 @@ variables:
|
|||||||
MITOGEN_ENABLE: "false"
|
MITOGEN_ENABLE: "false"
|
||||||
ANSIBLE_LOG_LEVEL: "-vv"
|
ANSIBLE_LOG_LEVEL: "-vv"
|
||||||
RECOVER_CONTROL_PLANE_TEST: "false"
|
RECOVER_CONTROL_PLANE_TEST: "false"
|
||||||
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
|
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"
|
||||||
TERRAFORM_VERSION: 1.3.7
|
TERRAFORM_VERSION: 1.3.7
|
||||||
ANSIBLE_MAJOR_VERSION: "2.11"
|
|
||||||
PIPELINE_IMAGE: "$CI_REGISTRY_IMAGE/pipeline:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}"
|
PIPELINE_IMAGE: "$CI_REGISTRY_IMAGE/pipeline:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}"
|
||||||
|
|
||||||
before_script:
|
before_script:
|
||||||
- ./tests/scripts/rebase.sh
|
- ./tests/scripts/rebase.sh
|
||||||
- update-alternatives --install /usr/bin/python python /usr/bin/python3 1
|
|
||||||
- python -m pip uninstall -y ansible ansible-base ansible-core
|
|
||||||
- python -m pip install -r tests/requirements-${ANSIBLE_MAJOR_VERSION}.txt
|
|
||||||
- mkdir -p /.ssh
|
- mkdir -p /.ssh
|
||||||
|
|
||||||
.job: &job
|
.job: &job
|
||||||
@@ -57,6 +53,7 @@ before_script:
|
|||||||
.testcases: &testcases
|
.testcases: &testcases
|
||||||
<<: *job
|
<<: *job
|
||||||
retry: 1
|
retry: 1
|
||||||
|
interruptible: true
|
||||||
before_script:
|
before_script:
|
||||||
- update-alternatives --install /usr/bin/python python /usr/bin/python3 1
|
- update-alternatives --install /usr/bin/python python /usr/bin/python3 1
|
||||||
- ./tests/scripts/rebase.sh
|
- ./tests/scripts/rebase.sh
|
||||||
@@ -80,7 +77,6 @@ ci-authorized:
|
|||||||
include:
|
include:
|
||||||
- .gitlab-ci/build.yml
|
- .gitlab-ci/build.yml
|
||||||
- .gitlab-ci/lint.yml
|
- .gitlab-ci/lint.yml
|
||||||
- .gitlab-ci/shellcheck.yml
|
|
||||||
- .gitlab-ci/terraform.yml
|
- .gitlab-ci/terraform.yml
|
||||||
- .gitlab-ci/packet.yml
|
- .gitlab-ci/packet.yml
|
||||||
- .gitlab-ci/vagrant.yml
|
- .gitlab-ci/vagrant.yml
|
||||||
|
|||||||
@@ -1,114 +1,40 @@
|
|||||||
---
|
---
|
||||||
yamllint:
|
generate-pre-commit:
|
||||||
extends: .job
|
image: 'mikefarah/yq@sha256:bcb889a1f9bdb0613c8a054542d02360c2b1b35521041be3e1bd8fbd0534d411'
|
||||||
stage: unit-tests
|
stage: build
|
||||||
tags: [light]
|
before_script: []
|
||||||
variables:
|
|
||||||
LANG: C.UTF-8
|
|
||||||
script:
|
script:
|
||||||
- yamllint --strict .
|
- >
|
||||||
except: ['triggers', 'master']
|
yq -r < .pre-commit-config.yaml '.repos[].hooks[].id' |
|
||||||
|
sed 's/^/ - /' |
|
||||||
|
cat .gitlab-ci/pre-commit-dynamic-stub.yml - > pre-commit-generated.yml
|
||||||
|
artifacts:
|
||||||
|
paths:
|
||||||
|
- pre-commit-generated.yml
|
||||||
|
|
||||||
|
run-pre-commit:
|
||||||
|
stage: unit-tests
|
||||||
|
trigger:
|
||||||
|
include:
|
||||||
|
- artifact: pre-commit-generated.yml
|
||||||
|
job: generate-pre-commit
|
||||||
|
strategy: depend
|
||||||
|
|
||||||
vagrant-validate:
|
vagrant-validate:
|
||||||
extends: .job
|
extends: .job
|
||||||
stage: unit-tests
|
stage: unit-tests
|
||||||
tags: [light]
|
tags: [light]
|
||||||
variables:
|
variables:
|
||||||
VAGRANT_VERSION: 2.3.4
|
VAGRANT_VERSION: 2.3.7
|
||||||
script:
|
script:
|
||||||
- ./tests/scripts/vagrant-validate.sh
|
- ./tests/scripts/vagrant-validate.sh
|
||||||
except: ['triggers', 'master']
|
except: ['triggers', 'master']
|
||||||
|
|
||||||
ansible-lint:
|
|
||||||
extends: .job
|
|
||||||
stage: unit-tests
|
|
||||||
tags: [light]
|
|
||||||
script:
|
|
||||||
- ansible-lint -v
|
|
||||||
except: ['triggers', 'master']
|
|
||||||
|
|
||||||
syntax-check:
|
|
||||||
extends: .job
|
|
||||||
stage: unit-tests
|
|
||||||
tags: [light]
|
|
||||||
variables:
|
|
||||||
ANSIBLE_INVENTORY: inventory/local-tests.cfg
|
|
||||||
ANSIBLE_REMOTE_USER: root
|
|
||||||
ANSIBLE_BECOME: "true"
|
|
||||||
ANSIBLE_BECOME_USER: root
|
|
||||||
ANSIBLE_VERBOSITY: "3"
|
|
||||||
script:
|
|
||||||
- ansible-playbook --syntax-check cluster.yml
|
|
||||||
- ansible-playbook --syntax-check playbooks/cluster.yml
|
|
||||||
- ansible-playbook --syntax-check upgrade-cluster.yml
|
|
||||||
- ansible-playbook --syntax-check playbooks/upgrade_cluster.yml
|
|
||||||
- ansible-playbook --syntax-check reset.yml
|
|
||||||
- ansible-playbook --syntax-check playbooks/reset.yml
|
|
||||||
- ansible-playbook --syntax-check extra_playbooks/upgrade-only-k8s.yml
|
|
||||||
except: ['triggers', 'master']
|
|
||||||
|
|
||||||
collection-build-install-sanity-check:
|
|
||||||
extends: .job
|
|
||||||
stage: unit-tests
|
|
||||||
tags: [light]
|
|
||||||
variables:
|
|
||||||
ANSIBLE_COLLECTIONS_PATH: "./ansible_collections"
|
|
||||||
script:
|
|
||||||
- ansible-galaxy collection build
|
|
||||||
- ansible-galaxy collection install kubernetes_sigs-kubespray-$(grep "^version:" galaxy.yml | awk '{print $2}').tar.gz
|
|
||||||
- ansible-galaxy collection list $(egrep -i '(name:\s+|namespace:\s+)' galaxy.yml | awk '{print $2}' | tr '\n' '.' | sed 's|\.$||g') | grep "^kubernetes_sigs.kubespray"
|
|
||||||
- test -f ansible_collections/kubernetes_sigs/kubespray/playbooks/cluster.yml
|
|
||||||
- test -f ansible_collections/kubernetes_sigs/kubespray/playbooks/reset.yml
|
|
||||||
except: ['triggers', 'master']
|
|
||||||
|
|
||||||
tox-inventory-builder:
|
|
||||||
stage: unit-tests
|
|
||||||
tags: [light]
|
|
||||||
extends: .job
|
|
||||||
before_script:
|
|
||||||
- ./tests/scripts/rebase.sh
|
|
||||||
- apt-get update && apt-get install -y python3-pip
|
|
||||||
- update-alternatives --install /usr/bin/python python /usr/bin/python3 10
|
|
||||||
- python -m pip uninstall -y ansible ansible-base ansible-core
|
|
||||||
- python -m pip install -r tests/requirements.txt
|
|
||||||
script:
|
|
||||||
- pip3 install tox
|
|
||||||
- cd contrib/inventory_builder && tox
|
|
||||||
except: ['triggers', 'master']
|
|
||||||
|
|
||||||
markdownlint:
|
|
||||||
stage: unit-tests
|
|
||||||
tags: [light]
|
|
||||||
image: node
|
|
||||||
before_script:
|
|
||||||
- npm install -g markdownlint-cli@0.22.0
|
|
||||||
script:
|
|
||||||
- markdownlint $(find . -name '*.md' | grep -vF './.git') --ignore docs/_sidebar.md --ignore contrib/dind/README.md
|
|
||||||
|
|
||||||
check-readme-versions:
|
|
||||||
stage: unit-tests
|
|
||||||
tags: [light]
|
|
||||||
image: python:3
|
|
||||||
script:
|
|
||||||
- tests/scripts/check_readme_versions.sh
|
|
||||||
|
|
||||||
|
# TODO: convert to pre-commit hook
|
||||||
check-galaxy-version:
|
check-galaxy-version:
|
||||||
stage: unit-tests
|
stage: unit-tests
|
||||||
tags: [light]
|
tags: [light]
|
||||||
image: python:3
|
image: python:3
|
||||||
script:
|
script:
|
||||||
- tests/scripts/check_galaxy_version.sh
|
- tests/scripts/check_galaxy_version.sh
|
||||||
|
|
||||||
check-typo:
|
|
||||||
stage: unit-tests
|
|
||||||
tags: [light]
|
|
||||||
image: python:3
|
|
||||||
script:
|
|
||||||
- tests/scripts/check_typo.sh
|
|
||||||
|
|
||||||
ci-matrix:
|
|
||||||
stage: unit-tests
|
|
||||||
tags: [light]
|
|
||||||
image: python:3
|
|
||||||
script:
|
|
||||||
- tests/scripts/md-table/test.sh
|
|
||||||
|
|||||||
@@ -9,10 +9,6 @@
|
|||||||
stage: deploy-part1
|
stage: deploy-part1
|
||||||
before_script:
|
before_script:
|
||||||
- tests/scripts/rebase.sh
|
- tests/scripts/rebase.sh
|
||||||
- apt-get update && apt-get install -y python3-pip
|
|
||||||
- update-alternatives --install /usr/bin/python python /usr/bin/python3 10
|
|
||||||
- python -m pip uninstall -y ansible ansible-base ansible-core
|
|
||||||
- python -m pip install -r tests/requirements.txt
|
|
||||||
- ./tests/scripts/vagrant_clean.sh
|
- ./tests/scripts/vagrant_clean.sh
|
||||||
script:
|
script:
|
||||||
- ./tests/scripts/molecule_run.sh
|
- ./tests/scripts/molecule_run.sh
|
||||||
@@ -58,29 +54,30 @@ molecule_cri-o:
|
|||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
script:
|
script:
|
||||||
- ./tests/scripts/molecule_run.sh -i container-engine/cri-o
|
- ./tests/scripts/molecule_run.sh -i container-engine/cri-o
|
||||||
|
allow_failure: true
|
||||||
when: on_success
|
when: on_success
|
||||||
|
|
||||||
# Stage 3 container engines don't get as much attention so allow them to fail
|
# Stage 3 container engines don't get as much attention so allow them to fail
|
||||||
molecule_kata:
|
molecule_kata:
|
||||||
extends: .molecule
|
extends: .molecule
|
||||||
stage: deploy-part3
|
stage: deploy-part3
|
||||||
allow_failure: true
|
|
||||||
script:
|
script:
|
||||||
- ./tests/scripts/molecule_run.sh -i container-engine/kata-containers
|
- ./tests/scripts/molecule_run.sh -i container-engine/kata-containers
|
||||||
when: on_success
|
when: manual
|
||||||
|
# FIXME: this test is broken (perma-failing)
|
||||||
|
|
||||||
molecule_gvisor:
|
molecule_gvisor:
|
||||||
extends: .molecule
|
extends: .molecule
|
||||||
stage: deploy-part3
|
stage: deploy-part3
|
||||||
allow_failure: true
|
|
||||||
script:
|
script:
|
||||||
- ./tests/scripts/molecule_run.sh -i container-engine/gvisor
|
- ./tests/scripts/molecule_run.sh -i container-engine/gvisor
|
||||||
when: on_success
|
when: manual
|
||||||
|
# FIXME: this test is broken (perma-failing)
|
||||||
|
|
||||||
molecule_youki:
|
molecule_youki:
|
||||||
extends: .molecule
|
extends: .molecule
|
||||||
stage: deploy-part3
|
stage: deploy-part3
|
||||||
allow_failure: true
|
|
||||||
script:
|
script:
|
||||||
- ./tests/scripts/molecule_run.sh -i container-engine/youki
|
- ./tests/scripts/molecule_run.sh -i container-engine/youki
|
||||||
when: on_success
|
when: manual
|
||||||
|
# FIXME: this test is broken (perma-failing)
|
||||||
|
|||||||
@@ -23,50 +23,55 @@
|
|||||||
allow_failure: true
|
allow_failure: true
|
||||||
extends: .packet
|
extends: .packet
|
||||||
|
|
||||||
# The ubuntu20-calico-aio jobs are meant as early stages to prevent running the full CI if something is horribly broken
|
packet_cleanup_old:
|
||||||
packet_ubuntu20-calico-aio:
|
stage: deploy-part1
|
||||||
|
extends: .packet_periodic
|
||||||
|
script:
|
||||||
|
- cd tests
|
||||||
|
- make cleanup-packet
|
||||||
|
after_script: []
|
||||||
|
|
||||||
|
# The ubuntu20-calico-all-in-one jobs are meant as early stages to prevent running the full CI if something is horribly broken
|
||||||
|
packet_ubuntu20-calico-all-in-one:
|
||||||
stage: deploy-part1
|
stage: deploy-part1
|
||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: on_success
|
when: on_success
|
||||||
variables:
|
variables:
|
||||||
RESET_CHECK: "true"
|
RESET_CHECK: "true"
|
||||||
|
|
||||||
packet_ubuntu20-calico-aio-ansible-2_11:
|
|
||||||
stage: deploy-part1
|
|
||||||
extends: .packet_periodic
|
|
||||||
when: on_success
|
|
||||||
variables:
|
|
||||||
ANSIBLE_MAJOR_VERSION: "2.11"
|
|
||||||
RESET_CHECK: "true"
|
|
||||||
|
|
||||||
# ### PR JOBS PART2
|
# ### PR JOBS PART2
|
||||||
|
|
||||||
packet_ubuntu18-aio-docker:
|
packet_ubuntu20-all-in-one-docker:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: on_success
|
when: on_success
|
||||||
|
|
||||||
packet_ubuntu20-aio-docker:
|
packet_ubuntu20-calico-all-in-one-hardening:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: on_success
|
when: on_success
|
||||||
|
|
||||||
packet_ubuntu20-calico-aio-hardening:
|
packet_ubuntu22-all-in-one-docker:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: on_success
|
when: on_success
|
||||||
|
|
||||||
packet_ubuntu18-calico-aio:
|
packet_ubuntu22-calico-all-in-one:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: on_success
|
when: on_success
|
||||||
|
|
||||||
packet_ubuntu22-aio-docker:
|
packet_ubuntu24-all-in-one-docker:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: on_success
|
when: on_success
|
||||||
|
|
||||||
packet_ubuntu22-calico-aio:
|
packet_ubuntu24-calico-all-in-one:
|
||||||
|
stage: deploy-part2
|
||||||
|
extends: .packet_pr
|
||||||
|
when: on_success
|
||||||
|
|
||||||
|
packet_ubuntu24-calico-etcd-datastore:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: on_success
|
when: on_success
|
||||||
@@ -80,18 +85,19 @@ packet_almalinux8-crio:
|
|||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
when: on_success
|
when: on_success
|
||||||
|
allow_failure: true
|
||||||
|
|
||||||
packet_ubuntu18-crio:
|
packet_ubuntu20-crio:
|
||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
when: manual
|
when: manual
|
||||||
|
|
||||||
packet_fedora35-crio:
|
packet_fedora37-crio:
|
||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
when: manual
|
when: manual
|
||||||
|
|
||||||
packet_ubuntu16-flannel-ha:
|
packet_ubuntu20-flannel-ha:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: manual
|
when: manual
|
||||||
@@ -121,6 +127,21 @@ packet_debian11-docker:
|
|||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: on_success
|
when: on_success
|
||||||
|
|
||||||
|
packet_debian12-calico:
|
||||||
|
stage: deploy-part2
|
||||||
|
extends: .packet_pr
|
||||||
|
when: on_success
|
||||||
|
|
||||||
|
packet_debian12-docker:
|
||||||
|
stage: deploy-part2
|
||||||
|
extends: .packet_pr
|
||||||
|
when: on_success
|
||||||
|
|
||||||
|
packet_debian12-cilium:
|
||||||
|
stage: deploy-part2
|
||||||
|
extends: .packet_periodic
|
||||||
|
when: on_success
|
||||||
|
|
||||||
packet_centos7-calico-ha-once-localhost:
|
packet_centos7-calico-ha-once-localhost:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
@@ -133,7 +154,7 @@ packet_centos7-calico-ha-once-localhost:
|
|||||||
|
|
||||||
packet_almalinux8-kube-ovn:
|
packet_almalinux8-kube-ovn:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .packet_periodic
|
extends: .packet_pr
|
||||||
when: on_success
|
when: on_success
|
||||||
|
|
||||||
packet_almalinux8-calico:
|
packet_almalinux8-calico:
|
||||||
@@ -163,34 +184,35 @@ packet_almalinux8-docker:
|
|||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: on_success
|
when: on_success
|
||||||
|
|
||||||
packet_fedora36-docker-weave:
|
packet_amazon-linux-2-all-in-one:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: on_success
|
when: on_success
|
||||||
|
|
||||||
|
packet_fedora38-docker-weave:
|
||||||
|
stage: deploy-part2
|
||||||
|
extends: .packet_pr
|
||||||
|
when: on_success
|
||||||
|
allow_failure: true
|
||||||
|
|
||||||
packet_opensuse-docker-cilium:
|
packet_opensuse-docker-cilium:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: manual
|
when: on_success
|
||||||
|
|
||||||
# ### MANUAL JOBS
|
# ### MANUAL JOBS
|
||||||
|
|
||||||
packet_ubuntu16-docker-weave-sep:
|
packet_ubuntu20-docker-weave-sep:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: manual
|
when: manual
|
||||||
|
|
||||||
packet_ubuntu18-cilium-sep:
|
packet_ubuntu20-cilium-sep:
|
||||||
stage: deploy-special
|
stage: deploy-special
|
||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: manual
|
when: manual
|
||||||
|
|
||||||
packet_ubuntu18-flannel-ha:
|
packet_ubuntu20-flannel-ha-once:
|
||||||
stage: deploy-part2
|
|
||||||
extends: .packet_pr
|
|
||||||
when: manual
|
|
||||||
|
|
||||||
packet_ubuntu18-flannel-ha-once:
|
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: manual
|
when: manual
|
||||||
@@ -216,24 +238,19 @@ packet_centos7-multus-calico:
|
|||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: manual
|
when: manual
|
||||||
|
|
||||||
packet_fedora36-docker-calico:
|
packet_fedora38-docker-calico:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .packet_periodic
|
extends: .packet_periodic
|
||||||
when: on_success
|
when: on_success
|
||||||
variables:
|
variables:
|
||||||
RESET_CHECK: "true"
|
RESET_CHECK: "true"
|
||||||
|
|
||||||
packet_fedora35-calico-selinux:
|
packet_fedora37-calico-selinux:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .packet_periodic
|
extends: .packet_periodic
|
||||||
when: on_success
|
when: on_success
|
||||||
|
|
||||||
packet_fedora35-calico-swap-selinux:
|
packet_fedora37-calico-swap-selinux:
|
||||||
stage: deploy-part2
|
|
||||||
extends: .packet_pr
|
|
||||||
when: manual
|
|
||||||
|
|
||||||
packet_amazon-linux-2-aio:
|
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: manual
|
when: manual
|
||||||
@@ -243,7 +260,7 @@ packet_almalinux8-calico-nodelocaldns-secondary:
|
|||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: manual
|
when: manual
|
||||||
|
|
||||||
packet_fedora36-kube-ovn:
|
packet_fedora38-kube-ovn:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .packet_periodic
|
extends: .packet_periodic
|
||||||
when: on_success
|
when: on_success
|
||||||
@@ -258,6 +275,11 @@ packet_debian11-kubelet-csr-approver:
|
|||||||
extends: .packet_pr
|
extends: .packet_pr
|
||||||
when: manual
|
when: manual
|
||||||
|
|
||||||
|
packet_debian12-custom-cni-helm:
|
||||||
|
stage: deploy-part2
|
||||||
|
extends: .packet_pr
|
||||||
|
when: manual
|
||||||
|
|
||||||
# ### PR JOBS PART3
|
# ### PR JOBS PART3
|
||||||
# Long jobs (45min+)
|
# Long jobs (45min+)
|
||||||
|
|
||||||
@@ -308,18 +330,18 @@ packet_debian11-calico-upgrade-once:
|
|||||||
variables:
|
variables:
|
||||||
UPGRADE_TEST: graceful
|
UPGRADE_TEST: graceful
|
||||||
|
|
||||||
packet_ubuntu18-calico-ha-recover:
|
packet_ubuntu20-calico-ha-recover:
|
||||||
stage: deploy-part3
|
stage: deploy-part3
|
||||||
extends: .packet_periodic
|
extends: .packet_periodic
|
||||||
when: on_success
|
when: on_success
|
||||||
variables:
|
variables:
|
||||||
RECOVER_CONTROL_PLANE_TEST: "true"
|
RECOVER_CONTROL_PLANE_TEST: "true"
|
||||||
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
|
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"
|
||||||
|
|
||||||
packet_ubuntu18-calico-ha-recover-noquorum:
|
packet_ubuntu20-calico-ha-recover-noquorum:
|
||||||
stage: deploy-part3
|
stage: deploy-part3
|
||||||
extends: .packet_periodic
|
extends: .packet_periodic
|
||||||
when: on_success
|
when: on_success
|
||||||
variables:
|
variables:
|
||||||
RECOVER_CONTROL_PLANE_TEST: "true"
|
RECOVER_CONTROL_PLANE_TEST: "true"
|
||||||
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube_control_plane[1:]"
|
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:]:kube_control_plane[1:]"
|
||||||
|
|||||||
17
.gitlab-ci/pre-commit-dynamic-stub.yml
Normal file
17
.gitlab-ci/pre-commit-dynamic-stub.yml
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
---
|
||||||
|
# stub pipeline for dynamic generation
|
||||||
|
pre-commit:
|
||||||
|
tags:
|
||||||
|
- light
|
||||||
|
image: 'ghcr.io/pre-commit-ci/runner-image@sha256:aaf2c7b38b22286f2d381c11673bec571c28f61dd086d11b43a1c9444a813cef'
|
||||||
|
variables:
|
||||||
|
PRE_COMMIT_HOME: /pre-commit-cache
|
||||||
|
script:
|
||||||
|
- pre-commit run -a $HOOK_ID
|
||||||
|
cache:
|
||||||
|
key: pre-commit-$HOOK_ID
|
||||||
|
paths:
|
||||||
|
- /pre-commit-cache
|
||||||
|
parallel:
|
||||||
|
matrix:
|
||||||
|
- HOOK_ID:
|
||||||
@@ -1,16 +0,0 @@
|
|||||||
---
|
|
||||||
shellcheck:
|
|
||||||
extends: .job
|
|
||||||
stage: unit-tests
|
|
||||||
tags: [light]
|
|
||||||
variables:
|
|
||||||
SHELLCHECK_VERSION: v0.7.1
|
|
||||||
before_script:
|
|
||||||
- ./tests/scripts/rebase.sh
|
|
||||||
- curl --silent --location "https://github.com/koalaman/shellcheck/releases/download/"${SHELLCHECK_VERSION}"/shellcheck-"${SHELLCHECK_VERSION}".linux.x86_64.tar.xz" | tar -xJv
|
|
||||||
- cp shellcheck-"${SHELLCHECK_VERSION}"/shellcheck /usr/bin/
|
|
||||||
- shellcheck --version
|
|
||||||
script:
|
|
||||||
# Run shellcheck for all *.sh
|
|
||||||
- find . -name '*.sh' -not -path './.git/*' | xargs shellcheck --severity error
|
|
||||||
except: ['triggers', 'master']
|
|
||||||
@@ -100,21 +100,13 @@ tf-validate-upcloud:
|
|||||||
PROVIDER: upcloud
|
PROVIDER: upcloud
|
||||||
CLUSTER: $CI_COMMIT_REF_NAME
|
CLUSTER: $CI_COMMIT_REF_NAME
|
||||||
|
|
||||||
# tf-packet-ubuntu16-default:
|
tf-validate-nifcloud:
|
||||||
# extends: .terraform_apply
|
extends: .terraform_validate
|
||||||
# variables:
|
variables:
|
||||||
# TF_VERSION: $TERRAFORM_VERSION
|
TF_VERSION: $TERRAFORM_VERSION
|
||||||
# PROVIDER: packet
|
PROVIDER: nifcloud
|
||||||
# CLUSTER: $CI_COMMIT_REF_NAME
|
|
||||||
# TF_VAR_number_of_k8s_masters: "1"
|
# tf-packet-ubuntu20-default:
|
||||||
# TF_VAR_number_of_k8s_nodes: "1"
|
|
||||||
# TF_VAR_plan_k8s_masters: t1.small.x86
|
|
||||||
# TF_VAR_plan_k8s_nodes: t1.small.x86
|
|
||||||
# TF_VAR_metro: ny
|
|
||||||
# TF_VAR_public_key_path: ""
|
|
||||||
# TF_VAR_operating_system: ubuntu_16_04
|
|
||||||
#
|
|
||||||
# tf-packet-ubuntu18-default:
|
|
||||||
# extends: .terraform_apply
|
# extends: .terraform_apply
|
||||||
# variables:
|
# variables:
|
||||||
# TF_VERSION: $TERRAFORM_VERSION
|
# TF_VERSION: $TERRAFORM_VERSION
|
||||||
@@ -126,7 +118,7 @@ tf-validate-upcloud:
|
|||||||
# TF_VAR_plan_k8s_nodes: t1.small.x86
|
# TF_VAR_plan_k8s_nodes: t1.small.x86
|
||||||
# TF_VAR_metro: am
|
# TF_VAR_metro: am
|
||||||
# TF_VAR_public_key_path: ""
|
# TF_VAR_public_key_path: ""
|
||||||
# TF_VAR_operating_system: ubuntu_18_04
|
# TF_VAR_operating_system: ubuntu_20_04
|
||||||
|
|
||||||
.ovh_variables: &ovh_variables
|
.ovh_variables: &ovh_variables
|
||||||
OS_AUTH_URL: https://auth.cloud.ovh.net/v3
|
OS_AUTH_URL: https://auth.cloud.ovh.net/v3
|
||||||
@@ -164,7 +156,7 @@ tf-elastx_cleanup:
|
|||||||
script:
|
script:
|
||||||
- ./scripts/openstack-cleanup/main.py
|
- ./scripts/openstack-cleanup/main.py
|
||||||
|
|
||||||
tf-elastx_ubuntu18-calico:
|
tf-elastx_ubuntu20-calico:
|
||||||
extends: .terraform_apply
|
extends: .terraform_apply
|
||||||
stage: deploy-part3
|
stage: deploy-part3
|
||||||
when: on_success
|
when: on_success
|
||||||
@@ -194,7 +186,7 @@ tf-elastx_ubuntu18-calico:
|
|||||||
TF_VAR_az_list_node: '["sto1"]'
|
TF_VAR_az_list_node: '["sto1"]'
|
||||||
TF_VAR_flavor_k8s_master: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
|
TF_VAR_flavor_k8s_master: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
|
||||||
TF_VAR_flavor_k8s_node: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
|
TF_VAR_flavor_k8s_node: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
|
||||||
TF_VAR_image: ubuntu-18.04-server-latest
|
TF_VAR_image: ubuntu-20.04-server-latest
|
||||||
TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
|
TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
|
||||||
|
|
||||||
# OVH voucher expired, commenting job until things are sorted out
|
# OVH voucher expired, commenting job until things are sorted out
|
||||||
@@ -211,7 +203,7 @@ tf-elastx_ubuntu18-calico:
|
|||||||
# script:
|
# script:
|
||||||
# - ./scripts/openstack-cleanup/main.py
|
# - ./scripts/openstack-cleanup/main.py
|
||||||
|
|
||||||
# tf-ovh_ubuntu18-calico:
|
# tf-ovh_ubuntu20-calico:
|
||||||
# extends: .terraform_apply
|
# extends: .terraform_apply
|
||||||
# when: on_success
|
# when: on_success
|
||||||
# environment: ovh
|
# environment: ovh
|
||||||
@@ -237,5 +229,5 @@ tf-elastx_ubuntu18-calico:
|
|||||||
# TF_VAR_network_name: "Ext-Net"
|
# TF_VAR_network_name: "Ext-Net"
|
||||||
# TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
|
# TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
|
||||||
# TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
|
# TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
|
||||||
# TF_VAR_image: "Ubuntu 18.04"
|
# TF_VAR_image: "Ubuntu 20.04"
|
||||||
# TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
|
# TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
|
||||||
|
|||||||
@@ -13,28 +13,19 @@
|
|||||||
image: $PIPELINE_IMAGE
|
image: $PIPELINE_IMAGE
|
||||||
services: []
|
services: []
|
||||||
before_script:
|
before_script:
|
||||||
- apt-get update && apt-get install -y python3-pip
|
|
||||||
- update-alternatives --install /usr/bin/python python /usr/bin/python3 10
|
|
||||||
- python -m pip uninstall -y ansible ansible-base ansible-core
|
|
||||||
- python -m pip install -r tests/requirements.txt
|
|
||||||
- ./tests/scripts/vagrant_clean.sh
|
- ./tests/scripts/vagrant_clean.sh
|
||||||
script:
|
script:
|
||||||
- ./tests/scripts/testcases_run.sh
|
- ./tests/scripts/testcases_run.sh
|
||||||
after_script:
|
after_script:
|
||||||
- chronic ./tests/scripts/testcases_cleanup.sh
|
- chronic ./tests/scripts/testcases_cleanup.sh
|
||||||
allow_failure: true
|
|
||||||
|
|
||||||
vagrant_ubuntu18-calico-dual-stack:
|
vagrant_ubuntu20-calico-dual-stack:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .vagrant
|
extends: .vagrant
|
||||||
when: on_success
|
when: manual
|
||||||
|
# FIXME: this test if broken (perma-failing)
|
||||||
|
|
||||||
vagrant_ubuntu18-flannel:
|
vagrant_ubuntu20-weave-medium:
|
||||||
stage: deploy-part2
|
|
||||||
extends: .vagrant
|
|
||||||
when: on_success
|
|
||||||
|
|
||||||
vagrant_ubuntu18-weave-medium:
|
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .vagrant
|
extends: .vagrant
|
||||||
when: manual
|
when: manual
|
||||||
@@ -50,21 +41,22 @@ vagrant_ubuntu20-flannel-collection:
|
|||||||
extends: .vagrant
|
extends: .vagrant
|
||||||
when: on_success
|
when: on_success
|
||||||
|
|
||||||
vagrant_ubuntu16-kube-router-sep:
|
vagrant_ubuntu20-kube-router-sep:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .vagrant
|
extends: .vagrant
|
||||||
when: manual
|
when: manual
|
||||||
|
|
||||||
# Service proxy test fails connectivity testing
|
# Service proxy test fails connectivity testing
|
||||||
vagrant_ubuntu16-kube-router-svc-proxy:
|
vagrant_ubuntu20-kube-router-svc-proxy:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .vagrant
|
extends: .vagrant
|
||||||
when: manual
|
when: manual
|
||||||
|
|
||||||
vagrant_fedora35-kube-router:
|
vagrant_fedora37-kube-router:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
extends: .vagrant
|
extends: .vagrant
|
||||||
when: on_success
|
when: manual
|
||||||
|
# FIXME: this test if broken (perma-failing)
|
||||||
|
|
||||||
vagrant_centos7-kube-router:
|
vagrant_centos7-kube-router:
|
||||||
stage: deploy-part2
|
stage: deploy-part2
|
||||||
|
|||||||
@@ -1,3 +0,0 @@
|
|||||||
---
|
|
||||||
MD013: false
|
|
||||||
MD029: false
|
|
||||||
4
.md_style.rb
Normal file
4
.md_style.rb
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
all
|
||||||
|
exclude_rule 'MD013'
|
||||||
|
exclude_rule 'MD029'
|
||||||
|
rule 'MD007', :indent => 2
|
||||||
@@ -1,8 +1,7 @@
|
|||||||
---
|
---
|
||||||
repos:
|
repos:
|
||||||
|
|
||||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||||
rev: v3.4.0
|
rev: v4.6.0
|
||||||
hooks:
|
hooks:
|
||||||
- id: check-added-large-files
|
- id: check-added-large-files
|
||||||
- id: check-case-conflict
|
- id: check-case-conflict
|
||||||
@@ -16,47 +15,59 @@ repos:
|
|||||||
- id: trailing-whitespace
|
- id: trailing-whitespace
|
||||||
|
|
||||||
- repo: https://github.com/adrienverge/yamllint.git
|
- repo: https://github.com/adrienverge/yamllint.git
|
||||||
rev: v1.27.1
|
rev: v1.35.1
|
||||||
hooks:
|
hooks:
|
||||||
- id: yamllint
|
- id: yamllint
|
||||||
args: [--strict]
|
args: [--strict]
|
||||||
|
|
||||||
- repo: https://github.com/markdownlint/markdownlint
|
- repo: https://github.com/markdownlint/markdownlint
|
||||||
rev: v0.11.0
|
rev: v0.12.0
|
||||||
hooks:
|
hooks:
|
||||||
- id: markdownlint
|
- id: markdownlint
|
||||||
args: [ -r, "~MD013,~MD029" ]
|
exclude: "^.github|(^docs/_sidebar\\.md$)"
|
||||||
exclude: "^.git"
|
|
||||||
|
|
||||||
- repo: https://github.com/jumanjihouse/pre-commit-hooks
|
- repo: https://github.com/shellcheck-py/shellcheck-py
|
||||||
rev: 3.0.0
|
rev: v0.10.0.1
|
||||||
hooks:
|
hooks:
|
||||||
- id: shellcheck
|
- id: shellcheck
|
||||||
args: [ --severity, "error" ]
|
args: ["--severity=error"]
|
||||||
exclude: "^.git"
|
exclude: "^.git"
|
||||||
files: "\\.sh$"
|
files: "\\.sh$"
|
||||||
|
|
||||||
- repo: local
|
- repo: https://github.com/ansible/ansible-lint
|
||||||
|
rev: v24.5.0
|
||||||
hooks:
|
hooks:
|
||||||
- id: ansible-lint
|
- id: ansible-lint
|
||||||
name: ansible-lint
|
|
||||||
entry: ansible-lint -v
|
|
||||||
language: python
|
|
||||||
pass_filenames: false
|
|
||||||
additional_dependencies:
|
additional_dependencies:
|
||||||
- .[community]
|
- ansible==9.5.1
|
||||||
|
- jsonschema==4.22.0
|
||||||
|
- jmespath==1.0.1
|
||||||
|
- netaddr==1.2.1
|
||||||
|
|
||||||
|
- repo: https://github.com/VannTen/misspell
|
||||||
|
# Waiting on https://github.com/golangci/misspell/pull/19 to get merged
|
||||||
|
rev: 8592a4e
|
||||||
|
hooks:
|
||||||
|
- id: misspell
|
||||||
|
exclude: "OWNERS_ALIASES$"
|
||||||
|
|
||||||
|
- repo: local
|
||||||
|
hooks:
|
||||||
- id: ansible-syntax-check
|
- id: ansible-syntax-check
|
||||||
name: ansible-syntax-check
|
name: ansible-syntax-check
|
||||||
entry: env ANSIBLE_INVENTORY=inventory/local-tests.cfg ANSIBLE_REMOTE_USER=root ANSIBLE_BECOME="true" ANSIBLE_BECOME_USER=root ANSIBLE_VERBOSITY="3" ansible-playbook --syntax-check
|
entry: env ANSIBLE_INVENTORY=inventory/local-tests.cfg ANSIBLE_REMOTE_USER=root ANSIBLE_BECOME="true" ANSIBLE_BECOME_USER=root ANSIBLE_VERBOSITY="3" ansible-playbook --syntax-check
|
||||||
language: python
|
language: python
|
||||||
files: "^cluster.yml|^upgrade-cluster.yml|^reset.yml|^extra_playbooks/upgrade-only-k8s.yml"
|
files: "^cluster.yml|^upgrade-cluster.yml|^reset.yml|^extra_playbooks/upgrade-only-k8s.yml"
|
||||||
|
additional_dependencies:
|
||||||
|
- ansible==9.5.1
|
||||||
|
|
||||||
- id: tox-inventory-builder
|
- id: tox-inventory-builder
|
||||||
name: tox-inventory-builder
|
name: tox-inventory-builder
|
||||||
entry: bash -c "cd contrib/inventory_builder && tox"
|
entry: bash -c "cd contrib/inventory_builder && tox"
|
||||||
language: python
|
language: python
|
||||||
pass_filenames: false
|
pass_filenames: false
|
||||||
|
additional_dependencies:
|
||||||
|
- tox==4.15.0
|
||||||
|
|
||||||
- id: check-readme-versions
|
- id: check-readme-versions
|
||||||
name: check-readme-versions
|
name: check-readme-versions
|
||||||
@@ -64,8 +75,35 @@ repos:
|
|||||||
language: script
|
language: script
|
||||||
pass_filenames: false
|
pass_filenames: false
|
||||||
|
|
||||||
- id: ci-matrix
|
- id: collection-build-install
|
||||||
name: ci-matrix
|
name: Build and install kubernetes-sigs.kubespray Ansible collection
|
||||||
entry: tests/scripts/md-table/test.sh
|
language: python
|
||||||
|
additional_dependencies:
|
||||||
|
- ansible-core>=2.16.4
|
||||||
|
entry: tests/scripts/collection-build-install.sh
|
||||||
|
pass_filenames: false
|
||||||
|
|
||||||
|
- id: generate-docs-sidebar
|
||||||
|
name: generate-docs-sidebar
|
||||||
|
entry: scripts/gen_docs_sidebar.sh
|
||||||
language: script
|
language: script
|
||||||
pass_filenames: false
|
pass_filenames: false
|
||||||
|
|
||||||
|
- id: ci-matrix
|
||||||
|
name: ci-matrix
|
||||||
|
entry: tests/scripts/md-table/main.py
|
||||||
|
language: python
|
||||||
|
pass_filenames: false
|
||||||
|
additional_dependencies:
|
||||||
|
- jinja2
|
||||||
|
- pathlib
|
||||||
|
- pyaml
|
||||||
|
|
||||||
|
- id: jinja-syntax-check
|
||||||
|
name: jinja-syntax-check
|
||||||
|
entry: tests/scripts/check-templates.py
|
||||||
|
language: python
|
||||||
|
types:
|
||||||
|
- jinja
|
||||||
|
additional_dependencies:
|
||||||
|
- jinja2
|
||||||
|
|||||||
@@ -3,6 +3,7 @@ extends: default
|
|||||||
|
|
||||||
ignore: |
|
ignore: |
|
||||||
.git/
|
.git/
|
||||||
|
.github/
|
||||||
# Generated file
|
# Generated file
|
||||||
tests/files/custom_cni/cilium.yaml
|
tests/files/custom_cni/cilium.yaml
|
||||||
|
|
||||||
|
|||||||
1
CHANGELOG.md
Normal file
1
CHANGELOG.md
Normal file
@@ -0,0 +1 @@
|
|||||||
|
# See our release notes on [GitHub](https://github.com/kubernetes-sigs/kubespray/releases)
|
||||||
@@ -12,6 +12,7 @@ To install development dependencies you can set up a python virtual env with the
|
|||||||
virtualenv venv
|
virtualenv venv
|
||||||
source venv/bin/activate
|
source venv/bin/activate
|
||||||
pip install -r tests/requirements.txt
|
pip install -r tests/requirements.txt
|
||||||
|
ansible-galaxy install -r tests/requirements.yml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Linting
|
#### Linting
|
||||||
|
|||||||
67
Dockerfile
67
Dockerfile
@@ -1,41 +1,52 @@
|
|||||||
|
# syntax=docker/dockerfile:1
|
||||||
|
|
||||||
# Use imutable image tags rather than mutable tags (like ubuntu:22.04)
|
# Use imutable image tags rather than mutable tags (like ubuntu:22.04)
|
||||||
FROM ubuntu:jammy-20230308
|
FROM ubuntu:22.04@sha256:149d67e29f765f4db62aa52161009e99e389544e25a8f43c8c89d4a445a7ca37
|
||||||
|
|
||||||
# Some tools like yamllint need this
|
# Some tools like yamllint need this
|
||||||
# Pip needs this as well at the moment to install ansible
|
# Pip needs this as well at the moment to install ansible
|
||||||
# (and potentially other packages)
|
# (and potentially other packages)
|
||||||
# See: https://github.com/pypa/pip/issues/10219
|
# See: https://github.com/pypa/pip/issues/10219
|
||||||
ENV LANG=C.UTF-8 \
|
ENV LANG=C.UTF-8 \
|
||||||
DEBIAN_FRONTEND=noninteractive \
|
DEBIAN_FRONTEND=noninteractive \
|
||||||
PYTHONDONTWRITEBYTECODE=1
|
PYTHONDONTWRITEBYTECODE=1
|
||||||
|
|
||||||
WORKDIR /kubespray
|
WORKDIR /kubespray
|
||||||
COPY *yml .
|
|
||||||
|
# hadolint ignore=DL3008
|
||||||
|
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||||
|
apt-get update -q \
|
||||||
|
&& apt-get install -yq --no-install-recommends \
|
||||||
|
curl \
|
||||||
|
python3 \
|
||||||
|
python3-pip \
|
||||||
|
sshpass \
|
||||||
|
vim \
|
||||||
|
rsync \
|
||||||
|
openssh-client \
|
||||||
|
&& apt-get clean \
|
||||||
|
&& rm -rf /var/lib/apt/lists/* /var/log/*
|
||||||
|
|
||||||
|
RUN --mount=type=bind,source=requirements.txt,target=requirements.txt \
|
||||||
|
--mount=type=cache,sharing=locked,id=pipcache,mode=0777,target=/root/.cache/pip \
|
||||||
|
pip install --no-compile --no-cache-dir -r requirements.txt \
|
||||||
|
&& find /usr -type d -name '*__pycache__' -prune -exec rm -rf {} \;
|
||||||
|
|
||||||
|
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
|
||||||
|
|
||||||
|
RUN --mount=type=bind,source=roles/kubespray-defaults/defaults/main/main.yml,target=roles/kubespray-defaults/defaults/main/main.yml \
|
||||||
|
KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main/main.yml) \
|
||||||
|
OS_ARCHITECTURE=$(dpkg --print-architecture) \
|
||||||
|
&& curl -L "https://dl.k8s.io/release/${KUBE_VERSION}/bin/linux/${OS_ARCHITECTURE}/kubectl" -o /usr/local/bin/kubectl \
|
||||||
|
&& echo "$(curl -L "https://dl.k8s.io/release/${KUBE_VERSION}/bin/linux/${OS_ARCHITECTURE}/kubectl.sha256")" /usr/local/bin/kubectl | sha256sum --check \
|
||||||
|
&& chmod a+x /usr/local/bin/kubectl
|
||||||
|
|
||||||
|
COPY *.yml ./
|
||||||
|
COPY *.cfg ./
|
||||||
COPY roles ./roles
|
COPY roles ./roles
|
||||||
COPY contrib ./contrib
|
COPY contrib ./contrib
|
||||||
COPY inventory ./inventory
|
COPY inventory ./inventory
|
||||||
COPY library ./library
|
COPY library ./library
|
||||||
COPY extra_playbooks ./extra_playbooks
|
COPY extra_playbooks ./extra_playbooks
|
||||||
|
COPY playbooks ./playbooks
|
||||||
RUN apt update -q \
|
COPY plugins ./plugins
|
||||||
&& apt install -yq --no-install-recommends \
|
|
||||||
curl \
|
|
||||||
python3 \
|
|
||||||
python3-pip \
|
|
||||||
sshpass \
|
|
||||||
vim \
|
|
||||||
rsync \
|
|
||||||
openssh-client \
|
|
||||||
&& pip install --no-compile --no-cache-dir \
|
|
||||||
ansible==5.7.1 \
|
|
||||||
ansible-core==2.12.5 \
|
|
||||||
cryptography==3.4.8 \
|
|
||||||
jinja2==3.1.2 \
|
|
||||||
netaddr==0.8.0 \
|
|
||||||
jmespath==1.0.1 \
|
|
||||||
MarkupSafe==2.1.2 \
|
|
||||||
ruamel.yaml==0.17.21 \
|
|
||||||
&& KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
|
|
||||||
&& curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \
|
|
||||||
&& echo $(curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \
|
|
||||||
&& chmod a+x /usr/local/bin/kubectl \
|
|
||||||
&& rm -rf /var/lib/apt/lists/* /var/log/* \
|
|
||||||
&& find / -type d -name '*__pycache__' -prune -exec rm -rf {} \;
|
|
||||||
|
|||||||
@@ -1,30 +1,24 @@
|
|||||||
aliases:
|
aliases:
|
||||||
kubespray-approvers:
|
kubespray-approvers:
|
||||||
- mattymo
|
- cristicalin
|
||||||
- chadswen
|
|
||||||
- mirwan
|
|
||||||
- miouge1
|
|
||||||
- luckysb
|
|
||||||
- floryut
|
- floryut
|
||||||
- oomichi
|
|
||||||
- cristicalin
|
|
||||||
- liupeng0518
|
- liupeng0518
|
||||||
- yankay
|
|
||||||
- mzaian
|
- mzaian
|
||||||
|
- oomichi
|
||||||
|
- yankay
|
||||||
kubespray-reviewers:
|
kubespray-reviewers:
|
||||||
- holmsten
|
|
||||||
- bozzo
|
|
||||||
- eppo
|
|
||||||
- oomichi
|
|
||||||
- jayonlau
|
|
||||||
- cristicalin
|
|
||||||
- liupeng0518
|
|
||||||
- yankay
|
|
||||||
- cyclinder
|
- cyclinder
|
||||||
- mzaian
|
- erikjiang
|
||||||
- mrfreezeex
|
- mrfreezeex
|
||||||
|
- mzaian
|
||||||
|
- vannten
|
||||||
|
- yankay
|
||||||
kubespray-emeritus_approvers:
|
kubespray-emeritus_approvers:
|
||||||
- riverzhang
|
|
||||||
- atoms
|
|
||||||
- ant31
|
- ant31
|
||||||
|
- atoms
|
||||||
|
- chadswen
|
||||||
|
- luckysb
|
||||||
|
- mattymo
|
||||||
|
- miouge1
|
||||||
|
- riverzhang
|
||||||
- woopstar
|
- woopstar
|
||||||
|
|||||||
163
README.md
163
README.md
@@ -5,7 +5,7 @@
|
|||||||
If you have questions, check the documentation at [kubespray.io](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
|
If you have questions, check the documentation at [kubespray.io](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
|
||||||
You can get your invite [here](http://slack.k8s.io/)
|
You can get your invite [here](http://slack.k8s.io/)
|
||||||
|
|
||||||
- Can be deployed on **[AWS](docs/aws.md), GCE, [Azure](docs/azure.md), [OpenStack](docs/openstack.md), [vSphere](docs/vsphere.md), [Equinix Metal](docs/equinix-metal.md) (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal**
|
- Can be deployed on **[AWS](docs/cloud_providers/aws.md), GCE, [Azure](docs/cloud_providers/azure.md), [OpenStack](docs/cloud_providers/openstack.md), [vSphere](docs/cloud_providers/vsphere.md), [Equinix Metal](docs/cloud_providers/equinix-metal.md) (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal**
|
||||||
- **Highly available** cluster
|
- **Highly available** cluster
|
||||||
- **Composable** (Choice of the network plugin for instance)
|
- **Composable** (Choice of the network plugin for instance)
|
||||||
- Supports most popular **Linux distributions**
|
- Supports most popular **Linux distributions**
|
||||||
@@ -19,7 +19,7 @@ Below are several ways to use Kubespray to deploy a Kubernetes cluster.
|
|||||||
|
|
||||||
#### Usage
|
#### Usage
|
||||||
|
|
||||||
Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible)
|
Install Ansible according to [Ansible installation guide](/docs/ansible/ansible.md#installing-ansible)
|
||||||
then run the following steps:
|
then run the following steps:
|
||||||
|
|
||||||
```ShellSession
|
```ShellSession
|
||||||
@@ -34,7 +34,7 @@ CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inv
|
|||||||
cat inventory/mycluster/group_vars/all/all.yml
|
cat inventory/mycluster/group_vars/all/all.yml
|
||||||
cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
|
cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
|
||||||
|
|
||||||
# Clean up old Kubernete cluster with Ansible Playbook - run the playbook as root
|
# Clean up old Kubernetes cluster with Ansible Playbook - run the playbook as root
|
||||||
# The option `--become` is required, as for example cleaning up SSL keys in /etc/,
|
# The option `--become` is required, as for example cleaning up SSL keys in /etc/,
|
||||||
# uninstalling old packages and interacting with various systemd daemons.
|
# uninstalling old packages and interacting with various systemd daemons.
|
||||||
# Without --become the playbook will fail to run!
|
# Without --become the playbook will fail to run!
|
||||||
@@ -75,18 +75,18 @@ You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mou
|
|||||||
to access the inventory and SSH key in the container, like this:
|
to access the inventory and SSH key in the container, like this:
|
||||||
|
|
||||||
```ShellSession
|
```ShellSession
|
||||||
git checkout v2.21.0
|
git checkout v2.25.0
|
||||||
docker pull quay.io/kubespray/kubespray:v2.21.0
|
docker pull quay.io/kubespray/kubespray:v2.25.0
|
||||||
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
|
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
|
||||||
--mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
|
--mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
|
||||||
quay.io/kubespray/kubespray:v2.21.0 bash
|
quay.io/kubespray/kubespray:v2.25.0 bash
|
||||||
# Inside the container you may now run the kubespray playbooks:
|
# Inside the container you may now run the kubespray playbooks:
|
||||||
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
|
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Collection
|
#### Collection
|
||||||
|
|
||||||
See [here](docs/ansible_collection.md) if you wish to use this repository as an Ansible collection
|
See [here](docs/ansible/ansible_collection.md) if you wish to use this repository as an Ansible collection
|
||||||
|
|
||||||
### Vagrant
|
### Vagrant
|
||||||
|
|
||||||
@@ -99,7 +99,7 @@ python -V && pip -V
|
|||||||
|
|
||||||
If this returns the version of the software, you're good to go. If not, download and install Python from here <https://www.python.org/downloads/source/>
|
If this returns the version of the software, you're good to go. If not, download and install Python from here <https://www.python.org/downloads/source/>
|
||||||
|
|
||||||
Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible)
|
Install Ansible according to [Ansible installation guide](/docs/ansible/ansible.md#installing-ansible)
|
||||||
then run the following step:
|
then run the following step:
|
||||||
|
|
||||||
```ShellSession
|
```ShellSession
|
||||||
@@ -109,80 +109,79 @@ vagrant up
|
|||||||
## Documents
|
## Documents
|
||||||
|
|
||||||
- [Requirements](#requirements)
|
- [Requirements](#requirements)
|
||||||
- [Kubespray vs ...](docs/comparisons.md)
|
- [Kubespray vs ...](docs/getting_started/comparisons.md)
|
||||||
- [Getting started](docs/getting-started.md)
|
- [Getting started](docs/getting_started/getting-started.md)
|
||||||
- [Setting up your first cluster](docs/setting-up-your-first-cluster.md)
|
- [Setting up your first cluster](docs/getting_started/setting-up-your-first-cluster.md)
|
||||||
- [Ansible inventory and tags](docs/ansible.md)
|
- [Ansible inventory and tags](docs/ansible/ansible.md)
|
||||||
- [Integration with existing ansible repo](docs/integration.md)
|
- [Integration with existing ansible repo](docs/operations/integration.md)
|
||||||
- [Deployment data variables](docs/vars.md)
|
- [Deployment data variables](docs/ansible/vars.md)
|
||||||
- [DNS stack](docs/dns-stack.md)
|
- [DNS stack](docs/advanced/dns-stack.md)
|
||||||
- [HA mode](docs/ha-mode.md)
|
- [HA mode](docs/operations/ha-mode.md)
|
||||||
- [Network plugins](#network-plugins)
|
- [Network plugins](#network-plugins)
|
||||||
- [Vagrant install](docs/vagrant.md)
|
- [Vagrant install](docs/developers/vagrant.md)
|
||||||
- [Flatcar Container Linux bootstrap](docs/flatcar.md)
|
- [Flatcar Container Linux bootstrap](docs/operating_systems/flatcar.md)
|
||||||
- [Fedora CoreOS bootstrap](docs/fcos.md)
|
- [Fedora CoreOS bootstrap](docs/operating_systems/fcos.md)
|
||||||
- [Debian Jessie setup](docs/debian.md)
|
- [openSUSE setup](docs/operating_systems/opensuse.md)
|
||||||
- [openSUSE setup](docs/opensuse.md)
|
- [Downloaded artifacts](docs/advanced/downloads.md)
|
||||||
- [Downloaded artifacts](docs/downloads.md)
|
- [Cloud providers](docs/cloud_providers/cloud.md)
|
||||||
- [Cloud providers](docs/cloud.md)
|
- [OpenStack](docs/cloud_providers/openstack.md)
|
||||||
- [OpenStack](docs/openstack.md)
|
- [AWS](docs/cloud_providers/aws.md)
|
||||||
- [AWS](docs/aws.md)
|
- [Azure](docs/cloud_providers/azure.md)
|
||||||
- [Azure](docs/azure.md)
|
- [vSphere](docs/cloud_providers/vsphere.md)
|
||||||
- [vSphere](docs/vsphere.md)
|
- [Equinix Metal](docs/cloud_providers/equinix-metal.md)
|
||||||
- [Equinix Metal](docs/equinix-metal.md)
|
- [Large deployments](docs/operations/large-deployments.md)
|
||||||
- [Large deployments](docs/large-deployments.md)
|
- [Adding/replacing a node](docs/operations/nodes.md)
|
||||||
- [Adding/replacing a node](docs/nodes.md)
|
- [Upgrades basics](docs/operations/upgrades.md)
|
||||||
- [Upgrades basics](docs/upgrades.md)
|
- [Air-Gap installation](docs/operations/offline-environment.md)
|
||||||
- [Air-Gap installation](docs/offline-environment.md)
|
- [NTP](docs/advanced/ntp.md)
|
||||||
- [NTP](docs/ntp.md)
|
- [Hardening](docs/operations/hardening.md)
|
||||||
- [Hardening](docs/hardening.md)
|
- [Mirror](docs/operations/mirror.md)
|
||||||
- [Mirror](docs/mirror.md)
|
- [Roadmap](docs/roadmap/roadmap.md)
|
||||||
- [Roadmap](docs/roadmap.md)
|
|
||||||
|
|
||||||
## Supported Linux Distributions
|
## Supported Linux Distributions
|
||||||
|
|
||||||
- **Flatcar Container Linux by Kinvolk**
|
- **Flatcar Container Linux by Kinvolk**
|
||||||
- **Debian** Bullseye, Buster
|
- **Debian** Bookworm, Bullseye, Buster
|
||||||
- **Ubuntu** 16.04, 18.04, 20.04, 22.04
|
- **Ubuntu** 20.04, 22.04, 24.04
|
||||||
- **CentOS/RHEL** 7, [8, 9](docs/centos.md#centos-8)
|
- **CentOS/RHEL** 7, [8, 9](docs/operating_systems/centos.md#centos-8)
|
||||||
- **Fedora** 35, 36
|
- **Fedora** 37, 38
|
||||||
- **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
|
- **Fedora CoreOS** (see [fcos Note](docs/operating_systems/fcos.md))
|
||||||
- **openSUSE** Leap 15.x/Tumbleweed
|
- **openSUSE** Leap 15.x/Tumbleweed
|
||||||
- **Oracle Linux** 7, [8, 9](docs/centos.md#centos-8)
|
- **Oracle Linux** 7, [8, 9](docs/operating_systems/centos.md#centos-8)
|
||||||
- **Alma Linux** [8, 9](docs/centos.md#centos-8)
|
- **Alma Linux** [8, 9](docs/operating_systems/centos.md#centos-8)
|
||||||
- **Rocky Linux** [8, 9](docs/centos.md#centos-8)
|
- **Rocky Linux** [8, 9](docs/operating_systems/centos.md#centos-8)
|
||||||
- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/kylinlinux.md))
|
- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/operating_systems/kylinlinux.md))
|
||||||
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md))
|
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/operating_systems/amazonlinux.md))
|
||||||
- **UOS Linux** (experimental: see [uos linux notes](docs/uoslinux.md))
|
- **UOS Linux** (experimental: see [uos linux notes](docs/operating_systems/uoslinux.md))
|
||||||
- **openEuler** (experimental: see [openEuler notes](docs/openeuler.md))
|
- **openEuler** (experimental: see [openEuler notes](docs/operating_systems/openeuler.md))
|
||||||
|
|
||||||
Note: Upstart/SysV init based OS types are not supported.
|
Note: Upstart/SysV init based OS types are not supported.
|
||||||
|
|
||||||
## Supported Components
|
## Supported Components
|
||||||
|
|
||||||
- Core
|
- Core
|
||||||
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.26.5
|
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.29.5
|
||||||
- [etcd](https://github.com/etcd-io/etcd) v3.5.6
|
- [etcd](https://github.com/etcd-io/etcd) v3.5.12
|
||||||
- [docker](https://www.docker.com/) v20.10 (see note)
|
- [docker](https://www.docker.com/) v26.1
|
||||||
- [containerd](https://containerd.io/) v1.7.1
|
- [containerd](https://containerd.io/) v1.7.16
|
||||||
- [cri-o](http://cri-o.io/) v1.24 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
|
- [cri-o](http://cri-o.io/) v1.29.1 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). Only on fedora, ubuntu and centos based OS)
|
||||||
- Network Plugin
|
- Network Plugin
|
||||||
- [cni-plugins](https://github.com/containernetworking/plugins) v1.2.0
|
- [cni-plugins](https://github.com/containernetworking/plugins) v1.2.0
|
||||||
- [calico](https://github.com/projectcalico/calico) v3.25.1
|
- [calico](https://github.com/projectcalico/calico) v3.27.3
|
||||||
- [cilium](https://github.com/cilium/cilium) v1.13.0
|
- [cilium](https://github.com/cilium/cilium) v1.15.4
|
||||||
- [flannel](https://github.com/flannel-io/flannel) v0.21.4
|
- [flannel](https://github.com/flannel-io/flannel) v0.22.0
|
||||||
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.10.7
|
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.11.5
|
||||||
- [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1
|
- [kube-router](https://github.com/cloudnativelabs/kube-router) v2.0.0
|
||||||
- [multus](https://github.com/k8snetworkplumbingwg/multus-cni) v3.8
|
- [multus](https://github.com/k8snetworkplumbingwg/multus-cni) v3.8
|
||||||
- [weave](https://github.com/weaveworks/weave) v2.8.1
|
- [weave](https://github.com/weaveworks/weave) v2.8.1
|
||||||
- [kube-vip](https://github.com/kube-vip/kube-vip) v0.5.12
|
- [kube-vip](https://github.com/kube-vip/kube-vip) v0.8.0
|
||||||
- Application
|
- Application
|
||||||
- [cert-manager](https://github.com/jetstack/cert-manager) v1.11.1
|
- [cert-manager](https://github.com/jetstack/cert-manager) v1.13.2
|
||||||
- [coredns](https://github.com/coredns/coredns) v1.9.3
|
- [coredns](https://github.com/coredns/coredns) v1.11.1
|
||||||
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.7.1
|
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.10.1
|
||||||
- [krew](https://github.com/kubernetes-sigs/krew) v0.4.3
|
- [krew](https://github.com/kubernetes-sigs/krew) v0.4.4
|
||||||
- [argocd](https://argoproj.github.io/) v2.7.2
|
- [argocd](https://argoproj.github.io/) v2.11.0
|
||||||
- [helm](https://helm.sh/) v3.12.0
|
- [helm](https://helm.sh/) v3.14.2
|
||||||
- [metallb](https://metallb.universe.tf/) v0.13.9
|
- [metallb](https://metallb.universe.tf/) v0.13.9
|
||||||
- [registry](https://github.com/distribution/distribution) v2.8.1
|
- [registry](https://github.com/distribution/distribution) v2.8.1
|
||||||
- Storage Plugin
|
- Storage Plugin
|
||||||
@@ -190,21 +189,21 @@ Note: Upstart/SysV init based OS types are not supported.
|
|||||||
- [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
|
- [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
|
||||||
- [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) v0.5.0
|
- [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) v0.5.0
|
||||||
- [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0
|
- [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0
|
||||||
- [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.22.0
|
- [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.29.0
|
||||||
- [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.4.0
|
- [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.9.2
|
||||||
- [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.23
|
- [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.24
|
||||||
- [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0
|
- [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0
|
||||||
|
- [node-feature-discovery](https://github.com/kubernetes-sigs/node-feature-discovery) v0.14.2
|
||||||
|
|
||||||
## Container Runtime Notes
|
## Container Runtime Notes
|
||||||
|
|
||||||
- Supported Docker versions are 18.09, 19.03 and 20.10. The *recommended* Docker version is 20.10. `Kubelet` might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. the YUM ``versionlock`` plugin or ``apt pin``).
|
|
||||||
- The cri-o version should be aligned with the respective kubernetes version (i.e. kube_version=1.20.x, crio_version=1.20)
|
- The cri-o version should be aligned with the respective kubernetes version (i.e. kube_version=1.20.x, crio_version=1.20)
|
||||||
|
|
||||||
## Requirements
|
## Requirements
|
||||||
|
|
||||||
- **Minimum required version of Kubernetes is v1.24**
|
- **Minimum required version of Kubernetes is v1.28**
|
||||||
- **Ansible v2.11+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
|
- **Ansible v2.14+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
|
||||||
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
|
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/operations/offline-environment.md))
|
||||||
- The target servers are configured to allow **IPv4 forwarding**.
|
- The target servers are configured to allow **IPv4 forwarding**.
|
||||||
- If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**.
|
- If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**.
|
||||||
- The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
|
- The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
|
||||||
@@ -225,41 +224,41 @@ These limits are safeguarded by Kubespray. Actual requirements for your workload
|
|||||||
|
|
||||||
You can choose among ten network plugins. (default: `calico`, except Vagrant uses `flannel`)
|
You can choose among ten network plugins. (default: `calico`, except Vagrant uses `flannel`)
|
||||||
|
|
||||||
- [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.
|
- [flannel](docs/CNI/flannel.md): gre/vxlan (layer 2) networking.
|
||||||
|
|
||||||
- [Calico](https://docs.projectcalico.org/latest/introduction/) is a networking and network policy provider. Calico supports a flexible set of networking options
|
- [Calico](https://docs.tigera.io/calico/latest/about/) is a networking and network policy provider. Calico supports a flexible set of networking options
|
||||||
designed to give you the most efficient networking across a range of situations, including non-overlay
|
designed to give you the most efficient networking across a range of situations, including non-overlay
|
||||||
and overlay networks, with or without BGP. Calico uses the same engine to enforce network policy for hosts,
|
and overlay networks, with or without BGP. Calico uses the same engine to enforce network policy for hosts,
|
||||||
pods, and (if using Istio and Envoy) applications at the service mesh layer.
|
pods, and (if using Istio and Envoy) applications at the service mesh layer.
|
||||||
|
|
||||||
- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.
|
- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.
|
||||||
|
|
||||||
- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
|
- [weave](docs/CNI/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
|
||||||
(Please refer to `weave` [troubleshooting documentation](https://www.weave.works/docs/net/latest/troubleshooting/)).
|
(Please refer to `weave` [troubleshooting documentation](https://www.weave.works/docs/net/latest/troubleshooting/)).
|
||||||
|
|
||||||
- [kube-ovn](docs/kube-ovn.md): Kube-OVN integrates the OVN-based Network Virtualization with Kubernetes. It offers an advanced Container Network Fabric for Enterprises.
|
- [kube-ovn](docs/CNI/kube-ovn.md): Kube-OVN integrates the OVN-based Network Virtualization with Kubernetes. It offers an advanced Container Network Fabric for Enterprises.
|
||||||
|
|
||||||
- [kube-router](docs/kube-router.md): Kube-router is a L3 CNI for Kubernetes networking aiming to provide operational
|
- [kube-router](docs/CNI/kube-router.md): Kube-router is a L3 CNI for Kubernetes networking aiming to provide operational
|
||||||
simplicity and high performance: it uses IPVS to provide Kube Services Proxy (if setup to replace kube-proxy),
|
simplicity and high performance: it uses IPVS to provide Kube Services Proxy (if setup to replace kube-proxy),
|
||||||
iptables for network policies, and BGP for ods L3 networking (with optionally BGP peering with out-of-cluster BGP peers).
|
iptables for network policies, and BGP for ods L3 networking (with optionally BGP peering with out-of-cluster BGP peers).
|
||||||
It can also optionally advertise routes to Kubernetes cluster Pods CIDRs, ClusterIPs, ExternalIPs and LoadBalancerIPs.
|
It can also optionally advertise routes to Kubernetes cluster Pods CIDRs, ClusterIPs, ExternalIPs and LoadBalancerIPs.
|
||||||
|
|
||||||
- [macvlan](docs/macvlan.md): Macvlan is a Linux network driver. Pods have their own unique Mac and Ip address, connected directly the physical (layer 2) network.
|
- [macvlan](docs/CNI/macvlan.md): Macvlan is a Linux network driver. Pods have their own unique Mac and Ip address, connected directly the physical (layer 2) network.
|
||||||
|
|
||||||
- [multus](docs/multus.md): Multus is a meta CNI plugin that provides multiple network interface support to pods. For each interface Multus delegates CNI calls to secondary CNI plugins such as Calico, macvlan, etc.
|
- [multus](docs/CNI/multus.md): Multus is a meta CNI plugin that provides multiple network interface support to pods. For each interface Multus delegates CNI calls to secondary CNI plugins such as Calico, macvlan, etc.
|
||||||
|
|
||||||
- [custom_cni](roles/network-plugin/custom_cni/) : You can specify some manifests that will be applied to the clusters to bring you own CNI and use non-supported ones by Kubespray.
|
- [custom_cni](roles/network-plugin/custom_cni/) : You can specify some manifests that will be applied to the clusters to bring you own CNI and use non-supported ones by Kubespray.
|
||||||
See `tests/files/custom_cni/README.md` and `tests/files/custom_cni/values.yaml`for an example with a CNI provided by a Helm Chart.
|
See `tests/files/custom_cni/README.md` and `tests/files/custom_cni/values.yaml`for an example with a CNI provided by a Helm Chart.
|
||||||
|
|
||||||
The network plugin to use is defined by the variable `kube_network_plugin`. There is also an
|
The network plugin to use is defined by the variable `kube_network_plugin`. There is also an
|
||||||
option to leverage built-in cloud provider networking instead.
|
option to leverage built-in cloud provider networking instead.
|
||||||
See also [Network checker](docs/netcheck.md).
|
See also [Network checker](docs/advanced/netcheck.md).
|
||||||
|
|
||||||
## Ingress Plugins
|
## Ingress Plugins
|
||||||
|
|
||||||
- [nginx](https://kubernetes.github.io/ingress-nginx): the NGINX Ingress Controller.
|
- [nginx](https://kubernetes.github.io/ingress-nginx): the NGINX Ingress Controller.
|
||||||
|
|
||||||
- [metallb](docs/metallb.md): the MetalLB bare-metal service LoadBalancer provider.
|
- [metallb](docs/ingress/metallb.md): the MetalLB bare-metal service LoadBalancer provider.
|
||||||
|
|
||||||
## Community docs and resources
|
## Community docs and resources
|
||||||
|
|
||||||
@@ -280,4 +279,4 @@ See also [Network checker](docs/netcheck.md).
|
|||||||
|
|
||||||
CI/end-to-end tests sponsored by: [CNCF](https://cncf.io), [Equinix Metal](https://metal.equinix.com/), [OVHcloud](https://www.ovhcloud.com/), [ELASTX](https://elastx.se/).
|
CI/end-to-end tests sponsored by: [CNCF](https://cncf.io), [Equinix Metal](https://metal.equinix.com/), [OVHcloud](https://www.ovhcloud.com/), [ELASTX](https://elastx.se/).
|
||||||
|
|
||||||
See the [test matrix](docs/test_cases.md) for details.
|
See the [test matrix](docs/developers/test_cases.md) for details.
|
||||||
|
|||||||
24
RELEASE.md
24
RELEASE.md
@@ -3,17 +3,19 @@
|
|||||||
The Kubespray Project is released on an as-needed basis. The process is as follows:
|
The Kubespray Project is released on an as-needed basis. The process is as follows:
|
||||||
|
|
||||||
1. An issue is proposing a new release with a changelog since the last release. Please see [a good sample issue](https://github.com/kubernetes-sigs/kubespray/issues/8325)
|
1. An issue is proposing a new release with a changelog since the last release. Please see [a good sample issue](https://github.com/kubernetes-sigs/kubespray/issues/8325)
|
||||||
2. At least one of the [approvers](OWNERS_ALIASES) must approve this release
|
1. At least one of the [approvers](OWNERS_ALIASES) must approve this release
|
||||||
3. The `kube_version_min_required` variable is set to `n-1`
|
1. (Only for major releases) The `kube_version_min_required` variable is set to `n-1`
|
||||||
4. Remove hashes for [EOL versions](https://github.com/kubernetes/website/blob/main/content/en/releases/patch-releases.md) of kubernetes from `*_checksums` variables.
|
1. (Only for major releases) Remove hashes for [EOL versions](https://github.com/kubernetes/website/blob/main/content/en/releases/patch-releases.md) of kubernetes from `*_checksums` variables.
|
||||||
5. Create the release note with [Kubernetes Release Notes Generator](https://github.com/kubernetes/release/blob/master/cmd/release-notes/README.md). See the following `Release note creation` section for the details.
|
1. Create the release note with [Kubernetes Release Notes Generator](https://github.com/kubernetes/release/blob/master/cmd/release-notes/README.md). See the following `Release note creation` section for the details.
|
||||||
6. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes
|
1. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes
|
||||||
7. An approver creates a release branch in the form `release-X.Y`
|
1. (Only for major releases) An approver creates a release branch in the form `release-X.Y`
|
||||||
8. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) container images are built and tagged. See the following `Container image creation` section for the details.
|
1. (For major releases) On the `master` branch: bump the version in `galaxy.yml` to the next expected major release (X.y.0 with y = Y + 1), make a Pull Request.
|
||||||
9. The `KUBESPRAY_VERSION` variable is updated in `.gitlab-ci.yml`
|
1. (For minor releases) On the `release-X.Y` branch: bump the version in `galaxy.yml` to the next expected minor release (X.Y.z with z = Z + 1), make a Pull Request.
|
||||||
10. The release issue is closed
|
1. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) container images are built and tagged. See the following `Container image creation` section for the details.
|
||||||
11. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
|
1. (Only for major releases) The `KUBESPRAY_VERSION` in `.gitlab-ci.yml` is upgraded to the version we just released # TODO clarify this, this variable is for testing upgrades.
|
||||||
12. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
|
1. The release issue is closed
|
||||||
|
1. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
|
||||||
|
1. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
|
||||||
|
|
||||||
## Major/minor releases and milestones
|
## Major/minor releases and milestones
|
||||||
|
|
||||||
|
|||||||
58
Vagrantfile
vendored
58
Vagrantfile
vendored
@@ -10,7 +10,6 @@ Vagrant.require_version ">= 2.0.0"
|
|||||||
CONFIG = File.join(File.dirname(__FILE__), ENV['KUBESPRAY_VAGRANT_CONFIG'] || 'vagrant/config.rb')
|
CONFIG = File.join(File.dirname(__FILE__), ENV['KUBESPRAY_VAGRANT_CONFIG'] || 'vagrant/config.rb')
|
||||||
|
|
||||||
FLATCAR_URL_TEMPLATE = "https://%s.release.flatcar-linux.net/amd64-usr/current/flatcar_production_vagrant.json"
|
FLATCAR_URL_TEMPLATE = "https://%s.release.flatcar-linux.net/amd64-usr/current/flatcar_production_vagrant.json"
|
||||||
FEDORA35_MIRROR = "https://download.fedoraproject.org/pub/fedora/linux/releases/35/Cloud/x86_64/images/Fedora-Cloud-Base-Vagrant-35-1.2.x86_64.vagrant-libvirt.box"
|
|
||||||
|
|
||||||
# Uniq disk UUID for libvirt
|
# Uniq disk UUID for libvirt
|
||||||
DISK_UUID = Time.now.utc.to_i
|
DISK_UUID = Time.now.utc.to_i
|
||||||
@@ -20,24 +19,27 @@ SUPPORTED_OS = {
|
|||||||
"flatcar-beta" => {box: "flatcar-beta", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["beta"]},
|
"flatcar-beta" => {box: "flatcar-beta", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["beta"]},
|
||||||
"flatcar-alpha" => {box: "flatcar-alpha", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["alpha"]},
|
"flatcar-alpha" => {box: "flatcar-alpha", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["alpha"]},
|
||||||
"flatcar-edge" => {box: "flatcar-edge", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["edge"]},
|
"flatcar-edge" => {box: "flatcar-edge", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["edge"]},
|
||||||
"ubuntu1604" => {box: "generic/ubuntu1604", user: "vagrant"},
|
|
||||||
"ubuntu1804" => {box: "generic/ubuntu1804", user: "vagrant"},
|
|
||||||
"ubuntu2004" => {box: "generic/ubuntu2004", user: "vagrant"},
|
"ubuntu2004" => {box: "generic/ubuntu2004", user: "vagrant"},
|
||||||
|
"ubuntu2204" => {box: "generic/ubuntu2204", user: "vagrant"},
|
||||||
|
"ubuntu2404" => {box: "bento/ubuntu-24.04", user: "vagrant"},
|
||||||
"centos" => {box: "centos/7", user: "vagrant"},
|
"centos" => {box: "centos/7", user: "vagrant"},
|
||||||
"centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
|
"centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
|
||||||
"centos8" => {box: "centos/8", user: "vagrant"},
|
"centos8" => {box: "centos/8", user: "vagrant"},
|
||||||
"centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
|
"centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
|
||||||
"almalinux8" => {box: "almalinux/8", user: "vagrant"},
|
"almalinux8" => {box: "almalinux/8", user: "vagrant"},
|
||||||
"almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
|
"almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
|
||||||
"rockylinux8" => {box: "generic/rocky8", user: "vagrant"},
|
"rockylinux8" => {box: "rockylinux/8", user: "vagrant"},
|
||||||
"fedora35" => {box: "fedora/35-cloud-base", user: "vagrant", box_url: FEDORA35_MIRROR},
|
"rockylinux9" => {box: "rockylinux/9", user: "vagrant"},
|
||||||
"fedora36" => {box: "fedora/36-cloud-base", user: "vagrant"},
|
"fedora37" => {box: "fedora/37-cloud-base", user: "vagrant"},
|
||||||
|
"fedora38" => {box: "fedora/38-cloud-base", user: "vagrant"},
|
||||||
"opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"},
|
"opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"},
|
||||||
"opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
|
"opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
|
||||||
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
|
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
|
||||||
"oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
|
"oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
|
||||||
"rhel7" => {box: "generic/rhel7", user: "vagrant"},
|
"rhel7" => {box: "generic/rhel7", user: "vagrant"},
|
||||||
"rhel8" => {box: "generic/rhel8", user: "vagrant"},
|
"rhel8" => {box: "generic/rhel8", user: "vagrant"},
|
||||||
|
"debian11" => {box: "debian/bullseye64", user: "vagrant"},
|
||||||
|
"debian12" => {box: "debian/bookworm64", user: "vagrant"},
|
||||||
}
|
}
|
||||||
|
|
||||||
if File.exist?(CONFIG)
|
if File.exist?(CONFIG)
|
||||||
@@ -54,7 +56,7 @@ $shared_folders ||= {}
|
|||||||
$forwarded_ports ||= {}
|
$forwarded_ports ||= {}
|
||||||
$subnet ||= "172.18.8"
|
$subnet ||= "172.18.8"
|
||||||
$subnet_ipv6 ||= "fd3c:b398:0698:0756"
|
$subnet_ipv6 ||= "fd3c:b398:0698:0756"
|
||||||
$os ||= "ubuntu1804"
|
$os ||= "ubuntu2004"
|
||||||
$network_plugin ||= "flannel"
|
$network_plugin ||= "flannel"
|
||||||
# Setting multi_networking to true will install Multus: https://github.com/k8snetworkplumbingwg/multus-cni
|
# Setting multi_networking to true will install Multus: https://github.com/k8snetworkplumbingwg/multus-cni
|
||||||
$multi_networking ||= "False"
|
$multi_networking ||= "False"
|
||||||
@@ -79,7 +81,10 @@ $libvirt_nested ||= false
|
|||||||
$ansible_verbosity ||= false
|
$ansible_verbosity ||= false
|
||||||
$ansible_tags ||= ENV['VAGRANT_ANSIBLE_TAGS'] || ""
|
$ansible_tags ||= ENV['VAGRANT_ANSIBLE_TAGS'] || ""
|
||||||
|
|
||||||
|
$vagrant_dir ||= File.join(File.dirname(__FILE__), ".vagrant")
|
||||||
|
|
||||||
$playbook ||= "cluster.yml"
|
$playbook ||= "cluster.yml"
|
||||||
|
$extra_vars ||= {}
|
||||||
|
|
||||||
host_vars = {}
|
host_vars = {}
|
||||||
|
|
||||||
@@ -98,7 +103,7 @@ $inventory = File.absolute_path($inventory, File.dirname(__FILE__))
|
|||||||
# if $inventory has a hosts.ini file use it, otherwise copy over
|
# if $inventory has a hosts.ini file use it, otherwise copy over
|
||||||
# vars etc to where vagrant expects dynamic inventory to be
|
# vars etc to where vagrant expects dynamic inventory to be
|
||||||
if ! File.exist?(File.join(File.dirname($inventory), "hosts.ini"))
|
if ! File.exist?(File.join(File.dirname($inventory), "hosts.ini"))
|
||||||
$vagrant_ansible = File.join(File.dirname(__FILE__), ".vagrant", "provisioners", "ansible")
|
$vagrant_ansible = File.join(File.absolute_path($vagrant_dir), "provisioners", "ansible")
|
||||||
FileUtils.mkdir_p($vagrant_ansible) if ! File.exist?($vagrant_ansible)
|
FileUtils.mkdir_p($vagrant_ansible) if ! File.exist?($vagrant_ansible)
|
||||||
$vagrant_inventory = File.join($vagrant_ansible,"inventory")
|
$vagrant_inventory = File.join($vagrant_ansible,"inventory")
|
||||||
FileUtils.rm_f($vagrant_inventory)
|
FileUtils.rm_f($vagrant_inventory)
|
||||||
@@ -184,6 +189,14 @@ Vagrant.configure("2") do |config|
|
|||||||
lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "scsi"
|
lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "scsi"
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
node.vm.provider :virtualbox do |vb|
|
||||||
|
# always make /dev/sd{a/b/c} so that CI can ensure that
|
||||||
|
# virtualbox and libvirt will have the same devices to use for OSDs
|
||||||
|
(1..$kube_node_instances_with_disks_number).each do |d|
|
||||||
|
vb.customize ['createhd', '--filename', "disk-#{i}-#{driverletters[d]}-#{DISK_UUID}.disk", '--size', $kube_node_instances_with_disks_size] # 10GB disk
|
||||||
|
vb.customize ['storageattach', :id, '--storagectl', 'SATA Controller', '--port', d, '--device', 0, '--type', 'hdd', '--medium', "disk-#{i}-#{driverletters[d]}-#{DISK_UUID}.disk", '--nonrotational', 'on', '--mtype', 'normal']
|
||||||
|
end
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
if $expose_docker_tcp
|
if $expose_docker_tcp
|
||||||
@@ -209,7 +222,8 @@ Vagrant.configure("2") do |config|
|
|||||||
end
|
end
|
||||||
|
|
||||||
ip = "#{$subnet}.#{i+100}"
|
ip = "#{$subnet}.#{i+100}"
|
||||||
node.vm.network :private_network, ip: ip,
|
node.vm.network :private_network,
|
||||||
|
:ip => ip,
|
||||||
:libvirt__guest_ipv6 => 'yes',
|
:libvirt__guest_ipv6 => 'yes',
|
||||||
:libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
|
:libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
|
||||||
:libvirt__ipv6_prefix => "64",
|
:libvirt__ipv6_prefix => "64",
|
||||||
@@ -219,14 +233,29 @@ Vagrant.configure("2") do |config|
|
|||||||
# Disable swap for each vm
|
# Disable swap for each vm
|
||||||
node.vm.provision "shell", inline: "swapoff -a"
|
node.vm.provision "shell", inline: "swapoff -a"
|
||||||
|
|
||||||
# ubuntu1804 and ubuntu2004 have IPv6 explicitly disabled. This undoes that.
|
# ubuntu2004 and ubuntu2204 have IPv6 explicitly disabled. This undoes that.
|
||||||
if ["ubuntu1804", "ubuntu2004"].include? $os
|
if ["ubuntu2004", "ubuntu2204"].include? $os
|
||||||
node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf"
|
node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf"
|
||||||
node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf"
|
node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf"
|
||||||
end
|
end
|
||||||
|
# Hack for fedora37/38 to get the IP address of the second interface
|
||||||
|
if ["fedora37", "fedora38"].include? $os
|
||||||
|
config.vm.provision "shell", inline: <<-SHELL
|
||||||
|
nmcli conn modify 'Wired connection 2' ipv4.addresses $(cat /etc/sysconfig/network-scripts/ifcfg-eth1 | grep IPADDR | cut -d "=" -f2)
|
||||||
|
nmcli conn modify 'Wired connection 2' ipv4.method manual
|
||||||
|
service NetworkManager restart
|
||||||
|
SHELL
|
||||||
|
end
|
||||||
|
|
||||||
|
# Rockylinux boxes needs UEFI
|
||||||
|
if ["rockylinux8", "rockylinux9"].include? $os
|
||||||
|
config.vm.provider "libvirt" do |domain|
|
||||||
|
domain.loader = "/usr/share/OVMF/x64/OVMF_CODE.fd"
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
# Disable firewalld on oraclelinux/redhat vms
|
# Disable firewalld on oraclelinux/redhat vms
|
||||||
if ["oraclelinux","oraclelinux8","rhel7","rhel8"].include? $os
|
if ["oraclelinux","oraclelinux8","rhel7","rhel8","rockylinux8"].include? $os
|
||||||
node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld"
|
node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld"
|
||||||
end
|
end
|
||||||
|
|
||||||
@@ -248,7 +277,8 @@ Vagrant.configure("2") do |config|
|
|||||||
"kubectl_localhost": "True",
|
"kubectl_localhost": "True",
|
||||||
"local_path_provisioner_enabled": "#{$local_path_provisioner_enabled}",
|
"local_path_provisioner_enabled": "#{$local_path_provisioner_enabled}",
|
||||||
"local_path_provisioner_claim_root": "#{$local_path_provisioner_claim_root}",
|
"local_path_provisioner_claim_root": "#{$local_path_provisioner_claim_root}",
|
||||||
"ansible_ssh_user": SUPPORTED_OS[$os][:user]
|
"ansible_ssh_user": SUPPORTED_OS[$os][:user],
|
||||||
|
"unsafe_show_logs": "True"
|
||||||
}
|
}
|
||||||
|
|
||||||
# Only execute the Ansible provisioner once, when all the machines are up and ready.
|
# Only execute the Ansible provisioner once, when all the machines are up and ready.
|
||||||
@@ -256,6 +286,7 @@ Vagrant.configure("2") do |config|
|
|||||||
if i == $num_instances
|
if i == $num_instances
|
||||||
node.vm.provision "ansible" do |ansible|
|
node.vm.provision "ansible" do |ansible|
|
||||||
ansible.playbook = $playbook
|
ansible.playbook = $playbook
|
||||||
|
ansible.compatibility_mode = "2.0"
|
||||||
ansible.verbose = $ansible_verbosity
|
ansible.verbose = $ansible_verbosity
|
||||||
$ansible_inventory_path = File.join( $inventory, "hosts.ini")
|
$ansible_inventory_path = File.join( $inventory, "hosts.ini")
|
||||||
if File.exist?($ansible_inventory_path)
|
if File.exist?($ansible_inventory_path)
|
||||||
@@ -266,6 +297,7 @@ Vagrant.configure("2") do |config|
|
|||||||
ansible.host_key_checking = false
|
ansible.host_key_checking = false
|
||||||
ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache", "-e ansible_become_pass=vagrant"]
|
ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache", "-e ansible_become_pass=vagrant"]
|
||||||
ansible.host_vars = host_vars
|
ansible.host_vars = host_vars
|
||||||
|
ansible.extra_vars = $extra_vars
|
||||||
if $ansible_tags != ""
|
if $ansible_tags != ""
|
||||||
ansible.tags = [$ansible_tags]
|
ansible.tags = [$ansible_tags]
|
||||||
end
|
end
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
[ssh_connection]
|
[ssh_connection]
|
||||||
pipelining=True
|
pipelining=True
|
||||||
ansible_ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
|
ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
|
||||||
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
|
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
|
||||||
[defaults]
|
[defaults]
|
||||||
# https://github.com/ansible/ansible/issues/56930 (to ignore group names with - and .)
|
# https://github.com/ansible/ansible/issues/56930 (to ignore group names with - and .)
|
||||||
|
|||||||
@@ -39,7 +39,7 @@ class SearchEC2Tags(object):
|
|||||||
hosts[group] = []
|
hosts[group] = []
|
||||||
tag_key = "kubespray-role"
|
tag_key = "kubespray-role"
|
||||||
tag_value = ["*"+group+"*"]
|
tag_value = ["*"+group+"*"]
|
||||||
region = os.environ['REGION']
|
region = os.environ['AWS_REGION']
|
||||||
|
|
||||||
ec2 = boto3.resource('ec2', region)
|
ec2 = boto3.resource('ec2', region)
|
||||||
filters = [{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}]
|
filters = [{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}]
|
||||||
@@ -67,6 +67,11 @@ class SearchEC2Tags(object):
|
|||||||
if node_labels_tag:
|
if node_labels_tag:
|
||||||
ansible_host['node_labels'] = dict([ label.strip().split('=') for label in node_labels_tag[0]['Value'].split(',') ])
|
ansible_host['node_labels'] = dict([ label.strip().split('=') for label in node_labels_tag[0]['Value'].split(',') ])
|
||||||
|
|
||||||
|
##Set when instance actually has node_taints
|
||||||
|
node_taints_tag = list(filter(lambda t: t['Key'] == 'kubespray-node-taints', instance.tags))
|
||||||
|
if node_taints_tag:
|
||||||
|
ansible_host['node_taints'] = list([ taint.strip() for taint in node_taints_tag[0]['Value'].split(',') ])
|
||||||
|
|
||||||
hosts[group].append(dns_name)
|
hosts[group].append(dns_name)
|
||||||
hosts['_meta']['hostvars'][dns_name] = ansible_host
|
hosts['_meta']['hostvars'][dns_name] = ansible_host
|
||||||
|
|
||||||
|
|||||||
@@ -49,7 +49,7 @@ If you need to delete all resources from a resource group, simply call:
|
|||||||
|
|
||||||
## Installing Ansible and the dependencies
|
## Installing Ansible and the dependencies
|
||||||
|
|
||||||
Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible)
|
Install Ansible according to [Ansible installation guide](/docs/ansible/ansible.md#installing-ansible)
|
||||||
|
|
||||||
## Generating an inventory for kubespray
|
## Generating an inventory for kubespray
|
||||||
|
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
---
|
---
|
||||||
- hosts: localhost
|
- name: Generate Azure inventory
|
||||||
|
hosts: localhost
|
||||||
gather_facts: False
|
gather_facts: False
|
||||||
roles:
|
roles:
|
||||||
- generate-inventory
|
- generate-inventory
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
---
|
---
|
||||||
- hosts: localhost
|
- name: Generate Azure inventory
|
||||||
|
hosts: localhost
|
||||||
gather_facts: False
|
gather_facts: False
|
||||||
roles:
|
roles:
|
||||||
- generate-inventory_2
|
- generate-inventory_2
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
---
|
---
|
||||||
- hosts: localhost
|
- name: Generate Azure templates
|
||||||
|
hosts: localhost
|
||||||
gather_facts: False
|
gather_facts: False
|
||||||
roles:
|
roles:
|
||||||
- generate-templates
|
- generate-templates
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
---
|
---
|
||||||
|
|
||||||
- name: Query Azure VMs # noqa 301
|
- name: Query Azure VMs
|
||||||
command: azure vm list-ip-address --json {{ azure_resource_group }}
|
command: azure vm list-ip-address --json {{ azure_resource_group }}
|
||||||
register: vm_list_cmd
|
register: vm_list_cmd
|
||||||
|
|
||||||
|
|||||||
@@ -1,14 +1,14 @@
|
|||||||
---
|
---
|
||||||
|
|
||||||
- name: Query Azure VMs IPs # noqa 301
|
- name: Query Azure VMs IPs
|
||||||
command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
|
command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
|
||||||
register: vm_ip_list_cmd
|
register: vm_ip_list_cmd
|
||||||
|
|
||||||
- name: Query Azure VMs Roles # noqa 301
|
- name: Query Azure VMs Roles
|
||||||
command: az vm list -o json --resource-group {{ azure_resource_group }}
|
command: az vm list -o json --resource-group {{ azure_resource_group }}
|
||||||
register: vm_list_cmd
|
register: vm_list_cmd
|
||||||
|
|
||||||
- name: Query Azure Load Balancer Public IP # noqa 301
|
- name: Query Azure Load Balancer Public IP
|
||||||
command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
|
command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
|
||||||
register: lb_pubip_cmd
|
register: lb_pubip_cmd
|
||||||
|
|
||||||
|
|||||||
@@ -24,14 +24,14 @@ bastionIPAddressName: bastion-pubip
|
|||||||
|
|
||||||
disablePasswordAuthentication: true
|
disablePasswordAuthentication: true
|
||||||
|
|
||||||
sshKeyPath: "/home/{{admin_username}}/.ssh/authorized_keys"
|
sshKeyPath: "/home/{{ admin_username }}/.ssh/authorized_keys"
|
||||||
|
|
||||||
imageReference:
|
imageReference:
|
||||||
publisher: "OpenLogic"
|
publisher: "OpenLogic"
|
||||||
offer: "CentOS"
|
offer: "CentOS"
|
||||||
sku: "7.5"
|
sku: "7.5"
|
||||||
version: "latest"
|
version: "latest"
|
||||||
imageReferenceJson: "{{imageReference|to_json}}"
|
imageReferenceJson: "{{ imageReference | to_json }}"
|
||||||
|
|
||||||
storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
|
storageAccountName: "sa{{ nameSuffix | replace('-', '') }}"
|
||||||
storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}"
|
storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}"
|
||||||
|
|||||||
@@ -1,9 +1,11 @@
|
|||||||
---
|
---
|
||||||
- hosts: localhost
|
- name: Create nodes as docker containers
|
||||||
|
hosts: localhost
|
||||||
gather_facts: False
|
gather_facts: False
|
||||||
roles:
|
roles:
|
||||||
- { role: dind-host }
|
- { role: dind-host }
|
||||||
|
|
||||||
- hosts: containers
|
- name: Customize each node containers
|
||||||
|
hosts: containers
|
||||||
roles:
|
roles:
|
||||||
- { role: dind-cluster }
|
- { role: dind-cluster }
|
||||||
|
|||||||
@@ -1,9 +1,9 @@
|
|||||||
---
|
---
|
||||||
- name: set_fact distro_setup
|
- name: Set_fact distro_setup
|
||||||
set_fact:
|
set_fact:
|
||||||
distro_setup: "{{ distro_settings[node_distro] }}"
|
distro_setup: "{{ distro_settings[node_distro] }}"
|
||||||
|
|
||||||
- name: set_fact other distro settings
|
- name: Set_fact other distro settings
|
||||||
set_fact:
|
set_fact:
|
||||||
distro_user: "{{ distro_setup['user'] }}"
|
distro_user: "{{ distro_setup['user'] }}"
|
||||||
distro_ssh_service: "{{ distro_setup['ssh_service'] }}"
|
distro_ssh_service: "{{ distro_setup['ssh_service'] }}"
|
||||||
@@ -43,7 +43,7 @@
|
|||||||
package:
|
package:
|
||||||
name: "{{ item }}"
|
name: "{{ item }}"
|
||||||
state: present
|
state: present
|
||||||
with_items: "{{ distro_extra_packages + [ 'rsyslog', 'openssh-server' ] }}"
|
with_items: "{{ distro_extra_packages + ['rsyslog', 'openssh-server'] }}"
|
||||||
|
|
||||||
- name: Start needed services
|
- name: Start needed services
|
||||||
service:
|
service:
|
||||||
@@ -66,8 +66,8 @@
|
|||||||
dest: "/etc/sudoers.d/{{ distro_user }}"
|
dest: "/etc/sudoers.d/{{ distro_user }}"
|
||||||
mode: 0640
|
mode: 0640
|
||||||
|
|
||||||
- name: Add my pubkey to "{{ distro_user }}" user authorized keys
|
- name: "Add my pubkey to {{ distro_user }} user authorized keys"
|
||||||
authorized_key:
|
ansible.posix.authorized_key:
|
||||||
user: "{{ distro_user }}"
|
user: "{{ distro_user }}"
|
||||||
state: present
|
state: present
|
||||||
key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}"
|
key: "{{ lookup('file', lookup('env', 'HOME') + '/.ssh/id_rsa.pub') }}"
|
||||||
|
|||||||
@@ -1,9 +1,9 @@
|
|||||||
---
|
---
|
||||||
- name: set_fact distro_setup
|
- name: Set_fact distro_setup
|
||||||
set_fact:
|
set_fact:
|
||||||
distro_setup: "{{ distro_settings[node_distro] }}"
|
distro_setup: "{{ distro_settings[node_distro] }}"
|
||||||
|
|
||||||
- name: set_fact other distro settings
|
- name: Set_fact other distro settings
|
||||||
set_fact:
|
set_fact:
|
||||||
distro_image: "{{ distro_setup['image'] }}"
|
distro_image: "{{ distro_setup['image'] }}"
|
||||||
distro_init: "{{ distro_setup['init'] }}"
|
distro_init: "{{ distro_setup['init'] }}"
|
||||||
@@ -13,7 +13,7 @@
|
|||||||
distro_agetty_svc: "{{ distro_setup['agetty_svc'] }}"
|
distro_agetty_svc: "{{ distro_setup['agetty_svc'] }}"
|
||||||
|
|
||||||
- name: Create dind node containers from "containers" inventory section
|
- name: Create dind node containers from "containers" inventory section
|
||||||
docker_container:
|
community.docker.docker_container:
|
||||||
image: "{{ distro_image }}"
|
image: "{{ distro_image }}"
|
||||||
name: "{{ item }}"
|
name: "{{ item }}"
|
||||||
state: started
|
state: started
|
||||||
@@ -53,7 +53,7 @@
|
|||||||
{{ distro_raw_setup_done }} && echo SKIPPED && exit 0
|
{{ distro_raw_setup_done }} && echo SKIPPED && exit 0
|
||||||
until [ "$(readlink /proc/1/exe)" = "{{ distro_pid1_exe }}" ] ; do sleep 1; done
|
until [ "$(readlink /proc/1/exe)" = "{{ distro_pid1_exe }}" ] ; do sleep 1; done
|
||||||
{{ distro_raw_setup }}
|
{{ distro_raw_setup }}
|
||||||
delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
|
delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
|
||||||
with_items: "{{ containers.results }}"
|
with_items: "{{ containers.results }}"
|
||||||
register: result
|
register: result
|
||||||
changed_when: result.stdout.find("SKIPPED") < 0
|
changed_when: result.stdout.find("SKIPPED") < 0
|
||||||
@@ -63,26 +63,25 @@
|
|||||||
until test -S /var/run/dbus/system_bus_socket; do sleep 1; done
|
until test -S /var/run/dbus/system_bus_socket; do sleep 1; done
|
||||||
systemctl disable {{ distro_agetty_svc }}
|
systemctl disable {{ distro_agetty_svc }}
|
||||||
systemctl stop {{ distro_agetty_svc }}
|
systemctl stop {{ distro_agetty_svc }}
|
||||||
delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
|
delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
|
||||||
with_items: "{{ containers.results }}"
|
with_items: "{{ containers.results }}"
|
||||||
changed_when: false
|
changed_when: false
|
||||||
|
|
||||||
# Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
|
# Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
|
||||||
# handle manually
|
# handle manually
|
||||||
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave) # noqa 301
|
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)
|
||||||
raw: |
|
raw: |
|
||||||
echo {{ item | hash('sha1') }} > /etc/machine-id.new
|
echo {{ item | hash('sha1') }} > /etc/machine-id.new
|
||||||
mv -b /etc/machine-id.new /etc/machine-id
|
mv -b /etc/machine-id.new /etc/machine-id
|
||||||
cmp /etc/machine-id /etc/machine-id~ || true
|
cmp /etc/machine-id /etc/machine-id~ || true
|
||||||
systemctl daemon-reload
|
systemctl daemon-reload
|
||||||
delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
|
delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
|
||||||
with_items: "{{ containers.results }}"
|
with_items: "{{ containers.results }}"
|
||||||
|
|
||||||
- name: Early hack image install to adapt for DIND
|
- name: Early hack image install to adapt for DIND
|
||||||
# noqa 302 - this task uses the raw module intentionally
|
|
||||||
raw: |
|
raw: |
|
||||||
rm -fv /usr/bin/udevadm /usr/sbin/udevadm
|
rm -fv /usr/bin/udevadm /usr/sbin/udevadm
|
||||||
delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
|
delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
|
||||||
with_items: "{{ containers.results }}"
|
with_items: "{{ containers.results }}"
|
||||||
register: result
|
register: result
|
||||||
changed_when: result.stdout.find("removed") >= 0
|
changed_when: result.stdout.find("removed") >= 0
|
||||||
|
|||||||
@@ -1,21 +1,27 @@
|
|||||||
[tox]
|
[tox]
|
||||||
minversion = 1.6
|
minversion = 1.6
|
||||||
skipsdist = True
|
skipsdist = True
|
||||||
envlist = pep8, py33
|
envlist = pep8
|
||||||
|
|
||||||
[testenv]
|
[testenv]
|
||||||
whitelist_externals = py.test
|
allowlist_externals = py.test
|
||||||
usedevelop = True
|
usedevelop = True
|
||||||
deps =
|
deps =
|
||||||
-r{toxinidir}/requirements.txt
|
-r{toxinidir}/requirements.txt
|
||||||
-r{toxinidir}/test-requirements.txt
|
-r{toxinidir}/test-requirements.txt
|
||||||
setenv = VIRTUAL_ENV={envdir}
|
setenv = VIRTUAL_ENV={envdir}
|
||||||
passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
|
passenv =
|
||||||
|
http_proxy
|
||||||
|
HTTP_PROXY
|
||||||
|
https_proxy
|
||||||
|
HTTPS_PROXY
|
||||||
|
no_proxy
|
||||||
|
NO_PROXY
|
||||||
commands = pytest -vv #{posargs:./tests}
|
commands = pytest -vv #{posargs:./tests}
|
||||||
|
|
||||||
[testenv:pep8]
|
[testenv:pep8]
|
||||||
usedevelop = False
|
usedevelop = False
|
||||||
whitelist_externals = bash
|
allowlist_externals = bash
|
||||||
commands =
|
commands =
|
||||||
bash -c "find {toxinidir}/* -type f -name '*.py' -print0 | xargs -0 flake8"
|
bash -c "find {toxinidir}/* -type f -name '*.py' -print0 | xargs -0 flake8"
|
||||||
|
|
||||||
|
|||||||
@@ -1,8 +1,9 @@
|
|||||||
---
|
---
|
||||||
- hosts: localhost
|
- name: Prepare Hypervisor to later install kubespray VMs
|
||||||
|
hosts: localhost
|
||||||
gather_facts: False
|
gather_facts: False
|
||||||
become: yes
|
become: yes
|
||||||
vars:
|
vars:
|
||||||
- bootstrap_os: none
|
bootstrap_os: none
|
||||||
roles:
|
roles:
|
||||||
- kvm-setup
|
- { role: kvm-setup }
|
||||||
|
|||||||
@@ -22,9 +22,9 @@
|
|||||||
- ntp
|
- ntp
|
||||||
when: ansible_os_family == "Debian"
|
when: ansible_os_family == "Debian"
|
||||||
|
|
||||||
# Create deployment user if required
|
- name: Create deployment user if required
|
||||||
- include: user.yml
|
include_tasks: user.yml
|
||||||
when: k8s_deployment_user is defined
|
when: k8s_deployment_user is defined
|
||||||
|
|
||||||
# Set proper sysctl values
|
- name: Set proper sysctl values
|
||||||
- include: sysctl.yml
|
import_tasks: sysctl.yml
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
---
|
---
|
||||||
- name: Load br_netfilter module
|
- name: Load br_netfilter module
|
||||||
modprobe:
|
community.general.modprobe:
|
||||||
name: br_netfilter
|
name: br_netfilter
|
||||||
state: present
|
state: present
|
||||||
register: br_netfilter
|
register: br_netfilter
|
||||||
@@ -25,7 +25,7 @@
|
|||||||
|
|
||||||
|
|
||||||
- name: Enable net.ipv4.ip_forward in sysctl
|
- name: Enable net.ipv4.ip_forward in sysctl
|
||||||
sysctl:
|
ansible.posix.sysctl:
|
||||||
name: net.ipv4.ip_forward
|
name: net.ipv4.ip_forward
|
||||||
value: 1
|
value: 1
|
||||||
sysctl_file: "{{ sysctl_file_path }}"
|
sysctl_file: "{{ sysctl_file_path }}"
|
||||||
@@ -33,7 +33,7 @@
|
|||||||
reload: yes
|
reload: yes
|
||||||
|
|
||||||
- name: Set bridge-nf-call-{arptables,iptables} to 0
|
- name: Set bridge-nf-call-{arptables,iptables} to 0
|
||||||
sysctl:
|
ansible.posix.sysctl:
|
||||||
name: "{{ item }}"
|
name: "{{ item }}"
|
||||||
state: present
|
state: present
|
||||||
value: 0
|
value: 0
|
||||||
|
|||||||
@@ -1,8 +1,9 @@
|
|||||||
---
|
---
|
||||||
- name: Check ansible version
|
- name: Check ansible version
|
||||||
import_playbook: ansible_version.yml
|
import_playbook: kubernetes_sigs.kubespray.ansible_version
|
||||||
|
|
||||||
- hosts: localhost
|
- name: Install mitogen
|
||||||
|
hosts: localhost
|
||||||
strategy: linear
|
strategy: linear
|
||||||
vars:
|
vars:
|
||||||
mitogen_version: 0.3.2
|
mitogen_version: 0.3.2
|
||||||
@@ -19,24 +20,25 @@
|
|||||||
- "{{ playbook_dir }}/plugins/mitogen"
|
- "{{ playbook_dir }}/plugins/mitogen"
|
||||||
- "{{ playbook_dir }}/dist"
|
- "{{ playbook_dir }}/dist"
|
||||||
|
|
||||||
- name: download mitogen release
|
- name: Download mitogen release
|
||||||
get_url:
|
get_url:
|
||||||
url: "{{ mitogen_url }}"
|
url: "{{ mitogen_url }}"
|
||||||
dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
|
dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
|
||||||
validate_certs: true
|
validate_certs: true
|
||||||
|
mode: 0644
|
||||||
|
|
||||||
- name: extract archive
|
- name: Extract archive
|
||||||
unarchive:
|
unarchive:
|
||||||
src: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
|
src: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
|
||||||
dest: "{{ playbook_dir }}/dist/"
|
dest: "{{ playbook_dir }}/dist/"
|
||||||
|
|
||||||
- name: copy plugin
|
- name: Copy plugin
|
||||||
synchronize:
|
ansible.posix.synchronize:
|
||||||
src: "{{ playbook_dir }}/dist/mitogen-{{ mitogen_version }}/"
|
src: "{{ playbook_dir }}/dist/mitogen-{{ mitogen_version }}/"
|
||||||
dest: "{{ playbook_dir }}/plugins/mitogen"
|
dest: "{{ playbook_dir }}/plugins/mitogen"
|
||||||
|
|
||||||
- name: add strategy to ansible.cfg
|
- name: Add strategy to ansible.cfg
|
||||||
ini_file:
|
community.general.ini_file:
|
||||||
path: ansible.cfg
|
path: ansible.cfg
|
||||||
mode: 0644
|
mode: 0644
|
||||||
section: "{{ item.section | d('defaults') }}"
|
section: "{{ item.section | d('defaults') }}"
|
||||||
|
|||||||
@@ -1,24 +1,29 @@
|
|||||||
---
|
---
|
||||||
- hosts: gfs-cluster
|
- name: Bootstrap hosts
|
||||||
|
hosts: gfs-cluster
|
||||||
gather_facts: false
|
gather_facts: false
|
||||||
vars:
|
vars:
|
||||||
ansible_ssh_pipelining: false
|
ansible_ssh_pipelining: false
|
||||||
roles:
|
roles:
|
||||||
- { role: bootstrap-os, tags: bootstrap-os}
|
- { role: bootstrap-os, tags: bootstrap-os}
|
||||||
|
|
||||||
- hosts: all
|
- name: Gather facts
|
||||||
|
hosts: all
|
||||||
gather_facts: true
|
gather_facts: true
|
||||||
|
|
||||||
- hosts: gfs-cluster
|
- name: Install glusterfs server
|
||||||
|
hosts: gfs-cluster
|
||||||
vars:
|
vars:
|
||||||
ansible_ssh_pipelining: true
|
ansible_ssh_pipelining: true
|
||||||
roles:
|
roles:
|
||||||
- { role: glusterfs/server }
|
- { role: glusterfs/server }
|
||||||
|
|
||||||
- hosts: k8s_cluster
|
- name: Install glusterfs servers
|
||||||
|
hosts: k8s_cluster
|
||||||
roles:
|
roles:
|
||||||
- { role: glusterfs/client }
|
- { role: glusterfs/client }
|
||||||
|
|
||||||
- hosts: kube_control_plane[0]
|
- name: Configure Kubernetes to use glusterfs
|
||||||
|
hosts: kube_control_plane[0]
|
||||||
roles:
|
roles:
|
||||||
- { role: kubernetes-pv }
|
- { role: kubernetes-pv }
|
||||||
|
|||||||
@@ -6,12 +6,12 @@ galaxy_info:
|
|||||||
description: GlusterFS installation for Linux.
|
description: GlusterFS installation for Linux.
|
||||||
company: "Midwestern Mac, LLC"
|
company: "Midwestern Mac, LLC"
|
||||||
license: "license (BSD, MIT)"
|
license: "license (BSD, MIT)"
|
||||||
min_ansible_version: 2.0
|
min_ansible_version: "2.0"
|
||||||
platforms:
|
platforms:
|
||||||
- name: EL
|
- name: EL
|
||||||
versions:
|
versions:
|
||||||
- 6
|
- "6"
|
||||||
- 7
|
- "7"
|
||||||
- name: Ubuntu
|
- name: Ubuntu
|
||||||
versions:
|
versions:
|
||||||
- precise
|
- precise
|
||||||
|
|||||||
@@ -3,14 +3,19 @@
|
|||||||
# hyperkube and needs to be installed as part of the system.
|
# hyperkube and needs to be installed as part of the system.
|
||||||
|
|
||||||
# Setup/install tasks.
|
# Setup/install tasks.
|
||||||
- include: setup-RedHat.yml
|
- name: Setup RedHat distros for glusterfs
|
||||||
|
include_tasks: setup-RedHat.yml
|
||||||
when: ansible_os_family == 'RedHat' and groups['gfs-cluster'] is defined
|
when: ansible_os_family == 'RedHat' and groups['gfs-cluster'] is defined
|
||||||
|
|
||||||
- include: setup-Debian.yml
|
- name: Setup Debian distros for glusterfs
|
||||||
|
include_tasks: setup-Debian.yml
|
||||||
when: ansible_os_family == 'Debian' and groups['gfs-cluster'] is defined
|
when: ansible_os_family == 'Debian' and groups['gfs-cluster'] is defined
|
||||||
|
|
||||||
- name: Ensure Gluster mount directories exist.
|
- name: Ensure Gluster mount directories exist.
|
||||||
file: "path={{ item }} state=directory mode=0775"
|
file:
|
||||||
|
path: "{{ item }}"
|
||||||
|
state: directory
|
||||||
|
mode: 0775
|
||||||
with_items:
|
with_items:
|
||||||
- "{{ gluster_mount_dir }}"
|
- "{{ gluster_mount_dir }}"
|
||||||
when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined
|
when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined
|
||||||
|
|||||||
@@ -7,7 +7,7 @@
|
|||||||
register: glusterfs_ppa_added
|
register: glusterfs_ppa_added
|
||||||
when: glusterfs_ppa_use
|
when: glusterfs_ppa_use
|
||||||
|
|
||||||
- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa 503
|
- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa no-handler
|
||||||
apt:
|
apt:
|
||||||
name: "{{ item }}"
|
name: "{{ item }}"
|
||||||
state: absent
|
state: absent
|
||||||
|
|||||||
@@ -1,10 +1,14 @@
|
|||||||
---
|
---
|
||||||
- name: Install Prerequisites
|
- name: Install Prerequisites
|
||||||
package: name={{ item }} state=present
|
package:
|
||||||
|
name: "{{ item }}"
|
||||||
|
state: present
|
||||||
with_items:
|
with_items:
|
||||||
- "centos-release-gluster{{ glusterfs_default_release }}"
|
- "centos-release-gluster{{ glusterfs_default_release }}"
|
||||||
|
|
||||||
- name: Install Packages
|
- name: Install Packages
|
||||||
package: name={{ item }} state=present
|
package:
|
||||||
|
name: "{{ item }}"
|
||||||
|
state: present
|
||||||
with_items:
|
with_items:
|
||||||
- glusterfs-client
|
- glusterfs-client
|
||||||
|
|||||||
@@ -6,12 +6,12 @@ galaxy_info:
|
|||||||
description: GlusterFS installation for Linux.
|
description: GlusterFS installation for Linux.
|
||||||
company: "Midwestern Mac, LLC"
|
company: "Midwestern Mac, LLC"
|
||||||
license: "license (BSD, MIT)"
|
license: "license (BSD, MIT)"
|
||||||
min_ansible_version: 2.0
|
min_ansible_version: "2.0"
|
||||||
platforms:
|
platforms:
|
||||||
- name: EL
|
- name: EL
|
||||||
versions:
|
versions:
|
||||||
- 6
|
- "6"
|
||||||
- 7
|
- "7"
|
||||||
- name: Ubuntu
|
- name: Ubuntu
|
||||||
versions:
|
versions:
|
||||||
- precise
|
- precise
|
||||||
|
|||||||
@@ -4,78 +4,97 @@
|
|||||||
include_vars: "{{ ansible_os_family }}.yml"
|
include_vars: "{{ ansible_os_family }}.yml"
|
||||||
|
|
||||||
# Install xfs package
|
# Install xfs package
|
||||||
- name: install xfs Debian
|
- name: Install xfs Debian
|
||||||
apt: name=xfsprogs state=present
|
apt:
|
||||||
|
name: xfsprogs
|
||||||
|
state: present
|
||||||
when: ansible_os_family == "Debian"
|
when: ansible_os_family == "Debian"
|
||||||
|
|
||||||
- name: install xfs RedHat
|
- name: Install xfs RedHat
|
||||||
package: name=xfsprogs state=present
|
package:
|
||||||
|
name: xfsprogs
|
||||||
|
state: present
|
||||||
when: ansible_os_family == "RedHat"
|
when: ansible_os_family == "RedHat"
|
||||||
|
|
||||||
# Format external volumes in xfs
|
# Format external volumes in xfs
|
||||||
- name: Format volumes in xfs
|
- name: Format volumes in xfs
|
||||||
filesystem: "fstype=xfs dev={{ disk_volume_device_1 }}"
|
community.general.filesystem:
|
||||||
|
fstype: xfs
|
||||||
|
dev: "{{ disk_volume_device_1 }}"
|
||||||
|
|
||||||
# Mount external volumes
|
# Mount external volumes
|
||||||
- name: mounting new xfs filesystem
|
- name: Mounting new xfs filesystem
|
||||||
mount: "name={{ gluster_volume_node_mount_dir }} src={{ disk_volume_device_1 }} fstype=xfs state=mounted"
|
ansible.posix.mount:
|
||||||
|
name: "{{ gluster_volume_node_mount_dir }}"
|
||||||
|
src: "{{ disk_volume_device_1 }}"
|
||||||
|
fstype: xfs
|
||||||
|
state: mounted
|
||||||
|
|
||||||
# Setup/install tasks.
|
# Setup/install tasks.
|
||||||
- include: setup-RedHat.yml
|
- name: Setup RedHat distros for glusterfs
|
||||||
|
include_tasks: setup-RedHat.yml
|
||||||
when: ansible_os_family == 'RedHat'
|
when: ansible_os_family == 'RedHat'
|
||||||
|
|
||||||
- include: setup-Debian.yml
|
- name: Setup Debian distros for glusterfs
|
||||||
|
include_tasks: setup-Debian.yml
|
||||||
when: ansible_os_family == 'Debian'
|
when: ansible_os_family == 'Debian'
|
||||||
|
|
||||||
- name: Ensure GlusterFS is started and enabled at boot.
|
- name: Ensure GlusterFS is started and enabled at boot.
|
||||||
service: "name={{ glusterfs_daemon }} state=started enabled=yes"
|
service:
|
||||||
|
name: "{{ glusterfs_daemon }}"
|
||||||
|
state: started
|
||||||
|
enabled: yes
|
||||||
|
|
||||||
- name: Ensure Gluster brick and mount directories exist.
|
- name: Ensure Gluster brick and mount directories exist.
|
||||||
file: "path={{ item }} state=directory mode=0775"
|
file:
|
||||||
|
path: "{{ item }}"
|
||||||
|
state: directory
|
||||||
|
mode: 0775
|
||||||
with_items:
|
with_items:
|
||||||
- "{{ gluster_brick_dir }}"
|
- "{{ gluster_brick_dir }}"
|
||||||
- "{{ gluster_mount_dir }}"
|
- "{{ gluster_mount_dir }}"
|
||||||
|
|
||||||
- name: Configure Gluster volume with replicas
|
- name: Configure Gluster volume with replicas
|
||||||
gluster_volume:
|
gluster.gluster.gluster_volume:
|
||||||
state: present
|
state: present
|
||||||
name: "{{ gluster_brick_name }}"
|
name: "{{ gluster_brick_name }}"
|
||||||
brick: "{{ gluster_brick_dir }}"
|
brick: "{{ gluster_brick_dir }}"
|
||||||
replicas: "{{ groups['gfs-cluster'] | length }}"
|
replicas: "{{ groups['gfs-cluster'] | length }}"
|
||||||
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
|
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
|
||||||
host: "{{ inventory_hostname }}"
|
host: "{{ inventory_hostname }}"
|
||||||
force: yes
|
force: yes
|
||||||
run_once: true
|
run_once: true
|
||||||
when: groups['gfs-cluster']|length > 1
|
when: groups['gfs-cluster'] | length > 1
|
||||||
|
|
||||||
- name: Configure Gluster volume without replicas
|
- name: Configure Gluster volume without replicas
|
||||||
gluster_volume:
|
gluster.gluster.gluster_volume:
|
||||||
state: present
|
state: present
|
||||||
name: "{{ gluster_brick_name }}"
|
name: "{{ gluster_brick_name }}"
|
||||||
brick: "{{ gluster_brick_dir }}"
|
brick: "{{ gluster_brick_dir }}"
|
||||||
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
|
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
|
||||||
host: "{{ inventory_hostname }}"
|
host: "{{ inventory_hostname }}"
|
||||||
force: yes
|
force: yes
|
||||||
run_once: true
|
run_once: true
|
||||||
when: groups['gfs-cluster']|length <= 1
|
when: groups['gfs-cluster'] | length <= 1
|
||||||
|
|
||||||
- name: Mount glusterfs to retrieve disk size
|
- name: Mount glusterfs to retrieve disk size
|
||||||
mount:
|
ansible.posix.mount:
|
||||||
name: "{{ gluster_mount_dir }}"
|
name: "{{ gluster_mount_dir }}"
|
||||||
src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
|
src: "{{ ip | default(ansible_default_ipv4['address']) }}:/gluster"
|
||||||
fstype: glusterfs
|
fstype: glusterfs
|
||||||
opts: "defaults,_netdev"
|
opts: "defaults,_netdev"
|
||||||
state: mounted
|
state: mounted
|
||||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||||
|
|
||||||
- name: Get Gluster disk size
|
- name: Get Gluster disk size
|
||||||
setup: filter=ansible_mounts
|
setup:
|
||||||
|
filter: ansible_mounts
|
||||||
register: mounts_data
|
register: mounts_data
|
||||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||||
|
|
||||||
- name: Set Gluster disk size to variable
|
- name: Set Gluster disk size to variable
|
||||||
set_fact:
|
set_fact:
|
||||||
gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024*1024*1024)) | int }}"
|
gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024 * 1024 * 1024)) | int }}"
|
||||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||||
|
|
||||||
- name: Create file on GlusterFS
|
- name: Create file on GlusterFS
|
||||||
@@ -86,9 +105,9 @@
|
|||||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||||
|
|
||||||
- name: Unmount glusterfs
|
- name: Unmount glusterfs
|
||||||
mount:
|
ansible.posix.mount:
|
||||||
name: "{{ gluster_mount_dir }}"
|
name: "{{ gluster_mount_dir }}"
|
||||||
fstype: glusterfs
|
fstype: glusterfs
|
||||||
src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
|
src: "{{ ip | default(ansible_default_ipv4['address']) }}:/gluster"
|
||||||
state: unmounted
|
state: unmounted
|
||||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||||
|
|||||||
@@ -7,7 +7,7 @@
|
|||||||
register: glusterfs_ppa_added
|
register: glusterfs_ppa_added
|
||||||
when: glusterfs_ppa_use
|
when: glusterfs_ppa_use
|
||||||
|
|
||||||
- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa 503
|
- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa no-handler
|
||||||
apt:
|
apt:
|
||||||
name: "{{ item }}"
|
name: "{{ item }}"
|
||||||
state: absent
|
state: absent
|
||||||
|
|||||||
@@ -1,11 +1,15 @@
|
|||||||
---
|
---
|
||||||
- name: Install Prerequisites
|
- name: Install Prerequisites
|
||||||
package: name={{ item }} state=present
|
package:
|
||||||
|
name: "{{ item }}"
|
||||||
|
state: present
|
||||||
with_items:
|
with_items:
|
||||||
- "centos-release-gluster{{ glusterfs_default_release }}"
|
- "centos-release-gluster{{ glusterfs_default_release }}"
|
||||||
|
|
||||||
- name: Install Packages
|
- name: Install Packages
|
||||||
package: name={{ item }} state=present
|
package:
|
||||||
|
name: "{{ item }}"
|
||||||
|
state: present
|
||||||
with_items:
|
with_items:
|
||||||
- glusterfs-server
|
- glusterfs-server
|
||||||
- glusterfs-client
|
- glusterfs-client
|
||||||
|
|||||||
@@ -18,6 +18,6 @@
|
|||||||
kubectl: "{{ bin_dir }}/kubectl"
|
kubectl: "{{ bin_dir }}/kubectl"
|
||||||
resource: "{{ item.item.type }}"
|
resource: "{{ item.item.type }}"
|
||||||
filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
|
filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
|
||||||
state: "{{ item.changed | ternary('latest','present') }}"
|
state: "{{ item.changed | ternary('latest', 'present') }}"
|
||||||
with_items: "{{ gluster_pv.results }}"
|
with_items: "{{ gluster_pv.results }}"
|
||||||
when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined
|
when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined
|
||||||
|
|||||||
@@ -1,9 +1,11 @@
|
|||||||
---
|
---
|
||||||
- hosts: kube_control_plane[0]
|
- name: Tear down heketi
|
||||||
|
hosts: kube_control_plane[0]
|
||||||
roles:
|
roles:
|
||||||
- { role: tear-down }
|
- { role: tear-down }
|
||||||
|
|
||||||
- hosts: heketi-node
|
- name: Teardown disks in heketi
|
||||||
|
hosts: heketi-node
|
||||||
become: yes
|
become: yes
|
||||||
roles:
|
roles:
|
||||||
- { role: tear-down-disks }
|
- { role: tear-down-disks }
|
||||||
|
|||||||
@@ -1,9 +1,11 @@
|
|||||||
---
|
---
|
||||||
- hosts: heketi-node
|
- name: Prepare heketi install
|
||||||
|
hosts: heketi-node
|
||||||
roles:
|
roles:
|
||||||
- { role: prepare }
|
- { role: prepare }
|
||||||
|
|
||||||
- hosts: kube_control_plane[0]
|
- name: Provision heketi
|
||||||
|
hosts: kube_control_plane[0]
|
||||||
tags:
|
tags:
|
||||||
- "provision"
|
- "provision"
|
||||||
roles:
|
roles:
|
||||||
|
|||||||
@@ -5,7 +5,7 @@
|
|||||||
- "dm_snapshot"
|
- "dm_snapshot"
|
||||||
- "dm_mirror"
|
- "dm_mirror"
|
||||||
- "dm_thin_pool"
|
- "dm_thin_pool"
|
||||||
modprobe:
|
community.general.modprobe:
|
||||||
name: "{{ item }}"
|
name: "{{ item }}"
|
||||||
state: "present"
|
state: "present"
|
||||||
|
|
||||||
|
|||||||
@@ -1,3 +1,3 @@
|
|||||||
---
|
---
|
||||||
- name: "stop port forwarding"
|
- name: "Stop port forwarding"
|
||||||
command: "killall "
|
command: "killall "
|
||||||
|
|||||||
@@ -7,9 +7,9 @@
|
|||||||
|
|
||||||
- name: "Bootstrap heketi."
|
- name: "Bootstrap heketi."
|
||||||
when:
|
when:
|
||||||
- "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Service']\"))|length == 0"
|
- "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Service']\")) | length == 0"
|
||||||
- "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Deployment']\"))|length == 0"
|
- "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Deployment']\")) | length == 0"
|
||||||
- "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod']\"))|length == 0"
|
- "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Pod']\")) | length == 0"
|
||||||
include_tasks: "bootstrap/deploy.yml"
|
include_tasks: "bootstrap/deploy.yml"
|
||||||
|
|
||||||
# Prepare heketi topology
|
# Prepare heketi topology
|
||||||
@@ -20,11 +20,11 @@
|
|||||||
|
|
||||||
- name: "Ensure heketi bootstrap pod is up."
|
- name: "Ensure heketi bootstrap pod is up."
|
||||||
assert:
|
assert:
|
||||||
that: "(initial_heketi_pod.stdout|from_json|json_query('items[*]'))|length == 1"
|
that: "(initial_heketi_pod.stdout | from_json | json_query('items[*]')) | length == 1"
|
||||||
|
|
||||||
- name: Store the initial heketi pod name
|
- name: Store the initial heketi pod name
|
||||||
set_fact:
|
set_fact:
|
||||||
initial_heketi_pod_name: "{{ initial_heketi_pod.stdout|from_json|json_query(\"items[*].metadata.name|[0]\") }}"
|
initial_heketi_pod_name: "{{ initial_heketi_pod.stdout | from_json | json_query(\"items[*].metadata.name | [0]\") }}"
|
||||||
|
|
||||||
- name: "Test heketi topology."
|
- name: "Test heketi topology."
|
||||||
changed_when: false
|
changed_when: false
|
||||||
@@ -32,7 +32,7 @@
|
|||||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
||||||
|
|
||||||
- name: "Load heketi topology."
|
- name: "Load heketi topology."
|
||||||
when: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*]\")|flatten|length == 0"
|
when: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*]\") | flatten | length == 0"
|
||||||
include_tasks: "bootstrap/topology.yml"
|
include_tasks: "bootstrap/topology.yml"
|
||||||
|
|
||||||
# Provision heketi database volume
|
# Provision heketi database volume
|
||||||
@@ -58,7 +58,7 @@
|
|||||||
service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
|
service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
|
||||||
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
|
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
|
||||||
when:
|
when:
|
||||||
- "heketi_storage_state.stdout|from_json|json_query(secret_query)|length == 0"
|
- "heketi_storage_state.stdout | from_json | json_query(secret_query) | length == 0"
|
||||||
- "heketi_storage_state.stdout|from_json|json_query(endpoints_query)|length == 0"
|
- "heketi_storage_state.stdout | from_json | json_query(endpoints_query) | length == 0"
|
||||||
- "heketi_storage_state.stdout|from_json|json_query(service_query)|length == 0"
|
- "heketi_storage_state.stdout | from_json | json_query(service_query) | length == 0"
|
||||||
- "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 0"
|
- "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 0"
|
||||||
|
|||||||
@@ -17,11 +17,11 @@
|
|||||||
register: "initial_heketi_state"
|
register: "initial_heketi_state"
|
||||||
vars:
|
vars:
|
||||||
initial_heketi_state: { stdout: "{}" }
|
initial_heketi_state: { stdout: "{}" }
|
||||||
pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]"
|
pods_query: "items[?kind=='Pod'].status.conditions | [0][?type=='Ready'].status | [0]"
|
||||||
deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
|
deployments_query: "items[?kind=='Deployment'].status.conditions | [0][?type=='Available'].status | [0]"
|
||||||
command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
|
command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
|
||||||
until:
|
until:
|
||||||
- "initial_heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
|
- "initial_heketi_state.stdout | from_json | json_query(pods_query) == 'True'"
|
||||||
- "initial_heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
|
- "initial_heketi_state.stdout | from_json | json_query(deployments_query) == 'True'"
|
||||||
retries: 60
|
retries: 60
|
||||||
delay: 5
|
delay: 5
|
||||||
|
|||||||
@@ -15,10 +15,10 @@
|
|||||||
service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
|
service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
|
||||||
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
|
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
|
||||||
when:
|
when:
|
||||||
- "heketi_storage_state.stdout|from_json|json_query(secret_query)|length == 0"
|
- "heketi_storage_state.stdout | from_json | json_query(secret_query) | length == 0"
|
||||||
- "heketi_storage_state.stdout|from_json|json_query(endpoints_query)|length == 0"
|
- "heketi_storage_state.stdout | from_json | json_query(endpoints_query) | length == 0"
|
||||||
- "heketi_storage_state.stdout|from_json|json_query(service_query)|length == 0"
|
- "heketi_storage_state.stdout | from_json | json_query(service_query) | length == 0"
|
||||||
- "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 0"
|
- "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 0"
|
||||||
register: "heketi_storage_result"
|
register: "heketi_storage_result"
|
||||||
- name: "Get state of heketi database copy job."
|
- name: "Get state of heketi database copy job."
|
||||||
command: "{{ bin_dir }}/kubectl get jobs --output=json"
|
command: "{{ bin_dir }}/kubectl get jobs --output=json"
|
||||||
@@ -28,6 +28,6 @@
|
|||||||
heketi_storage_state: { stdout: "{}" }
|
heketi_storage_state: { stdout: "{}" }
|
||||||
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job' && status.succeeded==1]"
|
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job' && status.succeeded==1]"
|
||||||
until:
|
until:
|
||||||
- "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 1"
|
- "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 1"
|
||||||
retries: 60
|
retries: 60
|
||||||
delay: 5
|
delay: 5
|
||||||
|
|||||||
@@ -5,10 +5,10 @@
|
|||||||
changed_when: false
|
changed_when: false
|
||||||
- name: "Delete bootstrap Heketi."
|
- name: "Delete bootstrap Heketi."
|
||||||
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
|
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
|
||||||
when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0"
|
when: "heketi_resources.stdout | from_json | json_query('items[*]') | length > 0"
|
||||||
- name: "Ensure there is nothing left over." # noqa 301
|
- name: "Ensure there is nothing left over."
|
||||||
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
|
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
|
||||||
register: "heketi_result"
|
register: "heketi_result"
|
||||||
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
|
until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
|
||||||
retries: 60
|
retries: 60
|
||||||
delay: 5
|
delay: 5
|
||||||
|
|||||||
@@ -14,7 +14,7 @@
|
|||||||
- name: "Copy topology configuration into container."
|
- name: "Copy topology configuration into container."
|
||||||
changed_when: false
|
changed_when: false
|
||||||
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
|
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
|
||||||
- name: "Load heketi topology." # noqa 503
|
- name: "Load heketi topology." # noqa no-handler
|
||||||
when: "render.changed"
|
when: "render.changed"
|
||||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
|
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
|
||||||
register: "load_heketi"
|
register: "load_heketi"
|
||||||
@@ -22,6 +22,6 @@
|
|||||||
changed_when: false
|
changed_when: false
|
||||||
register: "heketi_topology"
|
register: "heketi_topology"
|
||||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
||||||
until: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\")|flatten|length == groups['heketi-node']|length"
|
until: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\") | flatten | length == groups['heketi-node'] | length"
|
||||||
retries: 60
|
retries: 60
|
||||||
delay: 5
|
delay: 5
|
||||||
|
|||||||
@@ -6,19 +6,19 @@
|
|||||||
- name: "Get heketi volumes."
|
- name: "Get heketi volumes."
|
||||||
changed_when: false
|
changed_when: false
|
||||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
|
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
|
||||||
with_items: "{{ heketi_volumes.stdout|from_json|json_query(\"volumes[*]\") }}"
|
with_items: "{{ heketi_volumes.stdout | from_json | json_query(\"volumes[*]\") }}"
|
||||||
loop_control: { loop_var: "volume_id" }
|
loop_control: { loop_var: "volume_id" }
|
||||||
register: "volumes_information"
|
register: "volumes_information"
|
||||||
- name: "Test heketi database volume."
|
- name: "Test heketi database volume."
|
||||||
set_fact: { heketi_database_volume_exists: true }
|
set_fact: { heketi_database_volume_exists: true }
|
||||||
with_items: "{{ volumes_information.results }}"
|
with_items: "{{ volumes_information.results }}"
|
||||||
loop_control: { loop_var: "volume_information" }
|
loop_control: { loop_var: "volume_information" }
|
||||||
vars: { volume: "{{ volume_information.stdout|from_json }}" }
|
vars: { volume: "{{ volume_information.stdout | from_json }}" }
|
||||||
when: "volume.name == 'heketidbstorage'"
|
when: "volume.name == 'heketidbstorage'"
|
||||||
- name: "Provision database volume."
|
- name: "Provision database volume."
|
||||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
|
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
|
||||||
when: "heketi_database_volume_exists is undefined"
|
when: "heketi_database_volume_exists is undefined"
|
||||||
- name: "Copy configuration from pod." # noqa 301
|
- name: "Copy configuration from pod."
|
||||||
become: true
|
become: true
|
||||||
command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"
|
command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"
|
||||||
- name: "Get heketi volume ids."
|
- name: "Get heketi volume ids."
|
||||||
@@ -28,14 +28,14 @@
|
|||||||
- name: "Get heketi volumes."
|
- name: "Get heketi volumes."
|
||||||
changed_when: false
|
changed_when: false
|
||||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
|
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
|
||||||
with_items: "{{ heketi_volumes.stdout|from_json|json_query(\"volumes[*]\") }}"
|
with_items: "{{ heketi_volumes.stdout | from_json | json_query(\"volumes[*]\") }}"
|
||||||
loop_control: { loop_var: "volume_id" }
|
loop_control: { loop_var: "volume_id" }
|
||||||
register: "volumes_information"
|
register: "volumes_information"
|
||||||
- name: "Test heketi database volume."
|
- name: "Test heketi database volume."
|
||||||
set_fact: { heketi_database_volume_created: true }
|
set_fact: { heketi_database_volume_created: true }
|
||||||
with_items: "{{ volumes_information.results }}"
|
with_items: "{{ volumes_information.results }}"
|
||||||
loop_control: { loop_var: "volume_information" }
|
loop_control: { loop_var: "volume_information" }
|
||||||
vars: { volume: "{{ volume_information.stdout|from_json }}" }
|
vars: { volume: "{{ volume_information.stdout | from_json }}" }
|
||||||
when: "volume.name == 'heketidbstorage'"
|
when: "volume.name == 'heketidbstorage'"
|
||||||
- name: "Ensure heketi database volume exists."
|
- name: "Ensure heketi database volume exists."
|
||||||
assert: { that: "heketi_database_volume_created is defined", msg: "Heketi database volume does not exist." }
|
assert: { that: "heketi_database_volume_created is defined", msg: "Heketi database volume does not exist." }
|
||||||
|
|||||||
@@ -23,8 +23,8 @@
|
|||||||
changed_when: false
|
changed_when: false
|
||||||
vars:
|
vars:
|
||||||
daemonset_state: { stdout: "{}" }
|
daemonset_state: { stdout: "{}" }
|
||||||
ready: "{{ daemonset_state.stdout|from_json|json_query(\"status.numberReady\") }}"
|
ready: "{{ daemonset_state.stdout | from_json | json_query(\"status.numberReady\") }}"
|
||||||
desired: "{{ daemonset_state.stdout|from_json|json_query(\"status.desiredNumberScheduled\") }}"
|
desired: "{{ daemonset_state.stdout | from_json | json_query(\"status.desiredNumberScheduled\") }}"
|
||||||
until: "ready | int >= 3"
|
until: "ready | int >= 3"
|
||||||
retries: 60
|
retries: 60
|
||||||
delay: 5
|
delay: 5
|
||||||
|
|||||||
@@ -5,7 +5,7 @@
|
|||||||
changed_when: false
|
changed_when: false
|
||||||
|
|
||||||
- name: "Assign storage label"
|
- name: "Assign storage label"
|
||||||
when: "label_present.stdout_lines|length == 0"
|
when: "label_present.stdout_lines | length == 0"
|
||||||
command: "{{ bin_dir }}/kubectl label node {{ node }} storagenode=glusterfs"
|
command: "{{ bin_dir }}/kubectl label node {{ node }} storagenode=glusterfs"
|
||||||
|
|
||||||
- name: Get storage nodes again
|
- name: Get storage nodes again
|
||||||
@@ -15,5 +15,5 @@
|
|||||||
|
|
||||||
- name: Ensure the label has been set
|
- name: Ensure the label has been set
|
||||||
assert:
|
assert:
|
||||||
that: "label_present|length > 0"
|
that: "label_present | length > 0"
|
||||||
msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs."
|
msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs."
|
||||||
|
|||||||
@@ -24,11 +24,11 @@
|
|||||||
deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
|
deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
|
||||||
command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json"
|
command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json"
|
||||||
until:
|
until:
|
||||||
- "heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
|
- "heketi_state.stdout | from_json | json_query(pods_query) == 'True'"
|
||||||
- "heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
|
- "heketi_state.stdout | from_json | json_query(deployments_query) == 'True'"
|
||||||
retries: 60
|
retries: 60
|
||||||
delay: 5
|
delay: 5
|
||||||
|
|
||||||
- name: Set the Heketi pod name
|
- name: Set the Heketi pod name
|
||||||
set_fact:
|
set_fact:
|
||||||
heketi_pod_name: "{{ heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"
|
heketi_pod_name: "{{ heketi_state.stdout | from_json | json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"
|
||||||
|
|||||||
@@ -12,7 +12,7 @@
|
|||||||
- name: "Render storage class configuration."
|
- name: "Render storage class configuration."
|
||||||
become: true
|
become: true
|
||||||
vars:
|
vars:
|
||||||
endpoint_address: "{{ (heketi_service.stdout|from_json).spec.clusterIP }}"
|
endpoint_address: "{{ (heketi_service.stdout | from_json).spec.clusterIP }}"
|
||||||
template:
|
template:
|
||||||
src: "storageclass.yml.j2"
|
src: "storageclass.yml.j2"
|
||||||
dest: "{{ kube_config_dir }}/storageclass.yml"
|
dest: "{{ kube_config_dir }}/storageclass.yml"
|
||||||
|
|||||||
@@ -11,16 +11,16 @@
|
|||||||
src: "topology.json.j2"
|
src: "topology.json.j2"
|
||||||
dest: "{{ kube_config_dir }}/topology.json"
|
dest: "{{ kube_config_dir }}/topology.json"
|
||||||
mode: 0644
|
mode: 0644
|
||||||
- name: "Copy topology configuration into container." # noqa 503
|
- name: "Copy topology configuration into container." # noqa no-handler
|
||||||
when: "rendering.changed"
|
when: "rendering.changed"
|
||||||
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
|
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
|
||||||
- name: "Load heketi topology." # noqa 503
|
- name: "Load heketi topology." # noqa no-handler
|
||||||
when: "rendering.changed"
|
when: "rendering.changed"
|
||||||
command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
|
command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
|
||||||
- name: "Get heketi topology."
|
- name: "Get heketi topology."
|
||||||
register: "heketi_topology"
|
register: "heketi_topology"
|
||||||
changed_when: false
|
changed_when: false
|
||||||
command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
||||||
until: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\")|flatten|length == groups['heketi-node']|length"
|
until: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\") | flatten | length == groups['heketi-node'] | length"
|
||||||
retries: 60
|
retries: 60
|
||||||
delay: 5
|
delay: 5
|
||||||
|
|||||||
@@ -22,7 +22,7 @@
|
|||||||
ignore_errors: true # noqa ignore-errors
|
ignore_errors: true # noqa ignore-errors
|
||||||
changed_when: false
|
changed_when: false
|
||||||
|
|
||||||
- name: "Remove volume groups." # noqa 301
|
- name: "Remove volume groups."
|
||||||
environment:
|
environment:
|
||||||
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
|
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
|
||||||
become: true
|
become: true
|
||||||
@@ -30,7 +30,7 @@
|
|||||||
with_items: "{{ volume_groups.stdout_lines }}"
|
with_items: "{{ volume_groups.stdout_lines }}"
|
||||||
loop_control: { loop_var: "volume_group" }
|
loop_control: { loop_var: "volume_group" }
|
||||||
|
|
||||||
- name: "Remove physical volume from cluster disks." # noqa 301
|
- name: "Remove physical volume from cluster disks."
|
||||||
environment:
|
environment:
|
||||||
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
|
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
|
||||||
become: true
|
become: true
|
||||||
|
|||||||
@@ -1,43 +1,43 @@
|
|||||||
---
|
---
|
||||||
- name: Remove storage class. # noqa 301
|
- name: Remove storage class.
|
||||||
command: "{{ bin_dir }}/kubectl delete storageclass gluster"
|
command: "{{ bin_dir }}/kubectl delete storageclass gluster"
|
||||||
ignore_errors: true # noqa ignore-errors
|
ignore_errors: true # noqa ignore-errors
|
||||||
- name: Tear down heketi. # noqa 301
|
- name: Tear down heketi.
|
||||||
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
|
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
|
||||||
ignore_errors: true # noqa ignore-errors
|
ignore_errors: true # noqa ignore-errors
|
||||||
- name: Tear down heketi. # noqa 301
|
- name: Tear down heketi.
|
||||||
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
|
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
|
||||||
ignore_errors: true # noqa ignore-errors
|
ignore_errors: true # noqa ignore-errors
|
||||||
- name: Tear down bootstrap.
|
- name: Tear down bootstrap.
|
||||||
include_tasks: "../../provision/tasks/bootstrap/tear-down.yml"
|
include_tasks: "../../provision/tasks/bootstrap/tear-down.yml"
|
||||||
- name: Ensure there is nothing left over. # noqa 301
|
- name: Ensure there is nothing left over.
|
||||||
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
|
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
|
||||||
register: "heketi_result"
|
register: "heketi_result"
|
||||||
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
|
until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
|
||||||
retries: 60
|
retries: 60
|
||||||
delay: 5
|
delay: 5
|
||||||
- name: Ensure there is nothing left over. # noqa 301
|
- name: Ensure there is nothing left over.
|
||||||
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
|
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
|
||||||
register: "heketi_result"
|
register: "heketi_result"
|
||||||
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
|
until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
|
||||||
retries: 60
|
retries: 60
|
||||||
delay: 5
|
delay: 5
|
||||||
- name: Tear down glusterfs. # noqa 301
|
- name: Tear down glusterfs.
|
||||||
command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
|
command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
|
||||||
ignore_errors: true # noqa ignore-errors
|
ignore_errors: true # noqa ignore-errors
|
||||||
- name: Remove heketi storage service. # noqa 301
|
- name: Remove heketi storage service.
|
||||||
command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
|
command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
|
||||||
ignore_errors: true # noqa ignore-errors
|
ignore_errors: true # noqa ignore-errors
|
||||||
- name: Remove heketi gluster role binding # noqa 301
|
- name: Remove heketi gluster role binding
|
||||||
command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
|
command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
|
||||||
ignore_errors: true # noqa ignore-errors
|
ignore_errors: true # noqa ignore-errors
|
||||||
- name: Remove heketi config secret # noqa 301
|
- name: Remove heketi config secret
|
||||||
command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
|
command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
|
||||||
ignore_errors: true # noqa ignore-errors
|
ignore_errors: true # noqa ignore-errors
|
||||||
- name: Remove heketi db backup # noqa 301
|
- name: Remove heketi db backup
|
||||||
command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
|
command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
|
||||||
ignore_errors: true # noqa ignore-errors
|
ignore_errors: true # noqa ignore-errors
|
||||||
- name: Remove heketi service account # noqa 301
|
- name: Remove heketi service account
|
||||||
command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
|
command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
|
||||||
ignore_errors: true # noqa ignore-errors
|
ignore_errors: true # noqa ignore-errors
|
||||||
- name: Get secrets
|
- name: Get secrets
|
||||||
@@ -46,6 +46,6 @@
|
|||||||
changed_when: false
|
changed_when: false
|
||||||
- name: Remove heketi storage secret
|
- name: Remove heketi storage secret
|
||||||
vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" }
|
vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" }
|
||||||
command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout|from_json|json_query(storage_query) }}"
|
command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout | from_json | json_query(storage_query) }}"
|
||||||
when: "storage_query is defined"
|
when: "storage_query is defined"
|
||||||
ignore_errors: true # noqa ignore-errors
|
ignore_errors: true # noqa ignore-errors
|
||||||
|
|||||||
@@ -5,13 +5,17 @@
|
|||||||
Container image collecting script for offline deployment
|
Container image collecting script for offline deployment
|
||||||
|
|
||||||
This script has two features:
|
This script has two features:
|
||||||
(1) Get container images from an environment which is deployed online.
|
(1) Get container images from an environment which is deployed online, or set IMAGES_FROM_FILE
|
||||||
|
environment variable to get images from a file (e.g. temp/images.list after running the
|
||||||
|
./generate_list.sh script).
|
||||||
(2) Deploy local container registry and register the container images to the registry.
|
(2) Deploy local container registry and register the container images to the registry.
|
||||||
|
|
||||||
Step(1) should be done online site as a preparation, then we bring the gotten images
|
Step(1) should be done online site as a preparation, then we bring the gotten images
|
||||||
to the target offline environment. if images are from a private registry,
|
to the target offline environment. if images are from a private registry,
|
||||||
you need to set `PRIVATE_REGISTRY` environment variable.
|
you need to set `PRIVATE_REGISTRY` environment variable.
|
||||||
Then we will run step(2) for registering the images to local registry.
|
Then we will run step(2) for registering the images to local registry, or to an existing
|
||||||
|
registry set by the `DESTINATION_REGISTRY` environment variable. By default, the local registry
|
||||||
|
will run on port 5000. This can be changed with the `REGISTRY_PORT` environment variable
|
||||||
|
|
||||||
Step(1) can be operated with:
|
Step(1) can be operated with:
|
||||||
|
|
||||||
@@ -27,7 +31,7 @@ manage-offline-container-images.sh register
|
|||||||
|
|
||||||
## generate_list.sh
|
## generate_list.sh
|
||||||
|
|
||||||
This script generates the list of downloaded files and the list of container images by `roles/download/defaults/main.yml` file.
|
This script generates the list of downloaded files and the list of container images by `roles/kubespray-defaults/defaults/main/download.yml` file.
|
||||||
|
|
||||||
Run this script will execute `generate_list.yml` playbook in kubespray root directory and generate four files,
|
Run this script will execute `generate_list.yml` playbook in kubespray root directory and generate four files,
|
||||||
all downloaded files url in files.list, all container images in images.list, jinja2 templates in *.template.
|
all downloaded files url in files.list, all container images in images.list, jinja2 templates in *.template.
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ CURRENT_DIR=$(cd $(dirname $0); pwd)
|
|||||||
TEMP_DIR="${CURRENT_DIR}/temp"
|
TEMP_DIR="${CURRENT_DIR}/temp"
|
||||||
REPO_ROOT_DIR="${CURRENT_DIR%/contrib/offline}"
|
REPO_ROOT_DIR="${CURRENT_DIR%/contrib/offline}"
|
||||||
|
|
||||||
: ${DOWNLOAD_YML:="roles/download/defaults/main.yml"}
|
: ${DOWNLOAD_YML:="roles/kubespray-defaults/defaults/main/download.yml"}
|
||||||
|
|
||||||
mkdir -p ${TEMP_DIR}
|
mkdir -p ${TEMP_DIR}
|
||||||
|
|
||||||
@@ -19,7 +19,7 @@ sed -n '/^downloads:/,/download_defaults:/p' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
|
|||||||
| sed 'N;s#\n# #g' | tr ' ' ':' | sed 's/\"//g' > ${TEMP_DIR}/images.list.template
|
| sed 'N;s#\n# #g' | tr ' ' ':' | sed 's/\"//g' > ${TEMP_DIR}/images.list.template
|
||||||
|
|
||||||
# add kube-* images to images list template
|
# add kube-* images to images list template
|
||||||
# Those container images are downloaded by kubeadm, then roles/download/defaults/main.yml
|
# Those container images are downloaded by kubeadm, then roles/kubespray-defaults/defaults/main/download.yml
|
||||||
# doesn't contain those images. That is reason why here needs to put those images into the
|
# doesn't contain those images. That is reason why here needs to put those images into the
|
||||||
# list separately.
|
# list separately.
|
||||||
KUBE_IMAGES="kube-apiserver kube-controller-manager kube-scheduler kube-proxy"
|
KUBE_IMAGES="kube-apiserver kube-controller-manager kube-scheduler kube-proxy"
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
---
|
---
|
||||||
- hosts: localhost
|
- name: Collect container images for offline deployment
|
||||||
|
hosts: localhost
|
||||||
become: no
|
become: no
|
||||||
|
|
||||||
roles:
|
roles:
|
||||||
@@ -11,9 +12,11 @@
|
|||||||
|
|
||||||
tasks:
|
tasks:
|
||||||
# Generate files.list and images.list files from templates.
|
# Generate files.list and images.list files from templates.
|
||||||
- template:
|
- name: Collect container images for offline deployment
|
||||||
|
template:
|
||||||
src: ./contrib/offline/temp/{{ item }}.list.template
|
src: ./contrib/offline/temp/{{ item }}.list.template
|
||||||
dest: ./contrib/offline/temp/{{ item }}.list
|
dest: ./contrib/offline/temp/{{ item }}.list
|
||||||
|
mode: 0644
|
||||||
with_items:
|
with_items:
|
||||||
- files
|
- files
|
||||||
- images
|
- images
|
||||||
|
|||||||
@@ -12,27 +12,40 @@ RETRY_COUNT=5
|
|||||||
function create_container_image_tar() {
|
function create_container_image_tar() {
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
IMAGES=$(kubectl describe pods --all-namespaces | grep " Image:" | awk '{print $2}' | sort | uniq)
|
if [ -z "${IMAGES_FROM_FILE}" ]; then
|
||||||
# NOTE: etcd and pause cannot be seen as pods.
|
echo "Getting images from current \"$(kubectl config current-context)\""
|
||||||
# The pause image is used for --pod-infra-container-image option of kubelet.
|
|
||||||
EXT_IMAGES=$(kubectl cluster-info dump | egrep "quay.io/coreos/etcd:|registry.k8s.io/pause:" | sed s@\"@@g)
|
IMAGES=$(mktemp --suffix=-images)
|
||||||
IMAGES="${IMAGES} ${EXT_IMAGES}"
|
trap 'rm -f "${IMAGES}"' EXIT
|
||||||
|
|
||||||
|
kubectl describe cronjobs,jobs,pods --all-namespaces | grep " Image:" | awk '{print $2}' | sort | uniq > "${IMAGES}"
|
||||||
|
# NOTE: etcd and pause cannot be seen as pods.
|
||||||
|
# The pause image is used for --pod-infra-container-image option of kubelet.
|
||||||
|
kubectl cluster-info dump | grep -E "quay.io/coreos/etcd:|registry.k8s.io/pause:" | sed s@\"@@g >> "${IMAGES}"
|
||||||
|
else
|
||||||
|
echo "Getting images from file \"${IMAGES_FROM_FILE}\""
|
||||||
|
if [ ! -f "${IMAGES_FROM_FILE}" ]; then
|
||||||
|
echo "${IMAGES_FROM_FILE} is not a file"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
IMAGES=$(realpath $IMAGES_FROM_FILE)
|
||||||
|
fi
|
||||||
|
|
||||||
rm -f ${IMAGE_TAR_FILE}
|
rm -f ${IMAGE_TAR_FILE}
|
||||||
rm -rf ${IMAGE_DIR}
|
rm -rf ${IMAGE_DIR}
|
||||||
mkdir ${IMAGE_DIR}
|
mkdir ${IMAGE_DIR}
|
||||||
cd ${IMAGE_DIR}
|
cd ${IMAGE_DIR}
|
||||||
|
|
||||||
sudo docker pull registry:latest
|
sudo ${runtime} pull registry:latest
|
||||||
sudo docker save -o registry-latest.tar registry:latest
|
sudo ${runtime} save -o registry-latest.tar registry:latest
|
||||||
|
|
||||||
for image in ${IMAGES}
|
while read -r image
|
||||||
do
|
do
|
||||||
FILE_NAME="$(echo ${image} | sed s@"/"@"-"@g | sed s/":"/"-"/g)".tar
|
FILE_NAME="$(echo ${image} | sed s@"/"@"-"@g | sed s/":"/"-"/g | sed -E 's/\@.*//g')".tar
|
||||||
set +e
|
set +e
|
||||||
for step in $(seq 1 ${RETRY_COUNT})
|
for step in $(seq 1 ${RETRY_COUNT})
|
||||||
do
|
do
|
||||||
sudo docker pull ${image}
|
sudo ${runtime} pull ${image}
|
||||||
if [ $? -eq 0 ]; then
|
if [ $? -eq 0 ]; then
|
||||||
break
|
break
|
||||||
fi
|
fi
|
||||||
@@ -42,24 +55,26 @@ function create_container_image_tar() {
|
|||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
set -e
|
set -e
|
||||||
sudo docker save -o ${FILE_NAME} ${image}
|
sudo ${runtime} save -o ${FILE_NAME} ${image}
|
||||||
|
|
||||||
# NOTE: Here removes the following repo parts from each image
|
# NOTE: Here removes the following repo parts from each image
|
||||||
# so that these parts will be replaced with Kubespray.
|
# so that these parts will be replaced with Kubespray.
|
||||||
# - kube_image_repo: "registry.k8s.io"
|
# - kube_image_repo: "registry.k8s.io"
|
||||||
# - gcr_image_repo: "gcr.io"
|
# - gcr_image_repo: "gcr.io"
|
||||||
|
# - ghcr_image_repo: "ghcr.io"
|
||||||
# - docker_image_repo: "docker.io"
|
# - docker_image_repo: "docker.io"
|
||||||
# - quay_image_repo: "quay.io"
|
# - quay_image_repo: "quay.io"
|
||||||
FIRST_PART=$(echo ${image} | awk -F"/" '{print $1}')
|
FIRST_PART=$(echo ${image} | awk -F"/" '{print $1}')
|
||||||
if [ "${FIRST_PART}" = "registry.k8s.io" ] ||
|
if [ "${FIRST_PART}" = "registry.k8s.io" ] ||
|
||||||
[ "${FIRST_PART}" = "gcr.io" ] ||
|
[ "${FIRST_PART}" = "gcr.io" ] ||
|
||||||
|
[ "${FIRST_PART}" = "ghcr.io" ] ||
|
||||||
[ "${FIRST_PART}" = "docker.io" ] ||
|
[ "${FIRST_PART}" = "docker.io" ] ||
|
||||||
[ "${FIRST_PART}" = "quay.io" ] ||
|
[ "${FIRST_PART}" = "quay.io" ] ||
|
||||||
[ "${FIRST_PART}" = "${PRIVATE_REGISTRY}" ]; then
|
[ "${FIRST_PART}" = "${PRIVATE_REGISTRY}" ]; then
|
||||||
image=$(echo ${image} | sed s@"${FIRST_PART}/"@@)
|
image=$(echo ${image} | sed s@"${FIRST_PART}/"@@ | sed -E 's/\@.*/\n/g')
|
||||||
fi
|
fi
|
||||||
echo "${FILE_NAME} ${image}" >> ${IMAGE_LIST}
|
echo "${FILE_NAME} ${image}" >> ${IMAGE_LIST}
|
||||||
done
|
done < "${IMAGES}"
|
||||||
|
|
||||||
cd ..
|
cd ..
|
||||||
sudo chown ${USER} ${IMAGE_DIR}/*
|
sudo chown ${USER} ${IMAGE_DIR}/*
|
||||||
@@ -72,6 +87,16 @@ function create_container_image_tar() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
function register_container_images() {
|
function register_container_images() {
|
||||||
|
create_registry=false
|
||||||
|
REGISTRY_PORT=${REGISTRY_PORT:-"5000"}
|
||||||
|
|
||||||
|
if [ -z "${DESTINATION_REGISTRY}" ]; then
|
||||||
|
echo "DESTINATION_REGISTRY not set, will create local registry"
|
||||||
|
create_registry=true
|
||||||
|
DESTINATION_REGISTRY="$(hostname):${REGISTRY_PORT}"
|
||||||
|
fi
|
||||||
|
echo "Images will be pushed to ${DESTINATION_REGISTRY}"
|
||||||
|
|
||||||
if [ ! -f ${IMAGE_TAR_FILE} ]; then
|
if [ ! -f ${IMAGE_TAR_FILE} ]; then
|
||||||
echo "${IMAGE_TAR_FILE} should exist."
|
echo "${IMAGE_TAR_FILE} should exist."
|
||||||
exit 1
|
exit 1
|
||||||
@@ -81,39 +106,47 @@ function register_container_images() {
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
# To avoid "http: server gave http response to https client" error.
|
# To avoid "http: server gave http response to https client" error.
|
||||||
LOCALHOST_NAME=$(hostname)
|
|
||||||
if [ -d /etc/docker/ ]; then
|
if [ -d /etc/docker/ ]; then
|
||||||
set -e
|
set -e
|
||||||
# Ubuntu18.04, RHEL7/CentOS7
|
# Ubuntu18.04, RHEL7/CentOS7
|
||||||
cp ${CURRENT_DIR}/docker-daemon.json ${TEMP_DIR}/docker-daemon.json
|
cp ${CURRENT_DIR}/docker-daemon.json ${TEMP_DIR}/docker-daemon.json
|
||||||
sed -i s@"HOSTNAME"@"${LOCALHOST_NAME}"@ ${TEMP_DIR}/docker-daemon.json
|
sed -i s@"HOSTNAME"@"$(hostname)"@ ${TEMP_DIR}/docker-daemon.json
|
||||||
sudo cp ${TEMP_DIR}/docker-daemon.json /etc/docker/daemon.json
|
sudo cp ${TEMP_DIR}/docker-daemon.json /etc/docker/daemon.json
|
||||||
elif [ -d /etc/containers/ ]; then
|
elif [ -d /etc/containers/ ]; then
|
||||||
set -e
|
set -e
|
||||||
# RHEL8/CentOS8
|
# RHEL8/CentOS8
|
||||||
cp ${CURRENT_DIR}/registries.conf ${TEMP_DIR}/registries.conf
|
cp ${CURRENT_DIR}/registries.conf ${TEMP_DIR}/registries.conf
|
||||||
sed -i s@"HOSTNAME"@"${LOCALHOST_NAME}"@ ${TEMP_DIR}/registries.conf
|
sed -i s@"HOSTNAME"@"$(hostname)"@ ${TEMP_DIR}/registries.conf
|
||||||
sudo cp ${TEMP_DIR}/registries.conf /etc/containers/registries.conf
|
sudo cp ${TEMP_DIR}/registries.conf /etc/containers/registries.conf
|
||||||
else
|
else
|
||||||
echo "docker package(docker-ce, etc.) should be installed"
|
echo "runtime package(docker-ce, podman, nerctl, etc.) should be installed"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
tar -zxvf ${IMAGE_TAR_FILE}
|
tar -zxvf ${IMAGE_TAR_FILE}
|
||||||
sudo docker load -i ${IMAGE_DIR}/registry-latest.tar
|
|
||||||
set +e
|
if [ "${create_registry}" ]; then
|
||||||
sudo docker container inspect registry >/dev/null 2>&1
|
sudo ${runtime} load -i ${IMAGE_DIR}/registry-latest.tar
|
||||||
if [ $? -ne 0 ]; then
|
set +e
|
||||||
sudo docker run --restart=always -d -p 5000:5000 --name registry registry:latest
|
|
||||||
|
sudo ${runtime} container inspect registry >/dev/null 2>&1
|
||||||
|
if [ $? -ne 0 ]; then
|
||||||
|
sudo ${runtime} run --restart=always -d -p "${REGISTRY_PORT}":"${REGISTRY_PORT}" --name registry registry:latest
|
||||||
|
fi
|
||||||
|
set -e
|
||||||
fi
|
fi
|
||||||
set -e
|
|
||||||
|
|
||||||
while read -r line; do
|
while read -r line; do
|
||||||
file_name=$(echo ${line} | awk '{print $1}')
|
file_name=$(echo ${line} | awk '{print $1}')
|
||||||
raw_image=$(echo ${line} | awk '{print $2}')
|
raw_image=$(echo ${line} | awk '{print $2}')
|
||||||
new_image="${LOCALHOST_NAME}:5000/${raw_image}"
|
new_image="${DESTINATION_REGISTRY}/${raw_image}"
|
||||||
org_image=$(sudo docker load -i ${IMAGE_DIR}/${file_name} | head -n1 | awk '{print $3}')
|
load_image=$(sudo ${runtime} load -i ${IMAGE_DIR}/${file_name} | head -n1)
|
||||||
image_id=$(sudo docker image inspect ${org_image} | grep "\"Id\":" | awk -F: '{print $3}'| sed s/'\",'//)
|
org_image=$(echo "${load_image}" | awk '{print $3}')
|
||||||
|
# special case for tags containing the digest when using docker or podman as the container runtime
|
||||||
|
if [ "${org_image}" == "ID:" ]; then
|
||||||
|
org_image=$(echo "${load_image}" | awk '{print $4}')
|
||||||
|
fi
|
||||||
|
image_id=$(sudo ${runtime} image inspect ${org_image} | grep "\"Id\":" | awk -F: '{print $3}'| sed s/'\",'//)
|
||||||
if [ -z "${file_name}" ]; then
|
if [ -z "${file_name}" ]; then
|
||||||
echo "Failed to get file_name for line ${line}"
|
echo "Failed to get file_name for line ${line}"
|
||||||
exit 1
|
exit 1
|
||||||
@@ -130,32 +163,48 @@ function register_container_images() {
|
|||||||
echo "Failed to get image_id for file ${file_name}"
|
echo "Failed to get image_id for file ${file_name}"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
sudo docker load -i ${IMAGE_DIR}/${file_name}
|
sudo ${runtime} load -i ${IMAGE_DIR}/${file_name}
|
||||||
sudo docker tag ${image_id} ${new_image}
|
sudo ${runtime} tag ${image_id} ${new_image}
|
||||||
sudo docker push ${new_image}
|
sudo ${runtime} push ${new_image}
|
||||||
done <<< "$(cat ${IMAGE_LIST})"
|
done <<< "$(cat ${IMAGE_LIST})"
|
||||||
|
|
||||||
echo "Succeeded to register container images to local registry."
|
echo "Succeeded to register container images to local registry."
|
||||||
echo "Please specify ${LOCALHOST_NAME}:5000 for the following options in your inventry:"
|
echo "Please specify \"${DESTINATION_REGISTRY}\" for the following options in your inventry:"
|
||||||
echo "- kube_image_repo"
|
echo "- kube_image_repo"
|
||||||
echo "- gcr_image_repo"
|
echo "- gcr_image_repo"
|
||||||
echo "- docker_image_repo"
|
echo "- docker_image_repo"
|
||||||
echo "- quay_image_repo"
|
echo "- quay_image_repo"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# get runtime command
|
||||||
|
if command -v nerdctl 1>/dev/null 2>&1; then
|
||||||
|
runtime="nerdctl"
|
||||||
|
elif command -v podman 1>/dev/null 2>&1; then
|
||||||
|
runtime="podman"
|
||||||
|
elif command -v docker 1>/dev/null 2>&1; then
|
||||||
|
runtime="docker"
|
||||||
|
else
|
||||||
|
echo "No supported container runtime found"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
if [ "${OPTION}" == "create" ]; then
|
if [ "${OPTION}" == "create" ]; then
|
||||||
create_container_image_tar
|
create_container_image_tar
|
||||||
elif [ "${OPTION}" == "register" ]; then
|
elif [ "${OPTION}" == "register" ]; then
|
||||||
register_container_images
|
register_container_images
|
||||||
else
|
else
|
||||||
echo "This script has two features:"
|
echo "This script has two features:"
|
||||||
echo "(1) Get container images from an environment which is deployed online."
|
echo "(1) Get container images from an environment which is deployed online, or set IMAGES_FROM_FILE"
|
||||||
|
echo " environment variable to get images from a file (e.g. temp/images.list after running the"
|
||||||
|
echo " ./generate_list.sh script)."
|
||||||
echo "(2) Deploy local container registry and register the container images to the registry."
|
echo "(2) Deploy local container registry and register the container images to the registry."
|
||||||
echo ""
|
echo ""
|
||||||
echo "Step(1) should be done online site as a preparation, then we bring"
|
echo "Step(1) should be done online site as a preparation, then we bring"
|
||||||
echo "the gotten images to the target offline environment. if images are from"
|
echo "the gotten images to the target offline environment. if images are from"
|
||||||
echo "a private registry, you need to set PRIVATE_REGISTRY environment variable."
|
echo "a private registry, you need to set PRIVATE_REGISTRY environment variable."
|
||||||
echo "Then we will run step(2) for registering the images to local registry."
|
echo "Then we will run step(2) for registering the images to local registry, or to an existing"
|
||||||
|
echo "registry set by the DESTINATION_REGISTRY environment variable. By default, the local registry"
|
||||||
|
echo "will run on port 5000. This can be changed with the REGISTRY_PORT environment variable"
|
||||||
echo ""
|
echo ""
|
||||||
echo "${IMAGE_TAR_FILE} is created to contain your container images."
|
echo "${IMAGE_TAR_FILE} is created to contain your container images."
|
||||||
echo "Please keep this file and bring it to your offline environment."
|
echo "Please keep this file and bring it to your offline environment."
|
||||||
|
|||||||
@@ -17,7 +17,12 @@ rm -rf "${OFFLINE_FILES_DIR}"
|
|||||||
rm "${OFFLINE_FILES_ARCHIVE}"
|
rm "${OFFLINE_FILES_ARCHIVE}"
|
||||||
mkdir "${OFFLINE_FILES_DIR}"
|
mkdir "${OFFLINE_FILES_DIR}"
|
||||||
|
|
||||||
wget -x -P "${OFFLINE_FILES_DIR}" -i "${FILES_LIST}"
|
while read -r url; do
|
||||||
|
if ! wget -x -P "${OFFLINE_FILES_DIR}" "${url}"; then
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
done < "${FILES_LIST}"
|
||||||
|
|
||||||
tar -czvf "${OFFLINE_FILES_ARCHIVE}" "${OFFLINE_FILES_DIR_NAME}"
|
tar -czvf "${OFFLINE_FILES_ARCHIVE}" "${OFFLINE_FILES_DIR_NAME}"
|
||||||
|
|
||||||
[ -n "$NO_HTTP_SERVER" ] && echo "skip to run nginx" && exit 0
|
[ -n "$NO_HTTP_SERVER" ] && echo "skip to run nginx" && exit 0
|
||||||
@@ -38,7 +43,7 @@ sudo "${runtime}" container inspect nginx >/dev/null 2>&1
|
|||||||
if [ $? -ne 0 ]; then
|
if [ $? -ne 0 ]; then
|
||||||
sudo "${runtime}" run \
|
sudo "${runtime}" run \
|
||||||
--restart=always -d -p ${NGINX_PORT}:80 \
|
--restart=always -d -p ${NGINX_PORT}:80 \
|
||||||
--volume "${OFFLINE_FILES_DIR}:/usr/share/nginx/html/download" \
|
--volume "${OFFLINE_FILES_DIR}":/usr/share/nginx/html/download \
|
||||||
--volume "$(pwd)"/nginx.conf:/etc/nginx/nginx.conf \
|
--volume "${CURRENT_DIR}"/nginx.conf:/etc/nginx/nginx.conf \
|
||||||
--name nginx nginx:alpine
|
--name nginx nginx:alpine
|
||||||
fi
|
fi
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
---
|
---
|
||||||
- hosts: all
|
- name: Disable firewalld/ufw
|
||||||
|
hosts: all
|
||||||
roles:
|
roles:
|
||||||
- { role: prepare }
|
- { role: prepare }
|
||||||
|
|||||||
@@ -1,5 +1,8 @@
|
|||||||
---
|
---
|
||||||
- block:
|
- name: Disable firewalld and ufw
|
||||||
|
when:
|
||||||
|
- disable_service_firewall is defined and disable_service_firewall
|
||||||
|
block:
|
||||||
- name: List services
|
- name: List services
|
||||||
service_facts:
|
service_facts:
|
||||||
|
|
||||||
@@ -9,7 +12,7 @@
|
|||||||
state: stopped
|
state: stopped
|
||||||
enabled: no
|
enabled: no
|
||||||
when:
|
when:
|
||||||
"'firewalld.service' in services"
|
"'firewalld.service' in services and services['firewalld.service'].status != 'not-found'"
|
||||||
|
|
||||||
- name: Disable service ufw
|
- name: Disable service ufw
|
||||||
systemd:
|
systemd:
|
||||||
@@ -17,7 +20,4 @@
|
|||||||
state: stopped
|
state: stopped
|
||||||
enabled: no
|
enabled: no
|
||||||
when:
|
when:
|
||||||
"'ufw.service' in services"
|
"'ufw.service' in services and services['ufw.service'].status != 'not-found'"
|
||||||
|
|
||||||
when:
|
|
||||||
- disable_service_firewall is defined and disable_service_firewall
|
|
||||||
|
|||||||
@@ -1,5 +1,3 @@
|
|||||||
# See the OWNERS docs at https://go.k8s.io/owners
|
# See the OWNERS docs at https://go.k8s.io/owners
|
||||||
|
|
||||||
approvers:
|
approvers:
|
||||||
- holmsten
|
|
||||||
- miouge1
|
- miouge1
|
||||||
|
|||||||
@@ -50,70 +50,32 @@ Example (this one assumes you are using Ubuntu)
|
|||||||
ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=ubuntu -b --become-user=root --flush-cache
|
ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=ubuntu -b --become-user=root --flush-cache
|
||||||
```
|
```
|
||||||
|
|
||||||
***Using other distrib than Ubuntu***
|
## Using other distrib than Ubuntu***
|
||||||
If you want to use another distribution than Ubuntu 18.04 (Bionic) LTS, you can modify the search filters of the 'data "aws_ami" "distro"' in variables.tf.
|
|
||||||
|
|
||||||
For example, to use:
|
To leverage a Linux distribution other than Ubuntu 18.04 (Bionic) LTS for your Terraform configurations, you can adjust the AMI search filters within the 'data "aws_ami" "distro"' block by utilizing variables in your `terraform.tfvars` file. This approach ensures a flexible configuration that adapts to various Linux distributions without directly modifying the core Terraform files.
|
||||||
|
|
||||||
- Debian Jessie, replace 'data "aws_ami" "distro"' in variables.tf with
|
### Example Usages
|
||||||
|
|
||||||
```ini
|
- **Debian Jessie**: To configure the usage of Debian Jessie, insert the subsequent lines into your `terraform.tfvars`:
|
||||||
data "aws_ami" "distro" {
|
|
||||||
most_recent = true
|
|
||||||
|
|
||||||
filter {
|
```hcl
|
||||||
name = "name"
|
ami_name_pattern = "debian-jessie-amd64-hvm-*"
|
||||||
values = ["debian-jessie-amd64-hvm-*"]
|
ami_owners = ["379101102735"]
|
||||||
}
|
```
|
||||||
|
|
||||||
filter {
|
- **Ubuntu 16.04**: To utilize Ubuntu 16.04 instead, apply the following configuration in your `terraform.tfvars`:
|
||||||
name = "virtualization-type"
|
|
||||||
values = ["hvm"]
|
|
||||||
}
|
|
||||||
|
|
||||||
owners = ["379101102735"]
|
```hcl
|
||||||
}
|
ami_name_pattern = "ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-*"
|
||||||
```
|
ami_owners = ["099720109477"]
|
||||||
|
```
|
||||||
|
|
||||||
- Ubuntu 16.04, replace 'data "aws_ami" "distro"' in variables.tf with
|
- **Centos 7**: For employing Centos 7, incorporate these lines into your `terraform.tfvars`:
|
||||||
|
|
||||||
```ini
|
```hcl
|
||||||
data "aws_ami" "distro" {
|
ami_name_pattern = "dcos-centos7-*"
|
||||||
most_recent = true
|
ami_owners = ["688023202711"]
|
||||||
|
```
|
||||||
filter {
|
|
||||||
name = "name"
|
|
||||||
values = ["ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-*"]
|
|
||||||
}
|
|
||||||
|
|
||||||
filter {
|
|
||||||
name = "virtualization-type"
|
|
||||||
values = ["hvm"]
|
|
||||||
}
|
|
||||||
|
|
||||||
owners = ["099720109477"]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
- Centos 7, replace 'data "aws_ami" "distro"' in variables.tf with
|
|
||||||
|
|
||||||
```ini
|
|
||||||
data "aws_ami" "distro" {
|
|
||||||
most_recent = true
|
|
||||||
|
|
||||||
filter {
|
|
||||||
name = "name"
|
|
||||||
values = ["dcos-centos7-*"]
|
|
||||||
}
|
|
||||||
|
|
||||||
filter {
|
|
||||||
name = "virtualization-type"
|
|
||||||
values = ["hvm"]
|
|
||||||
}
|
|
||||||
|
|
||||||
owners = ["688023202711"]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Connecting to Kubernetes
|
## Connecting to Kubernetes
|
||||||
|
|
||||||
|
|||||||
@@ -20,20 +20,38 @@ variable "aws_cluster_name" {
|
|||||||
description = "Name of AWS Cluster"
|
description = "Name of AWS Cluster"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "ami_name_pattern" {
|
||||||
|
description = "The name pattern to use for AMI lookup"
|
||||||
|
type = string
|
||||||
|
default = "debian-10-amd64-*"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "ami_virtualization_type" {
|
||||||
|
description = "The virtualization type to use for AMI lookup"
|
||||||
|
type = string
|
||||||
|
default = "hvm"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "ami_owners" {
|
||||||
|
description = "The owners to use for AMI lookup"
|
||||||
|
type = list(string)
|
||||||
|
default = ["136693071363"]
|
||||||
|
}
|
||||||
|
|
||||||
data "aws_ami" "distro" {
|
data "aws_ami" "distro" {
|
||||||
most_recent = true
|
most_recent = true
|
||||||
|
|
||||||
filter {
|
filter {
|
||||||
name = "name"
|
name = "name"
|
||||||
values = ["debian-10-amd64-*"]
|
values = [var.ami_name_pattern]
|
||||||
}
|
}
|
||||||
|
|
||||||
filter {
|
filter {
|
||||||
name = "virtualization-type"
|
name = "virtualization-type"
|
||||||
values = ["hvm"]
|
values = [var.ami_virtualization_type]
|
||||||
}
|
}
|
||||||
|
|
||||||
owners = ["136693071363"] # Debian-10
|
owners = var.ami_owners
|
||||||
}
|
}
|
||||||
|
|
||||||
//AWS VPC Variables
|
//AWS VPC Variables
|
||||||
|
|||||||
@@ -35,7 +35,7 @@ now six total etcd replicas.
|
|||||||
## Requirements
|
## Requirements
|
||||||
|
|
||||||
- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
|
- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
|
||||||
- [Install Ansible dependencies](/docs/ansible.md#installing-ansible)
|
- [Install Ansible dependencies](/docs/ansible/ansible.md#installing-ansible)
|
||||||
- Account with Equinix Metal
|
- Account with Equinix Metal
|
||||||
- An SSH key pair
|
- An SSH key pair
|
||||||
|
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ terraform {
|
|||||||
required_providers {
|
required_providers {
|
||||||
equinix = {
|
equinix = {
|
||||||
source = "equinix/equinix"
|
source = "equinix/equinix"
|
||||||
version = "~> 1.14"
|
version = "1.24.0"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ ssh_public_keys = [
|
|||||||
machines = {
|
machines = {
|
||||||
"master-0" : {
|
"master-0" : {
|
||||||
"node_type" : "master",
|
"node_type" : "master",
|
||||||
"size" : "Medium",
|
"size" : "standard.medium",
|
||||||
"boot_disk" : {
|
"boot_disk" : {
|
||||||
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
|
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
|
||||||
"root_partition_size" : 50,
|
"root_partition_size" : 50,
|
||||||
@@ -22,7 +22,7 @@ machines = {
|
|||||||
},
|
},
|
||||||
"worker-0" : {
|
"worker-0" : {
|
||||||
"node_type" : "worker",
|
"node_type" : "worker",
|
||||||
"size" : "Large",
|
"size" : "standard.large",
|
||||||
"boot_disk" : {
|
"boot_disk" : {
|
||||||
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
|
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
|
||||||
"root_partition_size" : 50,
|
"root_partition_size" : 50,
|
||||||
@@ -32,7 +32,7 @@ machines = {
|
|||||||
},
|
},
|
||||||
"worker-1" : {
|
"worker-1" : {
|
||||||
"node_type" : "worker",
|
"node_type" : "worker",
|
||||||
"size" : "Large",
|
"size" : "standard.large",
|
||||||
"boot_disk" : {
|
"boot_disk" : {
|
||||||
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
|
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
|
||||||
"root_partition_size" : 50,
|
"root_partition_size" : 50,
|
||||||
@@ -42,7 +42,7 @@ machines = {
|
|||||||
},
|
},
|
||||||
"worker-2" : {
|
"worker-2" : {
|
||||||
"node_type" : "worker",
|
"node_type" : "worker",
|
||||||
"size" : "Large",
|
"size" : "standard.large",
|
||||||
"boot_disk" : {
|
"boot_disk" : {
|
||||||
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
|
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
|
||||||
"root_partition_size" : 50,
|
"root_partition_size" : 50,
|
||||||
|
|||||||
@@ -1,29 +1,25 @@
|
|||||||
data "exoscale_compute_template" "os_image" {
|
data "exoscale_template" "os_image" {
|
||||||
for_each = var.machines
|
for_each = var.machines
|
||||||
|
|
||||||
zone = var.zone
|
zone = var.zone
|
||||||
name = each.value.boot_disk.image_name
|
name = each.value.boot_disk.image_name
|
||||||
}
|
}
|
||||||
|
|
||||||
data "exoscale_compute" "master_nodes" {
|
data "exoscale_compute_instance" "master_nodes" {
|
||||||
for_each = exoscale_compute.master
|
for_each = exoscale_compute_instance.master
|
||||||
|
|
||||||
id = each.value.id
|
id = each.value.id
|
||||||
|
zone = var.zone
|
||||||
# Since private IP address is not assigned until the nics are created we need this
|
|
||||||
depends_on = [exoscale_nic.master_private_network_nic]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
data "exoscale_compute" "worker_nodes" {
|
data "exoscale_compute_instance" "worker_nodes" {
|
||||||
for_each = exoscale_compute.worker
|
for_each = exoscale_compute_instance.worker
|
||||||
|
|
||||||
id = each.value.id
|
id = each.value.id
|
||||||
|
zone = var.zone
|
||||||
# Since private IP address is not assigned until the nics are created we need this
|
|
||||||
depends_on = [exoscale_nic.worker_private_network_nic]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "exoscale_network" "private_network" {
|
resource "exoscale_private_network" "private_network" {
|
||||||
zone = var.zone
|
zone = var.zone
|
||||||
name = "${var.prefix}-network"
|
name = "${var.prefix}-network"
|
||||||
|
|
||||||
@@ -34,25 +30,29 @@ resource "exoscale_network" "private_network" {
|
|||||||
netmask = cidrnetmask(var.private_network_cidr)
|
netmask = cidrnetmask(var.private_network_cidr)
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "exoscale_compute" "master" {
|
resource "exoscale_compute_instance" "master" {
|
||||||
for_each = {
|
for_each = {
|
||||||
for name, machine in var.machines :
|
for name, machine in var.machines :
|
||||||
name => machine
|
name => machine
|
||||||
if machine.node_type == "master"
|
if machine.node_type == "master"
|
||||||
}
|
}
|
||||||
|
|
||||||
display_name = "${var.prefix}-${each.key}"
|
name = "${var.prefix}-${each.key}"
|
||||||
template_id = data.exoscale_compute_template.os_image[each.key].id
|
template_id = data.exoscale_template.os_image[each.key].id
|
||||||
size = each.value.size
|
type = each.value.size
|
||||||
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
|
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
|
||||||
state = "Running"
|
state = "Running"
|
||||||
zone = var.zone
|
zone = var.zone
|
||||||
security_groups = [exoscale_security_group.master_sg.name]
|
security_group_ids = [exoscale_security_group.master_sg.id]
|
||||||
|
network_interface {
|
||||||
|
network_id = exoscale_private_network.private_network.id
|
||||||
|
}
|
||||||
|
elastic_ip_ids = [exoscale_elastic_ip.control_plane_lb.id]
|
||||||
|
|
||||||
user_data = templatefile(
|
user_data = templatefile(
|
||||||
"${path.module}/templates/cloud-init.tmpl",
|
"${path.module}/templates/cloud-init.tmpl",
|
||||||
{
|
{
|
||||||
eip_ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
|
eip_ip_address = exoscale_elastic_ip.ingress_controller_lb.ip_address
|
||||||
node_local_partition_size = each.value.boot_disk.node_local_partition_size
|
node_local_partition_size = each.value.boot_disk.node_local_partition_size
|
||||||
ceph_partition_size = each.value.boot_disk.ceph_partition_size
|
ceph_partition_size = each.value.boot_disk.ceph_partition_size
|
||||||
root_partition_size = each.value.boot_disk.root_partition_size
|
root_partition_size = each.value.boot_disk.root_partition_size
|
||||||
@@ -62,25 +62,29 @@ resource "exoscale_compute" "master" {
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "exoscale_compute" "worker" {
|
resource "exoscale_compute_instance" "worker" {
|
||||||
for_each = {
|
for_each = {
|
||||||
for name, machine in var.machines :
|
for name, machine in var.machines :
|
||||||
name => machine
|
name => machine
|
||||||
if machine.node_type == "worker"
|
if machine.node_type == "worker"
|
||||||
}
|
}
|
||||||
|
|
||||||
display_name = "${var.prefix}-${each.key}"
|
name = "${var.prefix}-${each.key}"
|
||||||
template_id = data.exoscale_compute_template.os_image[each.key].id
|
template_id = data.exoscale_template.os_image[each.key].id
|
||||||
size = each.value.size
|
type = each.value.size
|
||||||
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
|
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
|
||||||
state = "Running"
|
state = "Running"
|
||||||
zone = var.zone
|
zone = var.zone
|
||||||
security_groups = [exoscale_security_group.worker_sg.name]
|
security_group_ids = [exoscale_security_group.worker_sg.id]
|
||||||
|
network_interface {
|
||||||
|
network_id = exoscale_private_network.private_network.id
|
||||||
|
}
|
||||||
|
elastic_ip_ids = [exoscale_elastic_ip.ingress_controller_lb.id]
|
||||||
|
|
||||||
user_data = templatefile(
|
user_data = templatefile(
|
||||||
"${path.module}/templates/cloud-init.tmpl",
|
"${path.module}/templates/cloud-init.tmpl",
|
||||||
{
|
{
|
||||||
eip_ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
|
eip_ip_address = exoscale_elastic_ip.ingress_controller_lb.ip_address
|
||||||
node_local_partition_size = each.value.boot_disk.node_local_partition_size
|
node_local_partition_size = each.value.boot_disk.node_local_partition_size
|
||||||
ceph_partition_size = each.value.boot_disk.ceph_partition_size
|
ceph_partition_size = each.value.boot_disk.ceph_partition_size
|
||||||
root_partition_size = each.value.boot_disk.root_partition_size
|
root_partition_size = each.value.boot_disk.root_partition_size
|
||||||
@@ -90,41 +94,33 @@ resource "exoscale_compute" "worker" {
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "exoscale_nic" "master_private_network_nic" {
|
|
||||||
for_each = exoscale_compute.master
|
|
||||||
|
|
||||||
compute_id = each.value.id
|
|
||||||
network_id = exoscale_network.private_network.id
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "exoscale_nic" "worker_private_network_nic" {
|
|
||||||
for_each = exoscale_compute.worker
|
|
||||||
|
|
||||||
compute_id = each.value.id
|
|
||||||
network_id = exoscale_network.private_network.id
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "exoscale_security_group" "master_sg" {
|
resource "exoscale_security_group" "master_sg" {
|
||||||
name = "${var.prefix}-master-sg"
|
name = "${var.prefix}-master-sg"
|
||||||
description = "Security group for Kubernetes masters"
|
description = "Security group for Kubernetes masters"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "exoscale_security_group_rules" "master_sg_rules" {
|
resource "exoscale_security_group_rule" "master_sg_rule_ssh" {
|
||||||
security_group_id = exoscale_security_group.master_sg.id
|
security_group_id = exoscale_security_group.master_sg.id
|
||||||
|
|
||||||
|
for_each = toset(var.ssh_whitelist)
|
||||||
# SSH
|
# SSH
|
||||||
ingress {
|
type = "INGRESS"
|
||||||
protocol = "TCP"
|
start_port = 22
|
||||||
cidr_list = var.ssh_whitelist
|
end_port = 22
|
||||||
ports = ["22"]
|
protocol = "TCP"
|
||||||
}
|
cidr = each.value
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "exoscale_security_group_rule" "master_sg_rule_k8s_api" {
|
||||||
|
security_group_id = exoscale_security_group.master_sg.id
|
||||||
|
|
||||||
|
for_each = toset(var.api_server_whitelist)
|
||||||
# Kubernetes API
|
# Kubernetes API
|
||||||
ingress {
|
type = "INGRESS"
|
||||||
protocol = "TCP"
|
start_port = 6443
|
||||||
cidr_list = var.api_server_whitelist
|
end_port = 6443
|
||||||
ports = ["6443"]
|
protocol = "TCP"
|
||||||
}
|
cidr = each.value
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "exoscale_security_group" "worker_sg" {
|
resource "exoscale_security_group" "worker_sg" {
|
||||||
@@ -132,62 +128,64 @@ resource "exoscale_security_group" "worker_sg" {
|
|||||||
description = "security group for kubernetes worker nodes"
|
description = "security group for kubernetes worker nodes"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "exoscale_security_group_rules" "worker_sg_rules" {
|
resource "exoscale_security_group_rule" "worker_sg_rule_ssh" {
|
||||||
security_group_id = exoscale_security_group.worker_sg.id
|
security_group_id = exoscale_security_group.worker_sg.id
|
||||||
|
|
||||||
# SSH
|
# SSH
|
||||||
ingress {
|
for_each = toset(var.ssh_whitelist)
|
||||||
protocol = "TCP"
|
type = "INGRESS"
|
||||||
cidr_list = var.ssh_whitelist
|
start_port = 22
|
||||||
ports = ["22"]
|
end_port = 22
|
||||||
}
|
protocol = "TCP"
|
||||||
|
cidr = each.value
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "exoscale_security_group_rule" "worker_sg_rule_http" {
|
||||||
|
security_group_id = exoscale_security_group.worker_sg.id
|
||||||
|
|
||||||
# HTTP(S)
|
# HTTP(S)
|
||||||
ingress {
|
for_each = toset(["80", "443"])
|
||||||
protocol = "TCP"
|
type = "INGRESS"
|
||||||
cidr_list = ["0.0.0.0/0"]
|
start_port = each.value
|
||||||
ports = ["80", "443"]
|
end_port = each.value
|
||||||
}
|
protocol = "TCP"
|
||||||
|
cidr = "0.0.0.0/0"
|
||||||
|
}
|
||||||
|
|
||||||
# Kubernetes Nodeport
|
|
||||||
ingress {
|
resource "exoscale_security_group_rule" "worker_sg_rule_nodeport" {
|
||||||
protocol = "TCP"
|
security_group_id = exoscale_security_group.worker_sg.id
|
||||||
cidr_list = var.nodeport_whitelist
|
|
||||||
ports = ["30000-32767"]
|
# HTTP(S)
|
||||||
|
for_each = toset(var.nodeport_whitelist)
|
||||||
|
type = "INGRESS"
|
||||||
|
start_port = 30000
|
||||||
|
end_port = 32767
|
||||||
|
protocol = "TCP"
|
||||||
|
cidr = each.value
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "exoscale_elastic_ip" "ingress_controller_lb" {
|
||||||
|
zone = var.zone
|
||||||
|
healthcheck {
|
||||||
|
mode = "http"
|
||||||
|
port = 80
|
||||||
|
uri = "/healthz"
|
||||||
|
interval = 10
|
||||||
|
timeout = 2
|
||||||
|
strikes_ok = 2
|
||||||
|
strikes_fail = 3
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "exoscale_ipaddress" "ingress_controller_lb" {
|
resource "exoscale_elastic_ip" "control_plane_lb" {
|
||||||
zone = var.zone
|
zone = var.zone
|
||||||
healthcheck_mode = "http"
|
healthcheck {
|
||||||
healthcheck_port = 80
|
mode = "tcp"
|
||||||
healthcheck_path = "/healthz"
|
port = 6443
|
||||||
healthcheck_interval = 10
|
interval = 10
|
||||||
healthcheck_timeout = 2
|
timeout = 2
|
||||||
healthcheck_strikes_ok = 2
|
strikes_ok = 2
|
||||||
healthcheck_strikes_fail = 3
|
strikes_fail = 3
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "exoscale_secondary_ipaddress" "ingress_controller_lb" {
|
|
||||||
for_each = exoscale_compute.worker
|
|
||||||
|
|
||||||
compute_id = each.value.id
|
|
||||||
ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "exoscale_ipaddress" "control_plane_lb" {
|
|
||||||
zone = var.zone
|
|
||||||
healthcheck_mode = "tcp"
|
|
||||||
healthcheck_port = 6443
|
|
||||||
healthcheck_interval = 10
|
|
||||||
healthcheck_timeout = 2
|
|
||||||
healthcheck_strikes_ok = 2
|
|
||||||
healthcheck_strikes_fail = 3
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "exoscale_secondary_ipaddress" "control_plane_lb" {
|
|
||||||
for_each = exoscale_compute.master
|
|
||||||
|
|
||||||
compute_id = each.value.id
|
|
||||||
ip_address = exoscale_ipaddress.control_plane_lb.ip_address
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,19 +1,19 @@
|
|||||||
output "master_ip_addresses" {
|
output "master_ip_addresses" {
|
||||||
value = {
|
value = {
|
||||||
for key, instance in exoscale_compute.master :
|
for key, instance in exoscale_compute_instance.master :
|
||||||
instance.name => {
|
instance.name => {
|
||||||
"private_ip" = contains(keys(data.exoscale_compute.master_nodes), key) ? data.exoscale_compute.master_nodes[key].private_network_ip_addresses[0] : ""
|
"private_ip" = contains(keys(data.exoscale_compute_instance.master_nodes), key) ? data.exoscale_compute_instance.master_nodes[key].private_network_ip_addresses[0] : ""
|
||||||
"public_ip" = exoscale_compute.master[key].ip_address
|
"public_ip" = exoscale_compute_instance.master[key].ip_address
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
output "worker_ip_addresses" {
|
output "worker_ip_addresses" {
|
||||||
value = {
|
value = {
|
||||||
for key, instance in exoscale_compute.worker :
|
for key, instance in exoscale_compute_instance.worker :
|
||||||
instance.name => {
|
instance.name => {
|
||||||
"private_ip" = contains(keys(data.exoscale_compute.worker_nodes), key) ? data.exoscale_compute.worker_nodes[key].private_network_ip_addresses[0] : ""
|
"private_ip" = contains(keys(data.exoscale_compute_instance.worker_nodes), key) ? data.exoscale_compute_instance.worker_nodes[key].private_network_ip_addresses[0] : ""
|
||||||
"public_ip" = exoscale_compute.worker[key].ip_address
|
"public_ip" = exoscale_compute_instance.worker[key].ip_address
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -23,9 +23,9 @@ output "cluster_private_network_cidr" {
|
|||||||
}
|
}
|
||||||
|
|
||||||
output "ingress_controller_lb_ip_address" {
|
output "ingress_controller_lb_ip_address" {
|
||||||
value = exoscale_ipaddress.ingress_controller_lb.ip_address
|
value = exoscale_elastic_ip.ingress_controller_lb.ip_address
|
||||||
}
|
}
|
||||||
|
|
||||||
output "control_plane_lb_ip_address" {
|
output "control_plane_lb_ip_address" {
|
||||||
value = exoscale_ipaddress.control_plane_lb.ip_address
|
value = exoscale_elastic_ip.control_plane_lb.ip_address
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
terraform {
|
terraform {
|
||||||
required_providers {
|
required_providers {
|
||||||
exoscale = {
|
exoscale = {
|
||||||
source = "exoscale/exoscale"
|
source = "exoscale/exoscale"
|
||||||
version = ">= 0.21"
|
version = ">= 0.21"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
5
contrib/terraform/nifcloud/.gitignore
vendored
Normal file
5
contrib/terraform/nifcloud/.gitignore
vendored
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
*.tfstate*
|
||||||
|
.terraform.lock.hcl
|
||||||
|
.terraform
|
||||||
|
|
||||||
|
sample-inventory/inventory.ini
|
||||||
138
contrib/terraform/nifcloud/README.md
Normal file
138
contrib/terraform/nifcloud/README.md
Normal file
@@ -0,0 +1,138 @@
|
|||||||
|
# Kubernetes on NIFCLOUD with Terraform
|
||||||
|
|
||||||
|
Provision a Kubernetes cluster on [NIFCLOUD](https://pfs.nifcloud.com/) using Terraform and Kubespray
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The setup looks like following
|
||||||
|
|
||||||
|
```text
|
||||||
|
Kubernetes cluster
|
||||||
|
+----------------------------+
|
||||||
|
+---------------+ | +--------------------+ |
|
||||||
|
| | | | +--------------------+ |
|
||||||
|
| API server LB +---------> | | | |
|
||||||
|
| | | | | Control Plane/etcd | |
|
||||||
|
+---------------+ | | | node(s) | |
|
||||||
|
| +-+ | |
|
||||||
|
| +--------------------+ |
|
||||||
|
| ^ |
|
||||||
|
| | |
|
||||||
|
| v |
|
||||||
|
| +--------------------+ |
|
||||||
|
| | +--------------------+ |
|
||||||
|
| | | | |
|
||||||
|
| | | Worker | |
|
||||||
|
| | | node(s) | |
|
||||||
|
| +-+ | |
|
||||||
|
| +--------------------+ |
|
||||||
|
+----------------------------+
|
||||||
|
```
|
||||||
|
|
||||||
|
## Requirements
|
||||||
|
|
||||||
|
* Terraform 1.3.7
|
||||||
|
|
||||||
|
## Quickstart
|
||||||
|
|
||||||
|
### Export Variables
|
||||||
|
|
||||||
|
* Your NIFCLOUD credentials:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export NIFCLOUD_ACCESS_KEY_ID=<YOUR ACCESS KEY>
|
||||||
|
export NIFCLOUD_SECRET_ACCESS_KEY=<YOUR SECRET ACCESS KEY>
|
||||||
|
```
|
||||||
|
|
||||||
|
* The SSH KEY used to connect to the instance:
|
||||||
|
* FYI: [Cloud Help(SSH Key)](https://pfs.nifcloud.com/help/ssh.htm)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export TF_VAR_SSHKEY_NAME=<YOUR SSHKEY NAME>
|
||||||
|
```
|
||||||
|
|
||||||
|
* The IP address to connect to bastion server:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export TF_VAR_working_instance_ip=$(curl ifconfig.me)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Create The Infrastructure
|
||||||
|
|
||||||
|
* Run terraform:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
terraform init
|
||||||
|
terraform apply -var-file ./sample-inventory/cluster.tfvars
|
||||||
|
```
|
||||||
|
|
||||||
|
### Setup The Kubernetes
|
||||||
|
|
||||||
|
* Generate cluster configuration file:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
./generate-inventory.sh > sample-inventory/inventory.ini
|
||||||
|
```
|
||||||
|
|
||||||
|
* Export Variables:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
BASTION_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.bastion_info | to_entries[].value.public_ip')
|
||||||
|
API_LB_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.control_plane_lb')
|
||||||
|
CP01_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.control_plane_info | to_entries[0].value.private_ip')
|
||||||
|
export ANSIBLE_SSH_ARGS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ProxyCommand=\"ssh root@${BASTION_IP} -W %h:%p\""
|
||||||
|
```
|
||||||
|
|
||||||
|
* Set ssh-agent"
|
||||||
|
|
||||||
|
```bash
|
||||||
|
eval `ssh-agent`
|
||||||
|
ssh-add <THE PATH TO YOUR SSH KEY>
|
||||||
|
```
|
||||||
|
|
||||||
|
* Run cluster.yml playbook:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd ./../../../
|
||||||
|
ansible-playbook -i contrib/terraform/nifcloud/inventory/inventory.ini cluster.yml
|
||||||
|
```
|
||||||
|
|
||||||
|
### Connecting to Kubernetes
|
||||||
|
|
||||||
|
* [Install kubectl](https://kubernetes.io/docs/tasks/tools/) on the localhost
|
||||||
|
* Fetching kubeconfig file:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
mkdir -p ~/.kube
|
||||||
|
scp -o ProxyCommand="ssh root@${BASTION_IP} -W %h:%p" root@${CP01_IP}:/etc/kubernetes/admin.conf ~/.kube/config
|
||||||
|
```
|
||||||
|
|
||||||
|
* Rewrite /etc/hosts
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo echo "${API_LB_IP} lb-apiserver.kubernetes.local" >> /etc/hosts
|
||||||
|
```
|
||||||
|
|
||||||
|
* Run kubectl
|
||||||
|
|
||||||
|
```bash
|
||||||
|
kubectl get node
|
||||||
|
```
|
||||||
|
|
||||||
|
## Variables
|
||||||
|
|
||||||
|
* `region`: Region where to run the cluster
|
||||||
|
* `az`: Availability zone where to run the cluster
|
||||||
|
* `private_ip_bn`: Private ip address of bastion server
|
||||||
|
* `private_network_cidr`: Subnet of private network
|
||||||
|
* `instances_cp`: Machine to provision as Control Plane. Key of this object will be used as part of the machine' name
|
||||||
|
* `private_ip`: private ip address of machine
|
||||||
|
* `instances_wk`: Machine to provision as Worker Node. Key of this object will be used as part of the machine' name
|
||||||
|
* `private_ip`: private ip address of machine
|
||||||
|
* `instance_key_name`: The key name of the Key Pair to use for the instance
|
||||||
|
* `instance_type_bn`: The instance type of bastion server
|
||||||
|
* `instance_type_wk`: The instance type of worker node
|
||||||
|
* `instance_type_cp`: The instance type of control plane
|
||||||
|
* `image_name`: OS image used for the instance
|
||||||
|
* `working_instance_ip`: The IP address to connect to bastion server
|
||||||
|
* `accounting_type`: Accounting type. (1: monthly, 2: pay per use)
|
||||||
64
contrib/terraform/nifcloud/generate-inventory.sh
Executable file
64
contrib/terraform/nifcloud/generate-inventory.sh
Executable file
@@ -0,0 +1,64 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
#
|
||||||
|
# Generates a inventory file based on the terraform output.
|
||||||
|
# After provisioning a cluster, simply run this command and supply the terraform state file
|
||||||
|
# Default state file is terraform.tfstate
|
||||||
|
#
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
TF_OUT=$(terraform output -json)
|
||||||
|
|
||||||
|
CONTROL_PLANES=$(jq -r '.kubernetes_cluster.value.control_plane_info | to_entries[]' <(echo "${TF_OUT}"))
|
||||||
|
WORKERS=$(jq -r '.kubernetes_cluster.value.worker_info | to_entries[]' <(echo "${TF_OUT}"))
|
||||||
|
mapfile -t CONTROL_PLANE_NAMES < <(jq -r '.key' <(echo "${CONTROL_PLANES}"))
|
||||||
|
mapfile -t WORKER_NAMES < <(jq -r '.key' <(echo "${WORKERS}"))
|
||||||
|
|
||||||
|
API_LB=$(jq -r '.kubernetes_cluster.value.control_plane_lb' <(echo "${TF_OUT}"))
|
||||||
|
|
||||||
|
echo "[all]"
|
||||||
|
# Generate control plane hosts
|
||||||
|
i=1
|
||||||
|
for name in "${CONTROL_PLANE_NAMES[@]}"; do
|
||||||
|
private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${CONTROL_PLANES}"))
|
||||||
|
echo "${name} ansible_user=root ansible_host=${private_ip} access_ip=${private_ip} ip=${private_ip} etcd_member_name=etcd${i}"
|
||||||
|
i=$(( i + 1 ))
|
||||||
|
done
|
||||||
|
|
||||||
|
# Generate worker hosts
|
||||||
|
for name in "${WORKER_NAMES[@]}"; do
|
||||||
|
private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${WORKERS}"))
|
||||||
|
echo "${name} ansible_user=root ansible_host=${private_ip} access_ip=${private_ip} ip=${private_ip}"
|
||||||
|
done
|
||||||
|
|
||||||
|
API_LB=$(jq -r '.kubernetes_cluster.value.control_plane_lb' <(echo "${TF_OUT}"))
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "[all:vars]"
|
||||||
|
echo "upstream_dns_servers=['8.8.8.8','8.8.4.4']"
|
||||||
|
echo "loadbalancer_apiserver={'address':'${API_LB}','port':'6443'}"
|
||||||
|
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "[kube_control_plane]"
|
||||||
|
for name in "${CONTROL_PLANE_NAMES[@]}"; do
|
||||||
|
echo "${name}"
|
||||||
|
done
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "[etcd]"
|
||||||
|
for name in "${CONTROL_PLANE_NAMES[@]}"; do
|
||||||
|
echo "${name}"
|
||||||
|
done
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "[kube_node]"
|
||||||
|
for name in "${WORKER_NAMES[@]}"; do
|
||||||
|
echo "${name}"
|
||||||
|
done
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "[k8s_cluster:children]"
|
||||||
|
echo "kube_control_plane"
|
||||||
|
echo "kube_node"
|
||||||
36
contrib/terraform/nifcloud/main.tf
Normal file
36
contrib/terraform/nifcloud/main.tf
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
provider "nifcloud" {
|
||||||
|
region = var.region
|
||||||
|
}
|
||||||
|
|
||||||
|
module "kubernetes_cluster" {
|
||||||
|
source = "./modules/kubernetes-cluster"
|
||||||
|
|
||||||
|
availability_zone = var.az
|
||||||
|
prefix = "dev"
|
||||||
|
|
||||||
|
private_network_cidr = var.private_network_cidr
|
||||||
|
|
||||||
|
instance_key_name = var.instance_key_name
|
||||||
|
instances_cp = var.instances_cp
|
||||||
|
instances_wk = var.instances_wk
|
||||||
|
image_name = var.image_name
|
||||||
|
|
||||||
|
instance_type_bn = var.instance_type_bn
|
||||||
|
instance_type_cp = var.instance_type_cp
|
||||||
|
instance_type_wk = var.instance_type_wk
|
||||||
|
|
||||||
|
private_ip_bn = var.private_ip_bn
|
||||||
|
|
||||||
|
additional_lb_filter = [var.working_instance_ip]
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "nifcloud_security_group_rule" "ssh_from_bastion" {
|
||||||
|
security_group_names = [
|
||||||
|
module.kubernetes_cluster.security_group_name.bastion
|
||||||
|
]
|
||||||
|
type = "IN"
|
||||||
|
from_port = 22
|
||||||
|
to_port = 22
|
||||||
|
protocol = "TCP"
|
||||||
|
cidr_ip = var.working_instance_ip
|
||||||
|
}
|
||||||
301
contrib/terraform/nifcloud/modules/kubernetes-cluster/main.tf
Normal file
301
contrib/terraform/nifcloud/modules/kubernetes-cluster/main.tf
Normal file
@@ -0,0 +1,301 @@
|
|||||||
|
#################################################
|
||||||
|
##
|
||||||
|
## Local variables
|
||||||
|
##
|
||||||
|
locals {
|
||||||
|
# e.g. east-11 is 11
|
||||||
|
az_num = reverse(split("-", var.availability_zone))[0]
|
||||||
|
# e.g. east-11 is e11
|
||||||
|
az_short_name = "${substr(reverse(split("-", var.availability_zone))[1], 0, 1)}${local.az_num}"
|
||||||
|
|
||||||
|
# Port used by the protocol
|
||||||
|
port_ssh = 22
|
||||||
|
port_kubectl = 6443
|
||||||
|
port_kubelet = 10250
|
||||||
|
|
||||||
|
# calico: https://docs.tigera.io/calico/latest/getting-started/kubernetes/requirements#network-requirements
|
||||||
|
port_bgp = 179
|
||||||
|
port_vxlan = 4789
|
||||||
|
port_etcd = 2379
|
||||||
|
}
|
||||||
|
|
||||||
|
#################################################
|
||||||
|
##
|
||||||
|
## General
|
||||||
|
##
|
||||||
|
|
||||||
|
# data
|
||||||
|
data "nifcloud_image" "this" {
|
||||||
|
image_name = var.image_name
|
||||||
|
}
|
||||||
|
|
||||||
|
# private lan
|
||||||
|
resource "nifcloud_private_lan" "this" {
|
||||||
|
private_lan_name = "${var.prefix}lan"
|
||||||
|
availability_zone = var.availability_zone
|
||||||
|
cidr_block = var.private_network_cidr
|
||||||
|
accounting_type = var.accounting_type
|
||||||
|
}
|
||||||
|
|
||||||
|
#################################################
|
||||||
|
##
|
||||||
|
## Bastion
|
||||||
|
##
|
||||||
|
resource "nifcloud_security_group" "bn" {
|
||||||
|
group_name = "${var.prefix}bn"
|
||||||
|
description = "${var.prefix} bastion"
|
||||||
|
availability_zone = var.availability_zone
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "nifcloud_instance" "bn" {
|
||||||
|
|
||||||
|
instance_id = "${local.az_short_name}${var.prefix}bn01"
|
||||||
|
security_group = nifcloud_security_group.bn.group_name
|
||||||
|
instance_type = var.instance_type_bn
|
||||||
|
|
||||||
|
user_data = templatefile("${path.module}/templates/userdata.tftpl", {
|
||||||
|
private_ip_address = var.private_ip_bn
|
||||||
|
ssh_port = local.port_ssh
|
||||||
|
hostname = "${local.az_short_name}${var.prefix}bn01"
|
||||||
|
})
|
||||||
|
|
||||||
|
availability_zone = var.availability_zone
|
||||||
|
accounting_type = var.accounting_type
|
||||||
|
image_id = data.nifcloud_image.this.image_id
|
||||||
|
key_name = var.instance_key_name
|
||||||
|
|
||||||
|
network_interface {
|
||||||
|
network_id = "net-COMMON_GLOBAL"
|
||||||
|
}
|
||||||
|
network_interface {
|
||||||
|
network_id = nifcloud_private_lan.this.network_id
|
||||||
|
ip_address = "static"
|
||||||
|
}
|
||||||
|
|
||||||
|
# The image_id changes when the OS image type is demoted from standard to public.
|
||||||
|
lifecycle {
|
||||||
|
ignore_changes = [
|
||||||
|
image_id,
|
||||||
|
user_data,
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#################################################
|
||||||
|
##
|
||||||
|
## Control Plane
|
||||||
|
##
|
||||||
|
resource "nifcloud_security_group" "cp" {
|
||||||
|
group_name = "${var.prefix}cp"
|
||||||
|
description = "${var.prefix} control plane"
|
||||||
|
availability_zone = var.availability_zone
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "nifcloud_instance" "cp" {
|
||||||
|
for_each = var.instances_cp
|
||||||
|
|
||||||
|
instance_id = "${local.az_short_name}${var.prefix}${each.key}"
|
||||||
|
security_group = nifcloud_security_group.cp.group_name
|
||||||
|
instance_type = var.instance_type_cp
|
||||||
|
user_data = templatefile("${path.module}/templates/userdata.tftpl", {
|
||||||
|
private_ip_address = each.value.private_ip
|
||||||
|
ssh_port = local.port_ssh
|
||||||
|
hostname = "${local.az_short_name}${var.prefix}${each.key}"
|
||||||
|
})
|
||||||
|
|
||||||
|
availability_zone = var.availability_zone
|
||||||
|
accounting_type = var.accounting_type
|
||||||
|
image_id = data.nifcloud_image.this.image_id
|
||||||
|
key_name = var.instance_key_name
|
||||||
|
|
||||||
|
network_interface {
|
||||||
|
network_id = "net-COMMON_GLOBAL"
|
||||||
|
}
|
||||||
|
network_interface {
|
||||||
|
network_id = nifcloud_private_lan.this.network_id
|
||||||
|
ip_address = "static"
|
||||||
|
}
|
||||||
|
|
||||||
|
# The image_id changes when the OS image type is demoted from standard to public.
|
||||||
|
lifecycle {
|
||||||
|
ignore_changes = [
|
||||||
|
image_id,
|
||||||
|
user_data,
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "nifcloud_load_balancer" "this" {
|
||||||
|
load_balancer_name = "${local.az_short_name}${var.prefix}cp"
|
||||||
|
accounting_type = var.accounting_type
|
||||||
|
balancing_type = 1 // Round-Robin
|
||||||
|
load_balancer_port = local.port_kubectl
|
||||||
|
instance_port = local.port_kubectl
|
||||||
|
instances = [for v in nifcloud_instance.cp : v.instance_id]
|
||||||
|
filter = concat(
|
||||||
|
[for k, v in nifcloud_instance.cp : v.public_ip],
|
||||||
|
[for k, v in nifcloud_instance.wk : v.public_ip],
|
||||||
|
var.additional_lb_filter,
|
||||||
|
)
|
||||||
|
filter_type = 1 // Allow
|
||||||
|
}
|
||||||
|
|
||||||
|
#################################################
|
||||||
|
##
|
||||||
|
## Worker
|
||||||
|
##
|
||||||
|
resource "nifcloud_security_group" "wk" {
|
||||||
|
group_name = "${var.prefix}wk"
|
||||||
|
description = "${var.prefix} worker"
|
||||||
|
availability_zone = var.availability_zone
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "nifcloud_instance" "wk" {
|
||||||
|
for_each = var.instances_wk
|
||||||
|
|
||||||
|
instance_id = "${local.az_short_name}${var.prefix}${each.key}"
|
||||||
|
security_group = nifcloud_security_group.wk.group_name
|
||||||
|
instance_type = var.instance_type_wk
|
||||||
|
user_data = templatefile("${path.module}/templates/userdata.tftpl", {
|
||||||
|
private_ip_address = each.value.private_ip
|
||||||
|
ssh_port = local.port_ssh
|
||||||
|
hostname = "${local.az_short_name}${var.prefix}${each.key}"
|
||||||
|
})
|
||||||
|
|
||||||
|
availability_zone = var.availability_zone
|
||||||
|
accounting_type = var.accounting_type
|
||||||
|
image_id = data.nifcloud_image.this.image_id
|
||||||
|
key_name = var.instance_key_name
|
||||||
|
|
||||||
|
network_interface {
|
||||||
|
network_id = "net-COMMON_GLOBAL"
|
||||||
|
}
|
||||||
|
network_interface {
|
||||||
|
network_id = nifcloud_private_lan.this.network_id
|
||||||
|
ip_address = "static"
|
||||||
|
}
|
||||||
|
|
||||||
|
# The image_id changes when the OS image type is demoted from standard to public.
|
||||||
|
lifecycle {
|
||||||
|
ignore_changes = [
|
||||||
|
image_id,
|
||||||
|
user_data,
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#################################################
|
||||||
|
##
|
||||||
|
## Security Group Rule: Kubernetes
|
||||||
|
##
|
||||||
|
|
||||||
|
# ssh
|
||||||
|
# Allow inbound SSH to worker and control-plane nodes, but only from the bastion.
resource "nifcloud_security_group_rule" "ssh_from_bastion" {
  security_group_names = [
    nifcloud_security_group.wk.group_name,
    nifcloud_security_group.cp.group_name,
  ]

  type                       = "IN"
  protocol                   = "TCP"
  from_port                  = local.port_ssh
  to_port                    = local.port_ssh
  source_security_group_name = nifcloud_security_group.bn.group_name
}
|
||||||
|
|
||||||
|
# kubectl
|
||||||
|
# kube-apiserver (kubectl) access: workers -> control plane.
resource "nifcloud_security_group_rule" "kubectl_from_worker" {
  security_group_names = [nifcloud_security_group.cp.group_name]

  type                       = "IN"
  protocol                   = "TCP"
  from_port                  = local.port_kubectl
  to_port                    = local.port_kubectl
  source_security_group_name = nifcloud_security_group.wk.group_name
}
|
||||||
|
|
||||||
|
# kubelet
|
||||||
|
# kubelet API: workers -> control plane.
resource "nifcloud_security_group_rule" "kubelet_from_worker" {
  security_group_names = [nifcloud_security_group.cp.group_name]

  type                       = "IN"
  protocol                   = "TCP"
  from_port                  = local.port_kubelet
  to_port                    = local.port_kubelet
  source_security_group_name = nifcloud_security_group.wk.group_name
}
|
||||||
|
|
||||||
|
# kubelet API: control plane -> workers (logs, exec, metrics).
resource "nifcloud_security_group_rule" "kubelet_from_control_plane" {
  security_group_names = [nifcloud_security_group.wk.group_name]

  type                       = "IN"
  protocol                   = "TCP"
  from_port                  = local.port_kubelet
  to_port                    = local.port_kubelet
  source_security_group_name = nifcloud_security_group.cp.group_name
}
|
||||||
|
|
||||||
|
#################################################
|
||||||
|
##
|
||||||
|
## Security Group Rule: calico
|
||||||
|
##
|
||||||
|
|
||||||
|
# vxlan
|
||||||
|
# Calico VXLAN overlay traffic: control plane -> workers.
resource "nifcloud_security_group_rule" "vxlan_from_control_plane" {
  security_group_names = [nifcloud_security_group.wk.group_name]

  type                       = "IN"
  protocol                   = "UDP"
  from_port                  = local.port_vxlan
  to_port                    = local.port_vxlan
  source_security_group_name = nifcloud_security_group.cp.group_name
}
|
||||||
|
|
||||||
|
# Calico VXLAN overlay traffic: workers -> control plane.
resource "nifcloud_security_group_rule" "vxlan_from_worker" {
  security_group_names = [nifcloud_security_group.cp.group_name]

  type                       = "IN"
  protocol                   = "UDP"
  from_port                  = local.port_vxlan
  to_port                    = local.port_vxlan
  source_security_group_name = nifcloud_security_group.wk.group_name
}
|
||||||
|
|
||||||
|
# bgp
|
||||||
|
# Calico BGP peering: control plane -> workers.
resource "nifcloud_security_group_rule" "bgp_from_control_plane" {
  security_group_names = [nifcloud_security_group.wk.group_name]

  type                       = "IN"
  protocol                   = "TCP"
  from_port                  = local.port_bgp
  to_port                    = local.port_bgp
  source_security_group_name = nifcloud_security_group.cp.group_name
}
|
||||||
|
|
||||||
|
# Calico BGP peering: workers -> control plane.
resource "nifcloud_security_group_rule" "bgp_from_worker" {
  security_group_names = [nifcloud_security_group.cp.group_name]

  type                       = "IN"
  protocol                   = "TCP"
  from_port                  = local.port_bgp
  to_port                    = local.port_bgp
  source_security_group_name = nifcloud_security_group.wk.group_name
}
|
||||||
|
|
||||||
|
# etcd
|
||||||
|
# etcd client traffic: workers -> control plane (e.g. calico datastore access).
resource "nifcloud_security_group_rule" "etcd_from_worker" {
  security_group_names = [nifcloud_security_group.cp.group_name]

  type                       = "IN"
  protocol                   = "TCP"
  from_port                  = local.port_etcd
  to_port                    = local.port_etcd
  source_security_group_name = nifcloud_security_group.wk.group_name
}
|
||||||
@@ -0,0 +1,48 @@
|
|||||||
|
# DNS name of the load balancer fronting the Kubernetes API servers.
output "control_plane_lb" {
  description = "The DNS name of LB for control plane"
  value       = nifcloud_load_balancer.this.dns_name
}
|
||||||
|
|
||||||
|
# Map of role -> security group name, for wiring extra rules from outside
# this module.
output "security_group_name" {
  description = "The security group used in the cluster"
  value = {
    bastion       = nifcloud_security_group.bn.group_name,
    control_plane = nifcloud_security_group.cp.group_name,
    worker        = nifcloud_security_group.wk.group_name,
  }
}
|
||||||
|
|
||||||
|
# ID of the private LAN shared by all cluster nodes.
output "private_network_id" {
  description = "The private network used in the cluster"
  value       = nifcloud_private_lan.this.id
}
|
||||||
|
|
||||||
|
# Connection details for the (single) bastion host, keyed by its instance_id
# so the shape matches worker_info/control_plane_info.
output "bastion_info" {
  # Fix: description previously read "basion" (typo).
  description = "The bastion information in cluster"
  value = { (nifcloud_instance.bn.instance_id) : {
    instance_id = nifcloud_instance.bn.instance_id,
    unique_id   = nifcloud_instance.bn.unique_id,
    private_ip  = nifcloud_instance.bn.private_ip,
    public_ip   = nifcloud_instance.bn.public_ip,
  } }
}
|
||||||
|
|
||||||
|
# Connection details for every worker, keyed by instance_id.
output "worker_info" {
  description = "The worker information in cluster"
  value = { for v in nifcloud_instance.wk : v.instance_id => {
    instance_id = v.instance_id,
    unique_id   = v.unique_id,
    private_ip  = v.private_ip,
    public_ip   = v.public_ip,
  } }
}
|
||||||
|
|
||||||
|
# Connection details for every control-plane node, keyed by instance_id.
output "control_plane_info" {
  description = "The control plane information in cluster"
  value = { for v in nifcloud_instance.cp : v.instance_id => {
    instance_id = v.instance_id,
    unique_id   = v.unique_id,
    private_ip  = v.private_ip,
    public_ip   = v.public_ip,
  } }
}
|
||||||
@@ -0,0 +1,45 @@
|
|||||||
|
#!/bin/bash
# Userdata template rendered by Terraform's templatefile(); the $${...}-style
# placeholders (private_ip_address, ssh_port, hostname) are substituted before
# this script runs on first boot.

#################################################
##
## IP Address
##
# ens192 stays on DHCP (global network); ens224 gets the static private IP.
setup_private_ip () {
cat << EOS > /etc/netplan/01-netcfg.yaml
network:
  version: 2
  renderer: networkd
  ethernets:
    ens192:
      dhcp4: yes
      dhcp6: yes
      dhcp-identifier: mac
    ens224:
      dhcp4: no
      dhcp6: no
      addresses: [${private_ip_address}]
EOS
netplan apply
}
setup_private_ip

#################################################
##
## SSH
##
# Move sshd to the non-default port expected by the bastion rules.
setup_ssh_port () {
    sed -i 's/^#*Port [0-9]*/Port ${ssh_port}/' /etc/ssh/sshd_config
}
setup_ssh_port

#################################################
##
## Hostname
##
hostnamectl set-hostname ${hostname}

#################################################
##
## Disable swap files generated by systemd-gpt-auto-generator
##
systemctl mask "dev-sda3.swap"
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user