Mirror of https://github.com/kubernetes-sigs/kubespray.git
Synced 2025-12-14 22:04:43 +03:00

Compare commits (713 commits)
[Commit table omitted: the mirror renders the 713 compared commits only as abbreviated SHA1s, from 9051aa5296 through 71dabf9fb3, with empty Author and Date columns.]
.gitlab-ci.yml (168 lines changed)

@@ -7,7 +7,7 @@ stages:
 variables:
   FAILFASTCI_NAMESPACE: 'kargo-ci'
-  GITLAB_REPOSITORY: 'kargo-ci/kubernetes-incubator__kubespray'
+  GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
   # DOCKER_HOST: tcp://localhost:2375
   ANSIBLE_FORCE_COLOR: "true"
   MAGIC: "ci check this"
@@ -42,7 +42,7 @@ before_script:
   tags:
     - kubernetes
     - docker
-  image: quay.io/kubespray/kubespray:latest
+  image: quay.io/kubespray/kubespray:v2.7

 .docker_service: &docker_service
   services:
@@ -93,10 +93,10 @@ before_script:
   # Check out latest tag if testing upgrade
   # Uncomment when gitlab kubespray repo has tags
   #- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
-  - test "${UPGRADE_TEST}" != "false" && git checkout 02cd5418c22d51e40261775908d55bc562206023
+  - test "${UPGRADE_TEST}" != "false" && git checkout 53d87e53c5899d4ea2904ab7e3883708dd6363d3
   # Checkout the CI vars file so it is available
   - test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml
-  # Workaround https://github.com/kubernetes-incubator/kubespray/issues/2021
+  # Workaround https://github.com/kubernetes-sigs/kubespray/issues/2021
   - 'sh -c "echo ignore_assert_errors: true | tee -a tests/files/${CI_JOB_NAME}.yml"'

@@ -240,9 +240,9 @@ before_script:
   # stage: deploy-part1
   MOVED_TO_GROUP_VARS: "true"

-.ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables
+.ubuntu18_flannel_aio_variables: &ubuntu18_flannel_aio_variables
   # stage: deploy-part1
-  UPGRADE_TEST: "graceful"
+  MOVED_TO_GROUP_VARS: "true"

 .centos_weave_kubeadm_variables: &centos_weave_kubeadm_variables
   # stage: deploy-part1
@@ -252,6 +252,10 @@ before_script:
   # stage: deploy-part1
   MOVED_TO_GROUP_VARS: "true"

+.ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables
+  # stage: deploy-special
+  MOVED_TO_GROUP_VARS: "true"
+
 .ubuntu_contiv_sep_variables: &ubuntu_contiv_sep_variables
   # stage: deploy-special
   MOVED_TO_GROUP_VARS: "true"
@@ -263,7 +267,7 @@ before_script:
 .ubuntu_cilium_sep_variables: &ubuntu_cilium_sep_variables
   # stage: deploy-special
   MOVED_TO_GROUP_VARS: "true"

 .rhel7_weave_variables: &rhel7_weave_variables
   # stage: deploy-part1
   MOVED_TO_GROUP_VARS: "true"
@@ -272,7 +276,7 @@ before_script:
   # stage: deploy-part2
   MOVED_TO_GROUP_VARS: "true"

-.debian8_calico_variables: &debian8_calico_variables
+.debian9_calico_variables: &debian9_calico_variables
   # stage: deploy-part2
   MOVED_TO_GROUP_VARS: "true"

@@ -292,23 +296,31 @@ before_script:
   # stage: deploy-special
   MOVED_TO_GROUP_VARS: "true"

+.centos7_kube_router_variables: &centos7_kube_router_variables
+  # stage: deploy-special
+  MOVED_TO_GROUP_VARS: "true"
+
+.centos7_multus_calico_variables: &centos7_multus_calico_variables
+  # stage: deploy-part2
+  UPGRADE_TEST: "graceful"
+
 .coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables
   # stage: deploy-special
   MOVED_TO_GROUP_VARS: "true"

+.coreos_kube_router_variables: &coreos_kube_router_variables
+  # stage: deploy-special
+  MOVED_TO_GROUP_VARS: "true"
+
 .ubuntu_rkt_sep_variables: &ubuntu_rkt_sep_variables
   # stage: deploy-part1
   MOVED_TO_GROUP_VARS: "true"

-.ubuntu_vault_sep_variables: &ubuntu_vault_sep_variables
-  # stage: deploy-part1
+.ubuntu_flannel_variables: &ubuntu_flannel_variables
+  # stage: deploy-part2
   MOVED_TO_GROUP_VARS: "true"

-.coreos_vault_upgrade_variables: &coreos_vault_upgrade_variables
-  # stage: deploy-part1
-  UPGRADE_TEST: "basic"
-
-.ubuntu_flannel_variables: &ubuntu_flannel_variables
+.ubuntu_kube_router_variables: &ubuntu_kube_router_variables
   # stage: deploy-special
   MOVED_TO_GROUP_VARS: "true"

@@ -319,10 +331,24 @@ before_script:

 # Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
 ### PR JOBS PART1
-gce_coreos-calico-aio:
+gce_ubuntu18-flannel-aio:
   stage: deploy-part1
   <<: *job
   <<: *gce
+  variables:
+    <<: *ubuntu18_flannel_aio_variables
+    <<: *gce_variables
+  when: on_success
+  except: ['triggers']
+  only: [/^pr-.*$/]
+
+### PR JOBS PART2
+
+gce_coreos-calico-aio:
+  stage: deploy-part2
+  <<: *job
+  <<: *gce
   variables:
     <<: *coreos_calico_aio_variables
     <<: *gce_variables
@@ -330,7 +356,6 @@ gce_coreos-calico-aio:
   except: ['triggers']
   only: [/^pr-.*$/]

-### PR JOBS PART2
 gce_centos7-flannel-addons:
   stage: deploy-part2
   <<: *job
@@ -342,6 +367,30 @@ gce_centos7-flannel-addons:
   except: ['triggers']
   only: [/^pr-.*$/]

+gce_centos-weave-kubeadm-sep:
+  stage: deploy-part2
+  <<: *job
+  <<: *gce
+  variables:
+    <<: *gce_variables
+    <<: *centos_weave_kubeadm_variables
+  when: on_success
+  except: ['triggers']
+  only: [/^pr-.*$/]
+
+gce_ubuntu-flannel-ha:
+  stage: deploy-part2
+  <<: *job
+  <<: *gce
+  variables:
+    <<: *gce_variables
+    <<: *ubuntu_flannel_variables
+  when: on_success
+  except: ['triggers']
+  only: [/^pr-.*$/]
+
+### MANUAL JOBS
+
 gce_ubuntu-weave-sep:
   stage: deploy-part2
   <<: *job
@@ -349,11 +398,10 @@ gce_ubuntu-weave-sep:
   variables:
     <<: *gce_variables
     <<: *ubuntu_weave_sep_variables
-  when: on_success
+  when: manual
   except: ['triggers']
   only: [/^pr-.*$/]

-### MANUAL JOBS
 gce_coreos-calico-sep-triggers:
   stage: deploy-part2
   <<: *job
@@ -365,7 +413,7 @@ gce_coreos-calico-sep-triggers:
   only: ['triggers']

 gce_ubuntu-canal-ha-triggers:
-  stage: deploy-part2
+  stage: deploy-special
   <<: *job
   <<: *gce
   variables:
@@ -407,7 +455,7 @@ do_ubuntu-canal-ha:
   only: ['master', /^pr-.*$/]

 gce_ubuntu-canal-ha:
-  stage: deploy-part2
+  stage: deploy-special
   <<: *job
   <<: *gce
   variables:
@@ -438,17 +486,6 @@ gce_ubuntu-canal-kubeadm-triggers:
   when: on_success
   only: ['triggers']

-gce_centos-weave-kubeadm:
-  stage: deploy-part2
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *centos_weave_kubeadm_variables
-  when: manual
-  except: ['triggers']
-  only: ['master', /^pr-.*$/]
-
 gce_centos-weave-kubeadm-triggers:
   stage: deploy-part2
   <<: *job
@@ -513,24 +550,24 @@ gce_rhel7-weave-triggers:
   when: on_success
   only: ['triggers']

-gce_debian8-calico-upgrade:
+gce_debian9-calico-upgrade:
   stage: deploy-part2
   <<: *job
   <<: *gce
   variables:
     <<: *gce_variables
-    <<: *debian8_calico_variables
+    <<: *debian9_calico_variables
   when: manual
   except: ['triggers']
   only: ['master', /^pr-.*$/]

-gce_debian8-calico-triggers:
+gce_debian9-calico-triggers:
   stage: deploy-part2
   <<: *job
   <<: *gce
   variables:
     <<: *gce_variables
-    <<: *debian8_calico_variables
+    <<: *debian9_calico_variables
   when: on_success
   only: ['triggers']

@@ -597,6 +634,28 @@ gce_centos7-calico-ha-triggers:
   when: on_success
   only: ['triggers']

+gce_centos7-kube-router:
+  stage: deploy-special
+  <<: *job
+  <<: *gce
+  variables:
+    <<: *gce_variables
+    <<: *centos7_kube_router_variables
+  when: manual
+  except: ['triggers']
+  only: ['master', /^pr-.*$/]
+
+gce_centos7-multus-calico:
+  stage: deploy-part2
+  <<: *job
+  <<: *gce
+  variables:
+    <<: *gce_variables
+    <<: *centos7_multus_calico_variables
+  when: manual
+  except: ['triggers']
+  only: ['master', /^pr-.*$/]
+
 gce_opensuse-canal:
   stage: deploy-part2
   <<: *job
@@ -620,6 +679,17 @@ gce_coreos-alpha-weave-ha:
   except: ['triggers']
   only: ['master', /^pr-.*$/]

+gce_coreos-kube-router:
+  stage: deploy-special
+  <<: *job
+  <<: *gce
+  variables:
+    <<: *gce_variables
+    <<: *coreos_kube_router_variables
+  when: manual
+  except: ['triggers']
+  only: ['master', /^pr-.*$/]
+
 gce_ubuntu-rkt-sep:
   stage: deploy-part2
   <<: *job
@@ -631,35 +701,13 @@ gce_ubuntu-rkt-sep:
   except: ['triggers']
   only: ['master', /^pr-.*$/]

-gce_ubuntu-vault-sep:
-  stage: deploy-part2
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *ubuntu_vault_sep_variables
-  when: manual
-  except: ['triggers']
-  only: ['master', /^pr-.*$/]
-
-gce_coreos-vault-upgrade:
-  stage: deploy-part2
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *coreos_vault_upgrade_variables
-  when: manual
-  except: ['triggers']
-  only: ['master', /^pr-.*$/]
-
-gce_ubuntu-flannel-sep:
+gce_ubuntu-kube-router-sep:
   stage: deploy-special
   <<: *job
   <<: *gce
   variables:
     <<: *gce_variables
-    <<: *ubuntu_flannel_variables
+    <<: *ubuntu_kube_router_variables
   when: manual
   except: ['triggers']
   only: ['master', /^pr-.*$/]
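The `before_script` hunk above keeps a commented-out one-liner for checking out the latest tag during upgrade tests. Broken into steps, it reads as the following sketch (variable names are illustrative, not from the CI file):

```sh
# Decomposition of the commented-out "check out latest tag" line above.
latest_tagged_commit=$(git rev-list --tags --max-count=1)   # newest commit reachable from any tag
latest_tag=$(git describe --tags "$latest_tagged_commit")   # the tag name pointing at that commit
git checkout "$latest_tag"
```

Until the GitLab mirror carries tags, the CI instead pins the upgrade base to the fixed SHA 53d87e53c5899d4ea2904ab7e3883708dd6363d3 seen on the new side of the hunk.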
Dockerfile (12 lines changed)

@@ -6,11 +6,11 @@ RUN apt update -y && \
     apt install -y \
     libssl-dev python-dev sshpass apt-transport-https \
     ca-certificates curl gnupg2 software-properties-common python-pip
 RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
     add-apt-repository \
     "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
     $(lsb_release -cs) \
     stable" \
     && apt update -y && apt-get install docker-ce -y
 COPY . .
 RUN /usr/bin/python -m pip install pip -U && /usr/bin/python -m pip install -r tests/requirements.txt && python -m pip install -r requirements.txt
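This Dockerfile builds the CI image that `.gitlab-ci.yml` consumes (`quay.io/kubespray/kubespray:v2.7`): docker-ce plus the pip requirements on top of the repo contents. A minimal sketch of building and using it locally; the tag and the in-container mount path are assumptions, since the diff does not show the image's WORKDIR:

```sh
# Build the CI image locally (tag is illustrative).
docker build -t kubespray:local .

# Run a deploy from inside it; /kubespray/inventory is an assumed path --
# adjust it to wherever `COPY . .` landed in your build.
docker run --rm -it \
  -v "$PWD/inventory:/kubespray/inventory" \
  kubespray:local \
  ansible-playbook -i inventory/mycluster/hosts.ini -b cluster.yml
```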
Makefile (new file, 5 lines)

@@ -0,0 +1,5 @@
+mitogen:
+	ansible-playbook -c local mitogen.yaml -vv
+clean:
+	rm -rf dist/
+	rm *.retry
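Both targets are meant to be run from the repository root. The `mitogen` target runs a local playbook that presumably populates the `plugins/mitogen` tree referenced by the `strategy_plugins` line added to `ansible.cfg` further down:

```sh
make mitogen   # fetch and set up the Mitogen strategy plugin via the local playbook
make clean     # remove dist/ and any *.retry files left behind by failed runs
```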
OWNERS (12 lines changed)

@@ -1,9 +1,7 @@
 # See the OWNERS file documentation:
-# https://github.com/kubernetes/kubernetes/blob/master/docs/devel/owners.md
+# https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md

-owners:
-  - Smana
-  - ant31
-  - bogdando
-  - mattymo
-  - rsmitty
+approvers:
+  - kubespray-approvers
+reviewers:
+  - kubespray-reviewers
OWNERS_ALIASES (new file, 18 lines)

@@ -0,0 +1,18 @@
+aliases:
+  kubespray-approvers:
+    - ant31
+    - mattymo
+    - atoms
+    - chadswen
+    - rsmitty
+    - bogdando
+    - bradbeam
+    - woopstar
+    - riverzhang
+    - holser
+    - smana
+  kubespray-reviewers:
+    - jjungnickel
+    - archifleks
+    - chapsuk
+    - mirwan
README.md (76 lines changed)

@@ -1,11 +1,12 @@
 [Kubernetes logo image]

 Deploy a Production Ready Kubernetes Cluster
 ============================================

 If you have questions, join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
+You can get your invite [here](http://slack.k8s.io/)

-- Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere or Baremetal**
+- Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere, Oracle Cloud Infrastructure (Experimental), or Baremetal**
 - **Highly available** cluster
 - **Composable** (Choice of the network plugin for instance)
 - Supports most popular **Linux distributions**
@@ -18,6 +19,12 @@ To deploy the cluster you can use :

 ### Ansible

+#### Ansible version
+
+Ansible v2.7.0 is failing and/or produces unexpected results due to [ansible/ansible/issues/46600](https://github.com/ansible/ansible/issues/46600)
+
+#### Usage
+
     # Install dependencies from ``requirements.txt``
     sudo pip install -r requirements.txt
@@ -29,11 +36,24 @@ To deploy the cluster you can use :
     CONFIG_FILE=inventory/mycluster/hosts.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]}

     # Review and change parameters under ``inventory/mycluster/group_vars``
-    cat inventory/mycluster/group_vars/all.yml
-    cat inventory/mycluster/group_vars/k8s-cluster.yml
+    cat inventory/mycluster/group_vars/all/all.yml
+    cat inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml

-    # Deploy Kubespray with Ansible Playbook
-    ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml
+    # Deploy Kubespray with Ansible Playbook - run the playbook as root
+    # The option `-b` is required, as for example writing SSL keys in /etc/,
+    # installing packages and interacting with various systemd daemons.
+    # Without -b the playbook will fail to run!
+    ansible-playbook -i inventory/mycluster/hosts.ini --become --become-user=root cluster.yml
+
+Note: When Ansible is already installed via system packages on the control machine, other python packages installed via `sudo pip install -r requirements.txt` will go to a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on Ubuntu).
+As a consequence, the `ansible-playbook` command will fail with:
+```
+ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path.
+```
+probably pointing at a task that depends on a module present in requirements.txt (i.e. "unseal vault").
+
+One way of solving this would be to uninstall the Ansible package and then install it via pip, but that is not always possible.
+A workaround consists of setting the `ANSIBLE_LIBRARY` and `ANSIBLE_MODULE_UTILS` environment variables respectively to the `ansible/modules` and `ansible/module_utils` subdirectories of the pip packages' installation location (shown in the Location field of `pip show [package]`) before executing `ansible-playbook`.
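A sketch of that workaround as shell commands; the `awk` parsing of the `pip show` output is an assumption about your environment, not part of the README:

```sh
# Locate the pip-installed ansible package (Location field of `pip show`).
pip_location=$(pip show ansible | awk '/^Location:/ {print $2}')

# Point Ansible at the pip-installed modules instead of the system ones.
export ANSIBLE_LIBRARY="$pip_location/ansible/modules"
export ANSIBLE_MODULE_UTILS="$pip_location/ansible/module_utils"

ansible-playbook -i inventory/mycluster/hosts.ini --become --become-user=root cluster.yml
```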
 ### Vagrant

@@ -78,9 +98,10 @@ Supported Linux Distributions
 -----------------------------

 - **Container Linux by CoreOS**
-- **Debian** Jessie, Stretch, Wheezy
-- **Ubuntu** 16.04
+- **Debian** Buster, Jessie, Stretch, Wheezy
+- **Ubuntu** 16.04, 18.04
 - **CentOS/RHEL** 7
+- **Fedora** 28
 - **Fedora/CentOS** Atomic
 - **openSUSE** Leap 42.3/Tumbleweed

@@ -90,23 +111,27 @@ Supported Components
 --------------------

 - Core
-  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.10.4
-  - [etcd](https://github.com/coreos/etcd) v3.2.18
-  - [docker](https://www.docker.com/) v17.03 (see note)
+  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.12.3
+  - [etcd](https://github.com/coreos/etcd) v3.2.24
+  - [docker](https://www.docker.com/) v18.06 (see note)
   - [rkt](https://github.com/rkt/rkt) v1.21.0 (see Note 2)
+  - [cri-o](http://cri-o.io/) v1.11.5 (experimental: see [CRI-O Note](docs/cri-o.md). Only on centos based OS)
 - Network Plugin
-  - [calico](https://github.com/projectcalico/calico) v2.6.8
+  - [calico](https://github.com/projectcalico/calico) v3.1.3
   - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
-  - [cilium](https://github.com/cilium/cilium) v1.1.2
-  - [contiv](https://github.com/contiv/install) v1.1.7
+  - [cilium](https://github.com/cilium/cilium) v1.3.0
+  - [contiv](https://github.com/contiv/install) v1.2.1
   - [flanneld](https://github.com/coreos/flannel) v0.10.0
-  - [weave](https://github.com/weaveworks/weave) v2.4.0
+  - [kube-router](https://github.com/cloudnativelabs/kube-router) v0.2.1
+  - [multus](https://github.com/intel/multus-cni) v3.1.autoconf
+  - [weave](https://github.com/weaveworks/weave) v2.5.0
 - Application
-  - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v1.1.0-k8s1.10
-  - [cert-manager](https://github.com/jetstack/cert-manager) v0.4.0
-  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.17.1
+  - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
+  - [cert-manager](https://github.com/jetstack/cert-manager) v0.5.2
+  - [coredns](https://github.com/coredns/coredns) v1.2.6
+  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.21.0

-Note: kubernetes doesn't support newer docker versions. Among other things kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster, look into e.g. the yum versionlock plugin or apt pin.
+Note: The list of validated [docker versions](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.12.md) was updated to 1.11.1, 1.12.1, 1.13.1, 17.03, 17.06, 17.09, 18.06. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster, look into e.g. the yum versionlock plugin or apt pin.

 Note 2: rkt support as docker alternative is limited to control plane (etcd and
 kubelet). Docker is still used for Kubernetes cluster workloads and network
@@ -116,10 +141,10 @@ plugins can be deployed for a given single cluster.
 Requirements
 ------------

-- **Ansible v2.4 (or newer) and python-netaddr is installed on the machine
+- **Ansible v2.5 (or newer) and python-netaddr is installed on the machine
   that will run Ansible commands**
 - **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
-- The target servers must have **access to the Internet** in order to pull docker images.
+- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/downloads.md#offline-environment))
 - The target servers are configured to allow **IPv4 forwarding**.
 - **Your ssh key must be copied** to all the servers part of your inventory.
 - The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
@@ -147,6 +172,13 @@ You can choose between 6 network plugins. (default: `calico`, except Vagrant use
 - [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
   (Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html)).

+- [kube-router](docs/kube-router.md): Kube-router is a L3 CNI for Kubernetes networking aiming to provide operational
+  simplicity and high performance: it uses IPVS to provide Kube Services Proxy (if set up to replace kube-proxy),
+  iptables for network policies, and BGP for pods' L3 networking (with optional BGP peering with out-of-cluster BGP peers).
+  It can also optionally advertise routes to Kubernetes cluster Pods CIDRs, ClusterIPs, ExternalIPs and LoadBalancerIPs.
+
+- [multus](docs/multus.md): Multus is a meta CNI plugin that provides multiple network interface support to pods. For each interface Multus delegates CNI calls to secondary CNI plugins such as Calico, macvlan, etc.
+
 The choice is defined with the variable `kube_network_plugin`. There is also an
 option to leverage built-in cloud provider networking instead.
 See also [Network checker](docs/netcheck.md).
@@ -164,7 +196,7 @@ Tools and projects on top of Kubespray

 - [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/master/doc/integrations/ansible.rst)
 - [Fuel-ccp-installer](https://github.com/openstack/fuel-ccp-installer)
-- [Terraform Contrib](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/terraform)
+- [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform)

 CI Tests
 --------
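The README hunks above name `kube_network_plugin` as the selector for the newly documented kube-router and multus plugins. One hedged way to try a non-default plugin without editing `group_vars` is an `-e` override (values illustrative):

```sh
# Pick kube-router instead of the default calico for this run.
ansible-playbook -i inventory/mycluster/hosts.ini -b cluster.yml \
  -e kube_network_plugin=kube-router

# Multus rides on top of a primary plugin; the enabling variable name here
# is taken from the Vagrantfile host_vars shown below.
ansible-playbook -i inventory/mycluster/hosts.ini -b cluster.yml \
  -e kube_network_plugin=calico -e kube_network_plugin_multus=true
```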
Vagrantfile (150 lines changed, vendored)

@@ -1,6 +1,8 @@
 # -*- mode: ruby -*-
 # # vi: set ft=ruby :

+# For help on using kubespray with vagrant, check out docs/vagrant.md
+
 require 'fileutils'

 Vagrant.require_version ">= 2.0.0"
@@ -13,13 +15,16 @@ COREOS_URL_TEMPLATE = "https://storage.googleapis.com/%s.release.core-os.net/amd
 DISK_UUID = Time.now.utc.to_i

 SUPPORTED_OS = {
-  "coreos-stable" => {box: "coreos-stable", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["stable"]},
-  "coreos-alpha" => {box: "coreos-alpha", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["alpha"]},
-  "coreos-beta" => {box: "coreos-beta", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
-  "ubuntu" => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "vagrant"},
-  "centos" => {box: "centos/7", bootstrap_os: "centos", user: "vagrant"},
-  "opensuse" => {box: "opensuse/openSUSE-42.3-x86_64", bootstrap_os: "opensuse", use: "vagrant"},
-  "opensuse-tumbleweed" => {box: "opensuse/openSUSE-Tumbleweed-x86_64", bootstrap_os: "opensuse", use: "vagrant"},
+  "coreos-stable" => {box: "coreos-stable", user: "core", box_url: COREOS_URL_TEMPLATE % ["stable"]},
+  "coreos-alpha" => {box: "coreos-alpha", user: "core", box_url: COREOS_URL_TEMPLATE % ["alpha"]},
+  "coreos-beta" => {box: "coreos-beta", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
+  "ubuntu1604" => {box: "generic/ubuntu1604", user: "vagrant"},
+  "ubuntu1804" => {box: "generic/ubuntu1804", user: "vagrant"},
+  "centos" => {box: "centos/7", user: "vagrant"},
+  "centos-bento" => {box: "bento/centos-7.5", user: "vagrant"},
+  "fedora" => {box: "fedora/28-cloud-base", user: "vagrant"},
+  "opensuse" => {box: "opensuse/openSUSE-42.3-x86_64", user: "vagrant"},
+  "opensuse-tumbleweed" => {box: "opensuse/openSUSE-Tumbleweed-x86_64", user: "vagrant"},
 }

 # Defaults for config options defined in CONFIG
@@ -31,8 +36,10 @@ $vm_cpus = 1
 $shared_folders = {}
 $forwarded_ports = {}
 $subnet = "172.17.8"
-$os = "ubuntu"
+$os = "ubuntu1804"
 $network_plugin = "flannel"
+# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
+$multi_networking = false
 # The first three nodes are etcd servers
 $etcd_instances = $num_instances
 # The first two nodes are kube masters
@@ -44,7 +51,7 @@ $kube_node_instances_with_disks = false
 $kube_node_instances_with_disks_size = "20G"
 $kube_node_instances_with_disks_number = 2

-$local_release_dir = "/vagrant/temp"
+$playbook = "cluster.yml"

 host_vars = {}

@@ -54,13 +61,13 @@ end

 $box = SUPPORTED_OS[$os][:box]
 # if $inventory is not set, try to use example
-$inventory = File.join(File.dirname(__FILE__), "inventory", "sample") if ! $inventory
+$inventory = "inventory/sample" if ! $inventory
+$inventory = File.absolute_path($inventory, File.dirname(__FILE__))

-# if $inventory has a hosts file use it, otherwise copy over vars etc
-# to where vagrant expects dynamic inventory to be.
-if ! File.exist?(File.join(File.dirname($inventory), "hosts"))
-  $vagrant_ansible = File.join(File.dirname(__FILE__), ".vagrant",
-    "provisioners", "ansible")
+# if $inventory has a hosts.ini file use it, otherwise copy over
+# vars etc to where vagrant expects dynamic inventory to be
+if ! File.exist?(File.join(File.dirname($inventory), "hosts.ini"))
+  $vagrant_ansible = File.join(File.dirname(__FILE__), ".vagrant", "provisioners", "ansible")
   FileUtils.mkdir_p($vagrant_ansible) if ! File.exist?($vagrant_ansible)
   if ! File.exist?(File.join($vagrant_ansible,"inventory"))
     FileUtils.ln_s($inventory, File.join($vagrant_ansible,"inventory"))
@@ -75,76 +82,60 @@ if Vagrant.has_plugin?("vagrant-proxyconf")
 end

 Vagrant.configure("2") do |config|
-  # always use Vagrants insecure key
-  config.ssh.insert_key = false
   config.vm.box = $box
   if SUPPORTED_OS[$os].has_key? :box_url
     config.vm.box_url = SUPPORTED_OS[$os][:box_url]
   end
   config.ssh.username = SUPPORTED_OS[$os][:user]

   # plugin conflict
   if Vagrant.has_plugin?("vagrant-vbguest") then
     config.vbguest.auto_update = false
   end

+  # always use Vagrants insecure key
+  config.ssh.insert_key = false
+
   (1..$num_instances).each do |i|
-    config.vm.define vm_name = "%s-%02d" % [$instance_name_prefix, i] do |config|
-      config.vm.hostname = vm_name
+    config.vm.define vm_name = "%s-%01d" % [$instance_name_prefix, i] do |node|
+      node.vm.hostname = vm_name
+
       if Vagrant.has_plugin?("vagrant-proxyconf")
-        config.proxy.http = ENV['HTTP_PROXY'] || ENV['http_proxy'] || ""
-        config.proxy.https = ENV['HTTPS_PROXY'] || ENV['https_proxy'] || ""
-        config.proxy.no_proxy = $no_proxy
+        node.proxy.http = ENV['HTTP_PROXY'] || ENV['http_proxy'] || ""
+        node.proxy.https = ENV['HTTPS_PROXY'] || ENV['https_proxy'] || ""
+        node.proxy.no_proxy = $no_proxy
       end

-      if $expose_docker_tcp
-        config.vm.network "forwarded_port", guest: 2375, host: ($expose_docker_tcp + i - 1), auto_correct: true
-      end
-
-      $forwarded_ports.each do |guest, host|
-        config.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true
-      end
-
       ["vmware_fusion", "vmware_workstation"].each do |vmware|
-        config.vm.provider vmware do |v|
+        node.vm.provider vmware do |v|
           v.vmx['memsize'] = $vm_memory
           v.vmx['numvcpus'] = $vm_cpus
         end
       end

-      config.vm.synced_folder ".", "/vagrant", type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
-
-      $shared_folders.each do |src, dst|
-        config.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
-      end
-
-      config.vm.provider :virtualbox do |vb|
-        vb.gui = $vm_gui
+      node.vm.provider :virtualbox do |vb|
         vb.memory = $vm_memory
         vb.cpus = $vm_cpus
+        vb.gui = $vm_gui
+        vb.linked_clone = true
       end

-      config.vm.provider :libvirt do |lv|
+      node.vm.provider :libvirt do |lv|
         lv.memory = $vm_memory
-      end
-
-      ip = "#{$subnet}.#{i+100}"
-      host_vars[vm_name] = {
-        "ip": ip,
-        "bootstrap_os": SUPPORTED_OS[$os][:bootstrap_os],
-        "local_release_dir" => $local_release_dir,
-        "download_run_once": "False",
-        "kube_network_plugin": $network_plugin
-      }
-
-      config.vm.network :private_network, ip: ip
-
-      # Disable swap for each vm
-      config.vm.provision "shell", inline: "swapoff -a"
+        lv.cpus = $vm_cpus
+        lv.default_prefix = 'kubespray'
+        # Fix kernel panic on fedora 28
+        if $os == "fedora"
+          lv.cpu_mode = "host-passthrough"
+        end
+      end

       if $kube_node_instances_with_disks
         # Libvirt
         driverletters = ('a'..'z').to_a
-        config.vm.provider :libvirt do |lv|
+        node.vm.provider :libvirt do |lv|
           # always make /dev/sd{a/b/c} so that CI can ensure that
           # virtualbox and libvirt will have the same devices to use for OSDs
           (1..$kube_node_instances_with_disks_number).each do |d|
@@ -153,24 +144,51 @@ Vagrant.configure("2") do |config|
         end
       end

-      # Only execute once the Ansible provisioner,
-      # when all the machines are up and ready.
+      if $expose_docker_tcp
+        node.vm.network "forwarded_port", guest: 2375, host: ($expose_docker_tcp + i - 1), auto_correct: true
+      end
+
+      $forwarded_ports.each do |guest, host|
+        node.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true
+      end
+
+      node.vm.synced_folder ".", "/vagrant", disabled: false, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] , rsync__exclude: ['.git','venv']
+
+      $shared_folders.each do |src, dst|
+        node.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
+      end
+
+      ip = "#{$subnet}.#{i+100}"
+      node.vm.network :private_network, ip: ip
+
+      # Disable swap for each vm
+      node.vm.provision "shell", inline: "swapoff -a"
+
+      host_vars[vm_name] = {
+        "ip": ip,
+        "kube_network_plugin": $network_plugin,
+        "kube_network_plugin_multus": $multi_networking,
+        "docker_keepcache": "1",
+        "download_run_once": "True",
+        "download_localhost": "False"
+      }
+
+      # Only execute the Ansible provisioner once, when all the machines are up and ready.
       if i == $num_instances
-        config.vm.provision "ansible" do |ansible|
-          ansible.playbook = "cluster.yml"
-          if File.exist?(File.join(File.dirname($inventory), "hosts"))
+        node.vm.provision "ansible" do |ansible|
+          ansible.playbook = $playbook
+          if File.exist?(File.join( $inventory, "hosts.ini"))
             ansible.inventory_path = $inventory
           end
           ansible.become = true
           ansible.limit = "all"
           ansible.host_key_checking = false
-          ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache"]
+          ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache", "--ask-become-pass"]
           ansible.host_vars = host_vars
           #ansible.tags = ['download']
           ansible.groups = {
-            "etcd" => ["#{$instance_name_prefix}-0[1:#{$etcd_instances}]"],
-            "kube-master" => ["#{$instance_name_prefix}-0[1:#{$kube_master_instances}]"],
-            "kube-node" => ["#{$instance_name_prefix}-0[1:#{$kube_node_instances}]"],
+            "etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
+            "kube-master" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
+            "kube-node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
             "k8s-cluster:children" => ["kube-master", "kube-node"],
           }
         end
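Given the rewritten defaults above (`$os = "ubuntu1804"`, `$playbook = "cluster.yml"`, `$multi_networking = false`), a local override might look like the following sketch. The `vagrant/config.rb` path is an assumption based on the "config options defined in CONFIG" comment; check docs/vagrant.md for the actual location:

```sh
# Hypothetical CONFIG override: pick another supported box and enable Multus.
mkdir -p vagrant
cat > vagrant/config.rb <<'EOF'
$os = "centos"
$multi_networking = true
EOF
vagrant up
```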
@@ -3,6 +3,8 @@ pipelining=True
|
|||||||
ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
|
ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
|
||||||
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
|
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
|
||||||
[defaults]
|
[defaults]
|
||||||
|
strategy_plugins = plugins/mitogen/ansible_mitogen/plugins/strategy
|
||||||
|
|
||||||
host_key_checking=False
|
host_key_checking=False
|
||||||
gathering = smart
|
gathering = smart
|
||||||
fact_caching = jsonfile
|
fact_caching = jsonfile
|
||||||
@@ -13,3 +15,5 @@ callback_whitelist = profile_tasks
|
|||||||
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
|
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
|
||||||
deprecation_warnings=False
|
deprecation_warnings=False
|
||||||
inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds
|
inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds
|
||||||
|
[inventory]
|
||||||
|
ignore_patterns = artifacts, credentials
|
||||||
|
cluster.yml (52 lines changed)

@@ -1,5 +1,33 @@
 ---
 - hosts: localhost
+  gather_facts: false
+  tasks:
+    - name: "Check ansible version !=2.7.0"
+      assert:
+        msg: "Ansible V2.7.0 can't be used until: https://github.com/ansible/ansible/issues/46600 is fixed"
+        that:
+          - ansible_version.string is version("2.7.0", "!=")
+          - ansible_version.string is version("2.5.0", ">=")
+      tags:
+        - check
+  vars:
+    ansible_connection: local
+
+- hosts: localhost
+  gather_facts: false
+  tasks:
+    - name: deploy warning for non kubeadm
+      debug:
+        msg: "DEPRECATION: non-kubeadm deployment is deprecated from v2.9. Will be removed in next release."
+      when: not kubeadm_enabled and not skip_non_kubeadm_warning
+
+    - name: deploy cluster for non kubeadm
+      pause:
+        prompt: "Are you sure you want to deploy cluster using the deprecated non-kubeadm mode."
+        echo: no
+      when: not kubeadm_enabled and not skip_non_kubeadm_warning
+
+- hosts: bastion[0]
   gather_facts: False
   roles:
     - { role: kubespray-defaults}
@@ -33,20 +61,10 @@
   roles:
     - { role: kubespray-defaults}
     - { role: kubernetes/preinstall, tags: preinstall }
-    - { role: docker, tags: docker, when: manage_docker|default(true) }
+    - { role: "container-engine", tags: "container-engine", when: deploy_container_engine|default(true) }
-    - role: rkt
-      tags: rkt
-      when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"
     - { role: download, tags: download, when: "not skip_downloads" }
   environment: "{{proxy_env}}"

-- hosts: etcd:k8s-cluster:vault:calico-rr
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  roles:
-    - { role: kubespray-defaults, when: "cert_management == 'vault'" }
-    - { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" }
-  environment: "{{proxy_env}}"

 - hosts: etcd
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
@@ -59,13 +77,6 @@
     - { role: kubespray-defaults}
     - { role: etcd, tags: etcd, etcd_cluster_setup: false, etcd_events_cluster_setup: false }

-- hosts: etcd:k8s-cluster:vault:calico-rr
-  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
-  roles:
-    - { role: kubespray-defaults}
-    - { role: vault, tags: vault, when: "cert_management == 'vault'"}
-  environment: "{{proxy_env}}"
-
 - hosts: k8s-cluster
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
@@ -93,6 +104,7 @@
   roles:
     - { role: kubespray-defaults}
     - { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
+    - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"], when: "kubeadm_enabled" }

 - hosts: kube-master
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@@ -114,10 +126,10 @@
   roles:
     - { role: kubespray-defaults}
     - { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq }
-    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf }
+    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
   environment: "{{proxy_env}}"

-- hosts: kube-master[0]
+- hosts: kube-master
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults}
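The new pre-flight play relies on Jinja2's `version` test to pin the supported Ansible range; a minimal standalone sketch of the same check (version bounds copied from the hunk above):

```
- hosts: localhost
  gather_facts: false
  tasks:
    - name: Refuse to run on Ansible 2.7.0 or anything older than 2.5.0
      assert:
        that:
          - ansible_version.string is version("2.7.0", "!=")
          - ansible_version.string is version("2.5.0", ">=")
```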
@@ -1,5 +1,6 @@
 #!/usr/bin/env python

+from __future__ import print_function
 import boto3
 import os
 import argparse
@@ -13,7 +14,7 @@ class SearchEC2Tags(object):
     self.search_tags()
     if self.args.host:
       data = {}
-      print json.dumps(data, indent=2)
+      print(json.dumps(data, indent=2))

   def parse_args(self):

@@ -44,18 +45,29 @@ class SearchEC2Tags(object):

       instances = ec2.instances.filter(Filters=[{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}])
       for instance in instances:
+
+        ##Suppose default vpc_visibility is private
+        dns_name = instance.private_dns_name
+        ansible_host = {
+          'ansible_ssh_host': instance.private_ip_address
+        }
+
+        ##Override when vpc_visibility actually is public
         if self.vpc_visibility == "public":
-          hosts[group].append(instance.public_dns_name)
+          dns_name = instance.public_dns_name
-          hosts['_meta']['hostvars'][instance.public_dns_name] = {
+          ansible_host = {
             'ansible_ssh_host': instance.public_ip_address
-          }
-        else:
-          hosts[group].append(instance.private_dns_name)
-          hosts['_meta']['hostvars'][instance.private_dns_name] = {
-            'ansible_ssh_host': instance.private_ip_address
           }
+
+        ##Set when instance actually has node_labels
+        node_labels_tag = list(filter(lambda t: t['Key'] == 'kubespray-node-labels', instance.tags))
+        if node_labels_tag:
+          ansible_host['node_labels'] = dict([ label.strip().split('=') for label in node_labels_tag[0]['Value'].split(',') ])
+
+        hosts[group].append(dns_name)
+        hosts['_meta']['hostvars'][dns_name] = ansible_host

     hosts['k8s-cluster'] = {'children':['kube-master', 'kube-node']}
-    print json.dumps(hosts, sort_keys=True, indent=2)
+    print(json.dumps(hosts, sort_keys=True, indent=2))

 SearchEC2Tags()
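With this refactor every instance gets a single `ansible_host` dict, optionally extended with `node_labels` parsed from a comma-separated `kubespray-node-labels` EC2 tag (e.g. `tier=db,zone=a` becomes `{'tier': 'db', 'zone': 'a'}`). A sketch of consuming that hostvar from a play (the play itself is illustrative, not part of the commit):

```
- hosts: kube-node
  gather_facts: false
  tasks:
    - name: Show labels parsed from the kubespray-node-labels EC2 tag
      debug:
        var: node_labels
      when: node_labels is defined
```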
@@ -1,5 +1,5 @@

 # Due to some Azure limitations (ex:- Storage Account's name must be unique),
 # this name must be globally unique - it will be used as a prefix for azure components
 cluster_name: example

@@ -7,6 +7,10 @@ cluster_name: example
 # node that can be used to access the masters and minions
 use_bastion: false
+
+# Set this to a preferred name that will be used as the first part of the dns name for your bastion host. For example: k8s-bastion.<azureregion>.cloudapp.azure.com.
+# This is convenient when exceptions have to be configured on a firewall to allow ssh to the given bastion host.
+# bastion_domain_prefix: k8s-bastion
+
 number_of_k8s_masters: 3
 number_of_k8s_nodes: 3

@@ -20,7 +24,8 @@ admin_username: devops
 admin_password: changeme

 # MAKE SURE TO CHANGE THIS TO YOUR PUBLIC KEY to access your azure machines
-ssh_public_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLRzcxbsFDdEibiyXCSdIFh7bKbXso1NqlKjEyPTptf3aBXHEhVil0lJRjGpTlpfTy7PHvXFbXIOCdv9tOmeH1uxWDDeZawgPFV6VSZ1QneCL+8bxzhjiCn8133wBSPZkN8rbFKd9eEUUBfx8ipCblYblF9FcidylwtMt5TeEmXk8yRVkPiCuEYuDplhc2H0f4PsK3pFb5aDVdaDT3VeIypnOQZZoUxHWqm6ThyHrzLJd3SrZf+RROFWW1uInIDf/SZlXojczUYoffxgT1lERfOJCHJXsqbZWugbxQBwqsVsX59+KPxFFo6nV88h3UQr63wbFx52/MXkX4WrCkAHzN ablock-vwfs@dell-lappy"
+ssh_public_keys:
+  - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLRzcxbsFDdEibiyXCSdIFh7bKbXso1NqlKjEyPTptf3aBXHEhVil0lJRjGpTlpfTy7PHvXFbXIOCdv9tOmeH1uxWDDeZawgPFV6VSZ1QneCL+8bxzhjiCn8133wBSPZkN8rbFKd9eEUUBfx8ipCblYblF9FcidylwtMt5TeEmXk8yRVkPiCuEYuDplhc2H0f4PsK3pFb5aDVdaDT3VeIypnOQZZoUxHWqm6ThyHrzLJd3SrZf+RROFWW1uInIDf/SZlXojczUYoffxgT1lERfOJCHJXsqbZWugbxQBwqsVsX59+KPxFFo6nV88h3UQr63wbFx52/MXkX4WrCkAHzN ablock-vwfs@dell-lappy"

 # Disable using ssh using password. Change it to false to allow to connect to ssh by password
 disablePasswordAuthentication: true
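Since `ssh_public_key` is now the list `ssh_public_keys`, several admin keys can be pushed to the Azure VMs at once; a sketch (the key material below is a placeholder):

```
ssh_public_keys:
  - "ssh-rsa AAAA...first-key admin1@example"
  - "ssh-rsa AAAA...second-key admin2@example"
```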
@@ -1,5 +1,5 @@

 {% for vm in vm_ip_list %}
 {% if not use_bastion or vm.virtualMachine.name == 'bastion' %}
 {{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.publicIpAddresses[0].ipAddress }} ip={{ vm.virtualMachine.network.privateIpAddresses[0] }}
 {% else %}
@@ -15,7 +15,12 @@
 "name": "{{bastionIPAddressName}}",
 "location": "[resourceGroup().location]",
 "properties": {
-  "publicIPAllocationMethod": "Static"
+  "publicIPAllocationMethod": "Static",
+  "dnsSettings": {
+    {% if bastion_domain_prefix %}
+    "domainNameLabel": "{{ bastion_domain_prefix }}"
+    {% endif %}
+  }
 }
 },
 {
@@ -66,10 +71,12 @@
 "disablePasswordAuthentication": "true",
 "ssh": {
   "publicKeys": [
+    {% for key in ssh_public_keys %}
     {
       "path": "{{sshKeyPath}}",
-      "keyData": "{{ssh_public_key}}"
+      "keyData": "{{key}}"
-    }
+    }{% if loop.index < ssh_public_keys | length %},{% endif %}
+    {% endfor %}
   ]
 }
 }
@@ -162,10 +162,12 @@
 "disablePasswordAuthentication": "{{disablePasswordAuthentication}}",
 "ssh": {
   "publicKeys": [
+    {% for key in ssh_public_keys %}
     {
       "path": "{{sshKeyPath}}",
-      "keyData": "{{ssh_public_key}}"
+      "keyData": "{{key}}"
-    }
+    }{% if loop.index < ssh_public_keys | length %},{% endif %}
+    {% endfor %}
   ]
 }
 }
@@ -79,10 +79,12 @@
 "disablePasswordAuthentication": "{{disablePasswordAuthentication}}",
 "ssh": {
   "publicKeys": [
+    {% for key in ssh_public_keys %}
     {
       "path": "{{sshKeyPath}}",
-      "keyData": "{{ssh_public_key}}"
+      "keyData": "{{key}}"
-    }
+    }{% if loop.index < ssh_public_keys | length %},{% endif %}
+    {% endfor %}
   ]
 }
 }
contrib/dind/README.md (new file, 176 lines)

@@ -0,0 +1,176 @@
# Kubespray DIND experimental setup

This ansible playbook creates local docker containers
to serve as Kubernetes "nodes", which in turn will run
"normal" Kubernetes docker containers, a mode usually
called DIND (Docker-IN-Docker).

The playbook has two roles:
- dind-host: creates the "nodes" as containers in localhost, with
  appropriate settings for DIND (privileged, volume mapping for dind
  storage, etc).
- dind-cluster: customizes each node container to have required
  system packages installed, and some utils (swapoff, lsattr)
  symlinked to /bin/true to ease mimicking a real node.

This playbook has been tested with Ubuntu 16.04 as the host and ubuntu:16.04
as docker images (note that dind-cluster has specific customization
for these images).

The playbook also creates a `/tmp/kubespray.dind.inventory_builder.sh`
helper (wraps up running `contrib/inventory_builder/inventory.py` with
node containers IPs and prefix).

## Deploying

See below for a complete successful run:

1. Create the node containers

~~~~
# From the kubespray root dir
cd contrib/dind
pip install -r requirements.txt

ansible-playbook -i hosts dind-cluster.yaml

# Back to kubespray root
cd ../..
~~~~

NOTE: if the playbook run fails with an error message like the one below,
you may need to specifically set `ansible_python_interpreter`;
see the `./hosts` file for an example expanded localhost entry.

~~~
failed: [localhost] (item=kube-node1) => {"changed": false, "item": "kube-node1", "msg": "Failed to import docker or docker-py - No module named requests.exceptions. Try `pip install docker` or `pip install docker-py` (Python 2.6)"}
~~~

2. Customize kubespray-dind.yaml

Note that there's coupling between the node containers created above
and the `kubespray-dind.yaml` settings, in particular regarding the selected
`node_distro` (as set in `group_vars/all/all.yaml`), and docker settings.

~~~
$EDITOR contrib/dind/kubespray-dind.yaml
~~~

3. Prepare the inventory and run the playbook

~~~
INVENTORY_DIR=inventory/local-dind
mkdir -p ${INVENTORY_DIR}
rm -f ${INVENTORY_DIR}/hosts.ini
CONFIG_FILE=${INVENTORY_DIR}/hosts.ini /tmp/kubespray.dind.inventory_builder.sh

ansible-playbook --become -e ansible_ssh_user=debian -i ${INVENTORY_DIR}/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml
~~~

NOTE: You could also test other distros without editing files by
passing `--extra-vars` as per the command lines below,
replacing `DISTRO` by either `debian`, `ubuntu`, `centos`, `fedora`:

~~~
cd contrib/dind
ansible-playbook -i hosts dind-cluster.yaml --extra-vars node_distro=DISTRO

cd ../..
CONFIG_FILE=inventory/local-dind/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
ansible-playbook --become -e ansible_ssh_user=DISTRO -i inventory/local-dind/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml --extra-vars bootstrap_os=DISTRO
~~~

## Resulting deployment

See below to get an idea of what a completed deployment looks like,
from the host where you ran the kubespray playbooks.

### node_distro: debian

Running from an Ubuntu Xenial host:

~~~
$ uname -a
Linux ip-xx-xx-xx-xx 4.4.0-1069-aws #79-Ubuntu SMP Mon Sep 24
15:01:41 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux

$ docker ps
CONTAINER ID  IMAGE       COMMAND                 CREATED         STATUS         PORTS  NAMES
1835dd183b75  debian:9.5  "sh -c 'apt-get -qy …"  43 minutes ago  Up 43 minutes         kube-node5
30b0af8d2924  debian:9.5  "sh -c 'apt-get -qy …"  43 minutes ago  Up 43 minutes         kube-node4
3e0d1510c62f  debian:9.5  "sh -c 'apt-get -qy …"  43 minutes ago  Up 43 minutes         kube-node3
738993566f94  debian:9.5  "sh -c 'apt-get -qy …"  44 minutes ago  Up 44 minutes         kube-node2
c581ef662ed2  debian:9.5  "sh -c 'apt-get -qy …"  44 minutes ago  Up 44 minutes         kube-node1

$ docker exec kube-node1 kubectl get node
NAME         STATUS  ROLES        AGE  VERSION
kube-node1   Ready   master,node  18m  v1.12.1
kube-node2   Ready   master,node  17m  v1.12.1
kube-node3   Ready   node         17m  v1.12.1
kube-node4   Ready   node         17m  v1.12.1
kube-node5   Ready   node         17m  v1.12.1

$ docker exec kube-node1 kubectl get pod --all-namespaces
NAMESPACE    NAME                                   READY  STATUS   RESTARTS  AGE
default      netchecker-agent-67489                 1/1    Running  0         2m51s
default      netchecker-agent-6qq6s                 1/1    Running  0         2m51s
default      netchecker-agent-fsw92                 1/1    Running  0         2m51s
default      netchecker-agent-fw6tl                 1/1    Running  0         2m51s
default      netchecker-agent-hostnet-8f2zb         1/1    Running  0         3m
default      netchecker-agent-hostnet-gq7ml         1/1    Running  0         3m
default      netchecker-agent-hostnet-jfkgv         1/1    Running  0         3m
default      netchecker-agent-hostnet-kwfwx         1/1    Running  0         3m
default      netchecker-agent-hostnet-r46nm         1/1    Running  0         3m
default      netchecker-agent-lxdrn                 1/1    Running  0         2m51s
default      netchecker-server-864bd4c897-9vstl     1/1    Running  0         2m40s
default      sh-68fcc6db45-qf55h                    1/1    Running  1         12m
kube-system  coredns-7598f59475-6vknq               1/1    Running  0         14m
kube-system  coredns-7598f59475-l5q5x               1/1    Running  0         14m
kube-system  kube-apiserver-kube-node1              1/1    Running  0         17m
kube-system  kube-apiserver-kube-node2              1/1    Running  0         18m
kube-system  kube-controller-manager-kube-node1     1/1    Running  0         18m
kube-system  kube-controller-manager-kube-node2     1/1    Running  0         18m
kube-system  kube-proxy-5xx9d                       1/1    Running  0         17m
kube-system  kube-proxy-cdqq4                       1/1    Running  0         17m
kube-system  kube-proxy-n64ls                       1/1    Running  0         17m
kube-system  kube-proxy-pswmj                       1/1    Running  0         18m
kube-system  kube-proxy-x89qw                       1/1    Running  0         18m
kube-system  kube-scheduler-kube-node1              1/1    Running  4         17m
kube-system  kube-scheduler-kube-node2              1/1    Running  4         18m
kube-system  kubernetes-dashboard-5db4d9f45f-548rl  1/1    Running  0         14m
kube-system  nginx-proxy-kube-node3                 1/1    Running  4         17m
kube-system  nginx-proxy-kube-node4                 1/1    Running  4         17m
kube-system  nginx-proxy-kube-node5                 1/1    Running  4         17m
kube-system  weave-net-42bfr                        2/2    Running  0         16m
kube-system  weave-net-6gt8m                        2/2    Running  0         16m
kube-system  weave-net-88nnc                        2/2    Running  0         16m
kube-system  weave-net-shckr                        2/2    Running  0         16m
kube-system  weave-net-xr46t                        2/2    Running  0         16m

$ docker exec kube-node1 curl -s http://localhost:31081/api/v1/connectivity_check
{"Message":"All 10 pods successfully reported back to the server","Absent":null,"Outdated":null}
~~~

## Using ./run-test-distros.sh

You can use `./run-test-distros.sh` to run a set of tests via DIND;
an excerpt from this script, to get an idea:

~~~
# The SPEC file(s) must have two arrays as e.g.
# DISTROS=(debian centos)
# EXTRAS=(
#   'kube_network_plugin=calico'
#   'kube_network_plugin=flannel'
#   'kube_network_plugin=weave'
# )
# that will be tested in a "combinatory" way (e.g. from above there'll
# be 6 test runs), creating a sequenced <spec_filename>-nn.out with each output.
#
# Each $EXTRAS element will be whitespace split, and passed as --extra-vars
# to the main kubespray ansible-playbook run.
~~~

See e.g. `test-some_distros-most_CNIs.env` and
`test-some_distros-kube_router_combo.env` in particular for a richer
set of CNI specific `--extra-vars` combos.
contrib/dind/dind-cluster.yaml (new file, 9 lines)

@@ -0,0 +1,9 @@
---
- hosts: localhost
  gather_facts: False
  roles:
    - { role: dind-host }

- hosts: containers
  roles:
    - { role: dind-cluster }

contrib/dind/group_vars/all/all.yaml (new file, 2 lines)

@@ -0,0 +1,2 @@
# See distro.yaml for supported node_distro images
node_distro: debian

contrib/dind/group_vars/all/distro.yaml (new file, 40 lines)

@@ -0,0 +1,40 @@
distro_settings:
  debian: &DEBIAN
    image: "debian:9.5"
    user: "debian"
    pid1_exe: /lib/systemd/systemd
    init: |
      sh -c "apt-get -qy update && apt-get -qy install systemd-sysv dbus && exec /sbin/init"
    raw_setup: apt-get -qy update && apt-get -qy install dbus python sudo iproute2
    raw_setup_done: test -x /usr/bin/sudo
    agetty_svc: getty@*
    ssh_service: ssh
    extra_packages: []
  ubuntu:
    <<: *DEBIAN
    image: "ubuntu:16.04"
    user: "ubuntu"
    init: |
      /sbin/init
  centos: &CENTOS
    image: "centos:7"
    user: "centos"
    pid1_exe: /usr/lib/systemd/systemd
    init: |
      /sbin/init
    raw_setup: yum -qy install policycoreutils dbus python sudo iproute iptables
    raw_setup_done: test -x /usr/bin/sudo
    agetty_svc: getty@* serial-getty@*
    ssh_service: sshd
    extra_packages: []
  fedora:
    <<: *CENTOS
    image: "fedora:latest"
    user: "fedora"
    raw_setup: yum -qy install policycoreutils dbus python sudo iproute iptables; mkdir -p /etc/modules-load.d
    extra_packages:
      - hostname
      - procps
      - findutils
      - kmod
      - iputils
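The `&DEBIAN`/`<<: *DEBIAN` pairs in distro.yaml are plain YAML anchors and merge keys: `ubuntu` inherits every `debian` setting and overrides only `image`, `user` and `init`. A minimal sketch of the same mechanism (the names here are illustrative, not from the repo):

```
base: &BASE             # anchor captures this whole mapping
  image: "debian:9.5"
  user: "debian"
derived:
  <<: *BASE             # merge key copies all keys from the anchor...
  image: "ubuntu:16.04" # ...and local keys override the copied ones
# derived resolves to {image: "ubuntu:16.04", user: "debian"}
```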
contrib/dind/hosts (new file, 15 lines)

@@ -0,0 +1,15 @@
[local]
# If you created a virtualenv for ansible, you may need to specify running the
# python binary from there instead:
#localhost ansible_connection=local ansible_python_interpreter=/home/user/kubespray/.venv/bin/python
localhost ansible_connection=local

[containers]
kube-node1
kube-node2
kube-node3
kube-node4
kube-node5

[containers:vars]
ansible_connection=docker
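For reference, a sketch of the same inventory in Ansible's YAML format (equivalent wiring, not part of the commit): the docker connection plugin lets the plays address each node container by name, without ssh.

```
all:
  children:
    containers:
      vars:
        ansible_connection: docker   # talk to containers via `docker exec`
      hosts:
        kube-node1:
        kube-node2:
```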
contrib/dind/kubespray-dind.yaml (new file, 22 lines)

@@ -0,0 +1,22 @@
# kubespray-dind.yaml: minimal kubespray ansible playbook usable for DIND
# See contrib/dind/README.md
kube_api_anonymous_auth: true
kubeadm_enabled: true

kubelet_fail_swap_on: false

# Docker nodes need to have been created with same "node_distro: debian"
# at contrib/dind/group_vars/all/all.yaml
bootstrap_os: debian

docker_version: latest

docker_storage_options: -s overlay2 --storage-opt overlay2.override_kernel_check=true -g /dind/docker

dns_mode: coredns

deploy_netchecker: True
netcheck_agent_image_repo: quay.io/l23network/k8s-netchecker-agent
netcheck_server_image_repo: quay.io/l23network/k8s-netchecker-server
netcheck_agent_image_tag: v1.0
netcheck_server_image_tag: v1.0

contrib/dind/requirements.txt (new file, 1 line)

@@ -0,0 +1 @@
docker
contrib/dind/roles/dind-cluster/tasks/main.yaml (new file, 70 lines)

@@ -0,0 +1,70 @@
- name: set_fact distro_setup
  set_fact:
    distro_setup: "{{ distro_settings[node_distro] }}"

- name: set_fact other distro settings
  set_fact:
    distro_user: "{{ distro_setup['user'] }}"
    distro_ssh_service: "{{ distro_setup['ssh_service'] }}"
    distro_extra_packages: "{{ distro_setup['extra_packages'] }}"

- name: Null-ify some linux tools to ease DIND
  file:
    src: "/bin/true"
    dest: "{{ item }}"
    state: link
    force: yes
  with_items:
    # DIND box may have swap enabled, don't bother
    - /sbin/swapoff
    # /etc/hosts handling would fail on trying to copy file attributes on edit,
    # void it by successfully returning nil output
    - /usr/bin/lsattr
    # disable selinux-isms, esp. needed if running on a non-SELinux host
    - /usr/sbin/semodule

- name: Void installing dpkg docs and man pages on Debian based distros
  copy:
    content: |
      # Delete locales
      path-exclude=/usr/share/locale/*
      # Delete man pages
      path-exclude=/usr/share/man/*
      # Delete docs
      path-exclude=/usr/share/doc/*
      path-include=/usr/share/doc/*/copyright
    dest: /etc/dpkg/dpkg.cfg.d/01_nodoc
  when:
    - ansible_os_family == 'Debian'

- name: Install system packages to better match a full-fledged node
  package:
    name: "{{ item }}"
    state: present
  with_items: "{{ distro_extra_packages }} + [ 'rsyslog', 'openssh-server' ]"

- name: Start needed services
  service:
    name: "{{ item }}"
    state: started
  with_items:
    - rsyslog
    - "{{ distro_ssh_service }}"

- name: Create distro user "{{ distro_user }}"
  user:
    name: "{{ distro_user }}"
    uid: 1000
    #groups: sudo
    append: yes

- name: Allow password-less sudo to "{{ distro_user }}"
  copy:
    content: "{{ distro_user }} ALL=(ALL) NOPASSWD:ALL"
    dest: "/etc/sudoers.d/{{ distro_user }}"

- name: Add my pubkey to "{{ distro_user }}" user authorized keys
  authorized_key:
    user: "{{ distro_user }}"
    state: present
    key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}"
contrib/dind/roles/dind-host/tasks/main.yaml (new file, 86 lines)

@@ -0,0 +1,86 @@
- name: set_fact distro_setup
  set_fact:
    distro_setup: "{{ distro_settings[node_distro] }}"

- name: set_fact other distro settings
  set_fact:
    distro_image: "{{ distro_setup['image'] }}"
    distro_init: "{{ distro_setup['init'] }}"
    distro_pid1_exe: "{{ distro_setup['pid1_exe'] }}"
    distro_raw_setup: "{{ distro_setup['raw_setup'] }}"
    distro_raw_setup_done: "{{ distro_setup['raw_setup_done'] }}"
    distro_agetty_svc: "{{ distro_setup['agetty_svc'] }}"

- name: Create dind node containers from "containers" inventory section
  docker_container:
    image: "{{ distro_image }}"
    name: "{{ item }}"
    state: started
    hostname: "{{ item }}"
    command: "{{ distro_init }}"
    #recreate: yes
    privileged: true
    tmpfs:
      - /sys/module/nf_conntrack/parameters
    volumes:
      - /boot:/boot
      - /lib/modules:/lib/modules
      - "{{ item }}:/dind/docker"
  register: containers
  with_items: "{{ groups.containers }}"
  tags:
    - addresses

- name: Gather list of containers IPs
  set_fact:
    addresses: "{{ containers.results | map(attribute='ansible_facts') | map(attribute='docker_container') | map(attribute='NetworkSettings') | map(attribute='IPAddress') | list }}"
  tags:
    - addresses

- name: Create inventory_builder helper already set with the list of node containers' IPs
  template:
    src: inventory_builder.sh.j2
    dest: /tmp/kubespray.dind.inventory_builder.sh
    mode: 0755
  tags:
    - addresses

- name: Install needed packages into node containers via raw, need to wait for possible systemd packages to finish installing
  raw: |
    # agetty processes churn a lot of cpu time failing on nonexistent ttys; STOP them early, to reap them in the task below
    pkill -STOP agetty || true
    {{ distro_raw_setup_done }} && echo SKIPPED && exit 0
    until [ "$(readlink /proc/1/exe)" = "{{ distro_pid1_exe }}" ] ; do sleep 1; done
    {{ distro_raw_setup }}
  delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
  with_items: "{{ containers.results }}"
  register: result
  changed_when: result.stdout.find("SKIPPED") < 0

- name: Remove gettys from node containers
  raw: |
    until test -S /var/run/dbus/system_bus_socket; do sleep 1; done
    systemctl disable {{ distro_agetty_svc }}
    systemctl stop {{ distro_agetty_svc }}
  delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
  with_items: "{{ containers.results }}"
  changed_when: false

# Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
# handle manually
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)
  raw: |
    echo {{ item | hash('sha1') }} > /etc/machine-id.new
    mv -b /etc/machine-id.new /etc/machine-id
    cmp /etc/machine-id /etc/machine-id~ || true
    systemctl daemon-reload
  delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
  with_items: "{{ containers.results }}"

- name: Early hack image install to adapt for DIND
  raw: |
    rm -fv /usr/bin/udevadm /usr/sbin/udevadm
  delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
  with_items: "{{ containers.results }}"
  register: result
  changed_when: result.stdout.find("removed") >= 0

@@ -0,0 +1,3 @@
#!/bin/bash
# NOTE: if you change HOST_PREFIX, you also need to edit ./hosts [containers] section
HOST_PREFIX=kube-node python3 contrib/inventory_builder/inventory.py {% for ip in addresses %} {{ ip }} {% endfor %}
contrib/dind/run-test-distros.sh (new executable file, 93 lines)

@@ -0,0 +1,93 @@
#!/bin/bash
# Q&D test'em all: creates full DIND kubespray deploys
# for each distro, verifying it via netchecker.

info() {
  local msg="$*"
  local date="$(date -Isec)"
  echo "INFO: [$date] $msg"
}
pass_or_fail() {
  local rc="$?"
  local msg="$*"
  local date="$(date -Isec)"
  [ $rc -eq 0 ] && echo "PASS: [$date] $msg" || echo "FAIL: [$date] $msg"
  return $rc
}
test_distro() {
  local distro=${1:?};shift
  local extra="${*:-}"
  local prefix="${distro}[${extra}]"
  ansible-playbook -i hosts dind-cluster.yaml -e node_distro=$distro
  pass_or_fail "$prefix: dind-nodes" || return 1
  (cd ../..
    INVENTORY_DIR=inventory/local-dind
    mkdir -p ${INVENTORY_DIR}
    rm -f ${INVENTORY_DIR}/hosts.ini
    CONFIG_FILE=${INVENTORY_DIR}/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
    # expand $extra with -e in front of each word
    extra_args=""; for extra_arg in $extra; do extra_args="$extra_args -e $extra_arg"; done
    ansible-playbook --become -e ansible_ssh_user=$distro -i \
      ${INVENTORY_DIR}/hosts.ini cluster.yml \
      -e @contrib/dind/kubespray-dind.yaml -e bootstrap_os=$distro ${extra_args}
    pass_or_fail "$prefix: kubespray"
  ) || return 1
  local node0=${NODES[0]}
  docker exec ${node0} kubectl get pod --all-namespaces
  pass_or_fail "$prefix: kube-api" || return 1
  let retries=60
  while ((retries--)); do
    # Some CNI may set NodePort on "main" node interface address (thus no localhost NodePort)
    # e.g. kube-router: https://github.com/cloudnativelabs/kube-router/pull/217
    docker exec ${node0} curl -m2 -s http://${NETCHECKER_HOST:?}:31081/api/v1/connectivity_check | grep successfully && break
    sleep 2
  done
  [ $retries -ge 0 ]
  pass_or_fail "$prefix: netcheck" || return 1
}

NODES=($(egrep ^kube-node hosts))
NETCHECKER_HOST=localhost

: ${OUTPUT_DIR:=./out}
mkdir -p ${OUTPUT_DIR}

# The SPEC file(s) must have two arrays as e.g.
# DISTROS=(debian centos)
# EXTRAS=(
#   'kube_network_plugin=calico'
#   'kube_network_plugin=flannel'
#   'kube_network_plugin=weave'
# )
# that will be tested in a "combinatory" way (e.g. from above there'll
# be 6 test runs), creating a sequenced <spec_filename>-nn.out with each output.
#
# Each $EXTRAS element will be whitespace split, and passed as --extra-vars
# to the main kubespray ansible-playbook run.

SPECS=${*:?Missing SPEC files, e.g. test-most_distros-some_CNIs.env}
for spec in ${SPECS}; do
  unset DISTROS EXTRAS
  echo "Loading file=${spec} ..."
  . ${spec} || continue
  : ${DISTROS:?} || continue
  echo "DISTROS=${DISTROS[@]}"
  echo "EXTRAS->"
  printf "  %s\n" "${EXTRAS[@]}"
  let n=1
  for distro in ${DISTROS[@]}; do
    for extra in "${EXTRAS[@]:-NULL}"; do
      # Magic value to let this for run once:
      [[ ${extra} == NULL ]] && unset extra
      docker rm -f ${NODES[@]}
      printf -v file_out "%s/%s-%02d.out" ${OUTPUT_DIR} ${spec} $((n++))
      {
        info "${distro}[${extra}] START: file_out=${file_out}"
        time test_distro ${distro} ${extra}
      } |& tee ${file_out}
      # sleeping for the sake of the human to verify if they want
      sleep 2m
    done
  done
done
egrep -H '^(....:|real)' $(ls -tr ${OUTPUT_DIR}/*.out)
contrib/dind/test-most_distros-some_CNIs.env (new file, 11 lines)

@@ -0,0 +1,11 @@
# Test spec file: used from ./run-test-distros.sh, will run
# each distro in $DISTROS overloading main kubespray ansible-playbook run
# Get all DISTROS from distro.yaml (shame no yaml parsing, but nuff anyway)
# DISTROS="${*:-$(egrep -o '^  \w+' group_vars/all/distro.yaml|paste -s)}"
DISTROS=(debian ubuntu centos fedora)

# Each line below will be added as --extra-vars to main playbook run
EXTRAS=(
  'kube_network_plugin=calico'
  'kube_network_plugin=weave'
)

contrib/dind/test-some_distros-kube_router_combo.env (new file, 8 lines)

@@ -0,0 +1,8 @@
DISTROS=(debian centos)
NETCHECKER_HOST=${NODES[0]}
EXTRAS=(
  'kube_network_plugin=kube-router {"kubeadm_enabled":true,"kube_router_run_service_proxy":false}'
  'kube_network_plugin=kube-router {"kubeadm_enabled":true,"kube_router_run_service_proxy":true}'
  'kube_network_plugin=kube-router {"kubeadm_enabled":false,"kube_router_run_service_proxy":false}'
  'kube_network_plugin=kube-router {"kubeadm_enabled":false,"kube_router_run_service_proxy":true}'
)

contrib/dind/test-some_distros-most_CNIs.env (new file, 8 lines)

@@ -0,0 +1,8 @@
DISTROS=(debian centos)
EXTRAS=(
  'kube_network_plugin=calico {"kubeadm_enabled":true}'
  'kube_network_plugin=canal {"kubeadm_enabled":true}'
  'kube_network_plugin=cilium {"kubeadm_enabled":true}'
  'kube_network_plugin=flannel {"kubeadm_enabled":true}'
  'kube_network_plugin=weave {"kubeadm_enabled":true}'
)
contrib/metallb/README.md (new file, 10 lines)

@@ -0,0 +1,10 @@
# Deploy MetalLB into Kubespray/Kubernetes
```
MetalLB hooks into your Kubernetes cluster, and provides a network load-balancer implementation. In short, it allows you to create Kubernetes services of type “LoadBalancer” in clusters that don’t run on a cloud provider, and thus cannot simply hook into paid products to provide load-balancers.
```
This playbook aims to automate [this](https://metallb.universe.tf/tutorial/layer2/tutorial) tutorial. It deploys MetalLB into kubernetes and sets up a layer 2 load balancer.

## Install
```
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/metallb/metallb.yml
```

contrib/metallb/metallb.yml (new file, 6 lines)

@@ -0,0 +1,6 @@
---
- hosts: kube-master[0]
  tags:
    - "provision"
  roles:
    - { role: provision }

contrib/metallb/roles/provision/defaults/main.yml (new file, 7 lines)

@@ -0,0 +1,7 @@
---
metallb:
  ip_range: "10.5.0.50-10.5.0.99"
  limits:
    cpu: "100m"
    memory: "100Mi"
  port: "7472"
contrib/metallb/roles/provision/tasks/main.yml (new file, 17 lines)

@@ -0,0 +1,17 @@
---
- name: "Kubernetes Apps | Lay Down MetalLB"
  become: true
  template: { src: "{{ item }}.j2", dest: "{{ kube_config_dir }}/{{ item }}" }
  with_items: ["metallb.yml", "metallb-config.yml"]
  register: "rendering"
  when:
    - "inventory_hostname == groups['kube-master'][0]"
- name: "Kubernetes Apps | Install and configure MetalLB"
  kube:
    name: "MetalLB"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/{{ item.item }}"
    state: "{{ item.changed | ternary('latest','present') }}"
  with_items: "{{ rendering.results }}"
  when:
    - "inventory_hostname == groups['kube-master'][0]"
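The `kube:` task above uses the kube action module bundled with kubespray. Outside kubespray, roughly the same apply step can be sketched with a plain command task (a minimal sketch under that assumption, not part of this commit; note `command` is not change-aware the way the module is):

```
- name: "Kubernetes Apps | Install and configure MetalLB (plain kubectl sketch)"
  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item.item }}"
  with_items: "{{ rendering.results }}"
  when:
    - "inventory_hostname == groups['kube-master'][0]"
```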
@@ -0,0 +1,13 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
      - name: loadbalanced
        protocol: layer2
        addresses:
          - {{ metallb.ip_range }}
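The template takes its pool from the `metallb` dict in the role defaults, so the range can be retargeted per inventory. A sketch of a group_vars override (the file location and addresses are illustrative; since Ansible replaces dict vars wholesale by default, the override repeats the `limits` and `port` keys):

```
# group_vars/k8s-cluster/metallb.yml (hypothetical location)
metallb:
  ip_range: "192.168.1.240-192.168.1.250"  # free addresses on the nodes' L2 segment
  limits:
    cpu: "100m"
    memory: "100Mi"
  port: "7472"
```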
contrib/metallb/roles/provision/templates/metallb.yml.j2 (new file, 254 lines)

@@ -0,0 +1,254 @@
apiVersion: v1
kind: Namespace
metadata:
  name: metallb-system
  labels:
    app: metallb
---

apiVersion: v1
kind: ServiceAccount
metadata:
  namespace: metallb-system
  name: controller
  labels:
    app: metallb
---
apiVersion: v1
kind: ServiceAccount
metadata:
  namespace: metallb-system
  name: speaker
  labels:
    app: metallb

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: metallb-system:controller
  labels:
    app: metallb
rules:
- apiGroups: [""]
  resources: ["services"]
  verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
  resources: ["services/status"]
  verbs: ["update"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: metallb-system:speaker
  labels:
    app: metallb
rules:
- apiGroups: [""]
  resources: ["services", "endpoints", "nodes"]
  verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  namespace: metallb-system
  name: leader-election
  labels:
    app: metallb
rules:
- apiGroups: [""]
  resources: ["endpoints"]
  resourceNames: ["metallb-speaker"]
  verbs: ["get", "update"]
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["create"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  namespace: metallb-system
  name: config-watcher
  labels:
    app: metallb
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create"]
---

## Role bindings
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: metallb-system:controller
  labels:
    app: metallb
subjects:
- kind: ServiceAccount
  name: controller
  namespace: metallb-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: metallb-system:controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: metallb-system:speaker
  labels:
    app: metallb
subjects:
- kind: ServiceAccount
  name: speaker
  namespace: metallb-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: metallb-system:speaker
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  namespace: metallb-system
  name: config-watcher
  labels:
    app: metallb
subjects:
- kind: ServiceAccount
  name: controller
- kind: ServiceAccount
  name: speaker
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: config-watcher
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  namespace: metallb-system
  name: leader-election
  labels:
    app: metallb
subjects:
- kind: ServiceAccount
  name: speaker
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: leader-election
---
apiVersion: apps/v1beta2
kind: DaemonSet
metadata:
  namespace: metallb-system
  name: speaker
  labels:
    app: metallb
    component: speaker
spec:
  selector:
    matchLabels:
      app: metallb
      component: speaker
  template:
    metadata:
      labels:
        app: metallb
        component: speaker
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "{{ metallb.port }}"
    spec:
      serviceAccountName: speaker
      terminationGracePeriodSeconds: 0
      hostNetwork: true
      containers:
      - name: speaker
        image: metallb/speaker:v0.6.2
        imagePullPolicy: IfNotPresent
        args:
        - --port={{ metallb.port }}
        - --config=config
        env:
        - name: METALLB_NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        ports:
        - name: monitoring
          containerPort: {{ metallb.port }}
        resources:
          limits:
            cpu: {{ metallb.limits.cpu }}
            memory: {{ metallb.limits.memory }}
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
          capabilities:
            drop:
            - all
            add:
            - net_raw

---
apiVersion: apps/v1beta2
kind: Deployment
metadata:
  namespace: metallb-system
  name: controller
  labels:
    app: metallb
    component: controller
spec:
  revisionHistoryLimit: 3
  selector:
    matchLabels:
      app: metallb
      component: controller
  template:
    metadata:
      labels:
        app: metallb
        component: controller
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "{{ metallb.port }}"
    spec:
      serviceAccountName: controller
      terminationGracePeriodSeconds: 0
      securityContext:
        runAsNonRoot: true
        runAsUser: 65534 # nobody
      containers:
      - name: controller
        image: metallb/controller:v0.6.2
        imagePullPolicy: IfNotPresent
        args:
        - --port={{ metallb.port }}
        - --config=config
        ports:
        - name: monitoring
          containerPort: {{ metallb.port }}
        resources:
          limits:
            cpu: {{ metallb.limits.cpu }}
            memory: {{ metallb.limits.memory }}
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - all
          readOnlyRootFilesystem: true

---
@@ -12,7 +12,7 @@
 # ## As in the previous case, you can set ip to give direct communication on internal IPs
 # gfs_node1 ansible_ssh_host=95.54.0.18 # disk_volume_device_1=/dev/vdc ip=10.3.0.7
 # gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8
-# gfs_node1 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9
+# gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9

 # [kube-master]
 # node1
@@ -2,7 +2,7 @@
 # For Ubuntu.
 glusterfs_default_release: ""
 glusterfs_ppa_use: yes
-glusterfs_ppa_version: "3.8"
+glusterfs_ppa_version: "4.1"

 # Gluster configuration.
 gluster_mount_dir: /mnt/gluster
@@ -2,7 +2,7 @@
 # For Ubuntu.
 glusterfs_default_release: ""
 glusterfs_ppa_use: yes
-glusterfs_ppa_version: "3.8"
+glusterfs_ppa_version: "3.12"

 # Gluster configuration.
 gluster_mount_dir: /mnt/gluster
@@ -1,2 +1,2 @@
 ---
-glusterfs_daemon: glusterfs-server
+glusterfs_daemon: glusterd
contrib/network-storage/heketi/README.md (new file, 16 lines)

@@ -0,0 +1,16 @@
# Deploy Heketi/Glusterfs into Kubespray/Kubernetes
This playbook aims to automate [this](https://github.com/heketi/heketi/blob/master/docs/admin/install-kubernetes.md) tutorial. It deploys heketi/glusterfs into kubernetes and sets up a storageclass.

## Client Setup
Heketi provides a CLI for administering the deployment and configuration of GlusterFS in Kubernetes. [Download and install the heketi-cli](https://github.com/heketi/heketi/releases) on your client machine.

## Install
Copy the inventory.yml.sample over to inventory/sample/k8s_heketi_inventory.yml and change it according to your setup.
```
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi.yml
```

## Tear down
```
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi-tear-down.yml
```
contrib/network-storage/heketi/heketi-tear-down.yml (new file, 9 lines)

@@ -0,0 +1,9 @@
---
- hosts: kube-master[0]
  roles:
    - { role: tear-down }

- hosts: heketi-node
  become: yes
  roles:
    - { role: tear-down-disks }

contrib/network-storage/heketi/heketi.yml (new file, 10 lines)

@@ -0,0 +1,10 @@
---
- hosts: heketi-node
  roles:
    - { role: prepare }

- hosts: kube-master[0]
  tags:
    - "provision"
  roles:
    - { role: provision }
contrib/network-storage/heketi/inventory.yml.sample (new file, 26 lines)

@@ -0,0 +1,26 @@
all:
  vars:
    heketi_admin_key: "11elfeinhundertundelf"
    heketi_user_key: "!!einseinseins"
  children:
    k8s-cluster:
      vars:
        kubelet_fail_swap_on: false
      children:
        kube-master:
          hosts:
            node1:
        etcd:
          hosts:
            node2:
        kube-node:
          hosts: &kube_nodes
            node1:
            node2:
            node3:
            node4:
        heketi-node:
          vars:
            disk_volume_device_1: "/dev/vdb"
          hosts:
            <<: *kube_nodes
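The `&kube_nodes` anchor keeps `heketi-node` pointing at the same host set as `kube-node`. A quick way to confirm the merged groups resolve as intended is a throwaway play against the sample inventory (a sketch; `check-groups.yml` is a hypothetical helper, not part of the commit):

```
# check-groups.yml — run with:
#   ansible-playbook -i inventory/sample/k8s_heketi_inventory.yml check-groups.yml
- hosts: localhost
  gather_facts: false
  tasks:
    - name: Show which hosts ended up in each heketi-related group
      debug:
        msg:
          kube_node: "{{ groups['kube-node'] | default([]) }}"
          heketi_node: "{{ groups['heketi-node'] | default([]) }}"
```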
contrib/network-storage/heketi/requirements.txt (new file, 1 line)

@@ -0,0 +1 @@
jmespath

contrib/network-storage/heketi/roles/prepare/tasks/main.yml (new file, 24 lines)

@@ -0,0 +1,24 @@
---
- name: "Load lvm kernel modules"
  become: true
  with_items:
    - "dm_snapshot"
    - "dm_mirror"
    - "dm_thin_pool"
  modprobe:
    name: "{{ item }}"
    state: "present"

- name: "Install glusterfs mount utils (RedHat)"
  become: true
  yum:
    name: "glusterfs-fuse"
    state: "present"
  when: "ansible_os_family == 'RedHat'"

- name: "Install glusterfs mount utils (Debian)"
  become: true
  apt:
    name: "glusterfs-client"
    state: "present"
  when: "ansible_os_family == 'Debian'"
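The two install tasks above differ only in module and package name; a shorter equivalent could lean on the generic package module (a sketch, not how the commit does it; package names are the same as above):

```
- name: "Install glusterfs mount utils"
  become: true
  package:
    name: "{{ 'glusterfs-fuse' if ansible_os_family == 'RedHat' else 'glusterfs-client' }}"
    state: present
```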
@@ -0,0 +1 @@
---

@@ -0,0 +1,3 @@
---
- name: "stop port forwarding"
  command: "killall "
@@ -0,0 +1,56 @@
# Bootstrap heketi
- name: "Get state of heketi service, deployment and pods."
  register: "initial_heketi_state"
  changed_when: false
  command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
- name: "Bootstrap heketi."
  when:
    - "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Service']\"))|length == 0"
    - "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Deployment']\"))|length == 0"
    - "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod']\"))|length == 0"
  include_tasks: "bootstrap/deploy.yml"

# Prepare heketi topology
- name: "Get heketi initial pod state."
  register: "initial_heketi_pod"
  command: "{{ bin_dir }}/kubectl get pods --selector=deploy-heketi=pod,glusterfs=heketi-pod,name=deploy-heketi --output=json"
  changed_when: false
- name: "Ensure heketi bootstrap pod is up."
  assert:
    that: "(initial_heketi_pod.stdout|from_json|json_query('items[*]'))|length == 1"
- set_fact:
    initial_heketi_pod_name: "{{ initial_heketi_pod.stdout|from_json|json_query(\"items[*].metadata.name|[0]\") }}"
- name: "Test heketi topology."
  changed_when: false
  register: "heketi_topology"
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
- name: "Load heketi topology."
  when: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*]\")|flatten|length == 0"
  include_tasks: "bootstrap/topology.yml"

# Provision heketi database volume
- name: "Prepare heketi volumes."
  include_tasks: "bootstrap/volumes.yml"

# Remove bootstrap heketi
- name: "Tear down bootstrap."
  include_tasks: "bootstrap/tear-down.yml"

# Prepare heketi storage
- name: "Test heketi storage."
  command: "{{ bin_dir }}/kubectl get secrets,endpoints,services,jobs --output=json"
  changed_when: false
  register: "heketi_storage_state"
# ensure endpoints actually exist before trying to move database data to it
- name: "Create heketi storage."
  include_tasks: "bootstrap/storage.yml"
  vars:
    secret_query: "items[?metadata.name=='heketi-storage-secret' && kind=='Secret']"
    endpoints_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Endpoints']"
    service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
    job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
  when:
    - "heketi_storage_state.stdout|from_json|json_query(secret_query)|length == 0"
    - "heketi_storage_state.stdout|from_json|json_query(endpoints_query)|length == 0"
    - "heketi_storage_state.stdout|from_json|json_query(service_query)|length == 0"
    - "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 0"
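Every mutating step in the file above is guarded by a read-only probe: `kubectl ... --output=json` is registered, then a JMESPath `json_query` over the output decides whether the corresponding `include_tasks` runs, which is what makes the bootstrap re-entrant. As an illustration only (using `jq` instead of JMESPath; not part of the playbook), the first guard amounts to:

```
# Bootstrap proceeds only when no deploy-heketi Service exists yet;
# the same zero-length test is applied to Deployments and Pods.
kubectl get services,deployments,pods --selector=deploy-heketi --output=json \
  | jq '[.items[] | select(.kind == "Service")] | length'
```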
@@ -0,0 +1,24 @@
---
- name: "Kubernetes Apps | Lay Down Heketi Bootstrap"
  become: true
  template: { src: "heketi-bootstrap.json.j2", dest: "{{ kube_config_dir }}/heketi-bootstrap.json" }
  register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi Bootstrap"
  kube:
    name: "GlusterFS"
    kubectl: "{{bin_dir}}/kubectl"
    filename: "{{ kube_config_dir }}/heketi-bootstrap.json"
    state: "{{ rendering.changed | ternary('latest', 'present') }}"
- name: "Wait for heketi bootstrap to complete."
  changed_when: false
  register: "initial_heketi_state"
  vars:
    initial_heketi_state: { stdout: "{}" }
    pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]"
    deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
  command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
  until:
    - "initial_heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
    - "initial_heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
  retries: 60
  delay: 5
@@ -0,0 +1,33 @@
---
- name: "Test heketi storage."
  command: "{{ bin_dir }}/kubectl get secrets,endpoints,services,jobs --output=json"
  changed_when: false
  register: "heketi_storage_state"
- name: "Create heketi storage."
  kube:
    name: "GlusterFS"
    kubectl: "{{bin_dir}}/kubectl"
    filename: "{{ kube_config_dir }}/heketi-storage-bootstrap.json"
    state: "present"
  vars:
    secret_query: "items[?metadata.name=='heketi-storage-secret' && kind=='Secret']"
    endpoints_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Endpoints']"
    service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
    job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
  when:
    - "heketi_storage_state.stdout|from_json|json_query(secret_query)|length == 0"
    - "heketi_storage_state.stdout|from_json|json_query(endpoints_query)|length == 0"
    - "heketi_storage_state.stdout|from_json|json_query(service_query)|length == 0"
    - "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 0"
  register: "heketi_storage_result"
- name: "Get state of heketi database copy job."
  command: "{{ bin_dir }}/kubectl get jobs --output=json"
  changed_when: false
  register: "heketi_storage_state"
  vars:
    heketi_storage_state: { stdout: "{}" }
    job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job' && status.succeeded==1]"
  until:
    - "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 1"
  retries: 60
  delay: 5
@@ -0,0 +1,14 @@
---
- name: "Get existing Heketi deploy resources."
  command: "{{ bin_dir }}/kubectl get all --selector=\"deploy-heketi\" -o=json"
  register: "heketi_resources"
  changed_when: false
- name: "Delete bootstrap Heketi."
  command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
  when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0"
- name: "Ensure there is nothing left over."
  command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
  register: "heketi_result"
  until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
  retries: 60
  delay: 5
@@ -0,0 +1,26 @@
---
- name: "Get heketi topology."
  changed_when: false
  register: "heketi_topology"
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
- name: "Render heketi topology template."
  become: true
  vars: { nodes: "{{ groups['heketi-node'] }}" }
  register: "render"
  template:
    src: "topology.json.j2"
    dest: "{{ kube_config_dir }}/topology.json"
- name: "Copy topology configuration into container."
  changed_when: false
  command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
- name: "Load heketi topology."
  when: "render.changed"
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
  register: "load_heketi"
- name: "Get heketi topology."
  changed_when: false
  register: "heketi_topology"
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
  until: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\")|flatten|length == groups['heketi-node']|length"
  retries: 60
  delay: 5
@@ -0,0 +1,41 @@
---
- name: "Get heketi volume ids."
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume list --json"
  changed_when: false
  register: "heketi_volumes"
- name: "Get heketi volumes."
  changed_when: false
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
  with_items: "{{ heketi_volumes.stdout|from_json|json_query(\"volumes[*]\") }}"
  loop_control: { loop_var: "volume_id" }
  register: "volumes_information"
- name: "Test heketi database volume."
  set_fact: { heketi_database_volume_exists: true }
  with_items: "{{ volumes_information.results }}"
  loop_control: { loop_var: "volume_information" }
  vars: { volume: "{{ volume_information.stdout|from_json }}" }
  when: "volume.name == 'heketidbstorage'"
- name: "Provision database volume."
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
  when: "heketi_database_volume_exists is undefined"
- name: "Copy configuration from pod."
  become: true
  command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"
- name: "Get heketi volume ids."
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume list --json"
  changed_when: false
  register: "heketi_volumes"
- name: "Get heketi volumes."
  changed_when: false
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
  with_items: "{{ heketi_volumes.stdout|from_json|json_query(\"volumes[*]\") }}"
  loop_control: { loop_var: "volume_id" }
  register: "volumes_information"
- name: "Test heketi database volume."
  set_fact: { heketi_database_volume_created: true }
  with_items: "{{ volumes_information.results }}"
  loop_control: { loop_var: "volume_information" }
  vars: { volume: "{{ volume_information.stdout|from_json }}" }
  when: "volume.name == 'heketidbstorage'"
- name: "Ensure heketi database volume exists."
  assert: { that: "heketi_database_volume_created is defined", msg: "Heketi database volume does not exist." }
@@ -0,0 +1,4 @@
---
- name: "Clean up left over jobs."
  command: "{{ bin_dir }}/kubectl delete jobs,pods --selector=\"deploy-heketi\""
  changed_when: false
@@ -0,0 +1,38 @@
---
- name: "Kubernetes Apps | Lay Down GlusterFS Daemonset"
  template: { src: "glusterfs-daemonset.json.j2", dest: "{{ kube_config_dir }}/glusterfs-daemonset.json" }
  become: true
  register: "rendering"
- name: "Kubernetes Apps | Install and configure GlusterFS daemonset"
  kube:
    name: "GlusterFS"
    kubectl: "{{bin_dir}}/kubectl"
    filename: "{{ kube_config_dir }}/glusterfs-daemonset.json"
    state: "{{ rendering.changed | ternary('latest', 'present') }}"
- name: "Kubernetes Apps | Label GlusterFS nodes"
  include_tasks: "glusterfs/label.yml"
  with_items: "{{ groups['heketi-node'] }}"
  loop_control:
    loop_var: "node"
- name: "Kubernetes Apps | Wait for daemonset to become available."
  register: "daemonset_state"
  command: "{{ bin_dir }}/kubectl get daemonset glusterfs --output=json --ignore-not-found=true"
  changed_when: false
  vars:
    daemonset_state: { stdout: "{}" }
    ready: "{{ daemonset_state.stdout|from_json|json_query(\"status.numberReady\") }}"
    desired: "{{ daemonset_state.stdout|from_json|json_query(\"status.desiredNumberScheduled\") }}"
  until: "ready | int >= 3"
  retries: 60
  delay: 5

- name: "Kubernetes Apps | Lay Down Heketi Service Account"
  template: { src: "heketi-service-account.json.j2", dest: "{{ kube_config_dir }}/heketi-service-account.json" }
  become: true
  register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi Service Account"
  kube:
    name: "GlusterFS"
    kubectl: "{{bin_dir}}/kubectl"
    filename: "{{ kube_config_dir }}/heketi-service-account.json"
    state: "{{ rendering.changed | ternary('latest', 'present') }}"
@@ -0,0 +1,11 @@
---
- register: "label_present"
  command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
  changed_when: false
- name: "Assign storage label"
  when: "label_present.stdout_lines|length == 0"
  command: "{{ bin_dir }}/kubectl label node {{ node }} storagenode=glusterfs"
- register: "label_present"
  command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
  changed_when: false
- assert: { that: "label_present.stdout_lines|length > 0", msg: "Node {{ node }} has not been assigned the label storagenode=glusterfs." }
@@ -0,0 +1,26 @@
---
- name: "Kubernetes Apps | Lay Down Heketi"
  become: true
  template: { src: "heketi-deployment.json.j2", dest: "{{ kube_config_dir }}/heketi-deployment.json" }
  register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi"
  kube:
    name: "GlusterFS"
    kubectl: "{{bin_dir}}/kubectl"
    filename: "{{ kube_config_dir }}/heketi-deployment.json"
    state: "{{ rendering.changed | ternary('latest', 'present') }}"
- name: "Ensure heketi is up and running."
  changed_when: false
  register: "heketi_state"
  vars:
    heketi_state: { stdout: "{}" }
    pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]"
    deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
  command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json"
  until:
    - "heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
    - "heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
  retries: 60
  delay: 5
- set_fact:
    heketi_pod_name: "{{ heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"
@@ -0,0 +1,30 @@
---
- name: "Kubernetes Apps | GlusterFS"
  include_tasks: "glusterfs.yml"

- name: "Kubernetes Apps | Heketi Secrets"
  include_tasks: "secret.yml"

- name: "Kubernetes Apps | Test Heketi"
  register: "heketi_service_state"
  command: "{{bin_dir}}/kubectl get service heketi-storage-endpoints -o=name --ignore-not-found=true"
  changed_when: false

- name: "Kubernetes Apps | Bootstrap Heketi"
  when: "heketi_service_state.stdout == \"\""
  include_tasks: "bootstrap.yml"

- name: "Kubernetes Apps | Heketi"
  include_tasks: "heketi.yml"

- name: "Kubernetes Apps | Heketi Topology"
  include_tasks: "topology.yml"

- name: "Kubernetes Apps | Heketi Storage"
  include_tasks: "storage.yml"

- name: "Kubernetes Apps | Storage Class"
  include_tasks: "storageclass.yml"

- name: "Clean up"
  include_tasks: "cleanup.yml"
@@ -0,0 +1,27 @@
---
- register: "clusterrolebinding_state"
  command: "{{bin_dir}}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
  changed_when: false
- name: "Kubernetes Apps | Deploy cluster role binding."
  when: "clusterrolebinding_state.stdout == \"\""
  command: "{{bin_dir}}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"
- register: "clusterrolebinding_state"
  command: "{{bin_dir}}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
  changed_when: false
- assert: { that: "clusterrolebinding_state.stdout != \"\"", msg: "Cluster role binding is not present." }

- register: "secret_state"
  command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
  changed_when: false
- name: "Render Heketi secret configuration."
  become: true
  template:
    src: "heketi.json.j2"
    dest: "{{ kube_config_dir }}/heketi.json"
- name: "Deploy Heketi config secret"
  when: "secret_state.stdout == \"\""
  command: "{{bin_dir}}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"
- register: "secret_state"
  command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
  changed_when: false
- assert: { that: "secret_state.stdout != \"\"", msg: "Heketi config secret is not present." }
@@ -0,0 +1,12 @@
---
- name: "Kubernetes Apps | Lay Down Heketi Storage"
  become: true
  vars: { nodes: "{{ groups['heketi-node'] }}" }
  template: { src: "heketi-storage.json.j2", dest: "{{ kube_config_dir }}/heketi-storage.json" }
  register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi Storage"
  kube:
    name: "GlusterFS"
    kubectl: "{{bin_dir}}/kubectl"
    filename: "{{ kube_config_dir }}/heketi-storage.json"
    state: "{{ rendering.changed | ternary('latest', 'present') }}"
@@ -0,0 +1,25 @@
---
- name: "Test storage class."
  command: "{{ bin_dir }}/kubectl get storageclass gluster --ignore-not-found=true --output=json"
  register: "storageclass"
  changed_when: false
- name: "Test heketi service."
  command: "{{ bin_dir }}/kubectl get service heketi --ignore-not-found=true --output=json"
  register: "heketi_service"
  changed_when: false
- name: "Ensure heketi service is available."
  assert: { that: "heketi_service.stdout != \"\"" }
- name: "Render storage class configuration."
  become: true
  vars:
    endpoint_address: "{{ (heketi_service.stdout|from_json).spec.clusterIP }}"
  template:
    src: "storageclass.yml.j2"
    dest: "{{ kube_config_dir }}/storageclass.yml"
  register: "rendering"
- name: "Kubernetes Apps | Install and configure Storage Class"
  kube:
    name: "GlusterFS"
    kubectl: "{{bin_dir}}/kubectl"
    filename: "{{ kube_config_dir }}/storageclass.yml"
    state: "{{ rendering.changed | ternary('latest', 'present') }}"
@@ -0,0 +1,25 @@
---
- name: "Get heketi topology."
  register: "heketi_topology"
  changed_when: false
  command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
- name: "Render heketi topology template."
  become: true
  vars: { nodes: "{{ groups['heketi-node'] }}" }
  register: "rendering"
  template:
    src: "topology.json.j2"
    dest: "{{ kube_config_dir }}/topology.json"
- name: "Copy topology configuration into container."
  when: "rendering.changed"
  command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
- name: "Load heketi topology."
  when: "rendering.changed"
  command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
- name: "Get heketi topology."
  register: "heketi_topology"
  changed_when: false
  command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
  until: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\")|flatten|length == groups['heketi-node']|length"
  retries: 60
  delay: 5
@@ -0,0 +1,144 @@
{
  "kind": "DaemonSet",
  "apiVersion": "extensions/v1beta1",
  "metadata": {
    "name": "glusterfs",
    "labels": {
      "glusterfs": "deployment"
    },
    "annotations": {
      "description": "GlusterFS Daemon Set",
      "tags": "glusterfs"
    }
  },
  "spec": {
    "template": {
      "metadata": {
        "name": "glusterfs",
        "labels": {
          "glusterfs-node": "daemonset"
        }
      },
      "spec": {
        "nodeSelector": {
          "storagenode": "glusterfs"
        },
        "hostNetwork": true,
        "containers": [
          {
            "image": "gluster/gluster-centos:gluster4u0_centos7",
            "imagePullPolicy": "IfNotPresent",
            "name": "glusterfs",
            "volumeMounts": [
              {
                "name": "glusterfs-heketi",
                "mountPath": "/var/lib/heketi"
              },
              {
                "name": "glusterfs-run",
                "mountPath": "/run"
              },
              {
                "name": "glusterfs-lvm",
                "mountPath": "/run/lvm"
              },
              {
                "name": "glusterfs-etc",
                "mountPath": "/etc/glusterfs"
              },
              {
                "name": "glusterfs-logs",
                "mountPath": "/var/log/glusterfs"
              },
              {
                "name": "glusterfs-config",
                "mountPath": "/var/lib/glusterd"
              },
              {
                "name": "glusterfs-dev",
                "mountPath": "/dev"
              },
              {
                "name": "glusterfs-cgroup",
                "mountPath": "/sys/fs/cgroup"
              }
            ],
            "securityContext": {
              "capabilities": {},
              "privileged": true
            },
            "readinessProbe": {
              "timeoutSeconds": 3,
              "initialDelaySeconds": 60,
              "exec": {
                "command": [
                  "/bin/bash",
                  "-c",
                  "systemctl status glusterd.service"
                ]
              }
            },
            "livenessProbe": {
              "timeoutSeconds": 3,
              "initialDelaySeconds": 60,
              "exec": {
                "command": [
                  "/bin/bash",
                  "-c",
                  "systemctl status glusterd.service"
                ]
              }
            }
          }
        ],
        "volumes": [
          {
            "name": "glusterfs-heketi",
            "hostPath": {
              "path": "/var/lib/heketi"
            }
          },
          {
            "name": "glusterfs-run"
          },
          {
            "name": "glusterfs-lvm",
            "hostPath": {
              "path": "/run/lvm"
            }
          },
          {
            "name": "glusterfs-etc",
            "hostPath": {
              "path": "/etc/glusterfs"
            }
          },
          {
            "name": "glusterfs-logs",
            "hostPath": {
              "path": "/var/log/glusterfs"
            }
          },
          {
            "name": "glusterfs-config",
            "hostPath": {
              "path": "/var/lib/glusterd"
            }
          },
          {
            "name": "glusterfs-dev",
            "hostPath": {
              "path": "/dev"
            }
          },
          {
            "name": "glusterfs-cgroup",
            "hostPath": {
              "path": "/sys/fs/cgroup"
            }
          }
        ]
      }
    }
  }
}
@@ -0,0 +1,133 @@
{
  "kind": "List",
  "apiVersion": "v1",
  "items": [
    {
      "kind": "Service",
      "apiVersion": "v1",
      "metadata": {
        "name": "deploy-heketi",
        "labels": {
          "glusterfs": "heketi-service",
          "deploy-heketi": "support"
        },
        "annotations": {
          "description": "Exposes Heketi Service"
        }
      },
      "spec": {
        "selector": {
          "name": "deploy-heketi"
        },
        "ports": [
          {
            "name": "deploy-heketi",
            "port": 8080,
            "targetPort": 8080
          }
        ]
      }
    },
    {
      "kind": "Deployment",
      "apiVersion": "extensions/v1beta1",
      "metadata": {
        "name": "deploy-heketi",
        "labels": {
          "glusterfs": "heketi-deployment",
          "deploy-heketi": "deployment"
        },
        "annotations": {
          "description": "Defines how to deploy Heketi"
        }
      },
      "spec": {
        "replicas": 1,
        "template": {
          "metadata": {
            "name": "deploy-heketi",
            "labels": {
              "name": "deploy-heketi",
              "glusterfs": "heketi-pod",
              "deploy-heketi": "pod"
            }
          },
          "spec": {
            "serviceAccountName": "heketi-service-account",
            "containers": [
              {
                "image": "heketi/heketi:7",
                "imagePullPolicy": "Always",
                "name": "deploy-heketi",
                "env": [
                  {
                    "name": "HEKETI_EXECUTOR",
                    "value": "kubernetes"
                  },
                  {
                    "name": "HEKETI_DB_PATH",
                    "value": "/var/lib/heketi/heketi.db"
                  },
                  {
                    "name": "HEKETI_FSTAB",
                    "value": "/var/lib/heketi/fstab"
                  },
                  {
                    "name": "HEKETI_SNAPSHOT_LIMIT",
                    "value": "14"
                  },
                  {
                    "name": "HEKETI_KUBE_GLUSTER_DAEMONSET",
                    "value": "y"
                  }
                ],
                "ports": [
                  {
                    "containerPort": 8080
                  }
                ],
                "volumeMounts": [
                  {
                    "name": "db",
                    "mountPath": "/var/lib/heketi"
                  },
                  {
                    "name": "config",
                    "mountPath": "/etc/heketi"
                  }
                ],
                "readinessProbe": {
                  "timeoutSeconds": 3,
                  "initialDelaySeconds": 3,
                  "httpGet": {
                    "path": "/hello",
                    "port": 8080
                  }
                },
                "livenessProbe": {
                  "timeoutSeconds": 3,
                  "initialDelaySeconds": 30,
                  "httpGet": {
                    "path": "/hello",
                    "port": 8080
                  }
                }
              }
            ],
            "volumes": [
              {
                "name": "db"
              },
              {
                "name": "config",
                "secret": {
                  "secretName": "heketi-config-secret"
                }
              }
            ]
          }
        }
      }
    }
  ]
}
@@ -0,0 +1,159 @@
{
  "kind": "List",
  "apiVersion": "v1",
  "items": [
    {
      "kind": "Secret",
      "apiVersion": "v1",
      "metadata": {
        "name": "heketi-db-backup",
        "labels": {
          "glusterfs": "heketi-db",
          "heketi": "db"
        }
      },
      "data": {
      },
      "type": "Opaque"
    },
    {
      "kind": "Service",
      "apiVersion": "v1",
      "metadata": {
        "name": "heketi",
        "labels": {
          "glusterfs": "heketi-service",
          "deploy-heketi": "support"
        },
        "annotations": {
          "description": "Exposes Heketi Service"
        }
      },
      "spec": {
        "selector": {
          "name": "heketi"
        },
        "ports": [
          {
            "name": "heketi",
            "port": 8080,
            "targetPort": 8080
          }
        ]
      }
    },
    {
      "kind": "Deployment",
      "apiVersion": "extensions/v1beta1",
      "metadata": {
        "name": "heketi",
        "labels": {
          "glusterfs": "heketi-deployment"
        },
        "annotations": {
          "description": "Defines how to deploy Heketi"
        }
      },
      "spec": {
        "replicas": 1,
        "template": {
          "metadata": {
            "name": "heketi",
            "labels": {
              "name": "heketi",
              "glusterfs": "heketi-pod"
            }
          },
          "spec": {
            "serviceAccountName": "heketi-service-account",
            "containers": [
              {
                "image": "heketi/heketi:7",
                "imagePullPolicy": "Always",
                "name": "heketi",
                "env": [
                  {
                    "name": "HEKETI_EXECUTOR",
                    "value": "kubernetes"
                  },
                  {
                    "name": "HEKETI_DB_PATH",
                    "value": "/var/lib/heketi/heketi.db"
                  },
                  {
                    "name": "HEKETI_FSTAB",
                    "value": "/var/lib/heketi/fstab"
                  },
                  {
                    "name": "HEKETI_SNAPSHOT_LIMIT",
                    "value": "14"
                  },
                  {
                    "name": "HEKETI_KUBE_GLUSTER_DAEMONSET",
                    "value": "y"
                  }
                ],
                "ports": [
                  {
                    "containerPort": 8080
                  }
                ],
                "volumeMounts": [
                  {
                    "mountPath": "/backupdb",
                    "name": "heketi-db-secret"
                  },
                  {
                    "name": "db",
                    "mountPath": "/var/lib/heketi"
                  },
                  {
                    "name": "config",
                    "mountPath": "/etc/heketi"
                  }
                ],
                "readinessProbe": {
                  "timeoutSeconds": 3,
                  "initialDelaySeconds": 3,
                  "httpGet": {
                    "path": "/hello",
                    "port": 8080
                  }
                },
                "livenessProbe": {
                  "timeoutSeconds": 3,
                  "initialDelaySeconds": 30,
                  "httpGet": {
                    "path": "/hello",
                    "port": 8080
                  }
                }
              }
            ],
            "volumes": [
              {
                "name": "db",
                "glusterfs": {
                  "endpoints": "heketi-storage-endpoints",
                  "path": "heketidbstorage"
                }
              },
              {
                "name": "heketi-db-secret",
                "secret": {
                  "secretName": "heketi-db-backup"
                }
              },
              {
                "name": "config",
                "secret": {
                  "secretName": "heketi-config-secret"
                }
              }
            ]
          }
        }
      }
    }
  ]
}
@@ -0,0 +1,7 @@
{
  "apiVersion": "v1",
  "kind": "ServiceAccount",
  "metadata": {
    "name": "heketi-service-account"
  }
}
@@ -0,0 +1,54 @@
{
  "apiVersion": "v1",
  "kind": "List",
  "items": [
    {
      "kind": "Endpoints",
      "apiVersion": "v1",
      "metadata": {
        "name": "heketi-storage-endpoints",
        "creationTimestamp": null
      },
      "subsets": [
{% set nodeblocks = [] %}
{% for node in nodes %}
{% set nodeblock %}
        {
          "addresses": [
            {
              "ip": "{{ hostvars[node]['ansible_facts']['default_ipv4']['address'] }}"
            }
          ],
          "ports": [
            {
              "port": 1
            }
          ]
        }
{% endset %}
{% if nodeblocks.append(nodeblock) %}{% endif %}
{% endfor %}
        {{ nodeblocks|join(',') }}
      ]
    },
    {
      "kind": "Service",
      "apiVersion": "v1",
      "metadata": {
        "name": "heketi-storage-endpoints",
        "creationTimestamp": null
      },
      "spec": {
        "ports": [
          {
            "port": 1,
            "targetPort": 0
          }
        ]
      },
      "status": {
        "loadBalancer": {}
      }
    }
  ]
}
@@ -0,0 +1,44 @@
{
  "_port_comment": "Heketi Server Port Number",
  "port": "8080",

  "_use_auth": "Enable JWT authorization. Please enable for deployment",
  "use_auth": true,

  "_jwt": "Private keys for access",
  "jwt": {
    "_admin": "Admin has access to all APIs",
    "admin": {
      "key": "{{ heketi_admin_key }}"
    },
    "_user": "User only has access to /volumes endpoint",
    "user": {
      "key": "{{ heketi_user_key }}"
    }
  },

  "_glusterfs_comment": "GlusterFS Configuration",
  "glusterfs": {
    "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
    "executor": "kubernetes",

    "_db_comment": "Database file name",
    "db": "/var/lib/heketi/heketi.db",

    "kubeexec": {
      "rebalance_on_expansion": true
    },

    "sshexec": {
      "rebalance_on_expansion": true,
      "keyfile": "/etc/heketi/private_key",
      "fstab": "/etc/fstab",
      "port": "22",
      "user": "root",
      "sudo": false
    }
  },

  "_backup_db_to_kube_secret": "Backup the heketi database to a Kubernetes secret when running in Kubernetes. Default is off.",
  "backup_db_to_kube_secret": false
}
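Because the rendered config sets `use_auth` to true, every `heketi-cli` invocation in the tasks above has to pass `--user admin --secret {{ heketi_admin_key }}`. A hedged manual smoke test along the same lines (the placeholders are assumptions; the playbook discovers the pod name via `set_fact`):

```
# HEKETI_POD and ADMIN_KEY are placeholders for the discovered pod name
# and the heketi_admin_key inventory variable.
kubectl exec $HEKETI_POD -- heketi-cli --user admin --secret "$ADMIN_KEY" topology info --json
```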
@@ -0,0 +1,12 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: gluster
  annotations:
    storageclass.beta.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: "http://{{ endpoint_address }}:8080"
  restuser: "admin"
  restuserkey: "{{ heketi_admin_key }}"
@@ -0,0 +1,34 @@
{
  "clusters": [
    {
      "nodes": [
{% set nodeblocks = [] %}
{% for node in nodes %}
{% set nodeblock %}
        {
          "node": {
            "hostnames": {
              "manage": [
                "{{ node }}"
              ],
              "storage": [
                "{{ hostvars[node]['ansible_facts']['default_ipv4']['address'] }}"
              ]
            },
            "zone": 1
          },
          "devices": [
            {
              "name": "{{ hostvars[node]['disk_volume_device_1'] }}",
              "destroydata": false
            }
          ]
        }
{% endset %}
{% if nodeblocks.append(nodeblock) %}{% endif %}
{% endfor %}
        {{ nodeblocks|join(',') }}
      ]
    }
  ]
}
@@ -0,0 +1,46 @@
---
- name: "Install lvm utils (RedHat)"
  become: true
  yum:
    name: "lvm2"
    state: "present"
  when: "ansible_os_family == 'RedHat'"

- name: "Install lvm utils (Debian)"
  become: true
  apt:
    name: "lvm2"
    state: "present"
  when: "ansible_os_family == 'Debian'"

- name: "Get volume group information."
  become: true
  shell: "pvs {{ disk_volume_device_1 }} --option vg_name | tail -n+2"
  register: "volume_groups"
  ignore_errors: true
  changed_when: false

- name: "Remove volume groups."
  become: true
  command: "vgremove {{ volume_group }} --yes"
  with_items: "{{ volume_groups.stdout_lines }}"
  loop_control: { loop_var: "volume_group" }

- name: "Remove physical volume from cluster disks."
  become: true
  command: "pvremove {{ disk_volume_device_1 }} --yes"
  ignore_errors: true

- name: "Remove lvm utils (RedHat)"
  become: true
  yum:
    name: "lvm2"
    state: "absent"
  when: "ansible_os_family == 'RedHat'"

- name: "Remove lvm utils (Debian)"
  become: true
  apt:
    name: "lvm2"
    state: "absent"
  when: "ansible_os_family == 'Debian'"
@@ -0,0 +1,51 @@
---
- name: "Remove storage class."
  command: "{{ bin_dir }}/kubectl delete storageclass gluster"
  ignore_errors: true
- name: "Tear down heketi."
  command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
  ignore_errors: true
- name: "Tear down heketi."
  command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
  ignore_errors: true
- name: "Tear down bootstrap."
  include_tasks: "../provision/tasks/bootstrap/tear-down.yml"
- name: "Ensure there is nothing left over."
  command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
  register: "heketi_result"
  until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
  retries: 60
  delay: 5
- name: "Ensure there is nothing left over."
  command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
  register: "heketi_result"
  until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
  retries: 60
  delay: 5
- name: "Tear down glusterfs."
  command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
  ignore_errors: true
- name: "Remove heketi storage service."
  command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
  ignore_errors: true
- name: "Remove heketi gluster role binding"
  command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
  ignore_errors: true
- name: "Remove heketi config secret"
  command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
  ignore_errors: true
- name: "Remove heketi db backup"
  command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
  ignore_errors: true
- name: "Remove heketi service account"
  command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
  ignore_errors: true
- name: "Get secrets"
  command: "{{ bin_dir }}/kubectl get secrets --output=\"json\""
  register: "secrets"
  changed_when: false
- name: "Remove heketi storage secret"
  vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" }
  command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout|from_json|json_query(storage_query) }}"
  when: "storage_query is defined"
  ignore_errors: true
@@ -20,7 +20,7 @@ BuildRequires: python2-setuptools
 BuildRequires: python-d2to1
 BuildRequires: python2-pbr
 
-Requires: ansible >= 2.4.0
+Requires: ansible >= 2.5.0
 Requires: python-jinja2 >= 2.10
 Requires: python-netaddr
 Requires: python-pbr
@@ -22,8 +22,6 @@ export TF_VAR_AWS_SECRET_ACCESS_KEY ="xxx"
|
|||||||
export TF_VAR_AWS_SSH_KEY_NAME="yyy"
|
export TF_VAR_AWS_SSH_KEY_NAME="yyy"
|
||||||
export TF_VAR_AWS_DEFAULT_REGION="zzz"
|
export TF_VAR_AWS_DEFAULT_REGION="zzz"
|
||||||
```
|
```
|
||||||
- Rename `contrib/terraform/aws/terraform.tfvars.example` to `terraform.tfvars`
|
|
||||||
|
|
||||||
- Update `contrib/terraform/aws/terraform.tfvars` with your data. By default, the Terraform scripts use CoreOS as base image. If you want to change this behaviour, see note "Using other distrib than CoreOs" below.
|
- Update `contrib/terraform/aws/terraform.tfvars` with your data. By default, the Terraform scripts use CoreOS as base image. If you want to change this behaviour, see note "Using other distrib than CoreOs" below.
|
||||||
- Create an AWS EC2 SSH Key
|
- Create an AWS EC2 SSH Key
|
||||||
- Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply` depending if you exported your AWS credentials
|
- Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply` depending if you exported your AWS credentials
|
||||||
@@ -45,7 +43,7 @@ ssh -F ./ssh-bastion.conf user@$ip
|
|||||||
|
|
||||||
Example (this one assumes you are using CoreOS)
|
Example (this one assumes you are using CoreOS)
|
||||||
```commandline
|
```commandline
|
||||||
ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache
|
ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=core -b --become-user=root --flush-cache
|
||||||
```
|
```
|
||||||
***Using other distrib than CoreOs***
|
***Using other distrib than CoreOs***
|
||||||
If you want to use another distribution than CoreOS, you can modify the search filters of the 'data "aws_ami" "distro"' in variables.tf.
|
If you want to use another distribution than CoreOS, you can modify the search filters of the 'data "aws_ami" "distro"' in variables.tf.
|
||||||
@@ -113,9 +111,9 @@ the `AWS CLI` with the following command:
|
|||||||
aws iam delete-instance-profile --region <region_name> --instance-profile-name <profile_name>
|
aws iam delete-instance-profile --region <region_name> --instance-profile-name <profile_name>
|
||||||
```
|
```
|
||||||
|
|
||||||
***Ansible Inventory doesnt get created:***
|
***Ansible Inventory doesn't get created:***
|
||||||
|
|
||||||
It could happen that Terraform doesnt create an Ansible Inventory file automatically. If this is the case copy the output after `inventory=` and create a file named `hosts`in the directory `inventory` and paste the inventory into the file.
|
It could happen that Terraform doesn't create an Ansible Inventory file automatically. If this is the case copy the output after `inventory=` and create a file named `hosts`in the directory `inventory` and paste the inventory into the file.
|
||||||
|
|
||||||
**Architecture**
|
**Architecture**
|
||||||
|
|
||||||
|
|||||||
@@ -181,7 +181,7 @@ data "template_file" "inventory" {
|
|||||||
|
|
||||||
resource "null_resource" "inventories" {
|
resource "null_resource" "inventories" {
|
||||||
provisioner "local-exec" {
|
provisioner "local-exec" {
|
||||||
command = "echo '${data.template_file.inventory.rendered}' > ../../../inventory/hosts"
|
command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}"
|
||||||
}
|
}
|
||||||
|
|
||||||
triggers {
|
triggers {
|
||||||
|
|||||||
@@ -2,8 +2,9 @@
 ${connection_strings_master}
 ${connection_strings_node}
 ${connection_strings_etcd}
+${public_ip_address_bastion}
 
+[bastion]
 ${public_ip_address_bastion}
 
 [kube-master]
@@ -24,4 +25,4 @@ kube-master
 
 
 [k8s-cluster:vars]
 ${elb_api_fqdn}
@@ -31,3 +31,5 @@ default_tags = {
 # Env = "devtest"
 # Product = "kubernetes"
 }
+
+inventory_file = "../../../inventory/hosts"
@@ -103,3 +103,7 @@ variable "default_tags" {
|
|||||||
description = "Default tags for all resources"
|
description = "Default tags for all resources"
|
||||||
type = "map"
|
type = "map"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "inventory_file" {
|
||||||
|
description = "Where to store the generated inventory file"
|
||||||
|
}
|
||||||
|
|||||||
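Since the `local-exec` provisioner now writes to `${var.inventory_file}`, the inventory destination can be chosen per run instead of being fixed. A sketch (the alternate path is illustrative only):

```
# terraform.tfvars pins the old default ("../../../inventory/hosts");
# override it on the command line for a second cluster:
terraform apply -var-file=credentials.tfvars -var 'inventory_file=../../../inventory/staging-hosts'
```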
@@ -8,6 +8,23 @@ Openstack.
 This will install a Kubernetes cluster on an Openstack Cloud. It should work on
 most modern installs of OpenStack that support the basic services.
 
+### Known compatible public clouds
+- [Auro](https://auro.io/)
+- [BetaCloud](https://www.betacloud.io/)
+- [CityCloud](https://www.citycloud.com/)
+- [DreamHost](https://www.dreamhost.com/cloud/computing/)
+- [ELASTX](https://elastx.se/)
+- [EnterCloudSuite](https://www.entercloudsuite.com/)
+- [FugaCloud](https://fuga.cloud/)
+- [OVH](https://www.ovh.com/)
+- [Rackspace](https://www.rackspace.com/)
+- [Ultimum](https://ultimum.io/)
+- [VexxHost](https://vexxhost.com/)
+- [Zetta](https://www.zetta.io/)
+
+### Known incompatible public clouds
+- T-Systems / Open Telekom Cloud: requires `wait_until_associated`
+
 ## Approach
 The terraform configuration inspects variables found in
 [variables.tf](variables.tf) to create resources in your OpenStack cluster.
@@ -223,6 +240,9 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tf`.
 |`number_of_gfs_nodes_no_floating_ip` | Number of gluster servers to provision. |
 | `gfs_volume_size_in_gb` | Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks |
 |`supplementary_master_groups` | To add ansible groups to the masters, such as `kube-node` for tainting them as nodes, empty by default. |
+|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube-ingress` for running ingress controller pods, empty by default. |
+|`bastion_allowed_remote_ips` | List of CIDRs allowed to initiate an SSH connection, `["0.0.0.0/0"]` by default |
+|`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
 
 #### Terraform state files
 
@@ -258,7 +278,7 @@ $ terraform apply -var-file=cluster.tf ../../contrib/terraform/openstack
 
 if you chose to create a bastion host, this script will create
 `contrib/terraform/openstack/k8s-cluster.yml` with an ssh command for Ansible to
 be able to access your machines tunneling through the bastion's IP address. If
 you want to manually handle the ssh tunneling to these machines, please delete
 or move that file. If you want to use this, just leave it there, as ansible will
 pick it up automatically.
@@ -339,11 +359,6 @@ If it fails try to connect manually via SSH. It could be something as simple as
 ### Configure cluster variables
 
 Edit `inventory/$CLUSTER/group_vars/all.yml`:
-- Set variable **bootstrap_os** appropriately for your desired image:
-```
-# Valid bootstrap options (required): ubuntu, coreos, centos, none
-bootstrap_os: coreos
-```
 - **bin_dir**:
 ```
 # Directory where the binaries will be installed
@@ -422,14 +437,6 @@ $ kubectl config use-context default-system
 kubectl version
 ```
 
-If you are using floating ip addresses then you may get this error:
-```
-Unable to connect to the server: x509: certificate is valid for 10.0.0.6, 10.0.0.6, 10.233.0.1, 127.0.0.1, not 132.249.238.25
-```
-
-You can tell kubectl to ignore this condition by adding the
-`--insecure-skip-tls-verify` option.
-
 ## GlusterFS
 GlusterFS is not deployed by the standard `cluster.yml` playbook, see the
 [GlusterFS playbook documentation](../../network-storage/glusterfs/README.md)
@@ -6,6 +6,7 @@ module "network" {
|
|||||||
subnet_cidr = "${var.subnet_cidr}"
|
subnet_cidr = "${var.subnet_cidr}"
|
||||||
cluster_name = "${var.cluster_name}"
|
cluster_name = "${var.cluster_name}"
|
||||||
dns_nameservers = "${var.dns_nameservers}"
|
dns_nameservers = "${var.dns_nameservers}"
|
||||||
|
use_neutron = "${var.use_neutron}"
|
||||||
}
|
}
|
||||||
|
|
||||||
module "ips" {
|
module "ips" {
|
||||||
@@ -50,7 +51,10 @@ module "compute" {
|
|||||||
k8s_master_fips = "${module.ips.k8s_master_fips}"
|
k8s_master_fips = "${module.ips.k8s_master_fips}"
|
||||||
k8s_node_fips = "${module.ips.k8s_node_fips}"
|
k8s_node_fips = "${module.ips.k8s_node_fips}"
|
||||||
bastion_fips = "${module.ips.bastion_fips}"
|
bastion_fips = "${module.ips.bastion_fips}"
|
||||||
|
bastion_allowed_remote_ips = "${var.bastion_allowed_remote_ips}"
|
||||||
supplementary_master_groups = "${var.supplementary_master_groups}"
|
supplementary_master_groups = "${var.supplementary_master_groups}"
|
||||||
|
supplementary_node_groups = "${var.supplementary_node_groups}"
|
||||||
|
worker_allowed_ports = "${var.worker_allowed_ports}"
|
||||||
|
|
||||||
network_id = "${module.network.router_id}"
|
network_id = "${module.network.router_id}"
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,72 +3,63 @@ resource "openstack_compute_keypair_v2" "k8s" {
|
|||||||
public_key = "${chomp(file(var.public_key_path))}"
|
public_key = "${chomp(file(var.public_key_path))}"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "openstack_compute_secgroup_v2" "k8s_master" {
|
resource "openstack_networking_secgroup_v2" "k8s_master" {
|
||||||
name = "${var.cluster_name}-k8s-master"
|
name = "${var.cluster_name}-k8s-master"
|
||||||
description = "${var.cluster_name} - Kubernetes Master"
|
description = "${var.cluster_name} - Kubernetes Master"
|
||||||
|
|
||||||
rule {
|
|
||||||
ip_protocol = "tcp"
|
|
||||||
from_port = "6443"
|
|
||||||
to_port = "6443"
|
|
||||||
cidr = "0.0.0.0/0"
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "openstack_compute_secgroup_v2" "bastion" {
|
resource "openstack_networking_secgroup_rule_v2" "k8s_master" {
|
||||||
|
direction = "ingress"
|
||||||
|
ethertype = "IPv4"
|
||||||
|
protocol = "tcp"
|
||||||
|
port_range_min = "6443"
|
||||||
|
port_range_max = "6443"
|
||||||
|
remote_ip_prefix = "0.0.0.0/0"
|
||||||
|
security_group_id = "${openstack_networking_secgroup_v2.k8s_master.id}"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_networking_secgroup_v2" "bastion" {
|
||||||
name = "${var.cluster_name}-bastion"
|
name = "${var.cluster_name}-bastion"
|
||||||
description = "${var.cluster_name} - Bastion Server"
|
description = "${var.cluster_name} - Bastion Server"
|
||||||
|
|
||||||
rule {
|
|
||||||
ip_protocol = "tcp"
|
|
||||||
from_port = "22"
|
|
||||||
to_port = "22"
|
|
||||||
cidr = "0.0.0.0/0"
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "openstack_compute_secgroup_v2" "k8s" {
|
resource "openstack_networking_secgroup_rule_v2" "bastion" {
|
||||||
|
count = "${length(var.bastion_allowed_remote_ips)}"
|
||||||
|
direction = "ingress"
|
||||||
|
ethertype = "IPv4"
|
||||||
|
protocol = "tcp"
|
||||||
|
port_range_min = "22"
|
||||||
|
port_range_max = "22"
|
||||||
|
remote_ip_prefix = "${var.bastion_allowed_remote_ips[count.index]}"
|
||||||
|
security_group_id = "${openstack_networking_secgroup_v2.bastion.id}"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_networking_secgroup_v2" "k8s" {
|
||||||
name = "${var.cluster_name}-k8s"
|
name = "${var.cluster_name}-k8s"
|
||||||
description = "${var.cluster_name} - Kubernetes"
|
description = "${var.cluster_name} - Kubernetes"
|
||||||
|
|
||||||
rule {
|
|
||||||
ip_protocol = "icmp"
|
|
||||||
from_port = "-1"
|
|
||||||
to_port = "-1"
|
|
||||||
cidr = "0.0.0.0/0"
|
|
||||||
}
|
|
||||||
|
|
||||||
rule {
|
|
||||||
ip_protocol = "tcp"
|
|
||||||
from_port = "1"
|
|
||||||
to_port = "65535"
|
|
||||||
self = true
|
|
||||||
}
|
|
||||||
|
|
||||||
rule {
|
|
||||||
ip_protocol = "udp"
|
|
||||||
from_port = "1"
|
|
||||||
to_port = "65535"
|
|
||||||
self = true
|
|
||||||
}
|
|
||||||
|
|
||||||
rule {
|
|
||||||
ip_protocol = "icmp"
|
|
||||||
from_port = "-1"
|
|
||||||
to_port = "-1"
|
|
||||||
self = true
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
resource "openstack_compute_secgroup_v2" "worker" {
|
|
||||||
|
resource "openstack_networking_secgroup_rule_v2" "k8s" {
|
||||||
|
direction = "ingress"
|
||||||
|
ethertype = "IPv4"
|
||||||
|
remote_group_id = "${openstack_networking_secgroup_v2.k8s.id}"
|
||||||
|
security_group_id = "${openstack_networking_secgroup_v2.k8s.id}"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_networking_secgroup_v2" "worker" {
|
||||||
name = "${var.cluster_name}-k8s-worker"
|
name = "${var.cluster_name}-k8s-worker"
|
||||||
description = "${var.cluster_name} - Kubernetes worker nodes"
|
description = "${var.cluster_name} - Kubernetes worker nodes"
|
||||||
|
}
|
||||||
|
|
||||||
rule {
|
resource "openstack_networking_secgroup_rule_v2" "worker" {
|
||||||
ip_protocol = "tcp"
|
count = "${length(var.worker_allowed_ports)}"
|
||||||
from_port = "30000"
|
direction = "ingress"
|
||||||
to_port = "32767"
|
ethertype = "IPv4"
|
||||||
cidr = "0.0.0.0/0"
|
protocol = "${lookup(var.worker_allowed_ports[count.index], "protocol", "tcp")}"
|
||||||
}
|
port_range_min = "${lookup(var.worker_allowed_ports[count.index], "port_range_min")}"
|
||||||
|
port_range_max = "${lookup(var.worker_allowed_ports[count.index], "port_range_max")}"
|
||||||
|
remote_ip_prefix = "${lookup(var.worker_allowed_ports[count.index], "remote_ip_prefix", "0.0.0.0/0")}"
|
||||||
|
security_group_id = "${openstack_networking_secgroup_v2.worker.id}"
|
||||||
}
|
}
|
||||||
 
 resource "openstack_compute_instance_v2" "bastion" {
@@ -82,8 +73,8 @@ resource "openstack_compute_instance_v2" "bastion" {
     name = "${var.network_name}"
   }
 
-  security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
-    "${openstack_compute_secgroup_v2.bastion.name}",
+  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
+    "${openstack_networking_secgroup_v2.bastion.name}",
     "default",
   ]
 
@@ -111,9 +102,9 @@ resource "openstack_compute_instance_v2" "k8s_master" {
     name = "${var.network_name}"
   }
 
-  security_groups = ["${openstack_compute_secgroup_v2.k8s_master.name}",
-    "${openstack_compute_secgroup_v2.bastion.name}",
-    "${openstack_compute_secgroup_v2.k8s.name}",
+  security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
+    "${openstack_networking_secgroup_v2.bastion.name}",
+    "${openstack_networking_secgroup_v2.k8s.name}",
     "default",
   ]
 
@@ -141,9 +132,9 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
     name = "${var.network_name}"
   }
 
-  security_groups = ["${openstack_compute_secgroup_v2.k8s_master.name}",
-    "${openstack_compute_secgroup_v2.bastion.name}",
-    "${openstack_compute_secgroup_v2.k8s.name}",
+  security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
+    "${openstack_networking_secgroup_v2.bastion.name}",
+    "${openstack_networking_secgroup_v2.k8s.name}",
   ]
 
   metadata = {
@@ -170,7 +161,7 @@ resource "openstack_compute_instance_v2" "etcd" {
     name = "${var.network_name}"
   }
 
-  security_groups = ["${openstack_compute_secgroup_v2.k8s.name}"]
+  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]
 
   metadata = {
     ssh_user = "${var.ssh_user}"
@@ -192,8 +183,8 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
     name = "${var.network_name}"
   }
 
-  security_groups = ["${openstack_compute_secgroup_v2.k8s_master.name}",
-    "${openstack_compute_secgroup_v2.k8s.name}",
+  security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
+    "${openstack_networking_secgroup_v2.k8s.name}",
     "default",
   ]
 
@@ -217,8 +208,8 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
     name = "${var.network_name}"
   }
 
-  security_groups = ["${openstack_compute_secgroup_v2.k8s_master.name}",
-    "${openstack_compute_secgroup_v2.k8s.name}",
+  security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
+    "${openstack_networking_secgroup_v2.k8s.name}",
   ]
 
   metadata = {
@@ -241,15 +232,15 @@ resource "openstack_compute_instance_v2" "k8s_node" {
     name = "${var.network_name}"
   }
 
-  security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
-    "${openstack_compute_secgroup_v2.bastion.name}",
-    "${openstack_compute_secgroup_v2.worker.name}",
+  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
+    "${openstack_networking_secgroup_v2.bastion.name}",
+    "${openstack_networking_secgroup_v2.worker.name}",
     "default",
   ]
 
   metadata = {
     ssh_user = "${var.ssh_user}"
-    kubespray_groups = "kube-node,k8s-cluster"
+    kubespray_groups = "kube-node,k8s-cluster,${var.supplementary_node_groups}"
     depends_on = "${var.network_id}"
   }
 
@@ -271,14 +262,14 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
     name = "${var.network_name}"
   }
 
-  security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
-    "${openstack_compute_secgroup_v2.worker.name}",
+  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
+    "${openstack_networking_secgroup_v2.worker.name}",
     "default",
   ]
 
   metadata = {
     ssh_user = "${var.ssh_user}"
-    kubespray_groups = "kube-node,k8s-cluster,no-floating"
+    kubespray_groups = "kube-node,k8s-cluster,no-floating,${var.supplementary_node_groups}"
     depends_on = "${var.network_id}"
   }
 
@@ -321,7 +312,7 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
     name = "${var.network_name}"
  }
 
-  security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
+  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
     "default",
   ]
 
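As a usage sketch (not part of the changeset, CIDRs illustrative): the bastion rule added above creates one openstack_networking_secgroup_rule_v2 per entry of bastion_allowed_remote_ips, so SSH exposure can be narrowed from terraform.tfvars:

    # Two list entries -> two ingress rules on the bastion security group.
    bastion_allowed_remote_ips = ["192.0.2.0/24", "198.51.100.7/32"]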
@@ -46,7 +46,9 @@ variable "network_name" {}
 
 variable "flavor_bastion" {}
 
-variable "network_id" {}
+variable "network_id" {
+  default = ""
+}
 
 variable "k8s_master_fips" {
   type = "list"
@@ -60,6 +62,18 @@ variable "bastion_fips" {
   type = "list"
 }
 
+variable "bastion_allowed_remote_ips" {
+  type = "list"
+}
+
 variable "supplementary_master_groups" {
   default = ""
 }
+
+variable "supplementary_node_groups" {
+  default = ""
+}
+
+variable "worker_allowed_ports" {
+  type = "list"
+}
@@ -12,4 +12,6 @@ variable "external_net" {}
 
 variable "network_name" {}
 
-variable "router_id" {}
+variable "router_id" {
+  default = ""
+}
@@ -1,16 +1,19 @@
 resource "openstack_networking_router_v2" "k8s" {
   name                = "${var.cluster_name}-router"
+  count               = "${var.use_neutron}"
   admin_state_up      = "true"
   external_network_id = "${var.external_net}"
 }
 
 resource "openstack_networking_network_v2" "k8s" {
   name           = "${var.network_name}"
+  count          = "${var.use_neutron}"
   admin_state_up = "true"
 }
 
 resource "openstack_networking_subnet_v2" "k8s" {
   name       = "${var.cluster_name}-internal-network"
+  count      = "${var.use_neutron}"
   network_id = "${openstack_networking_network_v2.k8s.id}"
   cidr       = "${var.subnet_cidr}"
   ip_version = 4
@@ -18,6 +21,7 @@ resource "openstack_networking_subnet_v2" "k8s" {
 }
 
 resource "openstack_networking_router_interface_v2" "k8s" {
+  count     = "${var.use_neutron}"
   router_id = "${openstack_networking_router_v2.k8s.id}"
   subnet_id = "${openstack_networking_subnet_v2.k8s.id}"
 }
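The added count = "${var.use_neutron}" lines use the Terraform 0.11 idiom of a 0/1 variable as a creation switch: count = 1 instantiates the resource once, count = 0 skips it entirely. A minimal self-contained sketch of the pattern (resource name hypothetical, not from this changeset):

    variable "use_neutron" {
      default = 1
    }

    # Created once when use_neutron = 1, not at all when it is 0.
    resource "openstack_networking_network_v2" "example" {
      count          = "${var.use_neutron}"
      name           = "example-net"
      admin_state_up = "true"
    }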
@@ -1,7 +1,12 @@
 output "router_id" {
-  value = "${openstack_networking_router_interface_v2.k8s.id}"
+  value = "${element(concat(openstack_networking_router_v2.k8s.*.id, list("")), 0)}"
+}
+
+output "router_internal_port_id" {
+  value = "${element(concat(openstack_networking_router_interface_v2.k8s.*.id, list("")), 0)}"
+
 }
 
 output "subnet_id" {
-  value = "${openstack_networking_subnet_v2.k8s.id}"
+  value = "${element(concat(openstack_networking_subnet_v2.k8s.*.id, list("")), 0)}"
 }
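Once a resource carries count, its attributes must be read as splat lists, and those lists are empty when use_neutron = 0; element() on an empty list is an error in Terraform 0.11. Wrapping the list in concat(..., list("")) pads it with an empty string so each output degrades to "" instead of failing. A sketch of the guard in isolation (output name hypothetical):

    # Evaluates to the first router id, or "" when count = 0.
    output "example_router_id" {
      value = "${element(concat(openstack_networking_router_v2.k8s.*.id, list("")), 0)}"
    }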
@@ -9,3 +9,5 @@ variable "dns_nameservers" {
 }
 
 variable "subnet_cidr" {}
+
+variable "use_neutron" {}
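For clouds without Neutron, the whole network module can now be switched off from terraform.tfvars; a minimal sketch (value illustrative):

    # Skip creation of the router, network, subnet and router interface.
    use_neutron = 0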
@@ -43,4 +43,4 @@ network_name = "<network>"
 external_net = "<UUID>"
 subnet_cidr = "<cidr>"
 floatingip_pool = "<pool>"
+bastion_allowed_remote_ips = ["0.0.0.0/0"]
@@ -103,6 +103,11 @@ variable "network_name" {
   default = "internal"
 }
 
+variable "use_neutron" {
+  description = "Use neutron"
+  default     = 1
+}
+
 variable "subnet_cidr" {
   description = "Subnet CIDR block."
   type        = "string"
@@ -128,3 +133,26 @@ variable "supplementary_master_groups" {
   description = "supplementary kubespray ansible groups for masters, such kube-node"
   default     = ""
 }
+
+variable "supplementary_node_groups" {
+  description = "supplementary kubespray ansible groups for worker nodes, such as kube-ingress"
+  default     = ""
+}
+
+variable "bastion_allowed_remote_ips" {
+  description = "An array of CIDRs allowed to SSH to hosts"
+  type        = "list"
+  default     = ["0.0.0.0/0"]
+}
+
+variable "worker_allowed_ports" {
+  type = "list"
+  default = [
+    {
+      "protocol"         = "tcp"
+      "port_range_min"   = 30000
+      "port_range_max"   = 32767
+      "remote_ip_prefix" = "0.0.0.0/0"
+    },
+  ]
+}
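Each worker_allowed_ports entry is a map consumed with lookup() in the worker rule resource, so "protocol" and "remote_ip_prefix" are optional and fall back to "tcp" and "0.0.0.0/0"; only the two port bounds are required. A tfvars sketch (illustrative, not from this changeset) that keeps the NodePort range and relies on those defaults:

    worker_allowed_ports = [
      {
        # protocol and remote_ip_prefix omitted: lookup() defaults apply.
        "port_range_min" = 30000
        "port_range_max" = 32767
      },
    ]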
@@ -328,6 +328,7 @@ def openstack_host(resource, module_name):
     attrs = {
         'access_ip_v4': raw_attrs['access_ip_v4'],
         'access_ip_v6': raw_attrs['access_ip_v6'],
+        'access_ip': raw_attrs['access_ip_v4'],
         'ip': raw_attrs['network.0.fixed_ip_v4'],
         'flavor': parse_dict(raw_attrs, 'flavor',
                              sep='_'),
@@ -685,6 +686,7 @@ def iter_host_ips(hosts, ips):
         ip = ips[host_id]
         host[1].update({
             'access_ip_v4': ip,
+            'access_ip': ip,
             'public_ipv4': ip,
             'ansible_ssh_host': ip,
         })
contrib/vault/groups_vars/vault.yaml (new file, 31 lines)
@@ -0,0 +1,31 @@
+vault_deployment_type: docker
+vault_binary_checksum: 3c4d70ba71619a43229e65c67830e30e050eab7a81ac6b28325ff707e5914188
+vault_version: 0.10.1
+vault_download_url: "https://releases.hashicorp.com/vault/{{ vault_version }}/vault_{{ vault_version }}_linux_{{ image_arch }}.zip"
+vault_image_repo: "vault"
+vault_image_tag: "{{ vault_version }}"
+vault_downloads:
+  vault:
+    enabled: "{{ cert_management == 'vault' }}"
+    container: "{{ vault_deployment_type != 'host' }}"
+    file: "{{ vault_deployment_type == 'host' }}"
+    dest: "{{local_release_dir}}/vault/vault_{{ vault_version }}_linux_{{ image_arch }}.zip"
+    mode: "0755"
+    owner: "vault"
+    repo: "{{ vault_image_repo }}"
+    sha256: "{{ vault_binary_checksum if vault_deployment_type == 'host' else vault_digest_checksum|d(none) }}"
+    tag: "{{ vault_image_tag }}"
+    unarchive: true
+    url: "{{ vault_download_url }}"
+    version: "{{ vault_version }}"
+    groups:
+      - vault
+
+# Vault data dirs.
+vault_base_dir: /etc/vault
+vault_cert_dir: "{{ vault_base_dir }}/ssl"
+vault_config_dir: "{{ vault_base_dir }}/config"
+vault_roles_dir: "{{ vault_base_dir }}/roles"
+vault_secrets_dir: "{{ vault_base_dir }}/secrets"
+kube_vault_mount_path: "/kube"
+etcd_vault_mount_path: "/etcd"
contrib/vault/requirements.txt (new file, 1 line)
@@ -0,0 +1 @@
+ansible-modules-hashivault>=3.9.4
@@ -26,6 +26,9 @@
       "{{ hostvars[host]['ip'] }}",
     {%- endif -%}
   {%- endfor -%}
+  {%- for cert_alt_ip in etcd_cert_alt_ips -%}
+    "{{ cert_alt_ip }}",
+  {%- endfor -%}
   "127.0.0.1","::1"
   ]
   issue_cert_path: "{{ item }}"
@@ -62,3 +65,9 @@
   with_items: "{{ etcd_node_certs_needed|d([]) }}"
   when: inventory_hostname in etcd_node_cert_hosts
   notify: set etcd_secret_changed
+
+- name: gen_certs_vault | ensure file permissions
+  shell: >-
+    find {{etcd_cert_dir }} -type d -exec chmod 0755 {} \; &&
+    find {{etcd_cert_dir }} -type f -exec chmod 0640 {} \;
+  changed_when: false
@@ -21,10 +21,17 @@ vault_log_dir: "/var/log/vault"
 
 vault_version: 0.10.1
 vault_binary_checksum: 66f0f1b0b221d664dd5913f8697409d7401df4bb2a19c7277e8fbad152063fae
-vault_download_url: "https://releases.hashicorp.com/vault/{{ vault_version }}/vault_{{ vault_version }}_linux_amd64.zip"
+vault_download_url: "https://releases.hashicorp.com/vault/{{ vault_version }}/vault_{{ vault_version }}_linux_{{ image_arch }}.zip"
+
+# hvac==0.7.0 is broken at the moment
+hvac_version: 0.6.4
+
+# Arch of Docker images and needed packages
+image_arch: "{{host_architecture}}"
 
 vault_download_vars:
   container: "{{ vault_deployment_type != 'host' }}"
-  dest: "vault/vault_{{ vault_version }}_linux_amd64.zip"
+  dest: "vault/vault_{{ vault_version }}_linux_{{ image_arch }}.zip"
   enabled: true
   mode: "0755"
   owner: "vault"
@@ -45,7 +52,7 @@ vault_bind_address: 0.0.0.0
 vault_port: 8200
 vault_etcd_url: "{{ etcd_access_addresses }}"
 
-# 8y default lease
+# By default lease
 vault_default_lease_ttl: 70080h
 vault_max_lease_ttl: 87600h
 
@@ -72,6 +79,7 @@ vault_config:
   cluster_name: "kubernetes-vault"
   default_lease_ttl: "{{ vault_default_lease_ttl }}"
   max_lease_ttl: "{{ vault_max_lease_ttl }}"
+  ui: "true"
   listener:
     tcp:
       address: "{{ vault_bind_address }}:{{ vault_port }}"
@@ -118,7 +126,7 @@ vault_pki_mounts:
   roles:
     - name: userpass
      group: userpass
-      password: "{{ lookup('password', inventory_dir + '/credentials/vault/userpass.creds length=15') }}"
+      password: "{{ lookup('password', credentials_dir + '/vault/userpass.creds length=15') }}"
       policy_rules: default
       role_options:
         allow_any_name: true
@@ -132,7 +140,7 @@ vault_pki_mounts:
   roles:
     - name: vault
       group: vault
-      password: "{{ lookup('password', inventory_dir + '/credentials/vault/vault.creds length=15') }}"
+      password: "{{ lookup('password', credentials_dir + '/vault/vault.creds length=15') }}"
       policy_rules: default
       role_options:
         allow_any_name: true
@@ -145,7 +153,7 @@ vault_pki_mounts:
   roles:
     - name: etcd
       group: etcd
-      password: "{{ lookup('password', inventory_dir + '/credentials/vault/etcd.creds length=15') }}"
+      password: "{{ lookup('password', credentials_dir + '/vault/etcd.creds length=15') }}"
       policy_rules: default
       role_options:
         allow_any_name: true
@@ -160,7 +168,7 @@ vault_pki_mounts:
   roles:
     - name: kube-master
       group: kube-master
-      password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-master.creds length=15') }}"
+      password: "{{ lookup('password', credentials_dir + '/vault/kube-master.creds length=15') }}"
       policy_rules: default
       role_options:
         allow_any_name: true
@@ -168,7 +176,7 @@ vault_pki_mounts:
       organization: "system:masters"
     - name: front-proxy-client
       group: kube-master
-      password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-proxy.creds length=15') }}"
+      password: "{{ lookup('password', credentials_dir + '/vault/kube-proxy.creds length=15') }}"
       policy_rules: default
       role_options:
         allow_any_name: true
@@ -176,7 +184,7 @@ vault_pki_mounts:
      organization: "system:front-proxy-client"
     - name: kube-node
       group: k8s-cluster
-      password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-node.creds length=15') }}"
+      password: "{{ lookup('password', credentials_dir + '/vault/kube-node.creds length=15') }}"
       policy_rules: default
       role_options:
         allow_any_name: true
@@ -184,7 +192,7 @@ vault_pki_mounts:
       organization: "system:nodes"
     - name: kube-proxy
       group: k8s-cluster
-      password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-proxy.creds length=15') }}"
+      password: "{{ lookup('password', credentials_dir + '/vault/kube-proxy.creds length=15') }}"
       policy_rules: default
       role_options:
         allow_any_name: true
@@ -12,7 +12,7 @@
   headers: "{{ vault_client_headers }}"
   status_code: "{{ vault_successful_http_codes | join(',') }}"
   register: vault_health_check
-  until: vault_health_check|succeeded
+  until: vault_health_check is succeeded
   retries: 10
   delay: "{{ retry_stagger | random + 3 }}"
   run_once: yes
Some files were not shown because too many files have changed in this diff.