Mirror of https://github.com/kubernetes-sigs/kubespray.git, synced 2025-12-14 13:54:37 +03:00
Compare commits
799 Commits
.gitignore (vendored, 4 lines changed)

@@ -1,6 +1,6 @@
 .vagrant
 *.retry
-inventory/vagrant_ansible_inventory
+**/vagrant_ansible_inventory
 inventory/credentials/
 inventory/group_vars/fake_hosts.yml
 inventory/host_vars/
@@ -12,9 +12,9 @@ temp
 *.tfstate
 *.tfstate.backup
 contrib/terraform/aws/credentials.tfvars
-**/*.sw[pon]
 /ssh-bastion.conf
 **/*.sw[pon]
+*~
 vagrant/
 
 # Byte-compiled / optimized / DLL files
.gitlab-ci.yml

@@ -93,7 +93,7 @@ before_script:
   # Check out latest tag if testing upgrade
   # Uncomment when gitlab kubespray repo has tags
   #- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
-  - test "${UPGRADE_TEST}" != "false" && git checkout f7d52564aad2ff8e337634951beb4a881c0e8aa6
+  - test "${UPGRADE_TEST}" != "false" && git checkout 8b3ce6e418ccf48171eb5b3888ee1af84f8d71ba
   # Checkout the CI vars file so it is available
   - test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml
   # Workaround https://github.com/kubernetes-incubator/kubespray/issues/2021
@@ -240,6 +240,10 @@ before_script:
   # stage: deploy-part1
   MOVED_TO_GROUP_VARS: "true"
 
+.ubuntu18_flannel_aio_variables: &ubuntu18_flannel_aio_variables
+  # stage: deploy-part1
+  MOVED_TO_GROUP_VARS: "true"
+
 .ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables
   # stage: deploy-part1
   UPGRADE_TEST: "graceful"
@@ -263,7 +267,7 @@ before_script:
 .ubuntu_cilium_sep_variables: &ubuntu_cilium_sep_variables
   # stage: deploy-special
   MOVED_TO_GROUP_VARS: "true"
 
 .rhel7_weave_variables: &rhel7_weave_variables
   # stage: deploy-part1
   MOVED_TO_GROUP_VARS: "true"
@@ -304,6 +308,10 @@ before_script:
   # stage: deploy-part1
   MOVED_TO_GROUP_VARS: "true"
 
+.coreos_vault_upgrade_variables: &coreos_vault_upgrade_variables
+  # stage: deploy-part1
+  UPGRADE_TEST: "basic"
+
 .ubuntu_flannel_variables: &ubuntu_flannel_variables
   # stage: deploy-special
   MOVED_TO_GROUP_VARS: "true"
@@ -327,6 +335,18 @@ gce_coreos-calico-aio:
   only: [/^pr-.*$/]
 
 ### PR JOBS PART2
+
+gce_ubuntu18-flannel-aio:
+  stage: deploy-part2
+  <<: *job
+  <<: *gce
+  variables:
+    <<: *ubuntu18_flannel_aio_variables
+    <<: *gce_variables
+  when: manual
+  except: ['triggers']
+  only: [/^pr-.*$/]
+
 gce_centos7-flannel-addons:
   stage: deploy-part2
   <<: *job
@@ -338,6 +358,17 @@ gce_centos7-flannel-addons:
   except: ['triggers']
   only: [/^pr-.*$/]
 
+gce_centos-weave-kubeadm:
+  stage: deploy-part2
+  <<: *job
+  <<: *gce
+  variables:
+    <<: *gce_variables
+    <<: *centos_weave_kubeadm_variables
+  when: on_success
+  except: ['triggers']
+  only: [/^pr-.*$/]
+
 gce_ubuntu-weave-sep:
   stage: deploy-part2
   <<: *job
@@ -434,17 +465,6 @@ gce_ubuntu-canal-kubeadm-triggers:
   when: on_success
   only: ['triggers']
 
-gce_centos-weave-kubeadm:
-  stage: deploy-part2
-  <<: *job
-  <<: *gce
-  variables:
-    <<: *gce_variables
-    <<: *centos_weave_kubeadm_variables
-  when: manual
-  except: ['triggers']
-  only: ['master', /^pr-.*$/]
-
 gce_centos-weave-kubeadm-triggers:
   stage: deploy-part2
   <<: *job
@@ -560,7 +580,7 @@ gce_rhel7-canal-sep:
     <<: *rhel7_canal_sep_variables
   when: manual
   except: ['triggers']
-  only: ['master', /^pr-.*$/,]
+  only: ['master', /^pr-.*$/]
 
 gce_rhel7-canal-sep-triggers:
   stage: deploy-part2
@@ -638,6 +658,17 @@ gce_ubuntu-vault-sep:
   except: ['triggers']
   only: ['master', /^pr-.*$/]
 
+gce_coreos-vault-upgrade:
+  stage: deploy-part2
+  <<: *job
+  <<: *gce
+  variables:
+    <<: *gce_variables
+    <<: *coreos_vault_upgrade_variables
+  when: manual
+  except: ['triggers']
+  only: ['master', /^pr-.*$/]
+
 gce_ubuntu-flannel-sep:
   stage: deploy-special
   <<: *job
OWNERS (12 lines changed)

@@ -1,9 +1,7 @@
 # See the OWNERS file documentation:
-# https://github.com/kubernetes/kubernetes/blob/master/docs/devel/owners.md
+# https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md
 
-owners:
-  - Smana
-  - ant31
-  - bogdando
-  - mattymo
-  - rsmitty
+approvers:
+  - kubespray-approvers
+reviewers:
+  - kubespray-reviewers
OWNERS_ALIASES (new file, 18 lines)

aliases:
  kubespray-approvers:
    - ant31
    - mattymo
    - atoms
    - chadswen
    - rsmitty
    - bogdando
    - bradbeam
    - woopstar
    - riverzhang
    - holser
    - smana
  kubespray-reviewers:
    - jjungnickel
    - archifleks
    - chapsuk
    - mirwan
README.md (79 lines changed)

@@ -1,14 +1,15 @@
 
 
 Deploy a Production Ready Kubernetes Cluster
 ============================================
 
 If you have questions, join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
+You can get your invite [here](http://slack.k8s.io/)
 
-- Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere or Baremetal**
-- **High available** cluster
+- Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere, Oracle Cloud Infrastructure (Experimental), or Baremetal**
+- **Highly available** cluster
 - **Composable** (Choice of the network plugin for instance)
-- Support most popular **Linux distributions**
+- Supports most popular **Linux distributions**
 - **Continuous integration tests**
 
 Quick Start
@@ -18,23 +19,44 @@ To deploy the cluster you can use :
 
 ### Ansible
 
+    # Install dependencies from ``requirements.txt``
+    sudo pip install -r requirements.txt
+
     # Copy ``inventory/sample`` as ``inventory/mycluster``
-    cp -rfp inventory/sample inventory/mycluster
+    cp -rfp inventory/sample/* inventory/mycluster
 
     # Update Ansible inventory file with inventory builder
     declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
     CONFIG_FILE=inventory/mycluster/hosts.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]}
 
     # Review and change parameters under ``inventory/mycluster/group_vars``
-    cat inventory/mycluster/group_vars/all.yml
-    cat inventory/mycluster/group_vars/k8s-cluster.yml
+    cat inventory/mycluster/group_vars/all/all.yml
+    cat inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml
 
     # Deploy Kubespray with Ansible Playbook
     ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml
 
+Note: When Ansible is already installed via system packages on the control machine, other python packages installed via `sudo pip install -r requirements.txt` will go to a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on Ubuntu).
+As a consequence, `ansible-playbook` command will fail with:
+```
+ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path.
+```
+probably pointing on a task depending on a module present in requirements.txt (i.e. "unseal vault").
+
+One way of solving this would be to uninstall the Ansible package and then, to install it via pip but it is not always possible.
+A workaround consists of setting `ANSIBLE_LIBRARY` and `ANSIBLE_MODULE_UTILS` environment variables respectively to the `ansible/modules` and `ansible/module_utils` subdirectories of pip packages installation location, which can be found in the Location field of the output of `pip show [package]` before executing `ansible-playbook`.
+
 ### Vagrant
 
-    # Simply running `vagrant up` (for tests purposes)
+For Vagrant we need to install python dependencies for provisioning tasks.
+Check if Python and pip are installed:
+
+    python -V && pip -V
+
+If this returns the version of the software, you're good to go. If not, download and install Python from here <https://www.python.org/downloads/source/>
+Install the necessary requirements
+
+    sudo pip install -r requirements.txt
     vagrant up
 
 Documents
@@ -68,28 +90,37 @@ Supported Linux Distributions
 
 - **Container Linux by CoreOS**
 - **Debian** Jessie, Stretch, Wheezy
-- **Ubuntu** 16.04
+- **Ubuntu** 16.04, 18.04
 - **CentOS/RHEL** 7
+- **Fedora** 28
 - **Fedora/CentOS** Atomic
 - **openSUSE** Leap 42.3/Tumbleweed
 
 Note: Upstart/SysV init based OS types are not supported.
 
-Versions of supported components
---------------------------------
+Supported Components
+--------------------
 
-- [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.9.5
-- [etcd](https://github.com/coreos/etcd/releases) v3.2.4
-- [flanneld](https://github.com/coreos/flannel/releases) v0.10.0
-- [calico](https://docs.projectcalico.org/v2.6/releases/) v2.6.8
-- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
-- [cilium](https://github.com/cilium/cilium) v1.0.0-rc8
-- [contiv](https://github.com/contiv/install/releases) v1.1.7
-- [weave](http://weave.works/) v2.2.1
-- [docker](https://www.docker.com/) v17.03 (see note)
-- [rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)
+- Core
+  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.11.3
+  - [etcd](https://github.com/coreos/etcd) v3.2.18
+  - [docker](https://www.docker.com/) v17.03 (see note)
+  - [rkt](https://github.com/rkt/rkt) v1.21.0 (see Note 2)
+  - [cri-o](http://cri-o.io/) v1.11.5 (experimental: see [CRI-O Note](docs/cri-o.md). Only on centos based OS)
+- Network Plugin
+  - [calico](https://github.com/projectcalico/calico) v3.1.3
+  - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
+  - [cilium](https://github.com/cilium/cilium) v1.2.0
+  - [contiv](https://github.com/contiv/install) v1.1.7
+  - [flanneld](https://github.com/coreos/flannel) v0.10.0
+  - [weave](https://github.com/weaveworks/weave) v2.4.1
+- Application
+  - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
+  - [cert-manager](https://github.com/jetstack/cert-manager) v0.5.0
+  - [coredns](https://github.com/coredns/coredns) v1.2.2
+  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.19.0
 
-Note: kubernetes doesn't support newer docker versions. Among other things kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
+Note: kubernetes doesn't support newer docker versions ("Version 17.03 is recommended... Versions 17.06+ might work, but have not yet been tested and verified by the Kubernetes node team" cf. [Bootstrapping Clusters with kubeadm](https://kubernetes.io/docs/setup/independent/install-kubeadm/#installing-docker)). Among other things kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
 
 Note 2: rkt support as docker alternative is limited to control plane (etcd and
 kubelet). Docker is still used for Kubernetes cluster workloads and network
@@ -122,7 +153,7 @@ You can choose between 6 network plugins. (default: `calico`, except Vagrant use
 
 - [canal](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.
 
 - [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.
 
 - [contiv](docs/contiv.md): supports vlan, vxlan, bgp and Cisco SDN networking. This plugin is able to
   apply firewall policies, segregate containers in multiple network and bridging pods onto physical networks.
@@ -152,8 +183,6 @@ Tools and projects on top of Kubespray
 CI Tests
 --------
 
-
-
 [](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/pipelines)
 
 CI/end-to-end tests sponsored by Google (GCE)
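The Ansible note added in the README hunks above describes the `ANSIBLE_LIBRARY` / `ANSIBLE_MODULE_UTILS` workaround only in prose; a minimal shell sketch of it follows (the dist-packages path is the Ubuntu example from the note, not a universal location):

```
# Locate the pip-installed ansible tree (see the "Location:" field)
pip show ansible | grep Location
# Point the system Ansible at the pip-installed modules, then run the playbook
export ANSIBLE_LIBRARY=/usr/local/lib/python2.7/dist-packages/ansible/modules
export ANSIBLE_MODULE_UTILS=/usr/local/lib/python2.7/dist-packages/ansible/module_utils
ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml
```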
SECURITY_CONTACTS (new file, 13 lines)

# Defined below are the security contacts for this repo.
#
# They are the contact point for the Product Security Team to reach out
# to for triaging and handling of incoming issues.
#
# The below names agree to abide by the
# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
# and will be removed and replaced if they violate that agreement.
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://kubernetes.io/security/
atoms
mattymo
Vagrantfile (vendored, 9 lines changed)

@@ -18,6 +18,7 @@ SUPPORTED_OS = {
   "coreos-beta" => {box: "coreos-beta", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
   "ubuntu" => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "vagrant"},
   "centos" => {box: "centos/7", bootstrap_os: "centos", user: "vagrant"},
+  "fedora" => {box: "fedora/28-cloud-base", bootstrap_os: "fedora", user: "vagrant"},
   "opensuse" => {box: "opensuse/openSUSE-42.3-x86_64", bootstrap_os: "opensuse", use: "vagrant"},
   "opensuse-tumbleweed" => {box: "opensuse/openSUSE-Tumbleweed-x86_64", bootstrap_os: "opensuse", use: "vagrant"},
 }
@@ -44,6 +45,8 @@ $kube_node_instances_with_disks = false
 $kube_node_instances_with_disks_size = "20G"
 $kube_node_instances_with_disks_number = 2
 
+$playbook = "cluster.yml"
+
 $local_release_dir = "/vagrant/temp"
 
 host_vars = {}
@@ -125,6 +128,10 @@ Vagrant.configure("2") do |config|
 
     config.vm.provider :libvirt do |lv|
       lv.memory = $vm_memory
+      # Fix kernel panic on fedora 28
+      if $os == "fedora"
+        lv.cpu_mode = "host-passthrough"
+      end
     end
 
     ip = "#{$subnet}.#{i+100}"
@@ -157,7 +164,7 @@ Vagrant.configure("2") do |config|
       # when all the machines are up and ready.
       if i == $num_instances
         config.vm.provision "ansible" do |ansible|
-          ansible.playbook = "cluster.yml"
+          ansible.playbook = $playbook
           if File.exist?(File.join(File.dirname($inventory), "hosts"))
             ansible.inventory_path = $inventory
           end

ansible.cfg

@@ -13,3 +13,5 @@ callback_whitelist = profile_tasks
 roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
 deprecation_warnings=False
 inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds
+[inventory]
+ignore_patterns = artifacts, credentials
cluster.yml (12 lines changed)

@@ -33,11 +33,12 @@
   roles:
     - { role: kubespray-defaults}
     - { role: kubernetes/preinstall, tags: preinstall }
-    - { role: docker, tags: docker }
+    - { role: docker, tags: docker, when: container_manager == 'docker' }
+    - { role: cri-o, tags: crio, when: container_manager == 'crio' }
     - role: rkt
       tags: rkt
       when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"
-    - { role: download, tags: download, skip_downloads: false }
+    - { role: download, tags: download, when: "not skip_downloads" }
   environment: "{{proxy_env}}"
 
 - hosts: etcd:k8s-cluster:vault:calico-rr
@@ -51,13 +52,13 @@
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults}
-    - { role: etcd, tags: etcd, etcd_cluster_setup: true }
+    - { role: etcd, tags: etcd, etcd_cluster_setup: true, etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}" }
 
 - hosts: k8s-cluster:calico-rr
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults}
-    - { role: etcd, tags: etcd, etcd_cluster_setup: false }
+    - { role: etcd, tags: etcd, etcd_cluster_setup: false, etcd_events_cluster_setup: false }
 
 - hosts: etcd:k8s-cluster:vault:calico-rr
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@@ -93,6 +94,7 @@
   roles:
     - { role: kubespray-defaults}
     - { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
+    - { role: win_nodes/kubernetes_patch, tags: win_nodes, when: "kubeadm_enabled" }
 
 - hosts: kube-master
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@@ -117,7 +119,7 @@
   - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf }
   environment: "{{proxy_env}}"
 
-- hosts: kube-master[0]
+- hosts: kube-master
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults}

Azure documentation:
@@ -9,8 +9,8 @@ Resource Group. It will not install Kubernetes itself, this has to be done in a
 
 ## Requirements
 
-- [Install azure-cli](https://docs.microsoft.com/en-us/azure/xplat-cli-install)
-- [Login with azure-cli](https://docs.microsoft.com/en-us/azure/xplat-cli-connect)
+- [Install azure-cli](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest)
+- [Login with azure-cli](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli?view=azure-cli-latest)
 - Dedicated Resource Group created in the Azure Portal or through azure-cli
 
 ## Configuration through group_vars/all

Python script shebang:
@@ -1,4 +1,4 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
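The cluster.yml hunks above now gate the docker and cri-o roles on `container_manager`; a hedged example of picking the experimental CRI-O path at deploy time (passing it with `-e` is just one way to set the variable, group_vars works equally well):

```
# Deploy with CRI-O as the container manager (experimental, centos-based OS only per the README note)
ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -e container_manager=crio
```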
contrib/metallb/README.md (new file, 10 lines)

# Deploy MetalLB into Kubespray/Kubernetes
```
MetalLB hooks into your Kubernetes cluster, and provides a network load-balancer implementation. In short, it allows you to create Kubernetes services of type “LoadBalancer” in clusters that don’t run on a cloud provider, and thus cannot simply hook into paid products to provide load-balancers.
```
This playbook aims to automate [this](https://metallb.universe.tf/tutorial/layer2/tutorial). It deploys MetalLB into kubernetes and sets up a layer 2 loadbalancer.

## Install
```
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/metallb/metallb.yml
```
contrib/metallb/metallb.yml (new file, 6 lines)

---
- hosts: kube-master[0]
  tags:
    - "provision"
  roles:
    - { role: provision }
contrib/metallb/roles/provision/defaults/main.yml (new file, 7 lines)

---
metallb:
  ip_range: "10.5.0.50-10.5.0.99"
  limits:
    cpu: "100m"
    memory: "100Mi"
  port: "7472"
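The role defaults above hard-code a 10.5.0.x pool; one way to adapt it without editing the role is an extra-vars override on the install command from the MetalLB README. Note that `-e` replaces the whole `metallb` dict under Ansible's default hash behaviour, so all keys are repeated here; the 192.168.1.x range is only an example:

```
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/metallb/metallb.yml \
  -e '{"metallb": {"ip_range": "192.168.1.240-192.168.1.250", "limits": {"cpu": "100m", "memory": "100Mi"}, "port": "7472"}}'
```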
contrib/metallb/roles/provision/tasks/main.yml (new file, 17 lines)

---
- name: "Kubernetes Apps | Lay Down MetalLB"
  become: true
  template: { src: "{{ item }}.j2", dest: "{{ kube_config_dir }}/{{ item }}" }
  with_items: ["metallb.yml", "metallb-config.yml"]
  register: "rendering"
  when:
    - "inventory_hostname == groups['kube-master'][0]"
- name: "Kubernetes Apps | Install and configure MetalLB"
  kube:
    name: "MetalLB"
    kubectl: "{{bin_dir}}/kubectl"
    filename: "{{ kube_config_dir }}/{{ item.item }}"
    state: "{{ item.changed | ternary('latest','present') }}"
  with_items: "{{ rendering.results }}"
  when:
    - "inventory_hostname == groups['kube-master'][0]"

metallb-config.yml template (new file, 13 lines)

---
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
      - name: loadbalanced
        protocol: layer2
        addresses:
          - {{ metallb.ip_range }}
contrib/metallb/roles/provision/templates/metallb.yml.j2 (new file, 254 lines)

apiVersion: v1
kind: Namespace
metadata:
  name: metallb-system
  labels:
    app: metallb
---

apiVersion: v1
kind: ServiceAccount
metadata:
  namespace: metallb-system
  name: controller
  labels:
    app: metallb
---
apiVersion: v1
kind: ServiceAccount
metadata:
  namespace: metallb-system
  name: speaker
  labels:
    app: metallb

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: metallb-system:controller
  labels:
    app: metallb
rules:
- apiGroups: [""]
  resources: ["services"]
  verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
  resources: ["services/status"]
  verbs: ["update"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: metallb-system:speaker
  labels:
    app: metallb
rules:
- apiGroups: [""]
  resources: ["services", "endpoints", "nodes"]
  verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  namespace: metallb-system
  name: leader-election
  labels:
    app: metallb
rules:
- apiGroups: [""]
  resources: ["endpoints"]
  resourceNames: ["metallb-speaker"]
  verbs: ["get", "update"]
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["create"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  namespace: metallb-system
  name: config-watcher
  labels:
    app: metallb
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create"]
---

## Role bindings
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: metallb-system:controller
  labels:
    app: metallb
subjects:
- kind: ServiceAccount
  name: controller
  namespace: metallb-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: metallb-system:controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: metallb-system:speaker
  labels:
    app: metallb
subjects:
- kind: ServiceAccount
  name: speaker
  namespace: metallb-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: metallb-system:speaker
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  namespace: metallb-system
  name: config-watcher
  labels:
    app: metallb
subjects:
- kind: ServiceAccount
  name: controller
- kind: ServiceAccount
  name: speaker
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: config-watcher
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  namespace: metallb-system
  name: leader-election
  labels:
    app: metallb
subjects:
- kind: ServiceAccount
  name: speaker
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: leader-election
---
apiVersion: apps/v1beta2
kind: DaemonSet
metadata:
  namespace: metallb-system
  name: speaker
  labels:
    app: metallb
    component: speaker
spec:
  selector:
    matchLabels:
      app: metallb
      component: speaker
  template:
    metadata:
      labels:
        app: metallb
        component: speaker
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "{{ metallb.port }}"
    spec:
      serviceAccountName: speaker
      terminationGracePeriodSeconds: 0
      hostNetwork: true
      containers:
      - name: speaker
        image: metallb/speaker:v0.6.2
        imagePullPolicy: IfNotPresent
        args:
        - --port={{ metallb.port }}
        - --config=config
        env:
        - name: METALLB_NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        ports:
        - name: monitoring
          containerPort: {{ metallb.port }}
        resources:
          limits:
            cpu: {{ metallb.limits.cpu }}
            memory: {{ metallb.limits.memory }}
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
          capabilities:
            drop:
            - all
            add:
            - net_raw

---
apiVersion: apps/v1beta2
kind: Deployment
metadata:
  namespace: metallb-system
  name: controller
  labels:
    app: metallb
    component: controller
spec:
  revisionHistoryLimit: 3
  selector:
    matchLabels:
      app: metallb
      component: controller
  template:
    metadata:
      labels:
        app: metallb
        component: controller
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "{{ metallb.port }}"
    spec:
      serviceAccountName: controller
      terminationGracePeriodSeconds: 0
      securityContext:
        runAsNonRoot: true
        runAsUser: 65534 # nobody
      containers:
      - name: controller
        image: metallb/controller:v0.6.2
        imagePullPolicy: IfNotPresent
        args:
        - --port={{ metallb.port }}
        - --config=config
        ports:
        - name: monitoring
          containerPort: {{ metallb.port }}
        resources:
          limits:
            cpu: {{ metallb.limits.cpu }}
            memory: {{ metallb.limits.memory }}
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - all
          readOnlyRootFilesystem: true
---

Symlinked group_vars target:
@@ -1 +1 @@
-../../../inventory/group_vars
+../../../inventory/local/group_vars

GlusterFS contrib inventory example:
@@ -12,7 +12,7 @@
 # ## As in the previous case, you can set ip to give direct communication on internal IPs
 # gfs_node1 ansible_ssh_host=95.54.0.18 # disk_volume_device_1=/dev/vdc ip=10.3.0.7
 # gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8
-# gfs_node1 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9
+# gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9
 
 # [kube-master]
 # node1

GlusterFS role defaults (Ubuntu PPA):
@@ -2,7 +2,7 @@
 # For Ubuntu.
 glusterfs_default_release: ""
 glusterfs_ppa_use: yes
-glusterfs_ppa_version: "3.8"
+glusterfs_ppa_version: "4.1"
 
 # Gluster configuration.
 gluster_mount_dir: /mnt/gluster

GlusterFS role defaults (Ubuntu PPA):
@@ -2,7 +2,7 @@
 # For Ubuntu.
 glusterfs_default_release: ""
 glusterfs_ppa_use: yes
-glusterfs_ppa_version: "3.8"
+glusterfs_ppa_version: "3.12"
 
 # Gluster configuration.
 gluster_mount_dir: /mnt/gluster

GlusterFS service name:
@@ -1,2 +1,2 @@
 ---
-glusterfs_daemon: glusterfs-server
+glusterfs_daemon: glusterd
contrib/network-storage/heketi/README.md (new file, 16 lines)

# Deploy Heketi/Glusterfs into Kubespray/Kubernetes
This playbook aims to automate [this](https://github.com/heketi/heketi/blob/master/docs/admin/install-kubernetes.md) tutorial. It deploys heketi/glusterfs into kubernetes and sets up a storageclass.

## Client Setup
Heketi provides a CLI that provides users with a means to administer the deployment and configuration of GlusterFS in Kubernetes. [Download and install the heketi-cli](https://github.com/heketi/heketi/releases) on your client machine.

## Install
Copy the inventory.yml.sample over to inventory/sample/k8s_heketi_inventory.yml and change it according to your setup.
```
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi.yml
```

## Tear down
```
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi-tear-down.yml
```
contrib/network-storage/heketi/heketi-tear-down.yml (new file, 9 lines)

---
- hosts: kube-master[0]
  roles:
    - { role: tear-down }

- hosts: heketi-node
  become: yes
  roles:
    - { role: tear-down-disks }
contrib/network-storage/heketi/heketi.yml (new file, 10 lines)

---
- hosts: heketi-node
  roles:
    - { role: prepare }

- hosts: kube-master[0]
  tags:
    - "provision"
  roles:
    - { role: provision }
contrib/network-storage/heketi/inventory.yml.sample (new file, 26 lines)
@@ -0,0 +1,26 @@
+all:
+  vars:
+    heketi_admin_key: "11elfeinhundertundelf"
+    heketi_user_key: "!!einseinseins"
+  children:
+    k8s-cluster:
+      vars:
+        kubelet_fail_swap_on: false
+      children:
+        kube-master:
+          hosts:
+            node1:
+        etcd:
+          hosts:
+            node2:
+        kube-node:
+          hosts: &kube_nodes
+            node1:
+            node2:
+            node3:
+            node4:
+        heketi-node:
+          vars:
+            disk_volume_device_1: "/dev/vdb"
+          hosts:
+            <<: *kube_nodes
contrib/network-storage/heketi/requirements.txt (new file, 1 line)
@@ -0,0 +1 @@
+jmespath
contrib/network-storage/heketi/roles/prepare/tasks/main.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
+---
+- name: "Load lvm kernel modules"
+  become: true
+  with_items:
+    - "dm_snapshot"
+    - "dm_mirror"
+    - "dm_thin_pool"
+  modprobe:
+    name: "{{ item }}"
+    state: "present"
+
+- name: "Install glusterfs mount utils (RedHat)"
+  become: true
+  yum:
+    name: "glusterfs-fuse"
+    state: "present"
+  when: "ansible_os_family == 'RedHat'"
+
+- name: "Install glusterfs mount utils (Debian)"
+  become: true
+  apt:
+    name: "glusterfs-client"
+    state: "present"
+  when: "ansible_os_family == 'Debian'"
@@ -0,0 +1 @@
+---
@@ -0,0 +1,3 @@
+---
+- name: "stop port forwarding"
+  command: "killall "
@@ -0,0 +1,56 @@
|
|||||||
|
# Bootstrap heketi
|
||||||
|
- name: "Get state of heketi service, deployment and pods."
|
||||||
|
register: "initial_heketi_state"
|
||||||
|
changed_when: false
|
||||||
|
command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
|
||||||
|
- name: "Bootstrap heketi."
|
||||||
|
when:
|
||||||
|
- "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Service']\"))|length == 0"
|
||||||
|
- "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Deployment']\"))|length == 0"
|
||||||
|
- "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod']\"))|length == 0"
|
||||||
|
include_tasks: "bootstrap/deploy.yml"
|
||||||
|
|
||||||
|
# Prepare heketi topology
|
||||||
|
- name: "Get heketi initial pod state."
|
||||||
|
register: "initial_heketi_pod"
|
||||||
|
command: "{{ bin_dir }}/kubectl get pods --selector=deploy-heketi=pod,glusterfs=heketi-pod,name=deploy-heketi --output=json"
|
||||||
|
changed_when: false
|
||||||
|
- name: "Ensure heketi bootstrap pod is up."
|
||||||
|
assert:
|
||||||
|
that: "(initial_heketi_pod.stdout|from_json|json_query('items[*]'))|length == 1"
|
||||||
|
- set_fact:
|
||||||
|
initial_heketi_pod_name: "{{ initial_heketi_pod.stdout|from_json|json_query(\"items[*].metadata.name|[0]\") }}"
|
||||||
|
- name: "Test heketi topology."
|
||||||
|
changed_when: false
|
||||||
|
register: "heketi_topology"
|
||||||
|
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
||||||
|
- name: "Load heketi topology."
|
||||||
|
when: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*]\")|flatten|length == 0"
|
||||||
|
include_tasks: "bootstrap/topology.yml"
|
||||||
|
|
||||||
|
# Provision heketi database volume
|
||||||
|
- name: "Prepare heketi volumes."
|
||||||
|
include_tasks: "bootstrap/volumes.yml"
|
||||||
|
|
||||||
|
# Remove bootstrap heketi
|
||||||
|
- name: "Tear down bootstrap."
|
||||||
|
include_tasks: "bootstrap/tear-down.yml"
|
||||||
|
|
||||||
|
# Prepare heketi storage
|
||||||
|
- name: "Test heketi storage."
|
||||||
|
command: "{{ bin_dir }}/kubectl get secrets,endpoints,services,jobs --output=json"
|
||||||
|
changed_when: false
|
||||||
|
register: "heketi_storage_state"
|
||||||
|
# ensure endpoints actually exist before trying to move database data to it
|
||||||
|
- name: "Create heketi storage."
|
||||||
|
include_tasks: "bootstrap/storage.yml"
|
||||||
|
vars:
|
||||||
|
secret_query: "items[?metadata.name=='heketi-storage-secret' && kind=='Secret']"
|
||||||
|
endpoints_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Endpoints']"
|
||||||
|
service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
|
||||||
|
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
|
||||||
|
when:
|
||||||
|
- "heketi_storage_state.stdout|from_json|json_query(secret_query)|length == 0"
|
||||||
|
- "heketi_storage_state.stdout|from_json|json_query(endpoints_query)|length == 0"
|
||||||
|
- "heketi_storage_state.stdout|from_json|json_query(service_query)|length == 0"
|
||||||
|
- "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 0"
|
||||||
@@ -0,0 +1,24 @@
|
|||||||
|
---
|
||||||
|
- name: "Kubernetes Apps | Lay Down Heketi Bootstrap"
|
||||||
|
become: true
|
||||||
|
template: { src: "heketi-bootstrap.json.j2", dest: "{{ kube_config_dir }}/heketi-bootstrap.json" }
|
||||||
|
register: "rendering"
|
||||||
|
- name: "Kubernetes Apps | Install and configure Heketi Bootstrap"
|
||||||
|
kube:
|
||||||
|
name: "GlusterFS"
|
||||||
|
kubectl: "{{bin_dir}}/kubectl"
|
||||||
|
filename: "{{ kube_config_dir }}/heketi-bootstrap.json"
|
||||||
|
state: "{{ rendering.changed | ternary('latest', 'present') }}"
|
||||||
|
- name: "Wait for heketi bootstrap to complete."
|
||||||
|
changed_when: false
|
||||||
|
register: "initial_heketi_state"
|
||||||
|
vars:
|
||||||
|
initial_heketi_state: { stdout: "{}" }
|
||||||
|
pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]"
|
||||||
|
deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
|
||||||
|
command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
|
||||||
|
until:
|
||||||
|
- "initial_heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
|
||||||
|
- "initial_heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
|
||||||
|
retries: 60
|
||||||
|
delay: 5
|
||||||
@@ -0,0 +1,33 @@
|
|||||||
|
---
|
||||||
|
- name: "Test heketi storage."
|
||||||
|
command: "{{ bin_dir }}/kubectl get secrets,endpoints,services,jobs --output=json"
|
||||||
|
changed_when: false
|
||||||
|
register: "heketi_storage_state"
|
||||||
|
- name: "Create heketi storage."
|
||||||
|
kube:
|
||||||
|
name: "GlusterFS"
|
||||||
|
kubectl: "{{bin_dir}}/kubectl"
|
||||||
|
filename: "{{ kube_config_dir }}/heketi-storage-bootstrap.json"
|
||||||
|
state: "present"
|
||||||
|
vars:
|
||||||
|
secret_query: "items[?metadata.name=='heketi-storage-secret' && kind=='Secret']"
|
||||||
|
endpoints_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Endpoints']"
|
||||||
|
service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
|
||||||
|
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
|
||||||
|
when:
|
||||||
|
- "heketi_storage_state.stdout|from_json|json_query(secret_query)|length == 0"
|
||||||
|
- "heketi_storage_state.stdout|from_json|json_query(endpoints_query)|length == 0"
|
||||||
|
- "heketi_storage_state.stdout|from_json|json_query(service_query)|length == 0"
|
||||||
|
- "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 0"
|
||||||
|
register: "heketi_storage_result"
|
||||||
|
- name: "Get state of heketi database copy job."
|
||||||
|
command: "{{ bin_dir }}/kubectl get jobs --output=json"
|
||||||
|
changed_when: false
|
||||||
|
register: "heketi_storage_state"
|
||||||
|
vars:
|
||||||
|
heketi_storage_state: { stdout: "{}" }
|
||||||
|
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job' && status.succeeded==1]"
|
||||||
|
until:
|
||||||
|
- "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 1"
|
||||||
|
retries: 60
|
||||||
|
delay: 5
|
||||||
@@ -0,0 +1,14 @@
|
|||||||
|
---
|
||||||
|
- name: "Get existing Heketi deploy resources."
|
||||||
|
command: "{{ bin_dir }}/kubectl get all --selector=\"deploy-heketi\" -o=json"
|
||||||
|
register: "heketi_resources"
|
||||||
|
changed_when: false
|
||||||
|
- name: "Delete bootstrap Heketi."
|
||||||
|
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
|
||||||
|
when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0"
|
||||||
|
- name: "Ensure there is nothing left over."
|
||||||
|
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
|
||||||
|
register: "heketi_result"
|
||||||
|
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
|
||||||
|
retries: 60
|
||||||
|
delay: 5
|
||||||
@@ -0,0 +1,26 @@
|
|||||||
|
---
|
||||||
|
- name: "Get heketi topology."
|
||||||
|
changed_when: false
|
||||||
|
register: "heketi_topology"
|
||||||
|
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
||||||
|
- name: "Render heketi topology template."
|
||||||
|
become: true
|
||||||
|
vars: { nodes: "{{ groups['heketi-node'] }}" }
|
||||||
|
register: "render"
|
||||||
|
template:
|
||||||
|
src: "topology.json.j2"
|
||||||
|
dest: "{{ kube_config_dir }}/topology.json"
|
||||||
|
- name: "Copy topology configuration into container."
|
||||||
|
changed_when: false
|
||||||
|
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
|
||||||
|
- name: "Load heketi topology."
|
||||||
|
when: "render.changed"
|
||||||
|
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
|
||||||
|
register: "load_heketi"
|
||||||
|
- name: "Get heketi topology."
|
||||||
|
changed_when: false
|
||||||
|
register: "heketi_topology"
|
||||||
|
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
||||||
|
until: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\")|flatten|length == groups['heketi-node']|length"
|
||||||
|
retries: 60
|
||||||
|
delay: 5
|
||||||
@@ -0,0 +1,41 @@
|
|||||||
|
---
|
||||||
|
- name: "Get heketi volume ids."
|
||||||
|
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume list --json"
|
||||||
|
changed_when: false
|
||||||
|
register: "heketi_volumes"
|
||||||
|
- name: "Get heketi volumes."
|
||||||
|
changed_when: false
|
||||||
|
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
|
||||||
|
with_items: "{{ heketi_volumes.stdout|from_json|json_query(\"volumes[*]\") }}"
|
||||||
|
loop_control: { loop_var: "volume_id" }
|
||||||
|
register: "volumes_information"
|
||||||
|
- name: "Test heketi database volume."
|
||||||
|
set_fact: { heketi_database_volume_exists: true }
|
||||||
|
with_items: "{{ volumes_information.results }}"
|
||||||
|
loop_control: { loop_var: "volume_information" }
|
||||||
|
vars: { volume: "{{ volume_information.stdout|from_json }}" }
|
||||||
|
when: "volume.name == 'heketidbstorage'"
|
||||||
|
- name: "Provision database volume."
|
||||||
|
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
|
||||||
|
when: "heketi_database_volume_exists is undefined"
|
||||||
|
- name: "Copy configuration from pod."
|
||||||
|
become: true
|
||||||
|
command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"
|
||||||
|
- name: "Get heketi volume ids."
|
||||||
|
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume list --json"
|
||||||
|
changed_when: false
|
||||||
|
register: "heketi_volumes"
|
||||||
|
- name: "Get heketi volumes."
|
||||||
|
changed_when: false
|
||||||
|
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
|
||||||
|
with_items: "{{ heketi_volumes.stdout|from_json|json_query(\"volumes[*]\") }}"
|
||||||
|
loop_control: { loop_var: "volume_id" }
|
||||||
|
register: "volumes_information"
|
||||||
|
- name: "Test heketi database volume."
|
||||||
|
set_fact: { heketi_database_volume_created: true }
|
||||||
|
with_items: "{{ volumes_information.results }}"
|
||||||
|
loop_control: { loop_var: "volume_information" }
|
||||||
|
vars: { volume: "{{ volume_information.stdout|from_json }}" }
|
||||||
|
when: "volume.name == 'heketidbstorage'"
|
||||||
|
- name: "Ensure heketi database volume exists."
|
||||||
|
assert: { that: "heketi_database_volume_created is defined" , msg: "Heketi database volume does not exist." }
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
---
|
||||||
|
- name: "Clean up left over jobs."
|
||||||
|
command: "{{ bin_dir }}/kubectl delete jobs,pods --selector=\"deploy-heketi\""
|
||||||
|
changed_when: false
|
||||||
@@ -0,0 +1,38 @@
|
|||||||
|
---
|
||||||
|
- name: "Kubernetes Apps | Lay Down GlusterFS Daemonset"
|
||||||
|
template: { src: "glusterfs-daemonset.json.j2", dest: "{{ kube_config_dir }}/glusterfs-daemonset.json" }
|
||||||
|
become: true
|
||||||
|
register: "rendering"
|
||||||
|
- name: "Kubernetes Apps | Install and configure GlusterFS daemonset"
|
||||||
|
kube:
|
||||||
|
name: "GlusterFS"
|
||||||
|
kubectl: "{{bin_dir}}/kubectl"
|
||||||
|
filename: "{{ kube_config_dir }}/glusterfs-daemonset.json"
|
||||||
|
state: "{{ rendering.changed | ternary('latest', 'present') }}"
|
||||||
|
- name: "Kubernetes Apps | Label GlusterFS nodes"
|
||||||
|
include_tasks: "glusterfs/label.yml"
|
||||||
|
with_items: "{{ groups['heketi-node'] }}"
|
||||||
|
loop_control:
|
||||||
|
loop_var: "node"
|
||||||
|
- name: "Kubernetes Apps | Wait for daemonset to become available."
|
||||||
|
register: "daemonset_state"
|
||||||
|
command: "{{ bin_dir }}/kubectl get daemonset glusterfs --output=json --ignore-not-found=true"
|
||||||
|
changed_when: false
|
||||||
|
vars:
|
||||||
|
daemonset_state: { stdout: "{}" }
|
||||||
|
ready: "{{ daemonset_state.stdout|from_json|json_query(\"status.numberReady\") }}"
|
||||||
|
desired: "{{ daemonset_state.stdout|from_json|json_query(\"status.desiredNumberScheduled\") }}"
|
||||||
|
until: "ready | int >= 3"
|
||||||
|
retries: 60
|
||||||
|
delay: 5
|
||||||
|
|
||||||
|
- name: "Kubernetes Apps | Lay Down Heketi Service Account"
|
||||||
|
template: { src: "heketi-service-account.json.j2", dest: "{{ kube_config_dir }}/heketi-service-account.json" }
|
||||||
|
become: true
|
||||||
|
register: "rendering"
|
||||||
|
- name: "Kubernetes Apps | Install and configure Heketi Service Account"
|
||||||
|
kube:
|
||||||
|
name: "GlusterFS"
|
||||||
|
kubectl: "{{bin_dir}}/kubectl"
|
||||||
|
filename: "{{ kube_config_dir }}/heketi-service-account.json"
|
||||||
|
state: "{{ rendering.changed | ternary('latest', 'present') }}"
|
||||||
@@ -0,0 +1,11 @@
|
|||||||
|
---
|
||||||
|
- register: "label_present"
|
||||||
|
command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
|
||||||
|
changed_when: false
|
||||||
|
- name: "Assign storage label"
|
||||||
|
when: "label_present.stdout_lines|length == 0"
|
||||||
|
command: "{{ bin_dir }}/kubectl label node {{ node }} storagenode=glusterfs"
|
||||||
|
- register: "label_present"
|
||||||
|
command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
|
||||||
|
changed_when: false
|
||||||
|
- assert: { that: "label_present|length > 0", msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs." }
|
||||||
@@ -0,0 +1,26 @@
|
|||||||
|
---
|
||||||
|
- name: "Kubernetes Apps | Lay Down Heketi"
|
||||||
|
become: true
|
||||||
|
template: { src: "heketi-deployment.json.j2", dest: "{{ kube_config_dir }}/heketi-deployment.json" }
|
||||||
|
register: "rendering"
|
||||||
|
- name: "Kubernetes Apps | Install and configure Heketi"
|
||||||
|
kube:
|
||||||
|
name: "GlusterFS"
|
||||||
|
kubectl: "{{bin_dir}}/kubectl"
|
||||||
|
filename: "{{ kube_config_dir }}/heketi-deployment.json"
|
||||||
|
state: "{{ rendering.changed | ternary('latest', 'present') }}"
|
||||||
|
- name: "Ensure heketi is up and running."
|
||||||
|
changed_when: false
|
||||||
|
register: "heketi_state"
|
||||||
|
vars:
|
||||||
|
heketi_state: { stdout: "{}" }
|
||||||
|
pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]"
|
||||||
|
deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
|
||||||
|
command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json"
|
||||||
|
until:
|
||||||
|
- "heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
|
||||||
|
- "heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
|
||||||
|
retries: 60
|
||||||
|
delay: 5
|
||||||
|
- set_fact:
|
||||||
|
heketi_pod_name: "{{ heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"
|
||||||
@@ -0,0 +1,30 @@
|
|||||||
|
---
|
||||||
|
- name: "Kubernetes Apps | GlusterFS"
|
||||||
|
include_tasks: "glusterfs.yml"
|
||||||
|
|
||||||
|
- name: "Kubernetes Apps | Heketi Secrets"
|
||||||
|
include_tasks: "secret.yml"
|
||||||
|
|
||||||
|
- name: "Kubernetes Apps | Test Heketi"
|
||||||
|
register: "heketi_service_state"
|
||||||
|
command: "{{bin_dir}}/kubectl get service heketi-storage-endpoints -o=name --ignore-not-found=true"
|
||||||
|
changed_when: false
|
||||||
|
|
||||||
|
- name: "Kubernetes Apps | Bootstrap Heketi"
|
||||||
|
when: "heketi_service_state.stdout == \"\""
|
||||||
|
include_tasks: "bootstrap.yml"
|
||||||
|
|
||||||
|
- name: "Kubernetes Apps | Heketi"
|
||||||
|
include_tasks: "heketi.yml"
|
||||||
|
|
||||||
|
- name: "Kubernetes Apps | Heketi Topology"
|
||||||
|
include_tasks: "topology.yml"
|
||||||
|
|
||||||
|
- name: "Kubernetes Apps | Heketi Storage"
|
||||||
|
include_tasks: "storage.yml"
|
||||||
|
|
||||||
|
- name: "Kubernetes Apps | Storage Class"
|
||||||
|
include_tasks: "storageclass.yml"
|
||||||
|
|
||||||
|
- name: "Clean up"
|
||||||
|
include_tasks: "cleanup.yml"
|
||||||
@@ -0,0 +1,27 @@
|
|||||||
|
---
|
||||||
|
- register: "clusterrolebinding_state"
|
||||||
|
command: "{{bin_dir}}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
|
||||||
|
changed_when: false
|
||||||
|
- name: "Kubernetes Apps | Deploy cluster role binding."
|
||||||
|
when: "clusterrolebinding_state.stdout == \"\""
|
||||||
|
command: "{{bin_dir}}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"
|
||||||
|
- register: "clusterrolebinding_state"
|
||||||
|
command: "{{bin_dir}}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
|
||||||
|
changed_when: false
|
||||||
|
- assert: { that: "clusterrolebinding_state.stdout != \"\"", message: "Cluster role binding is not present." }
|
||||||
|
|
||||||
|
- register: "secret_state"
|
||||||
|
command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
|
||||||
|
changed_when: false
|
||||||
|
- name: "Render Heketi secret configuration."
|
||||||
|
become: true
|
||||||
|
template:
|
||||||
|
src: "heketi.json.j2"
|
||||||
|
dest: "{{ kube_config_dir }}/heketi.json"
|
||||||
|
- name: "Deploy Heketi config secret"
|
||||||
|
when: "secret_state.stdout == \"\""
|
||||||
|
command: "{{bin_dir}}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"
|
||||||
|
- register: "secret_state"
|
||||||
|
command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
|
||||||
|
changed_when: false
|
||||||
|
- assert: { that: "secret_state.stdout != \"\"", message: "Heketi config secret is not present." }
|
||||||
@@ -0,0 +1,12 @@
|
|||||||
|
---
|
||||||
|
- name: "Kubernetes Apps | Lay Down Heketi Storage"
|
||||||
|
become: true
|
||||||
|
vars: { nodes: "{{ groups['heketi-node'] }}" }
|
||||||
|
template: { src: "heketi-storage.json.j2", dest: "{{ kube_config_dir }}/heketi-storage.json" }
|
||||||
|
register: "rendering"
|
||||||
|
- name: "Kubernetes Apps | Install and configure Heketi Storage"
|
||||||
|
kube:
|
||||||
|
name: "GlusterFS"
|
||||||
|
kubectl: "{{bin_dir}}/kubectl"
|
||||||
|
filename: "{{ kube_config_dir }}/heketi-storage.json"
|
||||||
|
state: "{{ rendering.changed | ternary('latest', 'present') }}"
|
||||||
@@ -0,0 +1,25 @@
|
|||||||
|
---
|
||||||
|
- name: "Test storage class."
|
||||||
|
command: "{{ bin_dir }}/kubectl get storageclass gluster --ignore-not-found=true --output=json"
|
||||||
|
register: "storageclass"
|
||||||
|
changed_when: false
|
||||||
|
- name: "Test heketi service."
|
||||||
|
command: "{{ bin_dir }}/kubectl get service heketi --ignore-not-found=true --output=json"
|
||||||
|
register: "heketi_service"
|
||||||
|
changed_when: false
|
||||||
|
- name: "Ensure heketi service is available."
|
||||||
|
assert: { that: "heketi_service.stdout != \"\"" }
|
||||||
|
- name: "Render storage class configuration."
|
||||||
|
become: true
|
||||||
|
vars:
|
||||||
|
endpoint_address: "{{ (heketi_service.stdout|from_json).spec.clusterIP }}"
|
||||||
|
template:
|
||||||
|
src: "storageclass.yml.j2"
|
||||||
|
dest: "{{ kube_config_dir }}/storageclass.yml"
|
||||||
|
register: "rendering"
|
||||||
|
- name: "Kubernetes Apps | Install and configure Storace Class"
|
||||||
|
kube:
|
||||||
|
name: "GlusterFS"
|
||||||
|
kubectl: "{{bin_dir}}/kubectl"
|
||||||
|
filename: "{{ kube_config_dir }}/storageclass.yml"
|
||||||
|
state: "{{ rendering.changed | ternary('latest', 'present') }}"
|
||||||
@@ -0,0 +1,25 @@
|
|||||||
|
---
|
||||||
|
- name: "Get heketi topology."
|
||||||
|
register: "heketi_topology"
|
||||||
|
changed_when: false
|
||||||
|
command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
||||||
|
- name: "Render heketi topology template."
|
||||||
|
become: true
|
||||||
|
vars: { nodes: "{{ groups['heketi-node'] }}" }
|
||||||
|
register: "rendering"
|
||||||
|
template:
|
||||||
|
src: "topology.json.j2"
|
||||||
|
dest: "{{ kube_config_dir }}/topology.json"
|
||||||
|
- name: "Copy topology configuration into container."
|
||||||
|
when: "rendering.changed"
|
||||||
|
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
|
||||||
|
- name: "Load heketi topology."
|
||||||
|
when: "rendering.changed"
|
||||||
|
command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
|
||||||
|
- name: "Get heketi topology."
|
||||||
|
register: "heketi_topology"
|
||||||
|
changed_when: false
|
||||||
|
command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
||||||
|
until: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\")|flatten|length == groups['heketi-node']|length"
|
||||||
|
retries: 60
|
||||||
|
delay: 5
|
||||||
@@ -0,0 +1,144 @@
|
|||||||
|
{
|
||||||
|
"kind": "DaemonSet",
|
||||||
|
"apiVersion": "extensions/v1beta1",
|
||||||
|
"metadata": {
|
||||||
|
"name": "glusterfs",
|
||||||
|
"labels": {
|
||||||
|
"glusterfs": "deployment"
|
||||||
|
},
|
||||||
|
"annotations": {
|
||||||
|
"description": "GlusterFS Daemon Set",
|
||||||
|
"tags": "glusterfs"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"spec": {
|
||||||
|
"template": {
|
||||||
|
"metadata": {
|
||||||
|
"name": "glusterfs",
|
||||||
|
"labels": {
|
||||||
|
"glusterfs-node": "daemonset"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"spec": {
|
||||||
|
"nodeSelector": {
|
||||||
|
"storagenode" : "glusterfs"
|
||||||
|
},
|
||||||
|
"hostNetwork": true,
|
||||||
|
"containers": [
|
||||||
|
{
|
||||||
|
"image": "gluster/gluster-centos:gluster4u0_centos7",
|
||||||
|
"imagePullPolicy": "IfNotPresent",
|
||||||
|
"name": "glusterfs",
|
||||||
|
"volumeMounts": [
|
||||||
|
{
|
||||||
|
"name": "glusterfs-heketi",
|
||||||
|
"mountPath": "/var/lib/heketi"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "glusterfs-run",
|
||||||
|
"mountPath": "/run"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "glusterfs-lvm",
|
||||||
|
"mountPath": "/run/lvm"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "glusterfs-etc",
|
||||||
|
"mountPath": "/etc/glusterfs"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "glusterfs-logs",
|
||||||
|
"mountPath": "/var/log/glusterfs"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "glusterfs-config",
|
||||||
|
"mountPath": "/var/lib/glusterd"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "glusterfs-dev",
|
||||||
|
"mountPath": "/dev"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "glusterfs-cgroup",
|
||||||
|
"mountPath": "/sys/fs/cgroup"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"securityContext": {
|
||||||
|
"capabilities": {},
|
||||||
|
"privileged": true
|
||||||
|
},
|
||||||
|
"readinessProbe": {
|
||||||
|
"timeoutSeconds": 3,
|
||||||
|
"initialDelaySeconds": 60,
|
||||||
|
"exec": {
|
||||||
|
"command": [
|
||||||
|
"/bin/bash",
|
||||||
|
"-c",
|
||||||
|
"systemctl status glusterd.service"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"livenessProbe": {
|
||||||
|
"timeoutSeconds": 3,
|
||||||
|
"initialDelaySeconds": 60,
|
||||||
|
"exec": {
|
||||||
|
"command": [
|
||||||
|
"/bin/bash",
|
||||||
|
"-c",
|
||||||
|
"systemctl status glusterd.service"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"volumes": [
|
||||||
|
{
|
||||||
|
"name": "glusterfs-heketi",
|
||||||
|
"hostPath": {
|
||||||
|
"path": "/var/lib/heketi"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "glusterfs-run"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "glusterfs-lvm",
|
||||||
|
"hostPath": {
|
||||||
|
"path": "/run/lvm"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "glusterfs-etc",
|
||||||
|
"hostPath": {
|
||||||
|
"path": "/etc/glusterfs"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "glusterfs-logs",
|
||||||
|
"hostPath": {
|
||||||
|
"path": "/var/log/glusterfs"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "glusterfs-config",
|
||||||
|
"hostPath": {
|
||||||
|
"path": "/var/lib/glusterd"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "glusterfs-dev",
|
||||||
|
"hostPath": {
|
||||||
|
"path": "/dev"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "glusterfs-cgroup",
|
||||||
|
"hostPath": {
|
||||||
|
"path": "/sys/fs/cgroup"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,133 @@
|
|||||||
|
{
|
||||||
|
"kind": "List",
|
||||||
|
"apiVersion": "v1",
|
||||||
|
"items": [
|
||||||
|
{
|
||||||
|
"kind": "Service",
|
||||||
|
"apiVersion": "v1",
|
||||||
|
"metadata": {
|
||||||
|
"name": "deploy-heketi",
|
||||||
|
"labels": {
|
||||||
|
"glusterfs": "heketi-service",
|
||||||
|
"deploy-heketi": "support"
|
||||||
|
},
|
||||||
|
"annotations": {
|
||||||
|
"description": "Exposes Heketi Service"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"spec": {
|
||||||
|
"selector": {
|
||||||
|
"name": "deploy-heketi"
|
||||||
|
},
|
||||||
|
"ports": [
|
||||||
|
{
|
||||||
|
"name": "deploy-heketi",
|
||||||
|
"port": 8080,
|
||||||
|
"targetPort": 8080
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"kind": "Deployment",
|
||||||
|
"apiVersion": "extensions/v1beta1",
|
||||||
|
"metadata": {
|
||||||
|
"name": "deploy-heketi",
|
||||||
|
"labels": {
|
||||||
|
"glusterfs": "heketi-deployment",
|
||||||
|
"deploy-heketi": "deployment"
|
||||||
|
},
|
||||||
|
"annotations": {
|
||||||
|
"description": "Defines how to deploy Heketi"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"spec": {
|
||||||
|
"replicas": 1,
|
||||||
|
"template": {
|
||||||
|
"metadata": {
|
||||||
|
"name": "deploy-heketi",
|
||||||
|
"labels": {
|
||||||
|
"name": "deploy-heketi",
|
||||||
|
"glusterfs": "heketi-pod",
|
||||||
|
"deploy-heketi": "pod"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"spec": {
|
||||||
|
"serviceAccountName": "heketi-service-account",
|
||||||
|
"containers": [
|
||||||
|
{
|
||||||
|
"image": "heketi/heketi:7",
|
||||||
|
"imagePullPolicy": "Always",
|
||||||
|
"name": "deploy-heketi",
|
||||||
|
"env": [
|
||||||
|
{
|
||||||
|
"name": "HEKETI_EXECUTOR",
|
||||||
|
"value": "kubernetes"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "HEKETI_DB_PATH",
|
||||||
|
"value": "/var/lib/heketi/heketi.db"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "HEKETI_FSTAB",
|
||||||
|
"value": "/var/lib/heketi/fstab"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "HEKETI_SNAPSHOT_LIMIT",
|
||||||
|
"value": "14"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "HEKETI_KUBE_GLUSTER_DAEMONSET",
|
||||||
|
"value": "y"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"ports": [
|
||||||
|
{
|
||||||
|
"containerPort": 8080
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"volumeMounts": [
|
||||||
|
{
|
||||||
|
"name": "db",
|
||||||
|
"mountPath": "/var/lib/heketi"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "config",
|
||||||
|
"mountPath": "/etc/heketi"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"readinessProbe": {
|
||||||
|
"timeoutSeconds": 3,
|
||||||
|
"initialDelaySeconds": 3,
|
||||||
|
"httpGet": {
|
||||||
|
"path": "/hello",
|
||||||
|
"port": 8080
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"livenessProbe": {
|
||||||
|
"timeoutSeconds": 3,
|
||||||
|
"initialDelaySeconds": 30,
|
||||||
|
"httpGet": {
|
||||||
|
"path": "/hello",
|
||||||
|
"port": 8080
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"volumes": [
|
||||||
|
{
|
||||||
|
"name": "db"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "config",
|
||||||
|
"secret": {
|
||||||
|
"secretName": "heketi-config-secret"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
@@ -0,0 +1,159 @@
|
|||||||
|
{
|
||||||
|
"kind": "List",
|
||||||
|
"apiVersion": "v1",
|
||||||
|
"items": [
|
||||||
|
{
|
||||||
|
"kind": "Secret",
|
||||||
|
"apiVersion": "v1",
|
||||||
|
"metadata": {
|
||||||
|
"name": "heketi-db-backup",
|
||||||
|
"labels": {
|
||||||
|
"glusterfs": "heketi-db",
|
||||||
|
"heketi": "db"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"data": {
|
||||||
|
},
|
||||||
|
"type": "Opaque"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"kind": "Service",
|
||||||
|
"apiVersion": "v1",
|
||||||
|
"metadata": {
|
||||||
|
"name": "heketi",
|
||||||
|
"labels": {
|
||||||
|
"glusterfs": "heketi-service",
|
||||||
|
"deploy-heketi": "support"
|
||||||
|
},
|
||||||
|
"annotations": {
|
||||||
|
"description": "Exposes Heketi Service"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"spec": {
|
||||||
|
"selector": {
|
||||||
|
"name": "heketi"
|
||||||
|
},
|
||||||
|
"ports": [
|
||||||
|
{
|
||||||
|
"name": "heketi",
|
||||||
|
"port": 8080,
|
||||||
|
"targetPort": 8080
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"kind": "Deployment",
|
||||||
|
"apiVersion": "extensions/v1beta1",
|
||||||
|
"metadata": {
|
||||||
|
"name": "heketi",
|
||||||
|
"labels": {
|
||||||
|
"glusterfs": "heketi-deployment"
|
||||||
|
},
|
||||||
|
"annotations": {
|
||||||
|
"description": "Defines how to deploy Heketi"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"spec": {
|
||||||
|
"replicas": 1,
|
||||||
|
"template": {
|
||||||
|
"metadata": {
|
||||||
|
"name": "heketi",
|
||||||
|
"labels": {
|
||||||
|
"name": "heketi",
|
||||||
|
"glusterfs": "heketi-pod"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"spec": {
|
||||||
|
"serviceAccountName": "heketi-service-account",
|
||||||
|
"containers": [
|
||||||
|
{
|
||||||
|
"image": "heketi/heketi:7",
|
||||||
|
"imagePullPolicy": "Always",
|
||||||
|
"name": "heketi",
|
||||||
|
"env": [
|
||||||
|
{
|
||||||
|
"name": "HEKETI_EXECUTOR",
|
||||||
|
"value": "kubernetes"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "HEKETI_DB_PATH",
|
||||||
|
"value": "/var/lib/heketi/heketi.db"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "HEKETI_FSTAB",
|
||||||
|
"value": "/var/lib/heketi/fstab"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "HEKETI_SNAPSHOT_LIMIT",
|
||||||
|
"value": "14"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "HEKETI_KUBE_GLUSTER_DAEMONSET",
|
||||||
|
"value": "y"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"ports": [
|
||||||
|
{
|
||||||
|
"containerPort": 8080
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"volumeMounts": [
|
||||||
|
{
|
||||||
|
"mountPath": "/backupdb",
|
||||||
|
"name": "heketi-db-secret"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "db",
|
||||||
|
"mountPath": "/var/lib/heketi"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "config",
|
||||||
|
"mountPath": "/etc/heketi"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"readinessProbe": {
|
||||||
|
"timeoutSeconds": 3,
|
||||||
|
"initialDelaySeconds": 3,
|
||||||
|
"httpGet": {
|
||||||
|
"path": "/hello",
|
||||||
|
"port": 8080
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"livenessProbe": {
|
||||||
|
"timeoutSeconds": 3,
|
||||||
|
"initialDelaySeconds": 30,
|
||||||
|
"httpGet": {
|
||||||
|
"path": "/hello",
|
||||||
|
"port": 8080
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"volumes": [
|
||||||
|
{
|
||||||
|
"name": "db",
|
||||||
|
"glusterfs": {
|
||||||
|
"endpoints": "heketi-storage-endpoints",
|
||||||
|
"path": "heketidbstorage"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "heketi-db-secret",
|
||||||
|
"secret": {
|
||||||
|
"secretName": "heketi-db-backup"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "config",
|
||||||
|
"secret": {
|
||||||
|
"secretName": "heketi-config-secret"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
@@ -0,0 +1,7 @@
|
|||||||
|
{
|
||||||
|
"apiVersion": "v1",
|
||||||
|
"kind": "ServiceAccount",
|
||||||
|
"metadata": {
|
||||||
|
"name": "heketi-service-account"
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,54 @@
|
|||||||
|
{
|
||||||
|
"apiVersion": "v1",
|
||||||
|
"kind": "List",
|
||||||
|
"items": [
|
||||||
|
{
|
||||||
|
"kind": "Endpoints",
|
||||||
|
"apiVersion": "v1",
|
||||||
|
"metadata": {
|
||||||
|
"name": "heketi-storage-endpoints",
|
||||||
|
"creationTimestamp": null
|
||||||
|
},
|
||||||
|
"subsets": [
|
||||||
|
{% set nodeblocks = [] %}
|
||||||
|
{% for node in nodes %}
|
||||||
|
{% set nodeblock %}
|
||||||
|
{
|
||||||
|
"addresses": [
|
||||||
|
{
|
||||||
|
"ip": "{{ hostvars[node]['ansible_facts']['default_ipv4']['address'] }}"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"ports": [
|
||||||
|
{
|
||||||
|
"port": 1
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
{% endset %}
|
||||||
|
{% if nodeblocks.append(nodeblock) %}{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
{{ nodeblocks|join(',') }}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"kind": "Service",
|
||||||
|
"apiVersion": "v1",
|
||||||
|
"metadata": {
|
||||||
|
"name": "heketi-storage-endpoints",
|
||||||
|
"creationTimestamp": null
|
||||||
|
},
|
||||||
|
"spec": {
|
||||||
|
"ports": [
|
||||||
|
{
|
||||||
|
"port": 1,
|
||||||
|
"targetPort": 0
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"loadBalancer": {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
@@ -0,0 +1,44 @@
|
|||||||
|
{
|
||||||
|
"_port_comment": "Heketi Server Port Number",
|
||||||
|
"port": "8080",
|
||||||
|
|
||||||
|
"_use_auth": "Enable JWT authorization. Please enable for deployment",
|
||||||
|
"use_auth": true,
|
||||||
|
|
||||||
|
"_jwt": "Private keys for access",
|
||||||
|
"jwt": {
|
||||||
|
"_admin": "Admin has access to all APIs",
|
||||||
|
"admin": {
|
||||||
|
"key": "{{ heketi_admin_key }}"
|
||||||
|
},
|
||||||
|
"_user": "User only has access to /volumes endpoint",
|
||||||
|
"user": {
|
||||||
|
"key": "{{ heketi_user_key }}"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
|
||||||
|
"_glusterfs_comment": "GlusterFS Configuration",
|
||||||
|
"glusterfs": {
|
||||||
|
"_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
|
||||||
|
"executor": "kubernetes",
|
||||||
|
|
||||||
|
"_db_comment": "Database file name",
|
||||||
|
"db": "/var/lib/heketi/heketi.db",
|
||||||
|
|
||||||
|
"kubeexec": {
|
||||||
|
"rebalance_on_expansion": true
|
||||||
|
},
|
||||||
|
|
||||||
|
"sshexec": {
|
||||||
|
"rebalance_on_expansion": true,
|
||||||
|
"keyfile": "/etc/heketi/private_key",
|
||||||
|
"fstab": "/etc/fstab",
|
||||||
|
"port": "22",
|
||||||
|
"user": "root",
|
||||||
|
"sudo": false
|
||||||
|
}
|
||||||
|
},
|
||||||
|
|
||||||
|
"_backup_db_to_kube_secret": "Backup the heketi database to a Kubernetes secret when running in Kubernetes. Default is off.",
|
||||||
|
"backup_db_to_kube_secret": false
|
||||||
|
}
|
||||||
@@ -0,0 +1,12 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: gluster
+  annotations:
+    storageclass.beta.kubernetes.io/is-default-class: "true"
+provisioner: kubernetes.io/glusterfs
+parameters:
+  resturl: "http://{{ endpoint_address }}:8080"
+  restuser: "admin"
+  restuserkey: "{{ heketi_admin_key }}"
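Since the rendered class is marked as the default, workloads can consume it with a standard PersistentVolumeClaim. A minimal sketch of such a claim (the claim name and size are placeholders and not part of this change):

```
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gluster-example        # placeholder name
spec:
  storageClassName: gluster    # the class rendered above
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
```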
@@ -0,0 +1,34 @@
|
|||||||
|
{
|
||||||
|
"clusters": [
|
||||||
|
{
|
||||||
|
"nodes": [
|
||||||
|
{% set nodeblocks = [] %}
|
||||||
|
{% for node in nodes %}
|
||||||
|
{% set nodeblock %}
|
||||||
|
{
|
||||||
|
"node": {
|
||||||
|
"hostnames": {
|
||||||
|
"manage": [
|
||||||
|
"{{ node }}"
|
||||||
|
],
|
||||||
|
"storage": [
|
||||||
|
"{{ hostvars[node]['ansible_facts']['default_ipv4']['address'] }}"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"zone": 1
|
||||||
|
},
|
||||||
|
"devices": [
|
||||||
|
{
|
||||||
|
"name": "{{ hostvars[node]['disk_volume_device_1'] }}",
|
||||||
|
"destroydata": false
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
{% endset %}
|
||||||
|
{% if nodeblocks.append(nodeblock) %}{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
{{ nodeblocks|join(',') }}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
@@ -0,0 +1,46 @@
|
|||||||
|
---
|
||||||
|
- name: "Install lvm utils (RedHat)"
|
||||||
|
become: true
|
||||||
|
yum:
|
||||||
|
name: "lvm2"
|
||||||
|
state: "present"
|
||||||
|
when: "ansible_os_family == 'RedHat'"
|
||||||
|
|
||||||
|
- name: "Install lvm utils (Debian)"
|
||||||
|
become: true
|
||||||
|
apt:
|
||||||
|
name: "lvm2"
|
||||||
|
state: "present"
|
||||||
|
when: "ansible_os_family == 'Debian'"
|
||||||
|
|
||||||
|
- name: "Get volume group information."
|
||||||
|
become: true
|
||||||
|
shell: "pvs {{ disk_volume_device_1 }} --option vg_name | tail -n+2"
|
||||||
|
register: "volume_groups"
|
||||||
|
ignore_errors: true
|
||||||
|
changed_when: false
|
||||||
|
|
||||||
|
- name: "Remove volume groups."
|
||||||
|
become: true
|
||||||
|
command: "vgremove {{ volume_group }} --yes"
|
||||||
|
with_items: "{{ volume_groups.stdout_lines }}"
|
||||||
|
loop_control: { loop_var: "volume_group" }
|
||||||
|
|
||||||
|
- name: "Remove physical volume from cluster disks."
|
||||||
|
become: true
|
||||||
|
command: "pvremove {{ disk_volume_device_1 }} --yes"
|
||||||
|
ignore_errors: true
|
||||||
|
|
||||||
|
- name: "Remove lvm utils (RedHat)"
|
||||||
|
become: true
|
||||||
|
yum:
|
||||||
|
name: "lvm2"
|
||||||
|
state: "absent"
|
||||||
|
when: "ansible_os_family == 'RedHat'"
|
||||||
|
|
||||||
|
- name: "Remove lvm utils (Debian)"
|
||||||
|
become: true
|
||||||
|
apt:
|
||||||
|
name: "lvm2"
|
||||||
|
state: "absent"
|
||||||
|
when: "ansible_os_family == 'Debian'"
|
||||||
@@ -0,0 +1,51 @@
|
|||||||
|
---
|
||||||
|
- name: "Remove storage class."
|
||||||
|
command: "{{ bin_dir }}/kubectl delete storageclass gluster"
|
||||||
|
ignore_errors: true
|
||||||
|
- name: "Tear down heketi."
|
||||||
|
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
|
||||||
|
ignore_errors: true
|
||||||
|
- name: "Tear down heketi."
|
||||||
|
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
|
||||||
|
ignore_errors: true
|
||||||
|
- name: "Tear down bootstrap."
|
||||||
|
include_tasks: "../provision/tasks/bootstrap/tear-down.yml"
|
||||||
|
- name: "Ensure there is nothing left over."
|
||||||
|
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
|
||||||
|
register: "heketi_result"
|
||||||
|
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
|
||||||
|
retries: 60
|
||||||
|
delay: 5
|
||||||
|
- name: "Ensure there is nothing left over."
|
||||||
|
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
|
||||||
|
register: "heketi_result"
|
||||||
|
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
|
||||||
|
retries: 60
|
||||||
|
delay: 5
|
||||||
|
- name: "Tear down glusterfs."
|
||||||
|
command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
|
||||||
|
ignore_errors: true
|
||||||
|
- name: "Remove heketi storage service."
|
||||||
|
command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
|
||||||
|
ignore_errors: true
|
||||||
|
- name: "Remove heketi gluster role binding"
|
||||||
|
command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
|
||||||
|
ignore_errors: true
|
||||||
|
- name: "Remove heketi config secret"
|
||||||
|
command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
|
||||||
|
ignore_errors: true
|
||||||
|
- name: "Remove heketi db backup"
|
||||||
|
command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
|
||||||
|
ignore_errors: true
|
||||||
|
- name: "Remove heketi service account"
|
||||||
|
command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
|
||||||
|
ignore_errors: true
|
||||||
|
- name: "Get secrets"
|
||||||
|
command: "{{ bin_dir }}/kubectl get secrets --output=\"json\""
|
||||||
|
register: "secrets"
|
||||||
|
changed_when: false
|
||||||
|
- name: "Remove heketi storage secret"
|
||||||
|
vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" }
|
||||||
|
command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout|from_json|json_query(storage_query) }}"
|
||||||
|
when: "storage_query is defined"
|
||||||
|
ignore_errors: true
|
||||||
@@ -17,21 +17,18 @@ This project will create:
 - Export the variables for your AWS credentials or edit `credentials.tfvars`:

 ```
-export AWS_ACCESS_KEY_ID="www"
-export AWS_SECRET_ACCESS_KEY="xxx"
-export AWS_SSH_KEY_NAME="yyy"
-export AWS_DEFAULT_REGION="zzz"
+export TF_VAR_AWS_ACCESS_KEY_ID="www"
+export TF_VAR_AWS_SECRET_ACCESS_KEY="xxx"
+export TF_VAR_AWS_SSH_KEY_NAME="yyy"
+export TF_VAR_AWS_DEFAULT_REGION="zzz"
 ```
-- Rename `contrib/terraform/aws/terraform.tfvars.example` to `terraform.tfvars`

 - Update `contrib/terraform/aws/terraform.tfvars` with your data. By default, the Terraform scripts use CoreOS as base image. If you want to change this behaviour, see note "Using other distrib than CoreOs" below.
-- Allocate a new AWS Elastic IP. Use this for your `loadbalancer_apiserver_address` value (below)
 - Create an AWS EC2 SSH Key
 - Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply`, depending on whether you exported your AWS credentials

 Example:
 ```commandline
-terraform apply -var-file=credentials.tfvars -var 'loadbalancer_apiserver_address=34.212.228.77'
+terraform apply -var-file=credentials.tfvars
 ```

 - Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`
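The `TF_VAR_` prefix works because Terraform maps environment variables of the form `TF_VAR_<name>` onto the input variable `<name>`. A short sketch, assuming `variables.tf` declares an input variable named `AWS_ACCESS_KEY_ID`:

```
# Equivalent ways to set the AWS_ACCESS_KEY_ID input variable:
export TF_VAR_AWS_ACCESS_KEY_ID="www"
terraform apply
# or pass it explicitly on the command line:
terraform apply -var 'AWS_ACCESS_KEY_ID=www'
```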
@@ -46,7 +43,7 @@ ssh -F ./ssh-bastion.conf user@$ip

 Example (this one assumes you are using CoreOS)
 ```commandline
-ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_ssh_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache
+ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache
 ```
 ***Using other distrib than CoreOs***
 If you want to use a distribution other than CoreOS, you can modify the search filters of the 'data "aws_ami" "distro"' in variables.tf.
@@ -181,7 +181,7 @@ data "template_file" "inventory" {

 resource "null_resource" "inventories" {
   provisioner "local-exec" {
-    command = "echo '${data.template_file.inventory.rendered}' > ../../../inventory/hosts"
+    command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}"
   }

   triggers {
@@ -31,3 +31,5 @@ default_tags = {
 # Env = "devtest"
 # Product = "kubernetes"
 }
+
+inventory_file = "../../../inventory/hosts"
@@ -103,3 +103,7 @@ variable "default_tags" {
   description = "Default tags for all resources"
   type = "map"
 }
+
+variable "inventory_file" {
+  description = "Where to store the generated inventory file"
+}
@@ -1 +1 @@
-../../inventory/group_vars
+../../inventory/local/group_vars
@@ -32,7 +32,11 @@ floating IP addresses or not.
 - Kubernetes worker nodes

 Note that the Ansible script will report an invalid configuration if you wind up
-with an even number of etcd instances since that is not a valid configuration.
+with an even number of etcd instances since that is not a valid configuration. This
+restriction includes standalone etcd nodes that are deployed in a cluster along with
+master nodes with etcd replicas. As an example, if you have three master nodes with
+etcd replicas and three standalone etcd nodes, the script will fail since there are
+now six total etcd replicas.

 ### GlusterFS
 The Terraform configuration supports provisioning of an optional GlusterFS
@@ -135,7 +139,7 @@ the one you want to use with the environment variable `OS_CLOUD`:
 export OS_CLOUD=mycloud
 ```

-##### Openrc method (deprecated)
+##### Openrc method

 When using classic environment variables, Terraform uses default `OS_*`
 environment variables. A script suitable for your environment may be available
@@ -218,6 +222,9 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tf`.
 |`number_of_bastions` | Number of bastion hosts to create. Scripts assume this is really just zero or one |
 |`number_of_gfs_nodes_no_floating_ip` | Number of gluster servers to provision. |
 | `gfs_volume_size_in_gb` | Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks |
+|`supplementary_master_groups` | To add ansible groups to the masters, such as `kube-node` for tainting them as nodes, empty by default. |
+|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube-ingress` for running ingress controller pods, empty by default. |
+|`bastion_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, `["0.0.0.0/0"]` by default |

 #### Terraform state files

@@ -253,7 +260,7 @@ $ terraform apply -var-file=cluster.tf ../../contrib/terraform/openstack

 if you chose to create a bastion host, this script will create
 `contrib/terraform/openstack/k8s-cluster.yml` with an ssh command for Ansible to
 be able to access your machines tunneling through the bastion's IP address. If
 you want to manually handle the ssh tunneling to these machines, please delete
 or move that file. If you want to use this, just leave it there, as ansible will
 pick it up automatically.
@@ -299,11 +306,15 @@ If you have deployed and destroyed a previous iteration of your cluster, you wil

 #### Bastion host

-If you are not using a bastion host, but not all of your nodes have floating IPs, create a file `inventory/$CLUSTER/group_vars/no-floating.yml` with the following content. Use one of your nodes with a floating IP (this should have been output at the end of the Terraform step) and the appropriate user for that OS, or if you have another jump host, use that.
-
-```
-ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@MASTER_IP"'
-```
+Bastion access will be determined by:
+
+ - Your choice of the number of bastion hosts (set by the `number_of_bastions` terraform variable).
+ - The existence of nodes/masters with floating IPs (set by the `number_of_k8s_masters`, `number_of_k8s_nodes`, `number_of_k8s_masters_no_etcd` terraform variables).
+
+If you have a bastion host, your ssh traffic will be directly routed through it. This is regardless of whether you have masters/nodes with a floating IP assigned.
+If you don't have a bastion host, but at least one of your masters/nodes has a floating IP, then ssh traffic will be tunneled by one of these machines.
+
+So, either a bastion host or at least one master/node with a floating IP is required.

 #### Test access

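For reference, the `no-floating.yml` generated by the bastion provisioner (via the `sed` over `ansible_bastion_template.txt` shown at the end of this change) takes roughly this form, with USER and BASTION_ADDRESS substituted at provision time:

```
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@BASTION_ADDRESS"'
```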
|||||||
@@ -3,6 +3,7 @@ module "network" {
|
|||||||
|
|
||||||
external_net = "${var.external_net}"
|
external_net = "${var.external_net}"
|
||||||
network_name = "${var.network_name}"
|
network_name = "${var.network_name}"
|
||||||
|
subnet_cidr = "${var.subnet_cidr}"
|
||||||
cluster_name = "${var.cluster_name}"
|
cluster_name = "${var.cluster_name}"
|
||||||
dns_nameservers = "${var.dns_nameservers}"
|
dns_nameservers = "${var.dns_nameservers}"
|
||||||
}
|
}
|
||||||
@@ -24,6 +25,7 @@ module "compute" {
|
|||||||
source = "modules/compute"
|
source = "modules/compute"
|
||||||
|
|
||||||
cluster_name = "${var.cluster_name}"
|
cluster_name = "${var.cluster_name}"
|
||||||
|
az_list = "${var.az_list}"
|
||||||
number_of_k8s_masters = "${var.number_of_k8s_masters}"
|
number_of_k8s_masters = "${var.number_of_k8s_masters}"
|
||||||
number_of_k8s_masters_no_etcd = "${var.number_of_k8s_masters_no_etcd}"
|
number_of_k8s_masters_no_etcd = "${var.number_of_k8s_masters_no_etcd}"
|
||||||
number_of_etcd = "${var.number_of_etcd}"
|
number_of_etcd = "${var.number_of_etcd}"
|
||||||
@@ -48,6 +50,9 @@ module "compute" {
|
|||||||
k8s_master_fips = "${module.ips.k8s_master_fips}"
|
k8s_master_fips = "${module.ips.k8s_master_fips}"
|
||||||
k8s_node_fips = "${module.ips.k8s_node_fips}"
|
k8s_node_fips = "${module.ips.k8s_node_fips}"
|
||||||
bastion_fips = "${module.ips.bastion_fips}"
|
bastion_fips = "${module.ips.bastion_fips}"
|
||||||
|
bastion_allowed_remote_ips = "${var.bastion_allowed_remote_ips}"
|
||||||
|
supplementary_master_groups = "${var.supplementary_master_groups}"
|
||||||
|
supplementary_node_groups = "${var.supplementary_node_groups}"
|
||||||
|
|
||||||
network_id = "${module.network.router_id}"
|
network_id = "${module.network.router_id}"
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,61 +3,62 @@ resource "openstack_compute_keypair_v2" "k8s" {
   public_key = "${chomp(file(var.public_key_path))}"
 }
 
-resource "openstack_compute_secgroup_v2" "k8s_master" {
+resource "openstack_networking_secgroup_v2" "k8s_master" {
   name        = "${var.cluster_name}-k8s-master"
   description = "${var.cluster_name} - Kubernetes Master"
-
-  rule {
-    ip_protocol = "tcp"
-    from_port   = "6443"
-    to_port     = "6443"
-    cidr        = "0.0.0.0/0"
-  }
 }
 
-resource "openstack_compute_secgroup_v2" "bastion" {
+resource "openstack_networking_secgroup_rule_v2" "k8s_master" {
+  direction         = "ingress"
+  ethertype         = "IPv4"
+  protocol          = "tcp"
+  port_range_min    = "6443"
+  port_range_max    = "6443"
+  remote_ip_prefix  = "0.0.0.0/0"
+  security_group_id = "${openstack_networking_secgroup_v2.k8s_master.id}"
+}
+
+resource "openstack_networking_secgroup_v2" "bastion" {
   name        = "${var.cluster_name}-bastion"
   description = "${var.cluster_name} - Bastion Server"
-
-  rule {
-    ip_protocol = "tcp"
-    from_port   = "22"
-    to_port     = "22"
-    cidr        = "0.0.0.0/0"
-  }
 }
 
-resource "openstack_compute_secgroup_v2" "k8s" {
+resource "openstack_networking_secgroup_rule_v2" "bastion" {
+  count             = "${length(var.bastion_allowed_remote_ips)}"
+  direction         = "ingress"
+  ethertype         = "IPv4"
+  protocol          = "tcp"
+  port_range_min    = "22"
+  port_range_max    = "22"
+  remote_ip_prefix  = "${var.bastion_allowed_remote_ips[count.index]}"
+  security_group_id = "${openstack_networking_secgroup_v2.bastion.id}"
+}
+
+resource "openstack_networking_secgroup_v2" "k8s" {
   name        = "${var.cluster_name}-k8s"
   description = "${var.cluster_name} - Kubernetes"
+}
 
-  rule {
-    ip_protocol = "icmp"
-    from_port   = "-1"
-    to_port     = "-1"
-    cidr        = "0.0.0.0/0"
-  }
+resource "openstack_networking_secgroup_rule_v2" "k8s" {
+  direction         = "ingress"
+  ethertype         = "IPv4"
+  remote_group_id   = "${openstack_networking_secgroup_v2.k8s.id}"
+  security_group_id = "${openstack_networking_secgroup_v2.k8s.id}"
 }
 
-  rule {
-    ip_protocol = "tcp"
-    from_port   = "1"
-    to_port     = "65535"
-    self        = true
-  }
+resource "openstack_networking_secgroup_v2" "worker" {
+  name        = "${var.cluster_name}-k8s-worker"
+  description = "${var.cluster_name} - Kubernetes worker nodes"
+}
 
-  rule {
-    ip_protocol = "udp"
-    from_port   = "1"
-    to_port     = "65535"
-    self        = true
-  }
-
-  rule {
-    ip_protocol = "icmp"
-    from_port   = "-1"
-    to_port     = "-1"
-    self        = true
-  }
-}
+resource "openstack_networking_secgroup_rule_v2" "worker" {
+  direction         = "ingress"
+  ethertype         = "IPv4"
+  protocol          = "tcp"
+  port_range_min    = "30000"
+  port_range_max    = "32767"
+  remote_ip_prefix  = "0.0.0.0/0"
+  security_group_id = "${openstack_networking_secgroup_v2.worker.id}"
+}
 
 resource "openstack_compute_instance_v2" "bastion" {
@@ -71,8 +72,8 @@ resource "openstack_compute_instance_v2" "bastion" {
     name = "${var.network_name}"
   }
 
-  security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
-                     "${openstack_compute_secgroup_v2.bastion.name}",
+  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
+                     "${openstack_networking_secgroup_v2.bastion.name}",
                      "default",
                     ]
 
@@ -83,7 +84,7 @@ resource "openstack_compute_instance_v2" "bastion" {
   }
 
   provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
+    command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > contrib/terraform/group_vars/no-floating.yml"
   }
 
 }
@@ -91,6 +92,7 @@ resource "openstack_compute_instance_v2" "bastion" {
 resource "openstack_compute_instance_v2" "k8s_master" {
   name       = "${var.cluster_name}-k8s-master-${count.index+1}"
   count      = "${var.number_of_k8s_masters}"
+  availability_zone = "${element(var.az_list, count.index)}"
   image_name = "${var.image}"
   flavor_id  = "${var.flavor_k8s_master}"
   key_pair   = "${openstack_compute_keypair_v2.k8s.name}"
@@ -99,23 +101,28 @@ resource "openstack_compute_instance_v2" "k8s_master" {
     name = "${var.network_name}"
   }
 
-  security_groups = ["${openstack_compute_secgroup_v2.k8s_master.name}",
-                     "${openstack_compute_secgroup_v2.bastion.name}",
-                     "${openstack_compute_secgroup_v2.k8s.name}",
+  security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
+                     "${openstack_networking_secgroup_v2.bastion.name}",
+                     "${openstack_networking_secgroup_v2.k8s.name}",
                      "default",
                     ]
 
   metadata = {
     ssh_user         = "${var.ssh_user}"
-    kubespray_groups = "etcd,kube-master,k8s-cluster,vault"
+    kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
     depends_on       = "${var.network_id}"
   }
 
+  provisioner "local-exec" {
+    command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
+  }
+
 }
 
 resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
   name       = "${var.cluster_name}-k8s-master-ne-${count.index+1}"
   count      = "${var.number_of_k8s_masters_no_etcd}"
+  availability_zone = "${element(var.az_list, count.index)}"
   image_name = "${var.image}"
   flavor_id  = "${var.flavor_k8s_master}"
   key_pair   = "${openstack_compute_keypair_v2.k8s.name}"
@@ -124,21 +131,27 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
     name = "${var.network_name}"
   }
 
-  security_groups = ["${openstack_compute_secgroup_v2.k8s_master.name}",
-                     "${openstack_compute_secgroup_v2.k8s.name}",
+  security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
+                     "${openstack_networking_secgroup_v2.bastion.name}",
+                     "${openstack_networking_secgroup_v2.k8s.name}",
                     ]
 
   metadata = {
     ssh_user         = "${var.ssh_user}"
-    kubespray_groups = "kube-master,k8s-cluster,vault"
+    kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
     depends_on       = "${var.network_id}"
   }
 
+  provisioner "local-exec" {
+    command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
+  }
+
 }
 
 resource "openstack_compute_instance_v2" "etcd" {
   name       = "${var.cluster_name}-etcd-${count.index+1}"
   count      = "${var.number_of_etcd}"
+  availability_zone = "${element(var.az_list, count.index)}"
   image_name = "${var.image}"
   flavor_id  = "${var.flavor_etcd}"
   key_pair   = "${openstack_compute_keypair_v2.k8s.name}"
@@ -147,7 +160,7 @@ resource "openstack_compute_instance_v2" "etcd" {
     name = "${var.network_name}"
   }
 
-  security_groups = ["${openstack_compute_secgroup_v2.k8s.name}"]
+  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]
 
   metadata = {
     ssh_user = "${var.ssh_user}"
@@ -160,6 +173,7 @@ resource "openstack_compute_instance_v2" "etcd" {
 resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
   name       = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
   count      = "${var.number_of_k8s_masters_no_floating_ip}"
+  availability_zone = "${element(var.az_list, count.index)}"
   image_name = "${var.image}"
   flavor_id  = "${var.flavor_k8s_master}"
   key_pair   = "${openstack_compute_keypair_v2.k8s.name}"
@@ -168,14 +182,14 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
     name = "${var.network_name}"
   }
 
-  security_groups = ["${openstack_compute_secgroup_v2.k8s_master.name}",
-                     "${openstack_compute_secgroup_v2.k8s.name}",
+  security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
+                     "${openstack_networking_secgroup_v2.k8s.name}",
                      "default",
                     ]
 
   metadata = {
     ssh_user         = "${var.ssh_user}"
-    kubespray_groups = "etcd,kube-master,k8s-cluster,vault,no-floating"
+    kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
     depends_on       = "${var.network_id}"
   }
 
@@ -184,6 +198,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
 resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
   name       = "${var.cluster_name}-k8s-master-ne-nf-${count.index+1}"
   count      = "${var.number_of_k8s_masters_no_floating_ip_no_etcd}"
+  availability_zone = "${element(var.az_list, count.index)}"
   image_name = "${var.image}"
   flavor_id  = "${var.flavor_k8s_master}"
   key_pair   = "${openstack_compute_keypair_v2.k8s.name}"
@@ -192,13 +207,13 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
     name = "${var.network_name}"
   }
 
-  security_groups = ["${openstack_compute_secgroup_v2.k8s_master.name}",
-                     "${openstack_compute_secgroup_v2.k8s.name}",
+  security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
+                     "${openstack_networking_secgroup_v2.k8s.name}",
                     ]
 
   metadata = {
     ssh_user         = "${var.ssh_user}"
-    kubespray_groups = "kube-master,k8s-cluster,vault,no-floating"
+    kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
     depends_on       = "${var.network_id}"
   }
 
@@ -207,6 +222,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
 resource "openstack_compute_instance_v2" "k8s_node" {
   name       = "${var.cluster_name}-k8s-node-${count.index+1}"
   count      = "${var.number_of_k8s_nodes}"
+  availability_zone = "${element(var.az_list, count.index)}"
   image_name = "${var.image}"
   flavor_id  = "${var.flavor_k8s_node}"
   key_pair   = "${openstack_compute_keypair_v2.k8s.name}"
@@ -215,22 +231,28 @@ resource "openstack_compute_instance_v2" "k8s_node" {
     name = "${var.network_name}"
   }
 
-  security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
-                     "${openstack_compute_secgroup_v2.bastion.name}",
+  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
+                     "${openstack_networking_secgroup_v2.bastion.name}",
+                     "${openstack_networking_secgroup_v2.worker.name}",
                      "default",
                     ]
 
   metadata = {
     ssh_user         = "${var.ssh_user}"
-    kubespray_groups = "kube-node,k8s-cluster"
+    kubespray_groups = "kube-node,k8s-cluster,${var.supplementary_node_groups}"
    depends_on       = "${var.network_id}"
   }
 
+  provisioner "local-exec" {
+    command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > contrib/terraform/group_vars/no-floating.yml"
+  }
+
 }
 
 resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
   name       = "${var.cluster_name}-k8s-node-nf-${count.index+1}"
   count      = "${var.number_of_k8s_nodes_no_floating_ip}"
+  availability_zone = "${element(var.az_list, count.index)}"
   image_name = "${var.image}"
   flavor_id  = "${var.flavor_k8s_node}"
   key_pair   = "${openstack_compute_keypair_v2.k8s.name}"
@@ -239,13 +261,14 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
     name = "${var.network_name}"
   }
 
-  security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
+  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
+                     "${openstack_networking_secgroup_v2.worker.name}",
                      "default",
                     ]
 
   metadata = {
     ssh_user         = "${var.ssh_user}"
-    kubespray_groups = "kube-node,k8s-cluster,no-floating"
+    kubespray_groups = "kube-node,k8s-cluster,no-floating,${var.supplementary_node_groups}"
     depends_on       = "${var.network_id}"
   }
 
@@ -279,6 +302,7 @@ resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
 resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
   name       = "${var.cluster_name}-gfs-node-nf-${count.index+1}"
   count      = "${var.number_of_gfs_nodes_no_floating_ip}"
+  availability_zone = "${element(var.az_list, count.index)}"
   image_name = "${var.image_gfs}"
   flavor_id  = "${var.flavor_gfs_node}"
   key_pair   = "${openstack_compute_keypair_v2.k8s.name}"
@@ -287,7 +311,7 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
     name = "${var.network_name}"
   }
 
-  security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
+  security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
                      "default",
                     ]
 
@@ -1,5 +1,9 @@
 variable "cluster_name" {}
 
+variable "az_list" {
+  type = "list"
+}
+
 variable "number_of_k8s_masters" {}
 
 variable "number_of_k8s_masters_no_etcd" {}
@@ -55,3 +59,15 @@ variable "k8s_node_fips" {
 variable "bastion_fips" {
   type = "list"
 }
+
+variable "bastion_allowed_remote_ips" {
+  type = "list"
+}
+
+variable "supplementary_master_groups" {
+  default = ""
+}
+
+variable "supplementary_node_groups" {
+  default = ""
+}
@@ -12,7 +12,7 @@ resource "openstack_networking_network_v2" "k8s" {
 resource "openstack_networking_subnet_v2" "k8s" {
   name            = "${var.cluster_name}-internal-network"
   network_id      = "${openstack_networking_network_v2.k8s.id}"
-  cidr            = "10.0.0.0/24"
+  cidr            = "${var.subnet_cidr}"
   ip_version      = 4
   dns_nameservers = "${var.dns_nameservers}"
 }
@@ -1,4 +1,8 @@
 output "router_id" {
+  value = "${openstack_networking_router_v2.k8s.id}"
+}
+
+output "router_internal_port_id" {
   value = "${openstack_networking_router_interface_v2.k8s.id}"
 }
@@ -7,3 +7,5 @@ variable "cluster_name" {}
 variable "dns_nameservers" {
   type = "list"
 }
+
+variable "subnet_cidr" {}
@@ -41,5 +41,6 @@ number_of_k8s_nodes_no_floating_ip = 4
 # networking
 network_name = "<network>"
 external_net = "<UUID>"
+subnet_cidr = "<cidr>"
 floatingip_pool = "<pool>"
+bastion_allowed_remote_ips = ["0.0.0.0/0"]
@@ -2,6 +2,12 @@ variable "cluster_name" {
   default = "example"
 }
 
+variable "az_list" {
+  description = "List of Availability Zones available in your OpenStack cluster"
+  type        = "list"
+  default     = ["nova"]
+}
+
 variable "number_of_bastions" {
   default = 1
 }
@@ -97,6 +103,12 @@ variable "network_name" {
   default = "internal"
 }
 
+variable "subnet_cidr" {
+  description = "Subnet CIDR block."
+  type        = "string"
+  default     = "10.0.0.0/24"
+}
+
 variable "dns_nameservers" {
   description = "An array of DNS name server names used by hosts in this subnet."
   type        = "list"
@@ -111,3 +123,19 @@ variable "floatingip_pool" {
 variable "external_net" {
   description = "uuid of the external/public network"
 }
+
+variable "supplementary_master_groups" {
+  description = "supplementary kubespray ansible groups for masters, such kube-node"
+  default     = ""
+}
+
+variable "supplementary_node_groups" {
+  description = "supplementary kubespray ansible groups for worker nodes, such as kube-ingress"
+  default     = ""
+}
+
+variable "bastion_allowed_remote_ips" {
+  description = "An array of CIDRs allowed to SSH to hosts"
+  type        = "list"
+  default     = ["0.0.0.0/0"]
+}
@@ -706,6 +706,10 @@ def query_list(hosts):
 
     for name, attrs, hostgroups in hosts:
         for group in set(hostgroups):
+            # Ansible 2.6.2 stopped supporting empty group names: https://github.com/ansible/ansible/pull/42584/commits/d4cd474b42ed23d8f8aabb2a7f84699673852eaf
+            # Empty group name defaults to "all" in Ansible < 2.6.2 so we alter empty group names to "all"
+            if not group: group = "all"
+
             groups[group].setdefault('hosts', [])
             groups[group]['hosts'].append(name)
 
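To make the effect of that one-line guard concrete, here is a hedged sketch of the inventory data `query_list()` ends up building when a supplementary-groups variable is left empty: without the fallback a host carrying an empty group string would be filed under a group literally named `""`, which Ansible >= 2.6.2 rejects; with it the host simply stays in `all`. Host and group names below are invented for illustration.

```yaml
# Hypothetical grouping produced for one master when supplementary_master_groups = ""
all:
  hosts:
    - example-k8s-master-1
etcd:
  hosts:
    - example-k8s-master-1
kube-master:
  hosts:
    - example-k8s-master-1
k8s-cluster:
  hosts:
    - example-k8s-master-1
```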
@@ -123,7 +123,6 @@ The following tags are defined in playbooks:
 | hyperkube | Manipulations with K8s hyperkube image
 | k8s-pre-upgrade | Upgrading K8s cluster
 | k8s-secrets | Configuring K8s certs/keys
-| kpm | Installing K8s apps definitions with KPM
 | kube-apiserver | Configuring static pod kube-apiserver
 | kube-controller-manager | Configuring static pod kube-controller-manager
 | kubectl | Installing kubectl and bash completion
@@ -159,7 +158,7 @@ And this play only removes the K8s cluster DNS resolver IP from hosts' /etc/reso
 ```
 ansible-playbook -i inventory/sample/hosts.ini -e dnsmasq_dns_server='' cluster.yml --tags resolvconf
 ```
-And this prepares all container images localy (at the ansible runner node) without installing
+And this prepares all container images locally (at the ansible runner node) without installing
 or upgrading related stuff or trying to upload container to K8s cluster nodes:
 ```
 ansible-playbook -i inventory/sample/hosts.ini cluster.yml \
docs/aws.md (22 changes)
@@ -1,11 +1,11 @@
 AWS
 ===============
 
-To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`.
+To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`. Refer to the [Kubespray Configuration](#kubespray-configuration) for customizing the provider.
 
 Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.
 
-You would also need to tag the resources in your VPC accordingly for the aws provider to utilize them. Tag the subnets and all instances that kubernetes will be run on with key `kubernetes.io/cluster/$cluster_name` (`$cluster_name` must be a unique identifier for the cluster). Tag the subnets that must be targetted by external ELBs with the key `kubernetes.io/role/elb` and internal ELBs with the key `kubernetes.io/role/internal-elb`.
+You would also need to tag the resources in your VPC accordingly for the aws provider to utilize them. Tag the subnets, route tables and all instances that kubernetes will be run on with key `kubernetes.io/cluster/$cluster_name` (`$cluster_name` must be a unique identifier for the cluster). Tag the subnets that must be targetted by external ELBs with the key `kubernetes.io/role/elb` and internal ELBs with the key `kubernetes.io/role/internal-elb`.
 
 Make sure your VPC has both DNS Hostnames support and Private DNS enabled.
 
@@ -58,3 +58,21 @@ export AWS_SECRET_ACCESS_KEY="yyyyy"
 export REGION="us-east-2"
 ```
 - We will now create our cluster. There will be either one or two small changes. The first is that we will specify `-i inventory/kubespray-aws-inventory.py` as our inventory script. The other is conditional. If your AWS instances are public facing, you can set the `VPC_VISIBILITY` variable to `public` and that will result in public IP and DNS names being passed into the inventory. This causes your cluster.yml command to look like `VPC_VISIBILITY="public" ansible-playbook ... cluster.yml`
+
+## Kubespray configuration
+
+Declare the cloud config variables for the `aws` provider as follows. Setting these variables are optional and depend on your use case.
+
+Variable|Type|Comment
+---|---|---
+aws_zone|string|Force set the AWS zone. Recommended to leave blank.
+aws_vpc|string|The AWS VPC flag enables the possibility to run the master components on a different aws account, on a different cloud provider or on-premise. If the flag is set also the KubernetesClusterTag must be provided
+aws_subnet_id|string|SubnetID enables using a specific subnet to use for ELB's
+aws_route_table_id|string|RouteTableID enables using a specific RouteTable
+aws_role_arn|string|RoleARN is the IAM role to assume when interaction with AWS APIs
+aws_kubernetes_cluster_tag|string|KubernetesClusterTag is the legacy cluster id we'll use to identify our cluster resources
+aws_kubernetes_cluster_id|string|KubernetesClusterID is the cluster id we'll use to identify our cluster resources
+aws_disable_security_group_ingress|bool|The aws provider creates an inbound rule per load balancer on the node security group. However, this can run into the AWS security group rule limit of 50 if many LoadBalancers are created. This flag disables the automatic ingress creation. It requires that the user has setup a rule that allows inbound traffic on kubelet ports from the local VPC subnet (so load balancers can access it). E.g. 10.82.0.0/16 30000-32000.
+aws_elb_security_group|string|Only in Kubelet version >= 1.7 : AWS has a hard limit of 500 security groups. For large clusters creating a security group for each ELB can cause the max number of security groups to be reached. If this is set instead of creating a new Security group for each ELB this security group will be used instead.
+aws_disable_strict_zone_check|bool|During the instantiation of an new AWS cloud provider, the detected region is validated against a known set of regions. In a non-standard, AWS like environment (e.g. Eucalyptus), this check may be undesirable. Setting this to true will disable the check and provide a warning that the check was skipped. Please note that this is an experimental feature and work-in-progress for the moment.
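As a hedged illustration of how the table above is typically consumed, the variables go into `group_vars/all.yml` next to `cloud_provider: 'aws'`; every value below is a placeholder rather than a recommended default:

```yaml
# Illustrative only: IDs are placeholders for your own account/VPC
cloud_provider: 'aws'
aws_zone: ""                                   # usually left blank, per the table above
aws_vpc: "vpc-0123456789abcdef0"
aws_subnet_id: "subnet-0123456789abcdef0"
aws_elb_security_group: "sg-0123456789abcdef0" # reuse one security group for all ELBs (kubelet >= 1.7)
aws_disable_strict_zone_check: false
```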
@@ -1,6 +1,13 @@
 Calico
 ===========
 
+---
+**N.B. Version 2.6.5 upgrade to 3.1.1 is upgrading etcd store to etcdv3**
+If you create automated backups of etcdv2 please switch for creating etcdv3 backups, as kubernetes and calico now uses etcdv3
+After migration you can check `/tmp/calico_upgrade/` directory for converted items to etcdv3.
+**PLEASE TEST upgrade before upgrading production cluster.**
+---
+
 Check if the calico-node container is running
 
 ```
@@ -14,7 +21,7 @@ The **calicoctl** command allows to check the status of the network workloads.
 calicoctl node status
 ```
 
-or for versions prior *v1.0.0*:
+or for versions prior to *v1.0.0*:
 
 ```
 calicoctl status
@@ -26,7 +33,7 @@ calicoctl status
 calicoctl get ippool -o wide
 ```
 
-or for versions prior *v1.0.0*:
+or for versions prior to *v1.0.0*:
 
 ```
 calicoctl pool show
@@ -66,7 +73,7 @@ In some cases you may want to route the pods subnet and so NAT is not needed on
 For instance if you have a cluster spread on different locations and you want your pods to talk each other no matter where they are located.
 The following variables need to be set:
 `peer_with_router` to enable the peering with the datacenter's border router (default value: false).
-you'll need to edit the inventory and add a and a hostvar `local_as` by node.
+you'll need to edit the inventory and add a hostvar `local_as` by node.
 
 ```
 node1 ansible_ssh_host=95.54.0.12 local_as=xxxxxx
@@ -86,7 +93,7 @@ To do so you can deploy BGP route reflectors and peer `calico-node` with them as
 recommended here:
 
 * https://hub.docker.com/r/calico/routereflector/
-* http://docs.projectcalico.org/v2.0/reference/private-cloud/l3-interconnect-fabric
+* https://docs.projectcalico.org/v3.1/reference/private-cloud/l3-interconnect-fabric
 
 You need to edit your inventory and add:
 
@@ -149,7 +156,7 @@ The inventory above will deploy the following topology assuming that calico's
 
 ##### Optional : Define default endpoint to host action
 
-By default Calico blocks traffic from endpoints to the host itself by using an iptables DROP action. When using it in kubernetes the action has to be changed to RETURN (default in kubespray) or ACCEPT (see https://github.com/projectcalico/felix/issues/660 and https://github.com/projectcalico/calicoctl/issues/1389). Otherwise all network packets from pods (with hostNetwork=False) to services endpoints (with hostNetwork=True) withing the same node are dropped.
+By default Calico blocks traffic from endpoints to the host itself by using an iptables DROP action. When using it in kubernetes the action has to be changed to RETURN (default in kubespray) or ACCEPT (see https://github.com/projectcalico/felix/issues/660 and https://github.com/projectcalico/calicoctl/issues/1389). Otherwise all network packets from pods (with hostNetwork=False) to services endpoints (with hostNetwork=True) within the same node are dropped.
 
 
 To re-define default action please set the following variable in your inventory:
@@ -157,6 +164,15 @@ To re-define default action please set the following variable in your inventory:
 calico_endpoint_to_host_action: "ACCEPT"
 ```
 
+##### Optional : Define address on which Felix will respond to health requests
+
+Since Calico 3.2.0, HealthCheck default behavior changed from listening on all interfaces to just listening on localhost.
+
+To re-define health host please set the following variable in your inventory:
+```
+calico_healthhost: "0.0.0.0"
+```
+
 Cloud providers configuration
 =============================
 
@@ -169,3 +185,12 @@ By default the felix agent(calico-node) will abort if the Kernel RPF setting is
 ```
 calico_node_ignorelooserpf: true
 ```
+
+Note that in OpenStack you must allow `ipip` traffic in your security groups,
+otherwise you will experience timeouts.
+To do this you must add a rule which allows it, for example:
+
+```
+neutron security-group-rule-create --protocol 4 --direction egress k8s-a0tp4t
+neutron security-group-rule-create --protocol 4 --direction igress k8s-a0tp4t
+```
@@ -54,16 +54,18 @@ The default configuration uses VXLAN to create an overlay. Two networks are crea
 
 You can change the default network configuration by overriding the `contiv_networks` variable.
 
-The default forward mode is set to routing:
+The default forward mode is set to routing and the default network mode is vxlan:
 
 ```yaml
 contiv_fwd_mode: routing
+contiv_net_mode: vxlan
 ```
 
 The following is an example of how you can use VLAN instead of VXLAN:
 
 ```yaml
 contiv_fwd_mode: bridge
+contiv_net_mode: vlan
 contiv_vlan_interface: eth0
 contiv_networks:
   - name: default-net
docs/cri-o.md (new file, 31 lines)
@@ -0,0 +1,31 @@
+cri-o
+===============
+
+cri-o is container developed by kubernetes project.
+Currently, only basic function is supported for cri-o.
+
+* cri-o is supported kubernetes 1.11.1 or later.
+* helm and other feature may not be supported due to docker dependency.
+* scale.yml and upgrade-cluster.yml are not supported.
+
+helm and other feature may not be supported due to docker dependency.
+
+Use cri-o instead of docker, set following variable:
+
+#### all.yml
+
+```
+kubeadm_enabled: true
+...
+download_container: false
+skip_downloads: false
+```
+
+#### k8s-cluster.yml
+
+```
+etcd_deployment_type: host
+kubelet_deployment_type: host
+container_manager: crio
+```
+
@@ -52,13 +52,13 @@ You can modify how Kubespray sets up DNS for your cluster with the variables ``d
 ## dns_mode
 ``dns_mode`` configures how Kubespray will setup cluster DNS. There are four modes available:
 
-#### dnsmasq_kubedns (default)
+#### dnsmasq_kubedns
 This installs an additional dnsmasq DaemonSet which gives more flexibility and lifts some
 limitations (e.g. number of nameservers). Kubelet is instructed to use dnsmasq instead of kubedns/skydns.
 It is configured to forward all DNS queries belonging to cluster services to kubedns/skydns. All
 other queries are forwardet to the nameservers found in ``upstream_dns_servers`` or ``default_resolver``
 
-#### kubedns
+#### kubedns (default)
 This does not install the dnsmasq DaemonSet and instructs kubelet to directly use kubedns/skydns for
 all queries.
 
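Assuming `dns_mode` behaves as described in that hunk, selecting a mode is a plain group_vars override; the snippet below is a minimal sketch (the file path and the chosen upstream resolvers are illustrative):

```yaml
# e.g. inventory/mycluster/group_vars/k8s-cluster.yml (illustrative path)
dns_mode: kubedns          # the new default; use dnsmasq_kubedns to keep the extra dnsmasq DaemonSet
upstream_dns_servers:      # optional upstream resolvers the cluster DNS forwards to
  - 8.8.8.8
  - 1.1.1.1
```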
@@ -40,3 +40,14 @@ The full list of available vars may be found in the download's ansible role defa
 Those also allow to specify custom urls and local repositories for binaries and container
 images as well. See also the DNS stack docs for the related intranet configuration,
 so the hosts can resolve those urls and repos.
+
+## Offline environment
+
+In case your servers don't have access to internet (for example when deploying on premises with security constraints), you'll have, first, to setup the appropriate proxies/caches/mirrors and/or internal repositories and registries and, then, adapt the following variables to fit your environment before deploying:
+
+* At least `foo_image_repo` and `foo_download_url` as described before (i.e. in case of use of proxies to registries and binaries repositories, checksums and versions do not necessarily need to be changed).
+  NB: Regarding `foo_image_repo`, when using insecure registries/proxies, you will certainly have to append them to the `docker_insecure_registries` variable in group_vars/all/docker.yml
+* Depending on the `container_manager`
+  * When `container_manager=docker`, `docker_foo_repo_base_url`, `docker_foo_repo_gpgkey`, `dockerproject_bar_repo_base_url` and `dockerproject_bar_repo_gpgkey` (where `foo` is the distribution and `bar` is system package manager)
+  * When `container_manager=crio`, `crio_rhel_repo_base_url`
+* When using Helm, `helm_stable_repo_url`
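For illustration, an offline override following the `foo_image_repo` / `foo_download_url` pattern described above might look like the sketch below; the registry and mirror hostnames are placeholders, and the concrete variable names must be taken from the download role defaults mentioned earlier:

```yaml
# Hypothetical offline overrides (hostnames are placeholders)
docker_insecure_registries:
  - registry.internal.example.com:5000
# Pattern for image repos and download URLs (substitute real variable names from the download role):
# foo_image_repo: "registry.internal.example.com:5000/mirrored/foo"
# foo_download_url: "https://mirror.internal.example.com/downloads/foo"
helm_stable_repo_url: "https://charts.internal.example.com/stable"
```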
@@ -38,9 +38,9 @@ See more details in the [ansible guide](ansible.md).
 Adding nodes
 ------------
 
-You may want to add **worker** nodes to your existing cluster. This can be done by re-running the `cluster.yml` playbook, or you can target the bare minimum needed to get kubelet installed on the worker and talking to your masters. This is especially helpful when doing something like autoscaling your clusters.
+You may want to add worker, master or etcd nodes to your existing cluster. This can be done by re-running the `cluster.yml` playbook, or you can target the bare minimum needed to get kubelet installed on the worker and talking to your masters. This is especially helpful when doing something like autoscaling your clusters.
 
-- Add the new worker node to your inventory under kube-node (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
+- Add the new worker node to your inventory in the appropriate group (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
 - Run the ansible-playbook command, substituting `scale.yml` for `cluster.yml`:
 
     ansible-playbook -i inventory/mycluster/hosts.ini scale.yml -b -v \
@@ -51,11 +51,26 @@ Remove nodes
 
 You may want to remove **worker** nodes to your existing cluster. This can be done by re-running the `remove-node.yml` playbook. First, all nodes will be drained, then stop some kubernetes services and delete some certificates, and finally execute the kubectl command to delete these nodes. This can be combined with the add node function, This is generally helpful when doing something like autoscaling your clusters. Of course if a node is not working, you can remove the node and install it again.
 
-- Add worker nodes to the list under kube-node if you want to delete them (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
-- Run the ansible-playbook command, substituting `remove-node.yml`:
+Add worker nodes to the list under kube-node if you want to delete them (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
+
+    ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b -v \
+    --private-key=~/.ssh/private_key
+
+We support two ways to select the nodes:
+
+- Use `--extra-vars "node=<nodename>,<nodename2>"` to select the node you want to delete.
 ```
 ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b -v \
-  --private-key=~/.ssh/private_key
+  --private-key=~/.ssh/private_key \
+  --extra-vars "node=nodename,nodename2"
+```
+or
+- Use `--limit nodename,nodename2` to select the node
+```
+ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b -v \
+  --private-key=~/.ssh/private_key \
+  --limit nodename,nodename2"
 ```
 
 Connecting to Kubernetes
@@ -74,7 +89,7 @@ authentication. One could generate a kubeconfig based on one installed
 kube-master hosts (needs improvement) or connect with a username and password.
 By default, a user with admin rights is created, named `kube`.
 The password can be viewed after deployment by looking at the file
-`PATH_TO_KUBESPRAY/credentials/kube_user.creds`. This contains a randomly generated
+`{{ credentials_dir }}/kube_user.creds` (`credentials_dir` is set to `{{ inventory_dir }}/credentials` by default). This contains a randomly generated
 password. If you wish to set your own password, just precreate/modify this
 file yourself.
 
@@ -5,18 +5,38 @@ The following components require a highly available endpoints:
 * etcd cluster,
 * kube-apiserver service instances.
 
-The latter relies on a 3rd side reverse proxies, like Nginx or HAProxy, to
+The latter relies on a 3rd side reverse proxy, like Nginx or HAProxy, to
 achieve the same goal.
 
 Etcd
 ----
 
-The `etcd_access_endpoint` fact provides an access pattern for clients. And the
-`etcd_multiaccess` (defaults to `True`) group var controls that behavior.
-It makes deployed components to access the etcd cluster members
-directly: `http://ip1:2379, http://ip2:2379,...`. This mode assumes the clients
-do a loadbalancing and handle HA for connections.
+In order to use an external loadbalancing (L4/TCP or L7 w/ SSL Passthrough VIP), the following variables need to be overriden in group_vars
+* `etcd_access_addresses`
+* `etcd_client_url`
+* `etcd_cert_alt_names`
+* `etcd_cert_alt_ips`
+
+### Example of a VIP w/ FQDN
+```yaml
+etcd_access_addresses: https://etcd.example.com:2379
+etcd_client_url: https://etcd.example.com:2379
+etcd_cert_alt_names:
+  - "etcd.kube-system.svc.{{ dns_domain }}"
+  - "etcd.kube-system.svc"
+  - "etcd.kube-system"
+  - "etcd"
+  - "etcd.example.com" # This one needs to be added to the default etcd_cert_alt_names
+```
+
+### Example of a VIP w/o FQDN (IP only)
+
+```yaml
+etcd_access_addresses: https://2.3.7.9:2379
+etcd_client_url: https://2.3.7.9:2379
+etcd_cert_alt_ips:
+  - "2.3.7.9"
+```
+
 Kube-apiserver
 --------------
|
|||||||
so you will need to use a different port for the vip from that the API is
|
so you will need to use a different port for the vip from that the API is
|
||||||
listening on, or set the `kube_apiserver_bind_address` so that the API only
|
listening on, or set the `kube_apiserver_bind_address` so that the API only
|
||||||
listens on a specific interface (to avoid conflict with haproxy binding the
|
listens on a specific interface (to avoid conflict with haproxy binding the
|
||||||
port on the VIP adddress)
|
port on the VIP address)
|
||||||
|
|
||||||
This domain name, or default "lb-apiserver.kubernetes.local", will be inserted
|
This domain name, or default "lb-apiserver.kubernetes.local", will be inserted
|
||||||
into the `/etc/hosts` file of all servers in the `k8s-cluster` group and wired
|
into the `/etc/hosts` file of all servers in the `k8s-cluster` group and wired
|
||||||
|
|||||||
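For context, the `loadbalancer_apiserver` / `kube_apiserver_bind_address` combination discussed in that hunk is usually expressed in group_vars along these lines; the VIP, node address and port are placeholders and should be adapted to your environment:

```yaml
# Sketch only: addresses and port are placeholders
loadbalancer_apiserver:
  address: 10.0.0.100   # external VIP in front of the apiservers
  port: 8383            # use a port different from the apiserver's if haproxy binds the VIP on the masters
kube_apiserver_bind_address: 10.0.0.11   # or bind the API to one specific node interface instead
```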
docs/img/kubernetes-logo.png (new binary file, 6.8 KiB; binary not shown)
@@ -15,7 +15,7 @@ By default the normal behavior looks like:
 2. Kubernetes controller manager checks the statuses of Kubelets every
 `–-node-monitor-period`. The default value is **5s**.
 
 3. In case the status is updated within `--node-monitor-grace-period` of time,
 Kubernetes controller manager considers healthy status of Kubelet. The
 default value is **40s**.
 
@@ -33,7 +33,7 @@ Kubelet will try to make `nodeStatusUpdateRetry` post attempts. Currently
 [kubelet.go](https://github.com/kubernetes/kubernetes/blob/release-1.5/pkg/kubelet/kubelet.go#L102).
 
 Kubelet will try to update the status in
-[tryUpdateNodeStatus](https://github.com/kubernetes/kubernetes/blob/release-1.5/pkg/kubelet/kubelet_node_status.go#L345)
+[tryUpdateNodeStatus](https://github.com/kubernetes/kubernetes/blob/release-1.5/pkg/kubelet/kubelet_node_status.go#L312)
 function. Kubelet uses `http.Client()` Golang method, but has no specified
 timeout. Thus there may be some glitches when API Server is overloaded while
 TCP connection is established.
@@ -69,7 +69,7 @@ minute which may require large etcd containers or even dedicated nodes for etcd.
 
 > If we calculate the number of tries, the division will give 5, but in reality
 > it will be from 3 to 5 with `nodeStatusUpdateRetry` attempts of each try. The
-> total number of attemtps will vary from 15 to 25 due to latency of all
+> total number of attempts will vary from 15 to 25 due to latency of all
 > components.
 
 ## Medium Update and Average Reaction
@@ -92,7 +92,7 @@ etcd updates per minute.
 Let's set `-–node-status-update-frequency` to **1m**.
 `--node-monitor-grace-period` will set to **5m** and `--pod-eviction-timeout`
 to **1m**. In this scenario, every kubelet will try to update the status every
-minute. There will be 5 * 5 = 25 attempts before unhealty status. After 5m,
+minute. There will be 5 * 5 = 25 attempts before unhealthy status. After 5m,
 Kubernetes controller manager will set unhealthy status. This means that pods
 will be evicted after 1m after being marked unhealthy. (6m in total).
 
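A sketch of how the "Medium Update and Average Reaction" scenario above could be expressed as kubespray overrides; the variable names follow kubespray's usual mapping of these kubelet/controller-manager flags, but verify them against the role defaults of your version before relying on them:

```yaml
# Illustrative overrides for the medium-update scenario (verify variable names for your kubespray version)
kubelet_status_update_frequency: 1m              # --node-status-update-frequency
kube_controller_node_monitor_period: 5s          # --node-monitor-period
kube_controller_node_monitor_grace_period: 5m    # --node-monitor-grace-period
kube_controller_pod_eviction_timeout: 1m         # --pod-eviction-timeout
```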
@@ -25,13 +25,13 @@ There are related application specifc variables:
 netchecker_port: 31081
 agent_report_interval: 15
 netcheck_namespace: default
-agent_img: "quay.io/l23network/k8s-netchecker-agent:v1.0"
-server_img: "quay.io/l23network/k8s-netchecker-server:v1.0"
+agent_img: "mirantis/k8s-netchecker-agent:v1.2.2"
+server_img: "mirantis/k8s-netchecker-server:v1.2.2"
 ```
 
 Note that the application verifies DNS resolve for FQDNs comprising only the
 combination of the ``netcheck_namespace.dns_domain`` vars, for example the
-``netchecker-service.default.cluster.local``. If you want to deploy the application
+``netchecker-service.default.svc.cluster.local``. If you want to deploy the application
 to the non default namespace, make sure as well to adjust the ``searchdomains`` var
 so the resulting search domain records to contain that namespace, like:
 
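As a usage note (the surrounding doc is only partially shown here, so this is an assumption about its context), the netchecker variables above only matter once the netchecker deployment itself is enabled:

```yaml
# Minimal sketch: enable netchecker and pick the namespace it is deployed to
deploy_netchecker: true
netcheck_namespace: default
```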
@@ -3,7 +3,7 @@ OpenStack
 
 To deploy kubespray on [OpenStack](https://www.openstack.org/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'openstack'`.
 
-After that make sure to source in your OpenStack credentials like you would do when using `nova-client` by using `source path/to/your/openstack-rc`.
+After that make sure to source in your OpenStack credentials like you would do when using `nova-client` or `neutron-client` by using `source path/to/your/openstack-rc` or `. path/to/your/openstack-rc`.
 
 The next step is to make sure the hostnames in your `inventory` file are identical to your instance names in OpenStack.
 Otherwise [cinder](https://wiki.openstack.org/wiki/Cinder) won't work as expected.
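Putting the first steps of that hunk together, the group_vars change itself is just the provider switch (the rc-file path is a placeholder):

```yaml
# group_vars/all.yml — enable the OpenStack cloud provider
cloud_provider: 'openstack'
# then source your credentials before running the playbook, e.g.:  . path/to/your/openstack-rc
```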
@@ -12,35 +12,34 @@ Unless you are using calico you can now run the playbook.

**Additional step needed when using calico:**

Calico does not encapsulate all packets with the hosts' ip addresses. Instead the packets are routed with the pods' ip addresses directly.

OpenStack will filter and drop all packets from ips it does not know, to prevent spoofing.

In order to make calico work on OpenStack you will need to tell OpenStack to allow calico's packets by allowing the network it uses.

First you will need the ids of your OpenStack instances that will run kubernetes:

    openstack server list --project YOUR_PROJECT
    +--------------------------------------+--------+----------------------------------+--------+-------------+
    | ID                                   | Name   | Tenant ID                        | Status | Power State |
    +--------------------------------------+--------+----------------------------------+--------+-------------+
    | e1f48aad-df96-4bce-bf61-62ae12bf3f95 | k8s-1  | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running     |
    | 725cd548-6ea3-426b-baaa-e7306d3c8052 | k8s-2  | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running     |

Then you can use the instance ids to find the connected [neutron](https://wiki.openstack.org/wiki/Neutron) ports (now managed through the unified `openstack` CLI):

    openstack port list -c id -c device_id --project YOUR_PROJECT
    +--------------------------------------+--------------------------------------+
    | id                                   | device_id                            |
    +--------------------------------------+--------------------------------------+
    | 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | e1f48aad-df96-4bce-bf61-62ae12bf3f95 |
    | e5ae2045-a1e1-4e99-9aac-4353889449a7 | 725cd548-6ea3-426b-baaa-e7306d3c8052 |

Given the port ids on the left, you can set the `allowed-address` pairs in OpenStack. Note that you have to allow both `kube_service_addresses` (default `10.233.0.0/18`) and `kube_pods_subnet` (default `10.233.64.0/18`).

    # allow kube_service_addresses and kube_pods_subnet network
    openstack port set 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
    openstack port set e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18

Now you can finally run the playbook.
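As a quick sanity check before deploying, each port should now report both subnets in its `allowed_address_pairs` field (an illustrative check, reusing the example port id above):

    openstack port show 5662a4e0-e646-47f0-bf88-d80fbd2d99ef -c allowed_address_pairs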
docs/proxy.md (new file, 16 lines)
@@ -0,0 +1,16 @@
# Setting up Environment Proxy

If you set an http and https proxy, all nodes and the loadbalancer are excluded from the proxy by generating the no_proxy variable in `roles/kubespray-defaults/defaults/main.yml`. If you have additional resources to exclude, add them to the `additional_no_proxy` variable. If you want to fully override your `no_proxy` setting, fill in just `no_proxy`, and no node or loadbalancer addresses will be added to no_proxy.

## Set proxy for http and https

`http_proxy: "http://example.proxy.tld:port"`

`https_proxy: "http://example.proxy.tld:port"`

## Set default no_proxy (this will override default no_proxy generation)

`no_proxy: "node1,node1_ip,node2,node2_ip...additional_host"`

## Set additional addresses to default no_proxy (all cluster nodes and loadbalancer)

`additional_no_proxy: "additional_host"`
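The three variables above usually end up together in the inventory group vars; a minimal sketch (the proxy URL and extra hosts below are placeholders, not defaults):

```
# e.g. in inventory/sample/group_vars/all/all.yml
http_proxy: "http://proxy.example.com:3128"
https_proxy: "http://proxy.example.com:3128"
# extra hosts that should bypass the proxy, on top of the generated no_proxy
additional_no_proxy: "registry.internal.example.com,10.0.0.0/8"
```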
@@ -9,7 +9,7 @@ Kubespray's roadmap

### Self deployment (pull-mode) [#320](https://github.com/kubespray/kubespray/issues/320)
- the playbook would install and configure docker/rkt and the etcd cluster
- the following data would be inserted into etcd: certs, tokens, users, inventory, group_vars.
- a "kubespray" container would be deployed (kubespray-cli, ansible-playbook)
- to be discussed, a way to provide the inventory
- **self deployment** of the node from inside a container [#321](https://github.com/kubespray/kubespray/issues/321)
@@ -24,7 +24,7 @@ Kubespray's roadmap

### Tests
- [ ] Run kubernetes e2e tests
- [ ] Test idempotency on single OS but for all network plugins/container engines
- [ ] single test on AWS per day
- [ ] test scale up cluster: +1 etcd, +1 master, +1 node
- [ ] Reorganize CI test vars into group var files
@@ -35,7 +35,7 @@ ansible-playbook cluster.yml -i inventory/sample/hosts.ini -e kube_version=v1.4.

#### Graceful upgrade

Kubespray also supports cordon, drain and uncordoning of nodes when performing
a cluster upgrade. There is a separate playbook used for this purpose. It is
important to note that upgrade-cluster.yml can only be used for upgrading an
existing cluster. That means there must be at least 1 kube-master already
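For instance, a graceful upgrade to a newer release is a single invocation of that playbook (a sketch; adjust the inventory path and target `kube_version` to your environment):

```
ansible-playbook upgrade-cluster.yml -b -i inventory/sample/hosts.ini -e kube_version=v1.11.3
```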
@@ -81,3 +81,61 @@ kubernetes-apps/rotate_tokens role, only pods in kube-system are destroyed and
recreated. All other invalidated service account tokens are cleaned up
automatically, but other pods are not deleted out of an abundance of caution
for impact to user deployed pods.

### Component-based upgrades

A deployer may want to upgrade specific components in order to minimize risk
or save time. This strategy is not covered by CI as of this writing, so it is
not guaranteed to work.

These commands are useful only for upgrading fully-deployed, healthy, existing
hosts. This will definitely not work for undeployed or partially deployed
hosts.

Upgrade docker:

```
ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=docker
```

Upgrade etcd:

```
ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=etcd
```

Upgrade vault:

```
ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=vault
```

Upgrade kubelet:

```
ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=node --skip-tags=k8s-gen-certs,k8s-gen-tokens
```

Upgrade Kubernetes master components:

```
ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=master
```

Upgrade network plugins:

```
ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=network
```

Upgrade all add-ons:

```
ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=apps
```

Upgrade just helm (assuming `helm_enabled` is true):

```
ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=helm
```
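Tags can also be combined into a single run when several components should move together; for example (an untested sketch, using standard Ansible comma-separated tags):

```
ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=master,node
```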
docs/vars.md
@@ -126,9 +126,20 @@ node_labels:
  label1_name: label1_value
  label2_name: label2_value
```

* *podsecuritypolicy_enabled* - When set to `true`, enables the PodSecurityPolicy admission controller and defines two policies, `privileged` (applying to all resources in the `kube-system` namespace and to kubelet) and `restricted` (applying to all other namespaces).
  Addons deployed in the kube-system namespace are handled.
* *kubernetes_audit* - When set to `true`, enables auditing.
  The audit parameters can be tuned via the following variables (default values shown below):
  * `audit_log_path`: /var/log/audit/kube-apiserver-audit.log
  * `audit_log_maxage`: 30
  * `audit_log_maxbackups`: 1
  * `audit_log_maxsize`: 100
  * `audit_policy_file`: "{{ kube_config_dir }}/audit-policy/apiserver-audit-policy.yaml"

  By default, the `audit_policy_file` contains [default rules](https://github.com/kubernetes-incubator/kubespray/blob/master/roles/kubernetes/master/templates/apiserver-audit-policy.yaml.j2) that can be overridden with the `audit_policy_custom_rules` variable (see the sketch below).
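For example, a minimal audit configuration in the cluster group vars could look like this (values are illustrative, not recommendations):

```
kubernetes_audit: true
audit_log_maxage: 7
audit_log_maxsize: 200
# everything else left at the defaults listed above
```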
##### Custom flags for Kube Components
For all kube components, custom flags can be passed in. This allows for edge cases where users need changes to the default deployment that may not be applicable to all deployments. This can be done by providing a list of flags. The `kubelet_node_custom_flags` variable applies kubelet settings only to nodes, not to masters. Example:
```
kubelet_custom_flags:
  - "--eviction-hard=memory.available<100Mi"
```
@@ -140,11 +151,12 @@ The possible vars are:
* *controller_mgr_custom_flags*
* *scheduler_custom_flags*
* *kubelet_custom_flags*
* *kubelet_node_custom_flags* (node-only; see the sketch below)

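A sketch of the node-only variant, following the same list form as the example above (the flag shown is only an illustration):

```
kubelet_node_custom_flags:
  - "--max-pods=50"
```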
#### User accounts

By default, a user with admin rights is created, named `kube`.
The password can be viewed after deployment by looking at the file
`{{ credentials_dir }}/kube_user.creds` (`credentials_dir` is set to `{{ inventory_dir }}/credentials` by default). This contains a randomly generated
password. If you wish to set your own password, just precreate/modify this
file yourself or change the `kube_api_pwd` var.

@@ -9,7 +9,7 @@ Weave uses [**consensus**](https://www.weave.works/docs/net/latest/ipam/##consen

Weave encryption is supported for all communication

* To use Weave encryption, specify a strong password (if no password, no encryption)

```
# In file ./inventory/sample/group_vars/k8s-cluster.yml
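# A sketch completing this example; the variable name matches the
# weave_password default shown elsewhere in this diff. Pick your own value.
weave_password: EnterPasswordHere
```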
@@ -8,8 +8,8 @@
    version: "{{ item.version }}"
    state: "{{ item.state }}"
  with_items:
    - { state: "present", name: "docker", version: "3.4.1" }
    - { state: "present", name: "docker-compose", version: "1.21.2" }

- name: CephFS Provisioner | Check Go version
  shell: |
@@ -35,19 +35,19 @@
- name: CephFS Provisioner | Clone repo
  git:
    repo: https://github.com/kubernetes-incubator/external-storage.git
    dest: "~/go/src/github.com/kubernetes-incubator/external-storage"
    version: 06fddbe2
    clone: yes
    update: yes

- name: CephFS Provisioner | Build image
  shell: |
    cd ~/go/src/github.com/kubernetes-incubator/external-storage
    REGISTRY=quay.io/kubespray/ VERSION=06fddbe2 make ceph/cephfs

- name: CephFS Provisioner | Push image
  docker_image:
    name: quay.io/kubespray/cephfs-provisioner:06fddbe2
    push: yes
  retries: 10
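A quick way to confirm the pushed tag is actually available is to pull it back (a sketch, assuming access to the quay.io/kubespray registry):

```
docker pull quay.io/kubespray/cephfs-provisioner:06fddbe2
```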
@@ -1,137 +0,0 @@ (old monolithic sample group_vars file removed; its variables reappear in the new per-topic files below)
inventory/sample/group_vars/all/all.yml (new file, 85 lines)
@@ -0,0 +1,85 @@
## Valid bootstrap options (required): ubuntu, coreos, centos, none
## If the OS is not listed here, it means it doesn't require extra/bootstrap steps.
## For example, python is not available on 'coreos', so it must be installed before
## anything else. Debian, on the other hand, already has all its dependencies fulfilled, so bootstrap_os should be set to `none`.
bootstrap_os: none

## Directory where etcd data stored
etcd_data_dir: /var/lib/etcd

## Directory where the binaries will be installed
bin_dir: /usr/local/bin

## The access_ip variable is used to define how other nodes should access
## the node. This is used in flannel to allow other flannel nodes to see
## this node for example. The access_ip is really useful in AWS and Google
## environments where the nodes are accessed remotely by the "public" ip,
## but don't know about that address themselves.
#access_ip: 1.1.1.1


## External LB example config
## apiserver_loadbalancer_domain_name: "elb.some.domain"
#loadbalancer_apiserver:
#  address: 1.2.3.4
#  port: 1234

## Internal loadbalancers for apiservers
#loadbalancer_apiserver_localhost: true

## Local loadbalancer should use this port instead, if defined.
## Defaults to kube_apiserver_port (6443)
#nginx_kube_apiserver_port: 8443

### OTHER OPTIONAL VARIABLES
## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
## modules.
#kubelet_load_modules: false

## Upstream dns servers used by dnsmasq
#upstream_dns_servers:
#  - 8.8.8.8
#  - 8.8.4.4

## There are some changes specific to the cloud providers
## for instance we need to encapsulate packets with some network plugins
## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'
## When openstack is used make sure to source in the openstack credentials
## like you would do when using nova-client before starting the playbook.
#cloud_provider:


## Uncomment to enable experimental kubeadm deployment mode
#kubeadm_enabled: false

## Set these proxy values in order to update package manager and docker daemon to use proxies
#http_proxy: ""
#https_proxy: ""

## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
#no_proxy: ""

## Some problems may occur when downloading files over https proxy due to ansible bug
## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable
## SSL validation of get_url module. Note that kubespray will still be performing checksum validation.
#download_validate_certs: False

## If you need to exclude all cluster nodes from proxy and other resources, add other resources here.
#additional_no_proxy: ""

## Certificate Management
## This setting determines whether certs are generated via scripts or whether a
## cluster of Hashicorp's Vault is started to issue certificates (using etcd
## as a backend). Options are "script" or "vault"
#cert_management: script

## Set to true to allow pre-checks to fail and continue deployment
#ignore_assert_errors: false

## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
#kube_read_only_port: 10255

## Set true to download and cache container
#download_container: true
inventory/sample/group_vars/all/azure.yml (new file, 14 lines)
@@ -0,0 +1,14 @@
## When azure is used, you need to also set the following variables.
## see docs/azure.md for details on how to get these values

#azure_tenant_id:
#azure_subscription_id:
#azure_aad_client_id:
#azure_aad_client_secret:
#azure_resource_group:
#azure_location:
#azure_subnet_name:
#azure_security_group_name:
#azure_vnet_name:
#azure_vnet_resource_group:
#azure_route_table_name:
inventory/sample/group_vars/all/coreos.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
## Does coreos need auto upgrade, default is true
#coreos_auto_upgrade: true
inventory/sample/group_vars/all/docker.yml (new file, 61 lines)
@@ -0,0 +1,61 @@
## Uncomment this if you want to force overlay/overlay2 as docker storage driver
## Please note that overlay2 is only supported on newer kernels
#docker_storage_options: -s overlay2

## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7.
docker_container_storage_setup: false

## A disk path must be defined for docker_container_storage_setup_devs,
## otherwise docker-storage-setup will be executed incorrectly.
#docker_container_storage_setup_devs: /dev/vdb

## Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
docker_dns_servers_strict: false

# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"

## Used to set docker daemon iptables options to true
docker_iptables_enabled: "false"

# Docker log options
# Rotate container stderr/stdout logs at 50m and keep last 5
docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5"

# define docker bin_dir
docker_bin_dir: "/usr/bin"

## An obvious use case is allowing insecure-registry access to self hosted registries.
## Can be an ip address or a domain name,
## e.g. 172.19.16.11 or mirror.registry.io
#docker_insecure_registries:
#  - mirror.registry.io
#  - 172.19.16.11

## Add other registries, e.g. a China registry mirror.
#docker_registry_mirrors:
#  - https://registry.docker-cn.com
#  - https://mirror.aliyuncs.com

## If non-empty will override default system MountFlags value.
## This option takes a mount propagation flag: shared, slave
## or private, which control whether mounts in the file system
## namespace set up for docker will receive or propagate mounts
## and unmounts. Leave empty for system default
#docker_mount_flags:

## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
docker_options: >-
  {%- if docker_insecure_registries is defined -%}
  {{ docker_insecure_registries | map('regex_replace', '^(.*)$', '--insecure-registry=\1' ) | list | join(' ') }}
  {%- endif %}
  {% if docker_registry_mirrors is defined -%}
  {{ docker_registry_mirrors | map('regex_replace', '^(.*)$', '--registry-mirror=\1' ) | list | join(' ') }}
  {%- endif %}
  --graph={{ docker_daemon_graph }} {{ docker_log_opts }}
  {%- if ansible_architecture == "aarch64" and ansible_os_family == "RedHat" %}
  --add-runtime docker-runc=/usr/libexec/docker/docker-runc-current
  --default-runtime=docker-runc --exec-opt native.cgroupdriver=systemd
  --userland-proxy-path=/usr/libexec/docker/docker-proxy-current --signature-verification=false
  {%- endif -%}
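To make the Jinja template above concrete, this is roughly what `docker_options` expands to when both optional lists are set (a hand-worked sketch, not output captured from a real run):

```
# with:
#   docker_insecure_registries: [mirror.registry.io]
#   docker_registry_mirrors: [https://registry.docker-cn.com]
# the daemon options become approximately:
--insecure-registry=mirror.registry.io --registry-mirror=https://registry.docker-cn.com --graph=/var/lib/docker --log-opt max-size=50m --log-opt max-file=5
```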
inventory/sample/group_vars/all/oci.yml (new file, 15 lines)
@@ -0,0 +1,15 @@
## When Oracle Cloud Infrastructure is used, set these variables
#oci_private_key:
#oci_region_id:
#oci_tenancy_id:
#oci_user_id:
#oci_user_fingerprint:
#oci_compartment_id:
#oci_vnc_id:
#oci_subnet1_id:
#oci_subnet2_id:
## Override these default behaviors if you wish
#oci_security_list_management: All
# If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint
#oci_use_instance_principals: false
#oci_cloud_controller_version: 0.5.0
inventory/sample/group_vars/all/openstack.yml (new file, 16 lines)
@@ -0,0 +1,16 @@
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
#openstack_blockstorage_version: "v1/v2/auto (default)"
#openstack_blockstorage_ignore_volume_az: yes
## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
#openstack_lbaas_enabled: True
#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
## To enable automatic floating ip provisioning, specify a subnet.
#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
## Override default LBaaS behavior
#openstack_lbaas_use_octavia: False
#openstack_lbaas_method: "ROUND_ROBIN"
#openstack_lbaas_provider: "haproxy"
#openstack_lbaas_create_monitor: "yes"
#openstack_lbaas_monitor_delay: "1m"
#openstack_lbaas_monitor_timeout: "30s"
#openstack_lbaas_monitor_max_retries: "3"
inventory/sample/group_vars/etcd.yml (new file, 18 lines)
@@ -0,0 +1,18 @@
## Etcd auto compaction retention for mvcc key value store in hour
#etcd_compaction_retention: 0

## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
#etcd_metrics: basic

## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing.
## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM.
#etcd_memory_limit: "512M"

## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than
## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check
## etcd documentation for more information.
#etcd_quota_backend_bytes: "2G"

### ETCD: disable peer client cert authentication.
# This affects ETCD_PEER_CLIENT_CERT_AUTH variable
#etcd_peer_client_auth: true
inventory/sample/group_vars/k8s-cluster/addons.yml (new file, 51 lines)
@@ -0,0 +1,51 @@
# Kubernetes dashboard
# RBAC required. see docs/getting-started.md for access details.
dashboard_enabled: true

# Helm deployment
helm_enabled: false

# Registry deployment
registry_enabled: false
# registry_namespace: kube-system
# registry_storage_class: ""
# registry_disk_size: "10Gi"

# Local volume provisioner deployment
local_volume_provisioner_enabled: false
# local_volume_provisioner_namespace: kube-system
# local_volume_provisioner_base_dir: /mnt/disks
# local_volume_provisioner_mount_dir: /mnt/disks
# local_volume_provisioner_storage_class: local-storage

# CephFS provisioner deployment
cephfs_provisioner_enabled: false
# cephfs_provisioner_namespace: "cephfs-provisioner"
# cephfs_provisioner_cluster: ceph
# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
# cephfs_provisioner_admin_id: admin
# cephfs_provisioner_secret: secret
# cephfs_provisioner_storage_class: cephfs
# cephfs_provisioner_reclaim_policy: Delete
# cephfs_provisioner_claim_root: /volumes
# cephfs_provisioner_deterministic_names: true

# Nginx ingress controller deployment
ingress_nginx_enabled: false
# ingress_nginx_host_network: false
# ingress_nginx_nodeselector:
#   node-role.kubernetes.io/master: "true"
# ingress_nginx_namespace: "ingress-nginx"
# ingress_nginx_insecure_port: 80
# ingress_nginx_secure_port: 443
# ingress_nginx_configmap:
#   map-hash-bucket-size: "128"
#   ssl-protocols: "SSLv2"
# ingress_nginx_configmap_tcp_services:
#   9000: "default/example-go:8080"
# ingress_nginx_configmap_udp_services:
#   53: "kube-system/kube-dns:53"

# Cert manager deployment
cert_manager_enabled: false
# cert_manager_namespace: "cert-manager"
@@ -19,7 +19,7 @@ kube_users_dir: "{{ kube_config_dir }}/users"
kube_api_anonymous_auth: true

## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.11.3

# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
@@ -34,9 +34,12 @@ kube_cert_group: kube-cert
# Cluster Loglevel configuration
kube_log_level: 2

# Directory where credentials will be stored
credentials_dir: "{{ inventory_dir }}/credentials"

# Users to create for basic auth in Kubernetes API via HTTP
# Optionally add groups for user
kube_api_pwd: "{{ lookup('password', credentials_dir + '/kube_user.creds length=15 chars=ascii_letters,digits') }}"
kube_users:
  kube:
    pass: "{{kube_api_pwd}}"
@@ -56,38 +59,17 @@ kube_users:
# kube_oidc_url: https:// ...
# kube_oidc_client_id: kubernetes
## Optional settings for OIDC
# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem"
# kube_oidc_username_claim: sub
# kube_oidc_username_prefix: oidc:
# kube_oidc_groups_claim: groups
# kube_oidc_groups_prefix: oidc:

# Choose network plugin (cilium, calico, contiv, weave or flannel)
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
kube_network_plugin: calico

# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18
@@ -112,6 +94,11 @@ kube_apiserver_insecure_port: 8080 # (http)
# Can be ipvs, iptables
kube_proxy_mode: iptables

# Kube-proxy nodeport address.
# cidr to bind nodeport services. Flag --nodeport-addresses on kube-proxy manifest
kube_proxy_nodeport_addresses: false
# kube_proxy_nodeport_addresses_cidr: 10.0.1.0/24

## Encrypting Secret Data at Rest (experimental)
kube_encrypt_secret_data: false
@@ -135,18 +122,11 @@ skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipad
dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
dns_domain: "{{ cluster_name }}"

## Container runtime
## docker for docker and crio for cri-o.
container_manager: docker

## Settings for containerized control plane (etcd/kubelet/secrets)
etcd_deployment_type: docker
kubelet_deployment_type: host
vault_deployment_type: docker
@@ -155,64 +135,19 @@ helm_deployment_type: host
# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent

# audit log for kubernetes
kubernetes_audit: false

# dynamic kubelet configuration
dynamic_kubelet_configuration: false

# define kubelet config dir for dynamic kubelet
#kubelet_config_dir:
default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
dynamic_kubelet_configuration_dir: "{{ kubelet_config_dir | default(default_kubelet_config_dir) }}"

# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
podsecuritypolicy_enabled: false

# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
# kubeconfig_localhost: false
@@ -239,3 +174,18 @@ persistent_volumes_enabled: false
## See https://github.com/kubernetes-incubator/kubespray/issues/2141
## Set this variable to true to get rid of this issue
volume_cross_zone_attachment: false

# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
persistent_volumes_enabled: false

## Container Engine Acceleration
## Enable container acceleration feature, for example use gpu acceleration in containers
# nvidia_accelerator_enabled: true
## Nvidia GPU driver install. Install will be done by an (init) pod running as a daemonset.
## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2'
## Array with nvidia_gpu_nodes, leave empty or comment if you don't want to install drivers.
## Labels and taints won't be set on nodes if they are not in the array.
# nvidia_gpu_nodes:
#   - kube-gpu-001
# nvidia_driver_version: "384.111"
## flavor can be tesla or gtx
# nvidia_gpu_flavor: gtx
inventory/sample/group_vars/k8s-cluster/k8s-net-calico.yml (new file, 20 lines)
@@ -0,0 +1,20 @@
# see roles/network_plugin/calico/defaults/main.yml

## With calico it is possible to distribute routes with border routers of the datacenter.
## Warning : enabling router peering will disable calico's default behavior ('node mesh').
## The subnets of each node will be distributed by the datacenter router
#peer_with_router: false

# Enables Internet connectivity from containers
# nat_outgoing: true

# add default ippool name
# calico_pool_name: "default-pool"

# Global as_num (/calico/bgp/v1/global/as_num)
# global_as_num: "64512"

# You can set MTU value here. If left undefined or empty, it will
# not be specified in calico CNI config, so Calico will use built-in
# defaults. The value should be a number, not a string.
# calico_mtu: 1500