Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2025-12-14 13:54:37 +03:00)
Compare commits
806 Commits
Commits are listed by SHA only, from 02cd5418c2 through 7c2b12ebd7; the Author and Date columns are empty in this mirror view.
4  .gitignore  vendored

@@ -1,6 +1,7 @@
.vagrant
*.retry
inventory/vagrant_ansible_inventory
inventory/credentials/
inventory/group_vars/fake_hosts.yml
inventory/host_vars/
temp
@@ -10,6 +11,7 @@ temp
*.bak
*.tfstate
*.tfstate.backup
contrib/terraform/aws/credentials.tfvars
**/*.sw[pon]
/ssh-bastion.conf
**/*.sw[pon]
@@ -22,7 +24,7 @@ __pycache__/

# Distribution / packaging
.Python
artifacts/
inventory/*/artifacts/
env/
build/
credentials/
627  .gitlab-ci.yml
@@ -1,14 +1,31 @@
|
||||
stages:
|
||||
- moderator
|
||||
- unit-tests
|
||||
- deploy-gce-part1
|
||||
- deploy-gce-part2
|
||||
- deploy-gce-special
|
||||
- moderator
|
||||
- deploy-part1
|
||||
- deploy-part2
|
||||
- deploy-special
|
||||
|
||||
variables:
|
||||
FAILFASTCI_NAMESPACE: 'kargo-ci'
|
||||
GITLAB_REPOSITORY: 'kargo-ci/kubernetes-incubator__kubespray'
|
||||
# DOCKER_HOST: tcp://localhost:2375
|
||||
ANSIBLE_FORCE_COLOR: "true"
|
||||
MAGIC: "ci check this"
|
||||
TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
|
||||
CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
|
||||
GS_ACCESS_KEY_ID: $GS_KEY
|
||||
GS_SECRET_ACCESS_KEY: $GS_SECRET
|
||||
CONTAINER_ENGINE: docker
|
||||
SSH_USER: root
|
||||
GCE_PREEMPTIBLE: "false"
|
||||
ANSIBLE_KEEP_REMOTE_FILES: "1"
|
||||
ANSIBLE_CONFIG: ./tests/ansible.cfg
|
||||
ANSIBLE_INVENTORY: ./inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini
|
||||
IDEMPOT_CHECK: "false"
|
||||
RESET_CHECK: "false"
|
||||
UPGRADE_TEST: "false"
|
||||
KUBEADM_ENABLED: "false"
|
||||
LOG_LEVEL: "-vv"
|
||||
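Each job's cluster settings are resolved through interpolation: `CI_TEST_VARS` points at a per-scenario vars file named after the job (`./tests/files/${CI_JOB_NAME}.yml`), which is later handed to ansible-playbook via `-e @${CI_TEST_VARS}`, while `ANSIBLE_INVENTORY` points at a generated inventory under `inventory/sample/`. A rough sketch of what such a vars file might contain; the keys and values below are assumptions for illustration, not copied from the repository:

```yaml
# tests/files/gce_coreos-calico-aio.yml -- hypothetical per-job vars file,
# picked up through CI_TEST_VARS and passed as: -e @${CI_TEST_VARS}
cloud_image: coreos-stable-1465-6-0-v20170817   # assumed
cloud_region: us-west1-b                        # assumed
kube_network_plugin: calico                     # assumed
mode: aio                                       # assumed
startup_script: 'systemctl disable locksmithd && systemctl stop locksmithd'
```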
|
||||
# asia-east1-a
|
||||
# asia-northeast1-a
|
||||
@@ -18,15 +35,14 @@ variables:
|
||||
# us-west1-a
|
||||
|
||||
before_script:
|
||||
- pip install -r tests/requirements.txt
|
||||
- /usr/bin/python -m pip install -r tests/requirements.txt
|
||||
- mkdir -p /.ssh
|
||||
- cp tests/ansible.cfg .
|
||||
|
||||
.job: &job
|
||||
tags:
|
||||
- kubernetes
|
||||
- docker
|
||||
image: quay.io/ant31/kargo:master
|
||||
image: quay.io/kubespray/kubespray:latest
|
||||
|
||||
.docker_service: &docker_service
|
||||
services:
|
||||
@@ -39,31 +55,17 @@ before_script:
|
||||
.gce_variables: &gce_variables
|
||||
GCE_USER: travis
|
||||
SSH_USER: $GCE_USER
|
||||
TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
|
||||
CONTAINER_ENGINE: docker
|
||||
PRIVATE_KEY: $GCE_PRIVATE_KEY
|
||||
GS_ACCESS_KEY_ID: $GS_KEY
|
||||
GS_SECRET_ACCESS_KEY: $GS_SECRET
|
||||
CLOUD_MACHINE_TYPE: "g1-small"
|
||||
ANSIBLE_KEEP_REMOTE_FILES: "1"
|
||||
ANSIBLE_CONFIG: ./tests/ansible.cfg
|
||||
BOOTSTRAP_OS: none
|
||||
DOWNLOAD_LOCALHOST: "false"
|
||||
DOWNLOAD_RUN_ONCE: "false"
|
||||
IDEMPOT_CHECK: "false"
|
||||
RESET_CHECK: "false"
|
||||
UPGRADE_TEST: "false"
|
||||
KUBEADM_ENABLED: "false"
|
||||
RESOLVCONF_MODE: docker_dns
|
||||
LOG_LEVEL: "-vv"
|
||||
ETCD_DEPLOYMENT: "docker"
|
||||
KUBELET_DEPLOYMENT: "host"
|
||||
VAULT_DEPLOYMENT: "docker"
|
||||
WEAVE_CPU_LIMIT: "100m"
|
||||
AUTHORIZATION_MODES: "{ 'authorization_modes': [] }"
|
||||
MAGIC: "ci check this"
|
||||
CI_PLATFORM: "gce"
|
||||
PRIVATE_KEY: $GCE_PRIVATE_KEY
|
||||
|
||||
.gce: &gce
|
||||
.do_variables: &do_variables
|
||||
PRIVATE_KEY: $DO_PRIVATE_KEY
|
||||
CI_PLATFORM: "do"
|
||||
SSH_USER: root
|
||||
|
||||
|
||||
.testcases: &testcases
|
||||
<<: *job
|
||||
<<: *docker_service
|
||||
cache:
|
||||
@@ -73,65 +75,43 @@ before_script:
|
||||
- $HOME/.cache
|
||||
before_script:
|
||||
- docker info
|
||||
- pip install -r tests/requirements.txt
|
||||
- /usr/bin/python -m pip install -r requirements.txt
|
||||
- /usr/bin/python -m pip install -r tests/requirements.txt
|
||||
- mkdir -p /.ssh
|
||||
- mkdir -p $HOME/.ssh
|
||||
- echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
|
||||
- echo $GCE_PEM_FILE | base64 -d > $HOME/.ssh/gce
|
||||
- echo $GCE_CREDENTIALS > $HOME/.ssh/gce.json
|
||||
- chmod 400 $HOME/.ssh/id_rsa
|
||||
- ansible-playbook --version
|
||||
- export PYPATH=$([ $BOOTSTRAP_OS = none ] && echo /usr/bin/python || echo /opt/bin/python)
|
||||
- export PYPATH=$([[ ! "$CI_JOB_NAME" =~ "coreos" ]] && echo /usr/bin/python || echo /opt/bin/python)
|
||||
- echo "CI_JOB_NAME is $CI_JOB_NAME"
|
||||
- echo "PYPATH is $PYPATH"
|
||||
script:
|
||||
- pwd
|
||||
- ls
|
||||
- echo ${PWD}
|
||||
- echo "${STARTUP_SCRIPT}"
|
||||
- >
|
||||
ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local
|
||||
${LOG_LEVEL}
|
||||
-e cloud_image=${CLOUD_IMAGE}
|
||||
-e cloud_region=${CLOUD_REGION}
|
||||
-e gce_credentials_file=${HOME}/.ssh/gce.json
|
||||
-e gce_project_id=${GCE_PROJECT_ID}
|
||||
-e gce_service_account_email=${GCE_ACCOUNT}
|
||||
-e cloud_machine_type=${CLOUD_MACHINE_TYPE}
|
||||
-e inventory_path=${PWD}/inventory/inventory.ini
|
||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||
-e mode=${CLUSTER_MODE}
|
||||
-e test_id=${TEST_ID}
|
||||
-e startup_script="'${STARTUP_SCRIPT}'"
|
||||
- cd tests && make create-${CI_PLATFORM} -s ; cd -
|
||||
|
||||
# Check out latest tag if testing upgrade
|
||||
# Uncomment when gitlab kargo repo has tags
|
||||
# Uncomment when gitlab kubespray repo has tags
|
||||
#- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
|
||||
- test "${UPGRADE_TEST}" != "false" && git checkout 72ae7638bcc94c66afa8620dfa4ad9a9249327ea
|
||||
- test "${UPGRADE_TEST}" != "false" && git checkout f7d52564aad2ff8e337634951beb4a881c0e8aa6
|
||||
# Checkout the CI vars file so it is available
|
||||
- test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml
|
||||
# Workaround https://github.com/kubernetes-incubator/kubespray/issues/2021
|
||||
- 'sh -c "echo ignore_assert_errors: true | tee -a tests/files/${CI_JOB_NAME}.yml"'
|
||||
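The workaround is just an append: after this step runs, the job's vars file gains one extra key, so the tail of `tests/files/${CI_JOB_NAME}.yml` becomes:

```yaml
# appended by the workaround step above (kubespray issue 2021)
ignore_assert_errors: true
```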
|
||||
|
||||
# Create cluster
|
||||
- >
|
||||
ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER
|
||||
ansible-playbook
|
||||
-i ${ANSIBLE_INVENTORY}
|
||||
-b --become-user=root
|
||||
--private-key=${HOME}/.ssh/id_rsa
|
||||
-u $SSH_USER
|
||||
${SSH_ARGS}
|
||||
${LOG_LEVEL}
|
||||
-e ansible_python_interpreter=${PYPATH}
|
||||
-e @${CI_TEST_VARS}
|
||||
-e ansible_ssh_user=${SSH_USER}
|
||||
-e bootstrap_os=${BOOTSTRAP_OS}
|
||||
-e cloud_provider=gce
|
||||
-e cert_management=${CERT_MGMT:-script}
|
||||
-e "{deploy_netchecker: true}"
|
||||
-e "{download_localhost: ${DOWNLOAD_LOCALHOST}}"
|
||||
-e "{download_run_once: ${DOWNLOAD_RUN_ONCE}}"
|
||||
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
|
||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||
-e kubedns_min_replicas=1
|
||||
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
|
||||
-e local_release_dir=${PWD}/downloads
|
||||
-e resolvconf_mode=${RESOLVCONF_MODE}
|
||||
-e vault_deployment_type=${VAULT_DEPLOYMENT}
|
||||
-e weave_cpu_requests=${WEAVE_CPU_LIMIT}
|
||||
-e weave_cpu_limit=${WEAVE_CPU_LIMIT}
|
||||
-e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
|
||||
-e "${AUTHORIZATION_MODES}"
|
||||
--limit "all:!fake_hosts"
|
||||
cluster.yml
|
||||
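Note that several toggles are passed to ansible-playbook as inline YAML dictionaries (for example `-e "{deploy_netchecker: true}"`) rather than `key=value` pairs, which Ansible would treat as strings; the dict form preserves the boolean type. The same settings could equally be kept in a small extra-vars file, illustrative only, and passed with `-e @netcheck-vars.yml`:

```yaml
# netcheck-vars.yml -- hypothetical extra-vars file equivalent to the inline -e "{...}" flags
deploy_netchecker: true
download_localhost: false
download_run_once: false
kubeadm_enabled: false
```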
|
||||
@@ -141,27 +121,16 @@ before_script:
|
||||
test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml";
|
||||
test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml";
|
||||
git checkout "${CI_BUILD_REF}";
|
||||
ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER
|
||||
ansible-playbook
|
||||
-i ${ANSIBLE_INVENTORY}
|
||||
-b --become-user=root
|
||||
--private-key=${HOME}/.ssh/id_rsa
|
||||
-u $SSH_USER
|
||||
${SSH_ARGS}
|
||||
${LOG_LEVEL}
|
||||
-e ansible_python_interpreter=${PYPATH}
|
||||
-e @${CI_TEST_VARS}
|
||||
-e ansible_ssh_user=${SSH_USER}
|
||||
-e bootstrap_os=${BOOTSTRAP_OS}
|
||||
-e cloud_provider=gce
|
||||
-e "{deploy_netchecker: true}"
|
||||
-e "{download_localhost: ${DOWNLOAD_LOCALHOST}}"
|
||||
-e "{download_run_once: ${DOWNLOAD_RUN_ONCE}}"
|
||||
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
|
||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||
-e kubedns_min_replicas=1
|
||||
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
|
||||
-e local_release_dir=${PWD}/downloads
|
||||
-e resolvconf_mode=${RESOLVCONF_MODE}
|
||||
-e vault_deployment_type=${VAULT_DEPLOYMENT}
|
||||
-e weave_cpu_requests=${WEAVE_CPU_LIMIT}
|
||||
-e weave_cpu_limit=${WEAVE_CPU_LIMIT}
|
||||
-e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
|
||||
-e "${AUTHORIZATION_MODES}"
|
||||
--limit "all:!fake_hosts"
|
||||
$PLAYBOOK;
|
||||
fi
|
||||
@@ -169,37 +138,28 @@ before_script:
|
||||
# Tests Cases
|
||||
## Test Master API
|
||||
- >
|
||||
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
|
||||
ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
|
||||
-e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
|
||||
|
||||
## Ping between 2 pods
|
||||
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
|
||||
- ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
|
||||
|
||||
## Advanced DNS checks
|
||||
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL
|
||||
- ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL
|
||||
|
||||
## Idempotency checks 1/5 (repeat deployment)
|
||||
- >
|
||||
if [ "${IDEMPOT_CHECK}" = "true" ]; then
|
||||
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
|
||||
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||
ansible-playbook
|
||||
-i ${ANSIBLE_INVENTORY}
|
||||
-b --become-user=root
|
||||
--private-key=${HOME}/.ssh/id_rsa
|
||||
-e bootstrap_os=${BOOTSTRAP_OS}
|
||||
-e cloud_provider=gce
|
||||
-u $SSH_USER
|
||||
${SSH_ARGS}
|
||||
${LOG_LEVEL}
|
||||
-e @${CI_TEST_VARS}
|
||||
-e ansible_python_interpreter=${PYPATH}
|
||||
-e "{deploy_netchecker: true}"
|
||||
-e "{download_localhost: ${DOWNLOAD_LOCALHOST}}"
|
||||
-e "{download_run_once: ${DOWNLOAD_RUN_ONCE}}"
|
||||
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
|
||||
-e kubedns_min_replicas=1
|
||||
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
|
||||
-e local_release_dir=${PWD}/downloads
|
||||
-e resolvconf_mode=${RESOLVCONF_MODE}
|
||||
-e vault_deployment_type=${VAULT_DEPLOYMENT}
|
||||
-e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
|
||||
-e weave_cpu_requests=${WEAVE_CPU_LIMIT}
|
||||
-e weave_cpu_limit=${WEAVE_CPU_LIMIT}
|
||||
-e "${AUTHORIZATION_MODES}"
|
||||
--limit "all:!fake_hosts"
|
||||
cluster.yml;
|
||||
fi
|
||||
@@ -207,20 +167,29 @@ before_script:
|
||||
## Idempotency checks 2/5 (Advanced DNS checks)
|
||||
- >
|
||||
if [ "${IDEMPOT_CHECK}" = "true" ]; then
|
||||
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
|
||||
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
|
||||
ansible-playbook
|
||||
-i ${ANSIBLE_INVENTORY}
|
||||
-b --become-user=root
|
||||
--private-key=${HOME}/.ssh/id_rsa
|
||||
-u $SSH_USER
|
||||
${SSH_ARGS}
|
||||
${LOG_LEVEL}
|
||||
-e @${CI_TEST_VARS}
|
||||
--limit "all:!fake_hosts"
|
||||
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
|
||||
fi
|
||||
|
||||
## Idempotency checks 3/5 (reset deployment)
|
||||
- >
|
||||
if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then
|
||||
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
|
||||
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
|
||||
ansible-playbook
|
||||
-i ${ANSIBLE_INVENTORY}
|
||||
-b --become-user=root
|
||||
--private-key=${HOME}/.ssh/id_rsa
|
||||
-e bootstrap_os=${BOOTSTRAP_OS}
|
||||
-e cloud_provider=gce
|
||||
-u $SSH_USER
|
||||
${SSH_ARGS}
|
||||
${LOG_LEVEL}
|
||||
-e @${CI_TEST_VARS}
|
||||
-e ansible_python_interpreter=${PYPATH}
|
||||
-e reset_confirmation=yes
|
||||
--limit "all:!fake_hosts"
|
||||
@@ -229,248 +198,148 @@ before_script:
|
||||
|
||||
## Idempotency checks 4/5 (redeploy after reset)
|
||||
- >
|
||||
if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then
|
||||
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
|
||||
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
|
||||
ansible-playbook
|
||||
-i ${ANSIBLE_INVENTORY}
|
||||
-b --become-user=root
|
||||
--private-key=${HOME}/.ssh/id_rsa
|
||||
-e bootstrap_os=${BOOTSTRAP_OS}
|
||||
-e cloud_provider=gce
|
||||
-u $SSH_USER
|
||||
${SSH_ARGS}
|
||||
${LOG_LEVEL}
|
||||
-e @${CI_TEST_VARS}
|
||||
-e ansible_python_interpreter=${PYPATH}
|
||||
-e "{deploy_netchecker: true}"
|
||||
-e "{download_localhost: ${DOWNLOAD_LOCALHOST}}"
|
||||
-e "{download_run_once: ${DOWNLOAD_RUN_ONCE}}"
|
||||
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
|
||||
-e kubedns_min_replicas=1
|
||||
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
|
||||
-e local_release_dir=${PWD}/downloads
|
||||
-e resolvconf_mode=${RESOLVCONF_MODE}
|
||||
-e vault_deployment_type=${VAULT_DEPLOYMENT}
|
||||
-e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
|
||||
-e weave_cpu_requests=${WEAVE_CPU_LIMIT}
|
||||
-e weave_cpu_limit=${WEAVE_CPU_LIMIT}
|
||||
-e "${AUTHORIZATION_MODES}"
|
||||
--limit "all:!fake_hosts"
|
||||
cluster.yml;
|
||||
fi
|
||||
|
||||
## Idempotency checks 5/5 (Advanced DNS checks)
|
||||
- >
|
||||
if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then
|
||||
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
|
||||
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
|
||||
ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH}
|
||||
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
|
||||
--limit "all:!fake_hosts"
|
||||
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
|
||||
fi
|
||||
|
||||
after_script:
|
||||
- >
|
||||
ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
|
||||
-e mode=${CLUSTER_MODE}
|
||||
-e test_id=${TEST_ID}
|
||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
||||
-e gce_project_id=${GCE_PROJECT_ID}
|
||||
-e gce_service_account_email=${GCE_ACCOUNT}
|
||||
-e gce_credentials_file=${HOME}/.ssh/gce.json
|
||||
-e cloud_image=${CLOUD_IMAGE}
|
||||
-e inventory_path=${PWD}/inventory/inventory.ini
|
||||
-e cloud_region=${CLOUD_REGION}
|
||||
- cd tests && make delete-${CI_PLATFORM} -s ; cd -
|
||||
|
||||
.gce: &gce
|
||||
<<: *testcases
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
|
||||
.do: &do
|
||||
variables:
|
||||
<<: *do_variables
|
||||
<<: *testcases
|
||||
|
||||
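The hidden `.job`, `.docker_service`, `.testcases`, `.gce`, and `.do` entries compose the concrete jobs through YAML anchors (`&name`) and merge keys (`<<: *name`), so each real job only declares what differs. A minimal sketch of the same pattern, with illustrative names rather than the ones used in this file:

```yaml
.defaults: &defaults            # hidden template job, never run directly
  image: quay.io/kubespray/kubespray:latest
  tags:
    - kubernetes
    - docker

.platform_vars: &platform_vars
  CI_PLATFORM: "gce"
  SSH_USER: travis

example-job:
  <<: *defaults                 # pull in image and tags
  stage: deploy-part1
  variables:
    <<: *platform_vars          # pull in the shared variables
    KUBE_NETWORK_PLUGIN: calico # job-specific override
```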
# Test matrix. Leave the comments for markup scripts.
|
||||
.coreos_calico_aio_variables: &coreos_calico_aio_variables
|
||||
# stage: deploy-gce-part1
|
||||
AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }"
|
||||
KUBE_NETWORK_PLUGIN: calico
|
||||
CLOUD_IMAGE: coreos-stable-1465-6-0-v20170817
|
||||
CLOUD_REGION: us-west1-b
|
||||
CLOUD_MACHINE_TYPE: "n1-standard-2"
|
||||
CLUSTER_MODE: aio
|
||||
BOOTSTRAP_OS: coreos
|
||||
RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
|
||||
## User-data to simply turn off coreos upgrades
|
||||
STARTUP_SCRIPT: 'systemctl disable locksmithd && systemctl stop locksmithd'
|
||||
# stage: deploy-part1
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
|
||||
.ubuntu_canal_ha_rbac_variables: &ubuntu_canal_ha_rbac_variables
|
||||
# stage: deploy-gce-part1
|
||||
KUBE_NETWORK_PLUGIN: canal
|
||||
AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }"
|
||||
CLOUD_IMAGE: ubuntu-1604-xenial
|
||||
CLOUD_REGION: europe-west1-b
|
||||
CLUSTER_MODE: ha
|
||||
.ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables
|
||||
# stage: deploy-part1
|
||||
UPGRADE_TEST: "graceful"
|
||||
STARTUP_SCRIPT: ""
|
||||
|
||||
.centos_weave_kubeadm_variables: &centos_weave_kubeadm_variables
|
||||
# stage: deploy-gce-part1
|
||||
KUBE_NETWORK_PLUGIN: weave
|
||||
AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }"
|
||||
CLOUD_IMAGE: centos-7
|
||||
CLOUD_MACHINE_TYPE: "n1-standard-1"
|
||||
CLOUD_REGION: us-central1-b
|
||||
CLUSTER_MODE: ha
|
||||
KUBEADM_ENABLED: "true"
|
||||
# stage: deploy-part1
|
||||
UPGRADE_TEST: "graceful"
|
||||
STARTUP_SCRIPT: ""
|
||||
|
||||
.ubuntu_canal_kubeadm_variables: &ubuntu_canal_kubeadm_variables
|
||||
# stage: deploy-gce-part1
|
||||
KUBE_NETWORK_PLUGIN: canal
|
||||
AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }"
|
||||
CLOUD_IMAGE: ubuntu-1604-xenial
|
||||
CLOUD_MACHINE_TYPE: "n1-standard-1"
|
||||
CLOUD_REGION: europe-west1-b
|
||||
CLUSTER_MODE: ha
|
||||
KUBEADM_ENABLED: "true"
|
||||
STARTUP_SCRIPT: ""
|
||||
# stage: deploy-part1
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
|
||||
.ubuntu_contiv_sep_variables: &ubuntu_contiv_sep_variables
|
||||
# stage: deploy-special
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
|
||||
.coreos_cilium_variables: &coreos_cilium_variables
|
||||
# stage: deploy-special
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
|
||||
.ubuntu_cilium_sep_variables: &ubuntu_cilium_sep_variables
|
||||
# stage: deploy-special
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
|
||||
.rhel7_weave_variables: &rhel7_weave_variables
|
||||
# stage: deploy-gce-part1
|
||||
KUBE_NETWORK_PLUGIN: weave
|
||||
CLOUD_IMAGE: rhel-7
|
||||
CLOUD_REGION: europe-west1-b
|
||||
CLUSTER_MODE: default
|
||||
STARTUP_SCRIPT: ""
|
||||
# stage: deploy-part1
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
|
||||
.centos7_flannel_variables: &centos7_flannel_variables
|
||||
# stage: deploy-gce-part2
|
||||
KUBE_NETWORK_PLUGIN: flannel
|
||||
CLOUD_IMAGE: centos-7
|
||||
CLOUD_REGION: us-west1-a
|
||||
CLOUD_MACHINE_TYPE: "n1-standard-2"
|
||||
CLUSTER_MODE: default
|
||||
STARTUP_SCRIPT: ""
|
||||
.centos7_flannel_addons_variables: &centos7_flannel_addons_variables
|
||||
# stage: deploy-part2
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
|
||||
.debian8_calico_variables: &debian8_calico_variables
|
||||
# stage: deploy-gce-part2
|
||||
KUBE_NETWORK_PLUGIN: calico
|
||||
CLOUD_IMAGE: debian-8-kubespray
|
||||
CLOUD_REGION: us-central1-b
|
||||
CLUSTER_MODE: default
|
||||
STARTUP_SCRIPT: ""
|
||||
# stage: deploy-part2
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
|
||||
.coreos_canal_variables: &coreos_canal_variables
|
||||
# stage: deploy-gce-part2
|
||||
KUBE_NETWORK_PLUGIN: canal
|
||||
CLOUD_IMAGE: coreos-stable-1465-6-0-v20170817
|
||||
CLOUD_REGION: us-east1-b
|
||||
CLUSTER_MODE: default
|
||||
BOOTSTRAP_OS: coreos
|
||||
IDEMPOT_CHECK: "true"
|
||||
RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
|
||||
STARTUP_SCRIPT: 'systemctl disable locksmithd && systemctl stop locksmithd'
|
||||
# stage: deploy-part2
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
|
||||
.rhel7_canal_sep_variables: &rhel7_canal_sep_variables
|
||||
# stage: deploy-gce-special
|
||||
KUBE_NETWORK_PLUGIN: canal
|
||||
CLOUD_IMAGE: rhel-7
|
||||
CLOUD_REGION: us-east1-b
|
||||
CLUSTER_MODE: separate
|
||||
STARTUP_SCRIPT: ""
|
||||
# stage: deploy-special
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
|
||||
.ubuntu_weave_sep_variables: &ubuntu_weave_sep_variables
|
||||
# stage: deploy-gce-special
|
||||
KUBE_NETWORK_PLUGIN: weave
|
||||
CLOUD_IMAGE: ubuntu-1604-xenial
|
||||
CLOUD_REGION: us-central1-b
|
||||
CLUSTER_MODE: separate
|
||||
IDEMPOT_CHECK: "false"
|
||||
STARTUP_SCRIPT: ""
|
||||
# stage: deploy-special
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
|
||||
.centos7_calico_ha_variables: &centos7_calico_ha_variables
|
||||
# stage: deploy-gce-special
|
||||
KUBE_NETWORK_PLUGIN: calico
|
||||
DOWNLOAD_LOCALHOST: "true"
|
||||
DOWNLOAD_RUN_ONCE: "true"
|
||||
CLOUD_IMAGE: centos-7
|
||||
CLOUD_REGION: europe-west1-b
|
||||
CLUSTER_MODE: ha-scale
|
||||
IDEMPOT_CHECK: "true"
|
||||
STARTUP_SCRIPT: ""
|
||||
# stage: deploy-special
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
|
||||
.coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables
|
||||
# stage: deploy-gce-special
|
||||
KUBE_NETWORK_PLUGIN: weave
|
||||
CLOUD_IMAGE: coreos-alpha-1506-0-0-v20170817
|
||||
CLOUD_REGION: us-west1-a
|
||||
CLUSTER_MODE: ha-scale
|
||||
BOOTSTRAP_OS: coreos
|
||||
RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
|
||||
STARTUP_SCRIPT: 'systemctl disable locksmithd && systemctl stop locksmithd'
|
||||
# stage: deploy-special
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
|
||||
.ubuntu_rkt_sep_variables: &ubuntu_rkt_sep_variables
|
||||
# stage: deploy-gce-part1
|
||||
KUBE_NETWORK_PLUGIN: flannel
|
||||
CLOUD_IMAGE: ubuntu-1604-xenial
|
||||
CLOUD_REGION: us-central1-b
|
||||
CLUSTER_MODE: separate
|
||||
ETCD_DEPLOYMENT: rkt
|
||||
KUBELET_DEPLOYMENT: rkt
|
||||
STARTUP_SCRIPT: ""
|
||||
# stage: deploy-part1
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
|
||||
.ubuntu_vault_sep_variables: &ubuntu_vault_sep_variables
|
||||
# stage: deploy-gce-part1
|
||||
AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }"
|
||||
CLOUD_MACHINE_TYPE: "n1-standard-2"
|
||||
KUBE_NETWORK_PLUGIN: canal
|
||||
CERT_MGMT: vault
|
||||
CLOUD_IMAGE: ubuntu-1604-xenial
|
||||
CLOUD_REGION: us-central1-b
|
||||
CLUSTER_MODE: separate
|
||||
STARTUP_SCRIPT: ""
|
||||
# stage: deploy-part1
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
|
||||
.ubuntu_flannel_variables: &ubuntu_flannel_variables
|
||||
# stage: deploy-special
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
|
||||
.opensuse_canal_variables: &opensuse_canal_variables
|
||||
# stage: deploy-part2
|
||||
MOVED_TO_GROUP_VARS: "true"
|
||||
|
||||
.ubuntu_flannel_rbac_variables: &ubuntu_flannel_rbac_variables
|
||||
# stage: deploy-gce-special
|
||||
AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }"
|
||||
KUBE_NETWORK_PLUGIN: flannel
|
||||
CLOUD_IMAGE: ubuntu-1604-xenial
|
||||
CLOUD_REGION: europe-west1-b
|
||||
CLUSTER_MODE: separate
|
||||
STARTUP_SCRIPT: ""
|
||||
|
||||
# Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
|
||||
coreos-calico-aio:
|
||||
stage: deploy-gce-part1
|
||||
### PR JOBS PART1
|
||||
gce_coreos-calico-aio:
|
||||
stage: deploy-part1
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *coreos_calico_aio_variables
|
||||
<<: *gce_variables
|
||||
when: on_success
|
||||
except: ['triggers']
|
||||
only: [/^pr-.*$/]
|
||||
|
||||
coreos-calico-sep-triggers:
|
||||
stage: deploy-gce-part1
|
||||
### PR JOBS PART2
|
||||
gce_centos7-flannel-addons:
|
||||
stage: deploy-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *coreos_calico_aio_variables
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
centos7-flannel:
|
||||
stage: deploy-gce-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *centos7_flannel_variables
|
||||
<<: *centos7_flannel_addons_variables
|
||||
when: on_success
|
||||
except: ['triggers']
|
||||
only: [/^pr-.*$/]
|
||||
|
||||
centos7-flannel-triggers:
|
||||
stage: deploy-gce-part1
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *centos7_flannel_variables
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
ubuntu-weave-sep:
|
||||
stage: deploy-gce-special
|
||||
gce_ubuntu-weave-sep:
|
||||
stage: deploy-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
@@ -480,8 +349,40 @@ ubuntu-weave-sep:
|
||||
except: ['triggers']
|
||||
only: [/^pr-.*$/]
|
||||
|
||||
ubuntu-weave-sep-triggers:
|
||||
stage: deploy-gce-part1
|
||||
### MANUAL JOBS
|
||||
gce_coreos-calico-sep-triggers:
|
||||
stage: deploy-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *coreos_calico_aio_variables
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
gce_ubuntu-canal-ha-triggers:
|
||||
stage: deploy-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *ubuntu_canal_ha_variables
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
gce_centos7-flannel-addons-triggers:
|
||||
stage: deploy-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *centos7_flannel_addons_variables
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
|
||||
gce_ubuntu-weave-sep-triggers:
|
||||
stage: deploy-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
@@ -491,29 +392,29 @@ ubuntu-weave-sep-triggers:
|
||||
only: ['triggers']
|
||||
|
||||
# More builds for PRs/merges (manual) and triggers (auto)
|
||||
ubuntu-canal-ha-rbac:
|
||||
stage: deploy-gce-part1
|
||||
do_ubuntu-canal-ha:
|
||||
stage: deploy-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
<<: *do
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *ubuntu_canal_ha_rbac_variables
|
||||
<<: *do_variables
|
||||
when: manual
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
ubuntu-canal-ha-rbac-triggers:
|
||||
stage: deploy-gce-part1
|
||||
gce_ubuntu-canal-ha:
|
||||
stage: deploy-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *ubuntu_canal_ha_rbac_variables
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
<<: *ubuntu_canal_ha_variables
|
||||
when: manual
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
ubuntu-canal-kubeadm-rbac:
|
||||
stage: deploy-gce-part1
|
||||
gce_ubuntu-canal-kubeadm:
|
||||
stage: deploy-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
@@ -523,8 +424,8 @@ ubuntu-canal-kubeadm-rbac:
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
ubuntu-canal-kubeadm-triggers:
|
||||
stage: deploy-gce-part1
|
||||
gce_ubuntu-canal-kubeadm-triggers:
|
||||
stage: deploy-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
@@ -533,8 +434,8 @@ ubuntu-canal-kubeadm-triggers:
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
centos-weave-kubeadm-rbac:
|
||||
stage: deploy-gce-part1
|
||||
gce_centos-weave-kubeadm:
|
||||
stage: deploy-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
@@ -544,8 +445,8 @@ centos-weave-kubeadm-rbac:
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
centos-weave-kubeadm-triggers:
|
||||
stage: deploy-gce-part1
|
||||
gce_centos-weave-kubeadm-triggers:
|
||||
stage: deploy-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
@@ -554,8 +455,41 @@ centos-weave-kubeadm-triggers:
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
rhel7-weave:
|
||||
stage: deploy-gce-part1
|
||||
gce_ubuntu-contiv-sep:
|
||||
stage: deploy-special
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *ubuntu_contiv_sep_variables
|
||||
when: manual
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
gce_coreos-cilium:
|
||||
stage: deploy-special
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *coreos_cilium_variables
|
||||
when: manual
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
gce_ubuntu-cilium-sep:
|
||||
stage: deploy-special
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *ubuntu_cilium_sep_variables
|
||||
when: manual
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
gce_rhel7-weave:
|
||||
stage: deploy-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
@@ -565,8 +499,8 @@ rhel7-weave:
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
rhel7-weave-triggers:
|
||||
stage: deploy-gce-part1
|
||||
gce_rhel7-weave-triggers:
|
||||
stage: deploy-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
@@ -575,8 +509,8 @@ rhel7-weave-triggers:
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
debian8-calico-upgrade:
|
||||
stage: deploy-gce-part2
|
||||
gce_debian8-calico-upgrade:
|
||||
stage: deploy-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
@@ -586,8 +520,8 @@ debian8-calico-upgrade:
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
debian8-calico-triggers:
|
||||
stage: deploy-gce-part1
|
||||
gce_debian8-calico-triggers:
|
||||
stage: deploy-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
@@ -596,8 +530,8 @@ debian8-calico-triggers:
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
coreos-canal:
|
||||
stage: deploy-gce-part2
|
||||
gce_coreos-canal:
|
||||
stage: deploy-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
@@ -607,8 +541,8 @@ coreos-canal:
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
coreos-canal-triggers:
|
||||
stage: deploy-gce-part1
|
||||
gce_coreos-canal-triggers:
|
||||
stage: deploy-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
@@ -617,8 +551,8 @@ coreos-canal-triggers:
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
rhel7-canal-sep:
|
||||
stage: deploy-gce-special
|
||||
gce_rhel7-canal-sep:
|
||||
stage: deploy-special
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
@@ -628,8 +562,8 @@ rhel7-canal-sep:
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/,]
|
||||
|
||||
rhel7-canal-sep-triggers:
|
||||
stage: deploy-gce-part1
|
||||
gce_rhel7-canal-sep-triggers:
|
||||
stage: deploy-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
@@ -638,8 +572,8 @@ rhel7-canal-sep-triggers:
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
centos7-calico-ha:
|
||||
stage: deploy-gce-special
|
||||
gce_centos7-calico-ha:
|
||||
stage: deploy-special
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
@@ -649,8 +583,8 @@ centos7-calico-ha:
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
centos7-calico-ha-triggers:
|
||||
stage: deploy-gce-part1
|
||||
gce_centos7-calico-ha-triggers:
|
||||
stage: deploy-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
@@ -659,9 +593,20 @@ centos7-calico-ha-triggers:
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
|
||||
gce_opensuse-canal:
|
||||
stage: deploy-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *opensuse_canal_variables
|
||||
when: manual
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
# no triggers yet https://github.com/kubernetes-incubator/kargo/issues/613
|
||||
coreos-alpha-weave-ha:
|
||||
stage: deploy-gce-special
|
||||
gce_coreos-alpha-weave-ha:
|
||||
stage: deploy-special
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
@@ -671,8 +616,8 @@ coreos-alpha-weave-ha:
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
ubuntu-rkt-sep:
|
||||
stage: deploy-gce-part1
|
||||
gce_ubuntu-rkt-sep:
|
||||
stage: deploy-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
@@ -682,8 +627,8 @@ ubuntu-rkt-sep:
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
ubuntu-vault-sep:
|
||||
stage: deploy-gce-part1
|
||||
gce_ubuntu-vault-sep:
|
||||
stage: deploy-part2
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
@@ -693,13 +638,13 @@ ubuntu-vault-sep:
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
ubuntu-flannel-rbac-sep:
|
||||
stage: deploy-gce-special
|
||||
gce_ubuntu-flannel-sep:
|
||||
stage: deploy-special
|
||||
<<: *job
|
||||
<<: *gce
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
<<: *ubuntu_flannel_rbac_variables
|
||||
<<: *ubuntu_flannel_variables
|
||||
when: manual
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
16  Dockerfile  Normal file

@@ -0,0 +1,16 @@
FROM ubuntu:16.04

RUN mkdir /kubespray
WORKDIR /kubespray
RUN apt update -y && \
    apt install -y \
    libssl-dev python-dev sshpass apt-transport-https \
    ca-certificates curl gnupg2 software-properties-common python-pip
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
    add-apt-repository \
    "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
    $(lsb_release -cs) \
    stable" \
    && apt update -y && apt-get install docker-ce -y
COPY . .
RUN /usr/bin/python -m pip install pip -U && /usr/bin/python -m pip install -r tests/requirements.txt && python -m pip install -r requirements.txt
161  README.md
@@ -1,66 +1,93 @@
|
||||

|
||||
|
||||
## Deploy a production ready kubernetes cluster
|
||||
Deploy a Production Ready Kubernetes Cluster
|
||||
============================================
|
||||
|
||||
If you have questions, join us on the [kubernetes slack](https://slack.k8s.io), channel **#kubespray**.
|
||||
If you have questions, join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
|
||||
|
||||
- Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal**
|
||||
- Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere or Baremetal**
|
||||
- **High available** cluster
|
||||
- **Composable** (Choice of the network plugin for instance)
|
||||
- Support most popular **Linux distributions**
|
||||
- **Continuous integration tests**
|
||||
|
||||
Quick Start
|
||||
-----------
|
||||
|
||||
To deploy the cluster you can use:
|
||||
|
||||
[**kubespray-cli**](https://github.com/kubespray/kubespray-cli) <br>
|
||||
**Ansible** usual commands and [**inventory builder**](https://github.com/kubernetes-incubator/kubespray/blob/master/contrib/inventory_builder/inventory.py) <br>
|
||||
**vagrant** by simply running `vagrant up` (for tests purposes) <br>
|
||||
### Ansible
|
||||
|
||||
# Copy ``inventory/sample`` as ``inventory/mycluster``
|
||||
cp -rfp inventory/sample inventory/mycluster
|
||||
|
||||
* [Requirements](#requirements)
|
||||
* [Kubespray vs ...](docs/comparisons.md)
|
||||
* [Getting started](docs/getting-started.md)
|
||||
* [Ansible inventory and tags](docs/ansible.md)
|
||||
* [Integration with existing ansible repo](docs/integration.md)
|
||||
* [Deployment data variables](docs/vars.md)
|
||||
* [DNS stack](docs/dns-stack.md)
|
||||
* [HA mode](docs/ha-mode.md)
|
||||
* [Network plugins](#network-plugins)
|
||||
* [Vagrant install](docs/vagrant.md)
|
||||
* [CoreOS bootstrap](docs/coreos.md)
|
||||
* [Downloaded artifacts](docs/downloads.md)
|
||||
* [Cloud providers](docs/cloud.md)
|
||||
* [OpenStack](docs/openstack.md)
|
||||
* [AWS](docs/aws.md)
|
||||
* [Azure](docs/azure.md)
|
||||
* [vSphere](docs/vsphere.md)
|
||||
* [Large deployments](docs/large-deployments.md)
|
||||
* [Upgrades basics](docs/upgrades.md)
|
||||
* [Roadmap](docs/roadmap.md)
|
||||
# Update Ansible inventory file with inventory builder
|
||||
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
|
||||
CONFIG_FILE=inventory/mycluster/hosts.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]}
|
||||
|
||||
Supported Linux distributions
|
||||
===============
|
||||
# Review and change parameters under ``inventory/mycluster/group_vars``
|
||||
cat inventory/mycluster/group_vars/all.yml
|
||||
cat inventory/mycluster/group_vars/k8s-cluster.yml
|
||||
|
||||
* **Container Linux by CoreOS**
|
||||
* **Debian** Jessie
|
||||
* **Ubuntu** 16.04
|
||||
* **CentOS/RHEL** 7
|
||||
# Deploy Kubespray with Ansible Playbook
|
||||
ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml
|
||||
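For orientation, a minimal sketch of the inventory this produces, rendered here in Ansible's YAML inventory format; the builder actually writes an INI file, and the host names and group membership below are assumptions based on the three example IPs:

```yaml
# Illustrative YAML-format equivalent of inventory/mycluster/hosts.ini
all:
  hosts:
    node1:
      ansible_host: 10.10.1.3
      ip: 10.10.1.3
    node2:
      ansible_host: 10.10.1.4
      ip: 10.10.1.4
    node3:
      ansible_host: 10.10.1.5
      ip: 10.10.1.5
  children:
    kube-master:
      hosts:
        node1:
        node2:
    kube-node:
      hosts:
        node1:
        node2:
        node3:
    etcd:
      hosts:
        node1:
        node2:
        node3:
    k8s-cluster:
      children:
        kube-master:
        kube-node:
```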
|
||||
### Vagrant
|
||||
|
||||
# Simply running `vagrant up` (for tests purposes)
|
||||
vagrant up
|
||||
|
||||
Documents
|
||||
---------
|
||||
|
||||
- [Requirements](#requirements)
|
||||
- [Kubespray vs ...](docs/comparisons.md)
|
||||
- [Getting started](docs/getting-started.md)
|
||||
- [Ansible inventory and tags](docs/ansible.md)
|
||||
- [Integration with existing ansible repo](docs/integration.md)
|
||||
- [Deployment data variables](docs/vars.md)
|
||||
- [DNS stack](docs/dns-stack.md)
|
||||
- [HA mode](docs/ha-mode.md)
|
||||
- [Network plugins](#network-plugins)
|
||||
- [Vagrant install](docs/vagrant.md)
|
||||
- [CoreOS bootstrap](docs/coreos.md)
|
||||
- [Debian Jessie setup](docs/debian.md)
|
||||
- [openSUSE setup](docs/opensuse.md)
|
||||
- [Downloaded artifacts](docs/downloads.md)
|
||||
- [Cloud providers](docs/cloud.md)
|
||||
- [OpenStack](docs/openstack.md)
|
||||
- [AWS](docs/aws.md)
|
||||
- [Azure](docs/azure.md)
|
||||
- [vSphere](docs/vsphere.md)
|
||||
- [Large deployments](docs/large-deployments.md)
|
||||
- [Upgrades basics](docs/upgrades.md)
|
||||
- [Roadmap](docs/roadmap.md)
|
||||
|
||||
Supported Linux Distributions
|
||||
-----------------------------
|
||||
|
||||
- **Container Linux by CoreOS**
|
||||
- **Debian** Jessie, Stretch, Wheezy
|
||||
- **Ubuntu** 16.04
|
||||
- **CentOS/RHEL** 7
|
||||
- **Fedora/CentOS** Atomic
|
||||
- **openSUSE** Leap 42.3/Tumbleweed
|
||||
|
||||
Note: Upstart/SysV init based OS types are not supported.
|
||||
|
||||
Versions of supported components
|
||||
--------------------------------
|
||||
|
||||
|
||||
[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.7.3 <br>
|
||||
[etcd](https://github.com/coreos/etcd/releases) v3.2.4 <br>
|
||||
[flanneld](https://github.com/coreos/flannel/releases) v0.8.0 <br>
|
||||
[calico](https://docs.projectcalico.org/v2.5/releases/) v2.5.0 <br>
|
||||
[canal](https://github.com/projectcalico/canal) (given calico/flannel versions) <br>
|
||||
[weave](http://weave.works/) v2.0.1 <br>
|
||||
[docker](https://www.docker.com/) v1.13 (see note)<br>
|
||||
[rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)<br>
|
||||
- [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.9.5
|
||||
- [etcd](https://github.com/coreos/etcd/releases) v3.2.4
|
||||
- [flanneld](https://github.com/coreos/flannel/releases) v0.10.0
|
||||
- [calico](https://docs.projectcalico.org/v2.6/releases/) v2.6.8
|
||||
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
|
||||
- [cilium](https://github.com/cilium/cilium) v1.0.0-rc8
|
||||
- [contiv](https://github.com/contiv/install/releases) v1.1.7
|
||||
- [weave](http://weave.works/) v2.2.1
|
||||
- [docker](https://www.docker.com/) v17.03 (see note)
|
||||
- [rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)
|
||||
|
||||
Note: kubernetes doesn't support newer docker versions. Among other things kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster, look into e.g. the yum versionlock plugin or apt pinning.
|
||||
|
||||
@@ -70,52 +97,64 @@ plugins' related OS services. Also note, only one of the supported network
|
||||
plugins can be deployed for a given single cluster.
|
||||
|
||||
Requirements
|
||||
--------------
|
||||
------------
|
||||
|
||||
* **Ansible v2.3 (or newer) and python-netaddr is installed on the machine
|
||||
- **Ansible v2.4 (or newer) and python-netaddr are installed on the machine
|
||||
that will run Ansible commands**
|
||||
* **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
|
||||
* The target servers must have **access to the Internet** in order to pull docker images.
|
||||
* The target servers are configured to allow **IPv4 forwarding**.
|
||||
* **Your ssh key must be copied** to all the servers part of your inventory.
|
||||
* The **firewalls are not managed**: you'll need to implement your own rules the way you used to.
|
||||
- **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
|
||||
- The target servers must have **access to the Internet** in order to pull docker images.
|
||||
- The target servers are configured to allow **IPv4 forwarding**.
|
||||
- **Your ssh key must be copied** to all the servers part of your inventory.
|
||||
- The **firewalls are not managed**: you'll need to implement your own rules the way you used to.
|
||||
In order to avoid any issues during deployment, you should disable your firewall.
|
||||
- If kubespray is run from a non-root user account, a correct privilege escalation method
|
||||
should be configured on the target servers. Then the `ansible_become` flag
|
||||
or the command parameters `--become` or `-b` should be specified.
|
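For example, a minimal sketch of a deployment run from a non-root account, assuming a hypothetical `deploy` user and the sample inventory path used elsewhere in this repo:

```shell
# Connect as the unprivileged user and escalate with become (sudo must already be configured on the targets).
ansible-playbook -i inventory/sample/hosts.ini -u deploy --become --become-user=root cluster.yml
```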
||||
|
||||
Network Plugins
|
||||
---------------
|
||||
|
||||
## Network plugins
|
||||
You can choose between 6 network plugins. (default: `calico`, except Vagrant uses `flannel`)
|
||||
|
||||
You can choose between 4 network plugins. (default: `calico`, except Vagrant uses `flannel`)
|
||||
- [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.
|
||||
|
||||
* [**flannel**](docs/flannel.md): gre/vxlan (layer 2) networking.
|
||||
- [calico](docs/calico.md): bgp (layer 3) networking.
|
||||
|
||||
* [**calico**](docs/calico.md): bgp (layer 3) networking.
|
||||
- [canal](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.
|
||||
|
||||
* [**canal**](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.
|
||||
- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.
|
||||
|
||||
* [**weave**](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster. <br>
|
||||
- [contiv](docs/contiv.md): supports vlan, vxlan, bgp and Cisco SDN networking. This plugin is able to
|
||||
apply firewall policies, segregate containers into multiple networks and bridge pods onto physical networks.
|
||||
|
||||
- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
|
||||
(Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html)).
|
||||
|
||||
The choice is defined with the variable `kube_network_plugin`. There is also an
|
||||
option to leverage built-in cloud provider networking instead.
|
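For instance, a minimal sketch of overriding the plugin at deploy time; the same variable can instead be set in the inventory group vars (e.g. `inventory/sample/group_vars/k8s-cluster.yml`):

```shell
# Deploy with weave instead of the default calico by overriding kube_network_plugin.
ansible-playbook -i inventory/sample/hosts.ini --become cluster.yml -e kube_network_plugin=weave
```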
||||
See also [Network checker](docs/netcheck.md).
|
||||
|
||||
## Community docs and resources
|
||||
Community docs and resources
|
||||
----------------------------
|
||||
|
||||
- [kubernetes.io/docs/getting-started-guides/kubespray/](https://kubernetes.io/docs/getting-started-guides/kubespray/)
|
||||
- [kubespray, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr
|
||||
- [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty
|
||||
- [Deploy a Kubernetes Cluster with Kubespray (video)](https://www.youtube.com/watch?v=N9q51JgbWu8)
|
||||
|
||||
## Tools and projects on top of Kubespray
|
||||
- [Digital Rebar](https://github.com/digitalrebar/digitalrebar)
|
||||
- [Kubespray-cli](https://github.com/kubespray/kubespray-cli)
|
||||
Tools and projects on top of Kubespray
|
||||
--------------------------------------
|
||||
|
||||
- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/master/doc/integrations/ansible.rst)
|
||||
- [Fuel-ccp-installer](https://github.com/openstack/fuel-ccp-installer)
|
||||
- [Terraform Contrib](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/terraform)
|
||||
|
||||
## CI Tests
|
||||
CI Tests
|
||||
--------
|
||||
|
||||

|
||||
|
||||
[](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/pipelines) </br>
|
||||
[](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/pipelines)
|
||||
|
||||
CI/end-to-end tests sponsored by Google (GCE), DigitalOcean, [teuto.net](https://teuto.net/) (openstack).
|
||||
CI/end-to-end tests sponsored by Google (GCE)
|
||||
See the [test matrix](docs/test_cases.md) for details.
|
||||
|
||||
65
Vagrantfile
vendored
65
Vagrantfile
vendored
@@ -3,34 +3,47 @@
|
||||
|
||||
require 'fileutils'
|
||||
|
||||
Vagrant.require_version ">= 1.8.0"
|
||||
Vagrant.require_version ">= 2.0.0"
|
||||
|
||||
CONFIG = File.join(File.dirname(__FILE__), "vagrant/config.rb")
|
||||
|
||||
COREOS_URL_TEMPLATE = "https://storage.googleapis.com/%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant.json"
|
||||
|
||||
# Uniq disk UUID for libvirt
|
||||
DISK_UUID = Time.now.utc.to_i
|
||||
|
||||
SUPPORTED_OS = {
|
||||
"coreos-stable" => {box: "coreos-stable", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["stable"]},
|
||||
"coreos-alpha" => {box: "coreos-alpha", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["alpha"]},
|
||||
"coreos-beta" => {box: "coreos-beta", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
|
||||
"ubuntu" => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "vagrant"},
|
||||
"centos" => {box: "bento/centos-7.3", bootstrap_os: "centos", user: "vagrant"},
|
||||
"centos" => {box: "centos/7", bootstrap_os: "centos", user: "vagrant"},
|
||||
"opensuse" => {box: "opensuse/openSUSE-42.3-x86_64", bootstrap_os: "opensuse", use: "vagrant"},
|
||||
"opensuse-tumbleweed" => {box: "opensuse/openSUSE-Tumbleweed-x86_64", bootstrap_os: "opensuse", use: "vagrant"},
|
||||
}
|
||||
|
||||
# Defaults for config options defined in CONFIG
|
||||
$num_instances = 3
|
||||
$instance_name_prefix = "k8s"
|
||||
$vm_gui = false
|
||||
$vm_memory = 1536
|
||||
$vm_memory = 2048
|
||||
$vm_cpus = 1
|
||||
$shared_folders = {}
|
||||
$forwarded_ports = {}
|
||||
$subnet = "172.17.8"
|
||||
$os = "ubuntu"
|
||||
$network_plugin = "flannel"
|
||||
# The first three nodes are etcd servers
|
||||
$etcd_instances = $num_instances
|
||||
# The first two nodes are masters
|
||||
# The first two nodes are kube masters
|
||||
$kube_master_instances = $num_instances == 1 ? $num_instances : ($num_instances - 1)
|
||||
# All nodes are kube nodes
|
||||
$kube_node_instances = $num_instances
|
||||
# The following only works when using the libvirt provider
|
||||
$kube_node_instances_with_disks = false
|
||||
$kube_node_instances_with_disks_size = "20G"
|
||||
$kube_node_instances_with_disks_number = 2
|
||||
|
||||
$local_release_dir = "/vagrant/temp"
|
||||
|
||||
host_vars = {}
|
||||
@@ -39,12 +52,9 @@ if File.exist?(CONFIG)
|
||||
require CONFIG
|
||||
end
|
||||
|
||||
# All nodes are kube nodes
|
||||
$kube_node_instances = $num_instances
|
||||
|
||||
$box = SUPPORTED_OS[$os][:box]
|
||||
# if $inventory is not set, try to use example
|
||||
$inventory = File.join(File.dirname(__FILE__), "inventory") if ! $inventory
|
||||
$inventory = File.join(File.dirname(__FILE__), "inventory", "sample") if ! $inventory
|
||||
|
||||
# if $inventory has a hosts file use it, otherwise copy over vars etc
|
||||
# to where vagrant expects dynamic inventory to be.
|
||||
@@ -53,7 +63,7 @@ if ! File.exist?(File.join(File.dirname($inventory), "hosts"))
|
||||
"provisioners", "ansible")
|
||||
FileUtils.mkdir_p($vagrant_ansible) if ! File.exist?($vagrant_ansible)
|
||||
if ! File.exist?(File.join($vagrant_ansible,"inventory"))
|
||||
FileUtils.ln_s($inventory, $vagrant_ansible)
|
||||
FileUtils.ln_s($inventory, File.join($vagrant_ansible,"inventory"))
|
||||
end
|
||||
end
|
||||
|
||||
@@ -76,7 +86,6 @@ Vagrant.configure("2") do |config|
|
||||
if Vagrant.has_plugin?("vagrant-vbguest") then
|
||||
config.vbguest.auto_update = false
|
||||
end
|
||||
|
||||
(1..$num_instances).each do |i|
|
||||
config.vm.define vm_name = "%s-%02d" % [$instance_name_prefix, i] do |config|
|
||||
config.vm.hostname = vm_name
|
||||
@@ -102,8 +111,10 @@ Vagrant.configure("2") do |config|
|
||||
end
|
||||
end
|
||||
|
||||
config.vm.synced_folder ".", "/vagrant", type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
|
||||
|
||||
$shared_folders.each do |src, dst|
|
||||
config.vm.synced_folder src, dst
|
||||
config.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
|
||||
end
|
||||
|
||||
config.vm.provider :virtualbox do |vb|
|
||||
@@ -112,20 +123,36 @@ Vagrant.configure("2") do |config|
|
||||
vb.cpus = $vm_cpus
|
||||
end
|
||||
|
||||
config.vm.provider :libvirt do |lv|
|
||||
lv.memory = $vm_memory
|
||||
end
|
||||
|
||||
ip = "#{$subnet}.#{i+100}"
|
||||
host_vars[vm_name] = {
|
||||
"ip": ip,
|
||||
"flannel_interface": ip,
|
||||
"flannel_backend_type": "host-gw",
|
||||
"bootstrap_os": SUPPORTED_OS[$os][:bootstrap_os],
|
||||
"local_release_dir" => $local_release_dir,
|
||||
"download_run_once": "False",
|
||||
# Override the default 'calico' with flannel.
|
||||
# inventory/group_vars/k8s-cluster.yml
|
||||
"kube_network_plugin": "flannel",
|
||||
"bootstrap_os": SUPPORTED_OS[$os][:bootstrap_os]
|
||||
"kube_network_plugin": $network_plugin
|
||||
}
|
||||
|
||||
config.vm.network :private_network, ip: ip
|
||||
|
||||
# Disable swap for each vm
|
||||
config.vm.provision "shell", inline: "swapoff -a"
|
||||
|
||||
if $kube_node_instances_with_disks
|
||||
# Libvirt
|
||||
driverletters = ('a'..'z').to_a
|
||||
config.vm.provider :libvirt do |lv|
|
||||
# always make /dev/sd{a/b/c} so that CI can ensure that
|
||||
# virtualbox and libvirt will have the same devices to use for OSDs
|
||||
(1..$kube_node_instances_with_disks_number).each do |d|
|
||||
lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "ide"
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
# Only execute once the Ansible provisioner,
|
||||
# when all the machines are up and ready.
|
||||
if i == $num_instances
|
||||
@@ -134,10 +161,10 @@ Vagrant.configure("2") do |config|
|
||||
if File.exist?(File.join(File.dirname($inventory), "hosts"))
|
||||
ansible.inventory_path = $inventory
|
||||
end
|
||||
ansible.sudo = true
|
||||
ansible.become = true
|
||||
ansible.limit = "all"
|
||||
ansible.host_key_checking = false
|
||||
ansible.raw_arguments = ["--forks=#{$num_instances}"]
|
||||
ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache"]
|
||||
ansible.host_vars = host_vars
|
||||
#ansible.tags = ['download']
|
||||
ansible.groups = {
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
[ssh_connection]
|
||||
pipelining=True
|
||||
ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100
|
||||
#ssh_args = -F ./ssh-bastion.conf -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100
|
||||
ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
|
||||
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
|
||||
[defaults]
|
||||
host_key_checking=False
|
||||
@@ -11,4 +10,6 @@ fact_caching_connection = /tmp
|
||||
stdout_callback = skippy
|
||||
library = ./library
|
||||
callback_whitelist = profile_tasks
|
||||
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles
|
||||
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
|
||||
deprecation_warnings=False
|
||||
inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds
|
||||
|
||||
31
cluster.yml
31
cluster.yml
@@ -21,23 +21,31 @@
|
||||
vars:
|
||||
ansible_ssh_pipelining: true
|
||||
gather_facts: true
|
||||
pre_tasks:
|
||||
- name: gather facts from all instances
|
||||
setup:
|
||||
delegate_to: "{{item}}"
|
||||
delegate_facts: True
|
||||
with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]) }}"
|
||||
|
||||
- hosts: k8s-cluster:etcd:calico-rr
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: kernel-upgrade, tags: kernel-upgrade, when: kernel_upgrade is defined and kernel_upgrade }
|
||||
- { role: kubernetes/preinstall, tags: preinstall }
|
||||
- { role: docker, tags: docker }
|
||||
- role: rkt
|
||||
tags: rkt
|
||||
when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"
|
||||
- { role: download, tags: download, skip_downloads: false }
|
||||
environment: "{{proxy_env}}"
|
||||
|
||||
- hosts: etcd:k8s-cluster:vault
|
||||
- hosts: etcd:k8s-cluster:vault:calico-rr
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults, when: "cert_management == 'vault'" }
|
||||
- { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" }
|
||||
environment: "{{proxy_env}}"
|
||||
|
||||
- hosts: etcd
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
@@ -45,29 +53,33 @@
|
||||
- { role: kubespray-defaults}
|
||||
- { role: etcd, tags: etcd, etcd_cluster_setup: true }
|
||||
|
||||
- hosts: k8s-cluster
|
||||
- hosts: k8s-cluster:calico-rr
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: etcd, tags: etcd, etcd_cluster_setup: false }
|
||||
|
||||
- hosts: etcd:k8s-cluster:vault
|
||||
- hosts: etcd:k8s-cluster:vault:calico-rr
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: vault, tags: vault, when: "cert_management == 'vault'"}
|
||||
environment: "{{proxy_env}}"
|
||||
|
||||
- hosts: k8s-cluster
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: kubernetes/node, tags: node }
|
||||
environment: "{{proxy_env}}"
|
||||
|
||||
- hosts: kube-master
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: kubernetes/master, tags: master }
|
||||
- { role: kubernetes/client, tags: client }
|
||||
- { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
|
||||
|
||||
- hosts: k8s-cluster
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
@@ -76,14 +88,20 @@
|
||||
- { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }
|
||||
- { role: network_plugin, tags: network }
|
||||
|
||||
- hosts: kube-master
|
||||
- hosts: kube-master[0]
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
|
||||
|
||||
- hosts: kube-master
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: kubernetes-apps/network_plugin, tags: network }
|
||||
- { role: kubernetes-apps/policy_controller, tags: policy-controller }
|
||||
- { role: kubernetes/client, tags: client }
|
||||
- { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
|
||||
- { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
|
||||
|
||||
- hosts: calico-rr
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
@@ -97,6 +115,7 @@
|
||||
- { role: kubespray-defaults}
|
||||
- { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq }
|
||||
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf }
|
||||
environment: "{{proxy_env}}"
|
||||
|
||||
- hosts: kube-master[0]
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
|
||||
@@ -1,58 +1,3 @@
|
||||
## Kubernetes Community Code of Conduct
|
||||
# Kubernetes Community Code of Conduct
|
||||
|
||||
### Contributor Code of Conduct
|
||||
|
||||
As contributors and maintainers of this project, and in the interest of fostering
|
||||
an open and welcoming community, we pledge to respect all people who contribute
|
||||
through reporting issues, posting feature requests, updating documentation,
|
||||
submitting pull requests or patches, and other activities.
|
||||
|
||||
We are committed to making participation in this project a harassment-free experience for
|
||||
everyone, regardless of level of experience, gender, gender identity and expression,
|
||||
sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
|
||||
religion, or nationality.
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery
|
||||
* Personal attacks
|
||||
* Trolling or insulting/derogatory comments
|
||||
* Public or private harassment
|
||||
* Publishing other's private information, such as physical or electronic addresses,
|
||||
without explicit permission
|
||||
* Other unethical or unprofessional conduct.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions that are not
|
||||
aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers
|
||||
commit themselves to fairly and consistently applying these principles to every aspect
|
||||
of managing this project. Project maintainers who do not follow or enforce the Code of
|
||||
Conduct may be permanently removed from the project team.
|
||||
|
||||
This code of conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community.
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting a Kubernetes maintainer, Sarah Novotny <sarahnovotny@google.com>, and/or Dan Kohn <dan@linuxfoundation.org>.
|
||||
|
||||
This Code of Conduct is adapted from the Contributor Covenant
|
||||
(http://contributor-covenant.org), version 1.2.0, available at
|
||||
http://contributor-covenant.org/version/1/2/0/
|
||||
|
||||
### Kubernetes Events Code of Conduct
|
||||
|
||||
Kubernetes events are working conferences intended for professional networking and collaboration in the
|
||||
Kubernetes community. Attendees are expected to behave according to professional standards and in accordance
|
||||
with their employer's policies on appropriate workplace behavior.
|
||||
|
||||
While at Kubernetes events or related social networking opportunities, attendees should not engage in
|
||||
discriminatory or offensive speech or actions regarding gender, sexuality, race, or religion. Speakers should
|
||||
be especially aware of these concerns.
|
||||
|
||||
The Kubernetes team does not condone any statements by speakers contrary to these standards. The Kubernetes
|
||||
team reserves the right to deny entrance and/or eject from an event (without refund) any individual found to
|
||||
be engaging in discriminatory or offensive speech or actions.
|
||||
|
||||
Please bring any concerns to the immediate attention of Kubernetes event staff.
|
||||
|
||||
|
||||
[]()
|
||||
Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)
|
||||
|
||||
@@ -59,6 +59,6 @@ It will create the file ./inventory which can then be used with kubespray, e.g.:
|
||||
|
||||
```shell
|
||||
$ cd kubespray-root-dir
|
||||
$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/group_vars/all.yml" cluster.yml
|
||||
$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all.yml" cluster.yml
|
||||
```
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
|
||||
{% for vm in vm_ip_list %}
|
||||
{% if not use_bastion or vm.virtualMachinename == 'bastion' %}
|
||||
{% if not use_bastion or vm.virtualMachine.name == 'bastion' %}
|
||||
{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.publicIpAddresses[0].ipAddress }} ip={{ vm.virtualMachine.network.privateIpAddresses[0] }}
|
||||
{% else %}
|
||||
{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.privateIpAddresses[0] }}
|
||||
|
||||
@@ -54,7 +54,7 @@ def get_var_as_bool(name, default):
|
||||
|
||||
# Configurable as shell vars start
|
||||
|
||||
CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory.cfg")
|
||||
CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory/sample/hosts.ini")
|
||||
# Reconfigures cluster distribution at scale
|
||||
SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
|
||||
MASSIVE_SCALE_THRESHOLD = int(os.environ.get("MASSIVE_SCALE_THRESHOLD", 200))
|
||||
@@ -318,7 +318,7 @@ Delete a host by id: inventory.py -node1
|
||||
|
||||
Configurable env vars:
|
||||
DEBUG Enable debug printing. Default: True
|
||||
CONFIG_FILE File to write config to Default: ./inventory.cfg
|
||||
CONFIG_FILE File to write config to Default: ./inventory/sample/hosts.ini
|
||||
HOST_PREFIX Host prefix for generated hosts. Default: node
|
||||
SCALE_THRESHOLD Separate ETCD role if # of nodes >= 50
|
||||
MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
|
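A minimal usage sketch, assuming the builder lives at `contrib/inventory_builder/inventory.py` and is given the node IPs to lay out (hypothetical addresses):

```shell
# Write inventory/sample/hosts.ini for three nodes, using the env vars described above.
CONFIG_FILE=inventory/sample/hosts.ini HOST_PREFIX=node \
  python3 contrib/inventory_builder/inventory.py 10.10.1.3 10.10.1.4 10.10.1.5
```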
||||
|
||||
@@ -6,16 +6,16 @@ You can either deploy using Ansible on its own by supplying your own inventory f
|
||||
|
||||
In the same directory of this ReadMe file you should find a file named `inventory.example` which contains an example setup. Please note that, additionally to the Kubernetes nodes/masters, we define a set of machines for GlusterFS and we add them to the group `[gfs-cluster]`, which in turn is added to the larger `[network-storage]` group as a child group.
|
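Sketched with hypothetical host names and addresses (`inventory.example` remains the authoritative reference), the group layout looks like this:

```ini
[gfs-cluster]
gfs01 ansible_ssh_host=10.0.0.21
gfs02 ansible_ssh_host=10.0.0.22

[network-storage:children]
gfs-cluster
```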
||||
|
||||
Change that file to reflect your local setup (adding more machines or removing them and setting the appropriate IP addresses), and save it to `inventory/k8s_gfs_inventory`. Make sure that the settings in `inventory/group_vars/all.yml` make sense for your deployment. Then change to the kubespray root folder and execute (supposing that the machines are all using ubuntu):
|
||||
Change that file to reflect your local setup (adding more machines or removing them and setting the appropriate IP addresses), and save it to `inventory/sample/k8s_gfs_inventory`. Make sure that the settings in `inventory/sample/group_vars/all.yml` make sense for your deployment. Then change to the kubespray root folder and execute (supposing that the machines are all using ubuntu):
|
||||
|
||||
```
|
||||
ansible-playbook -b --become-user=root -i inventory/k8s_gfs_inventory --user=ubuntu ./cluster.yml
|
||||
ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./cluster.yml
|
||||
```
|
||||
|
||||
This will provision your Kubernetes cluster. Then, to provision and configure the GlusterFS cluster, from the same directory execute:
|
||||
|
||||
```
|
||||
ansible-playbook -b --become-user=root -i inventory/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml
|
||||
ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml
|
||||
```
|
||||
|
||||
If your machines are not using Ubuntu, you need to change the `--user=ubuntu` to the correct user. Alternatively, if your Kubernetes machines are using one OS and your GlusterFS a different one, you can instead specify the `ansible_ssh_user=<correct-user>` variable in the inventory file that you just created, for each machine/VM:
|
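A sketch of such per-host entries, with hypothetical hosts and users:

```ini
node1 ansible_ssh_host=10.0.0.11 ansible_ssh_user=ubuntu
gfs01 ansible_ssh_host=10.0.0.21 ansible_ssh_user=centos
```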
||||
|
||||
@@ -1,8 +1,17 @@
|
||||
---
|
||||
- hosts: gfs-cluster
|
||||
gather_facts: false
|
||||
vars:
|
||||
ansible_ssh_pipelining: false
|
||||
roles:
|
||||
- { role: bootstrap-os, tags: bootstrap-os}
|
||||
|
||||
- hosts: all
|
||||
gather_facts: true
|
||||
|
||||
- hosts: gfs-cluster
|
||||
vars:
|
||||
ansible_ssh_pipelining: true
|
||||
roles:
|
||||
- { role: glusterfs/server }
|
||||
|
||||
@@ -12,6 +21,5 @@
|
||||
|
||||
- hosts: kube-master[0]
|
||||
roles:
|
||||
- { role: kubernetes-pv/lib }
|
||||
- { role: kubernetes-pv }
|
||||
|
||||
|
||||
1
contrib/network-storage/glusterfs/roles/bootstrap-os
Symbolic link
1
contrib/network-storage/glusterfs/roles/bootstrap-os
Symbolic link
@@ -0,0 +1 @@
|
||||
../../../../roles/bootstrap-os
|
||||
@@ -4,6 +4,7 @@
|
||||
with_items:
|
||||
- { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
|
||||
- { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
|
||||
- { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json}
|
||||
register: gluster_pv
|
||||
when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
|
||||
|
||||
|
||||
@@ -0,0 +1,12 @@
|
||||
{
|
||||
"kind": "Service",
|
||||
"apiVersion": "v1",
|
||||
"metadata": {
|
||||
"name": "glusterfs"
|
||||
},
|
||||
"spec": {
|
||||
"ports": [
|
||||
{"port": 1}
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -1,60 +0,0 @@
|
||||
%global srcname ansible_kubespray
|
||||
|
||||
%{!?upstream_version: %global upstream_version %{version}%{?milestone}}
|
||||
|
||||
Name: ansible-kubespray
|
||||
Version: XXX
|
||||
Release: XXX
|
||||
Summary: Ansible modules for installing Kubernetes
|
||||
|
||||
Group: System Environment/Libraries
|
||||
License: ASL 2.0
|
||||
Vendor: Kubespray <smainklh@gmail.com>
|
||||
Url: https://github.com/kubernetes-incubator/kubespray
|
||||
Source0: https://github.com/kubernetes-incubator/kubespray/archive/%{upstream_version}.tar.gz
|
||||
|
||||
BuildArch: noarch
|
||||
BuildRequires: git
|
||||
BuildRequires: python2-devel
|
||||
BuildRequires: python-setuptools
|
||||
BuildRequires: python-d2to1
|
||||
BuildRequires: python-pbr
|
||||
|
||||
Requires: ansible
|
||||
Requires: python-jinja2
|
||||
Requires: python-netaddr
|
||||
|
||||
%description
|
||||
|
||||
Ansible-kubespray is a set of Ansible modules and playbooks for
|
||||
installing a Kubernetes cluster. If you have questions, join us
|
||||
on the https://slack.k8s.io, channel '#kubespray'.
|
||||
|
||||
%prep
|
||||
%autosetup -n %{name}-%{upstream_version} -S git
|
||||
|
||||
|
||||
%build
|
||||
%{__python2} setup.py build
|
||||
|
||||
|
||||
%install
|
||||
export PBR_VERSION=%{version}
|
||||
export SKIP_PIP_INSTALL=1
|
||||
%{__python2} setup.py install --skip-build --root %{buildroot}
|
||||
|
||||
|
||||
%files
|
||||
%doc README.md
|
||||
%doc inventory/inventory.example
|
||||
%config /etc/kubespray/ansible.cfg
|
||||
%config /etc/kubespray/inventory/group_vars/all.yml
|
||||
%config /etc/kubespray/inventory/group_vars/k8s-cluster.yml
|
||||
%license LICENSE
|
||||
%{python2_sitelib}/%{srcname}-%{version}-py%{python2_version}.egg-info
|
||||
/usr/local/share/kubespray/roles/
|
||||
/usr/local/share/kubespray/playbooks/
|
||||
%defattr(-,root,root)
|
||||
|
||||
|
||||
%changelog
|
||||
62
contrib/packaging/rpm/kubespray.spec
Normal file
62
contrib/packaging/rpm/kubespray.spec
Normal file
@@ -0,0 +1,62 @@
|
||||
%global srcname kubespray
|
||||
|
||||
%{!?upstream_version: %global upstream_version %{version}%{?milestone}}
|
||||
|
||||
Name: kubespray
|
||||
Version: master
|
||||
Release: %(git describe | sed -r 's/v(\S+-?)-(\S+)-(\S+)/\1.dev\2+\3/')
|
||||
Summary: Ansible modules for installing Kubernetes
|
||||
|
||||
Group: System Environment/Libraries
|
||||
License: ASL 2.0
|
||||
Url: https://github.com/kubernetes-incubator/kubespray
|
||||
Source0: https://github.com/kubernetes-incubator/kubespray/archive/%{upstream_version}.tar.gz#/%{name}-%{release}.tar.gz
|
||||
|
||||
BuildArch: noarch
|
||||
BuildRequires: git
|
||||
BuildRequires: python2
|
||||
BuildRequires: python2-devel
|
||||
BuildRequires: python2-setuptools
|
||||
BuildRequires: python-d2to1
|
||||
BuildRequires: python2-pbr
|
||||
|
||||
Requires: ansible >= 2.4.0
|
||||
Requires: python-jinja2 >= 2.10
|
||||
Requires: python-netaddr
|
||||
Requires: python-pbr
|
||||
|
||||
%description
|
||||
|
||||
Ansible-kubespray is a set of Ansible modules and playbooks for
|
||||
installing a Kubernetes cluster. If you have questions, join us
|
||||
on the https://slack.k8s.io, channel '#kubespray'.
|
||||
|
||||
%prep
|
||||
%autosetup -n %{name}-%{upstream_version} -S git
|
||||
|
||||
|
||||
%build
|
||||
export PBR_VERSION=%{release}
|
||||
%{__python2} setup.py build bdist_rpm
|
||||
|
||||
|
||||
%install
|
||||
export PBR_VERSION=%{release}
|
||||
export SKIP_PIP_INSTALL=1
|
||||
%{__python2} setup.py install --skip-build --root %{buildroot} bdist_rpm
|
||||
|
||||
|
||||
%files
|
||||
%doc %{_docdir}/%{name}/README.md
|
||||
%doc %{_docdir}/%{name}/inventory/sample/hosts.ini
|
||||
%config %{_sysconfdir}/%{name}/ansible.cfg
|
||||
%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/all.yml
|
||||
%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s-cluster.yml
|
||||
%license %{_docdir}/%{name}/LICENSE
|
||||
%{python2_sitelib}/%{srcname}-%{release}-py%{python2_version}.egg-info
|
||||
%{_datarootdir}/%{name}/roles/
|
||||
%{_datarootdir}/%{name}/playbooks/
|
||||
%defattr(-,root,root)
|
||||
|
||||
|
||||
%changelog
|
||||
@@ -24,7 +24,7 @@ export AWS_DEFAULT_REGION="zzz"
|
||||
```
|
||||
- Rename `contrib/terraform/aws/terraform.tfvars.example` to `terraform.tfvars`
|
||||
|
||||
- Update `contrib/terraform/aws/terraform.tfvars` with your data
|
||||
- Update `contrib/terraform/aws/terraform.tfvars` with your data. By default, the Terraform scripts use CoreOS as the base image. If you want to change this behaviour, see the note "Using a distribution other than CoreOS" below.
|
||||
- Allocate a new AWS Elastic IP. Use this for your `loadbalancer_apiserver_address` value (below)
|
||||
- Create an AWS EC2 SSH Key
|
||||
- Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply` depending if you exported your AWS credentials
|
||||
@@ -36,9 +36,10 @@ terraform apply -var-file=credentials.tfvars -var 'loadbalancer_apiserver_addres
|
||||
|
||||
- Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`
|
||||
|
||||
- Ansible will automatically generate an ssh config file for your bastion hosts. To make use of it, make sure you have a line in your `ansible.cfg` file that looks like the following:
|
||||
- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts with ssh through the bastion host, use the generated `ssh-bastion.conf`.
|
||||
Ansible automatically detects the bastion host and adjusts `ssh_args` accordingly:
|
||||
```commandline
|
||||
ssh_args = -F ./ssh-bastion.conf -o ControlMaster=auto -o ControlPersist=30m
|
||||
ssh -F ./ssh-bastion.conf user@$ip
|
||||
```
|
||||
|
||||
- Once the infrastructure is created, you can run the kubespray playbooks and supply inventory/hosts with the `-i` flag.
|
||||
@@ -47,6 +48,60 @@ Example (this one assumes you are using CoreOS)
|
||||
```commandline
|
||||
ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_ssh_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache
|
||||
```
|
||||
***Using a distribution other than CoreOS***
|
||||
If you want to use a distribution other than CoreOS, you can modify the search filters of the `data "aws_ami" "distro"` block in variables.tf.
|
||||
|
||||
For example, to use:
|
||||
- Debian Jessie, replace 'data "aws_ami" "distro"' in variables.tf with
|
||||
data "aws_ami" "distro" {
|
||||
most_recent = true
|
||||
|
||||
filter {
|
||||
name = "name"
|
||||
values = ["debian-jessie-amd64-hvm-*"]
|
||||
}
|
||||
|
||||
filter {
|
||||
name = "virtualization-type"
|
||||
values = ["hvm"]
|
||||
}
|
||||
|
||||
owners = ["379101102735"]
|
||||
}
|
||||
|
||||
- Ubuntu 16.04, replace 'data "aws_ami" "distro"' in variables.tf with
|
||||
data "aws_ami" "distro" {
|
||||
most_recent = true
|
||||
|
||||
filter {
|
||||
name = "name"
|
||||
values = ["ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-*"]
|
||||
}
|
||||
|
||||
filter {
|
||||
name = "virtualization-type"
|
||||
values = ["hvm"]
|
||||
}
|
||||
|
||||
owners = ["099720109477"]
|
||||
}
|
||||
|
||||
- Centos 7, replace 'data "aws_ami" "distro"' in variables.tf with
|
||||
data "aws_ami" "distro" {
|
||||
most_recent = true
|
||||
|
||||
filter {
|
||||
name = "name"
|
||||
values = ["dcos-centos7-*"]
|
||||
}
|
||||
|
||||
filter {
|
||||
name = "virtualization-type"
|
||||
values = ["hvm"]
|
||||
}
|
||||
|
||||
owners = ["688023202711"]
|
||||
}
|
||||
|
||||
**Troubleshooting**
|
||||
|
||||
|
||||
@@ -8,6 +8,8 @@ provider "aws" {
|
||||
region = "${var.AWS_DEFAULT_REGION}"
|
||||
}
|
||||
|
||||
data "aws_availability_zones" "available" {}
|
||||
|
||||
/*
|
||||
* Calling modules who create the initial AWS VPC / AWS ELB
|
||||
* and AWS IAM Roles for Kubernetes Deployment
|
||||
@@ -18,10 +20,10 @@ module "aws-vpc" {
|
||||
|
||||
aws_cluster_name = "${var.aws_cluster_name}"
|
||||
aws_vpc_cidr_block = "${var.aws_vpc_cidr_block}"
|
||||
aws_avail_zones="${var.aws_avail_zones}"
|
||||
|
||||
aws_avail_zones="${slice(data.aws_availability_zones.available.names,0,2)}"
|
||||
aws_cidr_subnets_private="${var.aws_cidr_subnets_private}"
|
||||
aws_cidr_subnets_public="${var.aws_cidr_subnets_public}"
|
||||
default_tags="${var.default_tags}"
|
||||
|
||||
}
|
||||
|
||||
@@ -31,10 +33,11 @@ module "aws-elb" {
|
||||
|
||||
aws_cluster_name="${var.aws_cluster_name}"
|
||||
aws_vpc_id="${module.aws-vpc.aws_vpc_id}"
|
||||
aws_avail_zones="${var.aws_avail_zones}"
|
||||
aws_avail_zones="${slice(data.aws_availability_zones.available.names,0,2)}"
|
||||
aws_subnet_ids_public="${module.aws-vpc.aws_subnet_ids_public}"
|
||||
aws_elb_api_port = "${var.aws_elb_api_port}"
|
||||
k8s_secure_api_port = "${var.k8s_secure_api_port}"
|
||||
default_tags="${var.default_tags}"
|
||||
|
||||
}
|
||||
|
||||
@@ -48,12 +51,13 @@ module "aws-iam" {
|
||||
* Create Bastion Instances in AWS
|
||||
*
|
||||
*/
|
||||
|
||||
resource "aws_instance" "bastion-server" {
|
||||
ami = "${var.aws_bastion_ami}"
|
||||
ami = "${data.aws_ami.distro.id}"
|
||||
instance_type = "${var.aws_bastion_size}"
|
||||
count = "${length(var.aws_cidr_subnets_public)}"
|
||||
associate_public_ip_address = true
|
||||
availability_zone = "${element(var.aws_avail_zones,count.index)}"
|
||||
availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
|
||||
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_public,count.index)}"
|
||||
|
||||
|
||||
@@ -61,11 +65,11 @@ resource "aws_instance" "bastion-server" {
|
||||
|
||||
key_name = "${var.AWS_SSH_KEY_NAME}"
|
||||
|
||||
tags {
|
||||
Name = "kubernetes-${var.aws_cluster_name}-bastion-${count.index}"
|
||||
Cluster = "${var.aws_cluster_name}"
|
||||
Role = "bastion-${var.aws_cluster_name}-${count.index}"
|
||||
}
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-bastion-${count.index}",
|
||||
"Cluster", "${var.aws_cluster_name}",
|
||||
"Role", "bastion-${var.aws_cluster_name}-${count.index}"
|
||||
))}"
|
||||
}
|
||||
|
||||
|
||||
@@ -75,13 +79,13 @@ resource "aws_instance" "bastion-server" {
|
||||
*/
|
||||
|
||||
resource "aws_instance" "k8s-master" {
|
||||
ami = "${var.aws_cluster_ami}"
|
||||
ami = "${data.aws_ami.distro.id}"
|
||||
instance_type = "${var.aws_kube_master_size}"
|
||||
|
||||
count = "${var.aws_kube_master_num}"
|
||||
|
||||
|
||||
availability_zone = "${element(var.aws_avail_zones,count.index)}"
|
||||
availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
|
||||
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
|
||||
|
||||
|
||||
@@ -92,11 +96,11 @@ resource "aws_instance" "k8s-master" {
|
||||
key_name = "${var.AWS_SSH_KEY_NAME}"
|
||||
|
||||
|
||||
tags {
|
||||
Name = "kubernetes-${var.aws_cluster_name}-master${count.index}"
|
||||
Cluster = "${var.aws_cluster_name}"
|
||||
Role = "master"
|
||||
}
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-master${count.index}",
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
|
||||
"Role", "master"
|
||||
))}"
|
||||
}
|
||||
|
||||
resource "aws_elb_attachment" "attach_master_nodes" {
|
||||
@@ -107,13 +111,13 @@ resource "aws_elb_attachment" "attach_master_nodes" {
|
||||
|
||||
|
||||
resource "aws_instance" "k8s-etcd" {
|
||||
ami = "${var.aws_cluster_ami}"
|
||||
ami = "${data.aws_ami.distro.id}"
|
||||
instance_type = "${var.aws_etcd_size}"
|
||||
|
||||
count = "${var.aws_etcd_num}"
|
||||
|
||||
|
||||
availability_zone = "${element(var.aws_avail_zones,count.index)}"
|
||||
availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
|
||||
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
|
||||
|
||||
|
||||
@@ -121,23 +125,22 @@ resource "aws_instance" "k8s-etcd" {
|
||||
|
||||
key_name = "${var.AWS_SSH_KEY_NAME}"
|
||||
|
||||
|
||||
tags {
|
||||
Name = "kubernetes-${var.aws_cluster_name}-etcd${count.index}"
|
||||
Cluster = "${var.aws_cluster_name}"
|
||||
Role = "etcd"
|
||||
}
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-etcd${count.index}",
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
|
||||
"Role", "etcd"
|
||||
))}"
|
||||
|
||||
}
|
||||
|
||||
|
||||
resource "aws_instance" "k8s-worker" {
|
||||
ami = "${var.aws_cluster_ami}"
|
||||
ami = "${data.aws_ami.distro.id}"
|
||||
instance_type = "${var.aws_kube_worker_size}"
|
||||
|
||||
count = "${var.aws_kube_worker_num}"
|
||||
|
||||
availability_zone = "${element(var.aws_avail_zones,count.index)}"
|
||||
availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
|
||||
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
|
||||
|
||||
vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]
|
||||
@@ -146,11 +149,11 @@ resource "aws_instance" "k8s-worker" {
|
||||
key_name = "${var.AWS_SSH_KEY_NAME}"
|
||||
|
||||
|
||||
tags {
|
||||
Name = "kubernetes-${var.aws_cluster_name}-worker${count.index}"
|
||||
Cluster = "${var.aws_cluster_name}"
|
||||
Role = "worker"
|
||||
}
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-worker${count.index}",
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
|
||||
"Role", "worker"
|
||||
))}"
|
||||
|
||||
}
|
||||
|
||||
@@ -164,16 +167,14 @@ data "template_file" "inventory" {
|
||||
template = "${file("${path.module}/templates/inventory.tpl")}"
|
||||
|
||||
vars {
|
||||
public_ip_address_bastion = "${join("\n",formatlist("bastion ansible_ssh_host=%s" , aws_instance.bastion-server.*.public_ip))}"
|
||||
connection_strings_master = "${join("\n",formatlist("%s ansible_ssh_host=%s",aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}"
|
||||
connection_strings_node = "${join("\n", formatlist("%s ansible_ssh_host=%s", aws_instance.k8s-worker.*.tags.Name, aws_instance.k8s-worker.*.private_ip))}"
|
||||
connection_strings_etcd = "${join("\n",formatlist("%s ansible_ssh_host=%s", aws_instance.k8s-etcd.*.tags.Name, aws_instance.k8s-etcd.*.private_ip))}"
|
||||
public_ip_address_bastion = "${join("\n",formatlist("bastion ansible_host=%s" , aws_instance.bastion-server.*.public_ip))}"
|
||||
connection_strings_master = "${join("\n",formatlist("%s ansible_host=%s",aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}"
|
||||
connection_strings_node = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.tags.Name, aws_instance.k8s-worker.*.private_ip))}"
|
||||
connection_strings_etcd = "${join("\n",formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.tags.Name, aws_instance.k8s-etcd.*.private_ip))}"
|
||||
list_master = "${join("\n",aws_instance.k8s-master.*.tags.Name)}"
|
||||
list_node = "${join("\n",aws_instance.k8s-worker.*.tags.Name)}"
|
||||
list_etcd = "${join("\n",aws_instance.k8s-etcd.*.tags.Name)}"
|
||||
elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
|
||||
elb_api_port = "loadbalancer_apiserver.port=${var.aws_elb_api_port}"
|
||||
loadbalancer_apiserver_address = "loadbalancer_apiserver.address=${var.loadbalancer_apiserver_address}"
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -2,9 +2,9 @@ resource "aws_security_group" "aws-elb" {
|
||||
name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
|
||||
vpc_id = "${var.aws_vpc_id}"
|
||||
|
||||
tags {
|
||||
Name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
|
||||
}
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
|
||||
))}"
|
||||
}
|
||||
|
||||
|
||||
@@ -43,7 +43,7 @@ resource "aws_elb" "aws-elb-api" {
|
||||
healthy_threshold = 2
|
||||
unhealthy_threshold = 2
|
||||
timeout = 3
|
||||
target = "HTTP:8080/"
|
||||
target = "TCP:${var.k8s_secure_api_port}"
|
||||
interval = 30
|
||||
}
|
||||
|
||||
@@ -52,7 +52,7 @@ resource "aws_elb" "aws-elb-api" {
|
||||
connection_draining = true
|
||||
connection_draining_timeout = 400
|
||||
|
||||
tags {
|
||||
Name = "kubernetes-${var.aws_cluster_name}-elb-api"
|
||||
}
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-elb-api"
|
||||
))}"
|
||||
}
|
||||
|
||||
@@ -26,3 +26,8 @@ variable "aws_subnet_ids_public" {
|
||||
description = "IDs of Public Subnets"
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "default_tags" {
|
||||
description = "Tags for all resources"
|
||||
type = "map"
|
||||
}
|
||||
|
||||
@@ -129,10 +129,10 @@ EOF
|
||||
|
||||
resource "aws_iam_instance_profile" "kube-master" {
|
||||
name = "kube_${var.aws_cluster_name}_master_profile"
|
||||
roles = ["${aws_iam_role.kube-master.name}"]
|
||||
role = "${aws_iam_role.kube-master.name}"
|
||||
}
|
||||
|
||||
resource "aws_iam_instance_profile" "kube-worker" {
|
||||
name = "kube_${var.aws_cluster_name}_node_profile"
|
||||
roles = ["${aws_iam_role.kube-worker.name}"]
|
||||
role = "${aws_iam_role.kube-worker.name}"
|
||||
}
|
||||
|
||||
@@ -6,9 +6,9 @@ resource "aws_vpc" "cluster-vpc" {
|
||||
enable_dns_support = true
|
||||
enable_dns_hostnames = true
|
||||
|
||||
tags {
|
||||
Name = "kubernetes-${var.aws_cluster_name}-vpc"
|
||||
}
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-vpc"
|
||||
))}"
|
||||
}
|
||||
|
||||
|
||||
@@ -18,13 +18,13 @@ resource "aws_eip" "cluster-nat-eip" {
|
||||
}
|
||||
|
||||
|
||||
|
||||
resource "aws_internet_gateway" "cluster-vpc-internetgw" {
|
||||
vpc_id = "${aws_vpc.cluster-vpc.id}"
|
||||
|
||||
tags {
|
||||
Name = "kubernetes-${var.aws_cluster_name}-internetgw"
|
||||
}
|
||||
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-internetgw"
|
||||
))}"
|
||||
}
|
||||
|
||||
resource "aws_subnet" "cluster-vpc-subnets-public" {
|
||||
@@ -33,9 +33,10 @@ resource "aws_subnet" "cluster-vpc-subnets-public" {
|
||||
availability_zone = "${element(var.aws_avail_zones, count.index)}"
|
||||
cidr_block = "${element(var.aws_cidr_subnets_public, count.index)}"
|
||||
|
||||
tags {
|
||||
Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public"
|
||||
}
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public",
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}", "member"
|
||||
))}"
|
||||
}
|
||||
|
||||
resource "aws_nat_gateway" "cluster-nat-gateway" {
|
||||
@@ -51,9 +52,9 @@ resource "aws_subnet" "cluster-vpc-subnets-private" {
|
||||
availability_zone = "${element(var.aws_avail_zones, count.index)}"
|
||||
cidr_block = "${element(var.aws_cidr_subnets_private, count.index)}"
|
||||
|
||||
tags {
|
||||
Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
|
||||
}
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
|
||||
))}"
|
||||
}
|
||||
|
||||
#Routing in VPC
|
||||
@@ -66,9 +67,10 @@ resource "aws_route_table" "kubernetes-public" {
|
||||
cidr_block = "0.0.0.0/0"
|
||||
gateway_id = "${aws_internet_gateway.cluster-vpc-internetgw.id}"
|
||||
}
|
||||
tags {
|
||||
Name = "kubernetes-${var.aws_cluster_name}-routetable-public"
|
||||
}
|
||||
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-routetable-public"
|
||||
))}"
|
||||
}
|
||||
|
||||
resource "aws_route_table" "kubernetes-private" {
|
||||
@@ -78,9 +80,11 @@ resource "aws_route_table" "kubernetes-private" {
|
||||
cidr_block = "0.0.0.0/0"
|
||||
nat_gateway_id = "${element(aws_nat_gateway.cluster-nat-gateway.*.id, count.index)}"
|
||||
}
|
||||
tags {
|
||||
Name = "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
|
||||
}
|
||||
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
|
||||
))}"
|
||||
|
||||
}
|
||||
|
||||
resource "aws_route_table_association" "kubernetes-public" {
|
||||
@@ -104,9 +108,9 @@ resource "aws_security_group" "kubernetes" {
|
||||
name = "kubernetes-${var.aws_cluster_name}-securitygroup"
|
||||
vpc_id = "${aws_vpc.cluster-vpc.id}"
|
||||
|
||||
tags {
|
||||
Name = "kubernetes-${var.aws_cluster_name}-securitygroup"
|
||||
}
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-securitygroup"
|
||||
))}"
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "allow-all-ingress" {
|
||||
|
||||
@@ -14,3 +14,8 @@ output "aws_security_group" {
|
||||
value = ["${aws_security_group.kubernetes.*.id}"]
|
||||
|
||||
}
|
||||
|
||||
output "default_tags" {
|
||||
value = "${var.default_tags}"
|
||||
|
||||
}
|
||||
|
||||
@@ -22,3 +22,8 @@ variable "aws_cidr_subnets_public" {
|
||||
description = "CIDR Blocks for public subnets in Availability zones"
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "default_tags" {
|
||||
description = "Default tags for all resources"
|
||||
type = "map"
|
||||
}
|
||||
|
||||
@@ -22,3 +22,7 @@ output "aws_elb_api_fqdn" {
|
||||
output "inventory" {
|
||||
value = "${data.template_file.inventory.rendered}"
|
||||
}
|
||||
|
||||
output "default_tags" {
|
||||
value = "${var.default_tags}"
|
||||
}
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
[all]
|
||||
${connection_strings_master}
|
||||
${connection_strings_node}
|
||||
${connection_strings_etcd}
|
||||
@@ -24,5 +25,3 @@ kube-master
|
||||
|
||||
[k8s-cluster:vars]
|
||||
${elb_api_fqdn}
|
||||
${elb_api_port}
|
||||
${loadbalancer_apiserver_address}
|
||||
|
||||
@@ -5,10 +5,8 @@ aws_cluster_name = "devtest"
|
||||
aws_vpc_cidr_block = "10.250.192.0/18"
|
||||
aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"]
|
||||
aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"]
|
||||
aws_avail_zones = ["us-west-2a","us-west-2b"]
|
||||
|
||||
#Bastion Host
|
||||
aws_bastion_ami = "ami-db56b9a3"
|
||||
aws_bastion_size = "t2.medium"
|
||||
|
||||
|
||||
@@ -23,10 +21,13 @@ aws_etcd_size = "t2.medium"
|
||||
aws_kube_worker_num = 4
|
||||
aws_kube_worker_size = "t2.medium"
|
||||
|
||||
aws_cluster_ami = "ami-db56b9a3"
|
||||
|
||||
#Settings AWS ELB
|
||||
|
||||
aws_elb_api_port = 6443
|
||||
k8s_secure_api_port = 6443
|
||||
kube_insecure_apiserver_address = "0.0.0.0"
|
||||
|
||||
default_tags = {
|
||||
# Env = "devtest"
|
||||
# Product = "kubernetes"
|
||||
}
|
||||
|
||||
@@ -20,6 +20,21 @@ variable "aws_cluster_name" {
|
||||
description = "Name of AWS Cluster"
|
||||
}
|
||||
|
||||
data "aws_ami" "distro" {
|
||||
most_recent = true
|
||||
|
||||
filter {
|
||||
name = "name"
|
||||
values = ["CoreOS-stable-*"]
|
||||
}
|
||||
|
||||
filter {
|
||||
name = "virtualization-type"
|
||||
values = ["hvm"]
|
||||
}
|
||||
|
||||
owners = ["595879546273"] #CoreOS
|
||||
}
|
||||
|
||||
//AWS VPC Variables
|
||||
|
||||
@@ -27,11 +42,6 @@ variable "aws_vpc_cidr_block" {
|
||||
description = "CIDR Block for VPC"
|
||||
}
|
||||
|
||||
variable "aws_avail_zones" {
|
||||
description = "Availability Zones Used"
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "aws_cidr_subnets_private" {
|
||||
description = "CIDR Blocks for private subnets in Availability Zones"
|
||||
type = "list"
|
||||
@@ -44,10 +54,6 @@ variable "aws_cidr_subnets_public" {
|
||||
|
||||
//AWS EC2 Settings
|
||||
|
||||
variable "aws_bastion_ami" {
|
||||
description = "AMI ID for Bastion Host in chosen AWS Region"
|
||||
}
|
||||
|
||||
variable "aws_bastion_size" {
|
||||
description = "EC2 Instance Size of Bastion Host"
|
||||
}
|
||||
@@ -81,9 +87,6 @@ variable "aws_kube_worker_size" {
|
||||
description = "Instance size of Kubernetes Worker Nodes"
|
||||
}
|
||||
|
||||
variable "aws_cluster_ami" {
|
||||
description = "AMI ID for Kubernetes Cluster"
|
||||
}
|
||||
/*
|
||||
* AWS ELB Settings
|
||||
*
|
||||
@@ -96,6 +99,7 @@ variable "k8s_secure_api_port" {
|
||||
description = "Secure Port of K8S API Server"
|
||||
}
|
||||
|
||||
variable "loadbalancer_apiserver_address" {
|
||||
description= "Bind Address for ELB of K8s API Server"
|
||||
variable "default_tags" {
|
||||
description = "Default tags for all resources"
|
||||
type = "map"
|
||||
}
|
||||
|
||||
4
contrib/terraform/openstack/.gitignore
vendored
Normal file
4
contrib/terraform/openstack/.gitignore
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
.terraform
|
||||
*.tfvars
|
||||
*.tfstate
|
||||
*.tfstate.backup
|
||||
@@ -5,55 +5,287 @@ Openstack.
|
||||
|
||||
## Status
|
||||
|
||||
This will install a Kubernetes cluster on an Openstack Cloud. It has been tested on an
|
||||
OpenStack Cloud provided by [BlueBox](https://www.blueboxcloud.com/) and on OpenStack at [EMBL-EBI's](http://www.ebi.ac.uk/) [EMBASSY Cloud](http://www.embassycloud.org/). This should work on most modern installs of OpenStack that support the basic
|
||||
services.
|
||||
This will install a Kubernetes cluster on an Openstack Cloud. It should work on
|
||||
most modern installs of OpenStack that support the basic services.
|
||||
|
||||
There are some assumptions made to try and ensure it will work on your openstack cluster.
|
||||
## Approach
|
||||
The terraform configuration inspects variables found in
|
||||
[variables.tf](variables.tf) to create resources in your OpenStack cluster.
|
||||
There is a [python script](../terraform.py) that reads the generated `.tfstate`
|
||||
file to generate a dynamic inventory that is consumed by the main ansible script
|
||||
to actually install kubernetes and stand up the cluster.
|
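A minimal sketch of that last step, assuming the per-cluster inventory directory created in the Prep section below (the linked `hosts` script acts as the dynamic inventory):

```shell
# After `terraform apply` has created the OpenStack resources, run kubespray
# against the Terraform-generated dynamic inventory.
ansible-playbook --become -i inventory/$CLUSTER/hosts cluster.yml
```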
||||
|
||||
* floating-ips are used for access, but you can have masters and nodes that don't use floating-ips if needed. You currently need at least one floating IP, which needs to be assigned to a master. If using more than one, at least one should be on a master for bastions to work properly.
|
||||
* you already have a suitable OS image in glance
|
||||
* you already have both an internal network and a floating-ip pool created
|
||||
* you have security-groups enabled
|
||||
### Networking
|
||||
The configuration includes creating a private subnet with a router to the
|
||||
external net. It will allocate floating IPs from a pool and assign them to the
|
||||
hosts where that makes sense. You have the option of creating bastion hosts
|
||||
inside the private subnet to access the nodes there. Alternatively, a node with
|
||||
a floating IP can be used as a jump host to nodes without.
|
||||
|
||||
### Kubernetes Nodes
|
||||
You can create many different kubernetes topologies by setting the number of
|
||||
different classes of hosts. For each class there are options for allocating
|
||||
floating IP addresses or not.
|
||||
- Master nodes with etcd
|
||||
- Master nodes without etcd
|
||||
- Standalone etcd hosts
|
||||
- Kubernetes worker nodes
|
||||
|
||||
Note that the Ansible script will report an invalid configuration if you wind up
|
||||
with an even number of etcd instances, since that is not a supported configuration.
|
||||
|
||||
### GlusterFS
|
||||
The Terraform configuration supports provisioning of an optional GlusterFS
|
||||
shared file system based on a separate set of VMs. To enable this, you need to
|
||||
specify:
|
||||
- the number of Gluster hosts (minimum 2)
|
||||
- Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks
|
||||
- Other properties related to provisioning the hosts
|
||||
|
||||
Even if you are using Container Linux by CoreOS for your cluster, you will still
|
||||
need the GlusterFS VMs to be based on either Debian or RedHat based images.
|
||||
Container Linux by CoreOS cannot serve GlusterFS, but can connect to it through
|
||||
binaries available on hyperkube v1.4.3_coreos.0 or higher.
|
||||
|
||||
## Requirements
|
||||
|
||||
- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
|
||||
- [Install Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html)
|
||||
- you already have a suitable OS image in Glance
|
||||
- you already have a floating IP pool created
|
||||
- you have security groups enabled
|
||||
- you have a pair of keys generated that can be used to secure the new hosts
|
||||
|
||||
## Module Architecture
|
||||
The configuration is divided into three modules:
|
||||
- Network
|
||||
- IPs
|
||||
- Compute
|
||||
|
||||
The main reason for splitting the configuration up in this way is to easily
|
||||
accommodate situations where floating IPs are limited by a quota or if you have
|
||||
any external references to the floating IP (e.g. DNS) that would otherwise have
|
||||
to be updated.
|
||||
|
||||
You can force your existing IPs by modifying the compute variables in
|
||||
`kubespray.tf` as follows:
|
||||
|
||||
```
|
||||
k8s_master_fips = ["151.101.129.67"]
|
||||
k8s_node_fips = ["151.101.129.68"]
|
||||
```
|
||||
|
||||
## Terraform
|
||||
Terraform will be used to provision all of the OpenStack resources with base software as appropriate.
|
||||
|
||||
Terraform will be used to provision all of the OpenStack resources. It is also used to deploy and provision the software
|
||||
requirements.
|
||||
### Configuration
|
||||
|
||||
### Prep
|
||||
#### Inventory files
|
||||
|
||||
#### OpenStack
|
||||
Create an inventory directory for your cluster by copying the existing sample and linking the `hosts` script (used to build the inventory based on Terraform state):
|
||||
|
||||
Ensure your OpenStack **Identity v2** credentials are loaded in environment variables. This can be done by downloading a credentials .rc file from your OpenStack dashboard and sourcing it:
|
||||
|
||||
```
|
||||
$ source ~/.stackrc
|
||||
```ShellSession
|
||||
$ cp -LRp contrib/terraform/openstack/sample-inventory inventory/$CLUSTER
|
||||
$ cd inventory/$CLUSTER
|
||||
$ ln -s ../../contrib/terraform/openstack/hosts
|
||||
```
|
||||
|
||||
> You must set the **OS_REGION_NAME** and **OS_TENANT_ID** environment variables, which are not required by the openstack CLI
|
||||
This will be the base for subsequent Terraform commands.
|
||||
|
||||
You will need two networks before installing, an internal network and
|
||||
an external (floating IP Pool) network. The internal network can be shared, as
|
||||
we use security groups to provide network segregation. Due to the many
|
||||
differences between OpenStack installs, Terraform does not attempt to create
|
||||
these for you.
|
||||
#### OpenStack access and credentials
|
||||
|
||||
By default Terraform will expect that your networks are called `internal` and
|
||||
`external`. You can change this by altering the Terraform variables `network_name` and `floatingip_pool`. This can be done in a new variables file or through environment variables.
|
||||
No provider variables are hardcoded inside `variables.tf` because Terraform
|
||||
supports various authentication methods for OpenStack: the older script and
|
||||
environment method (using `openrc`) as well as a newer declarative method, and
|
||||
different OpenStack environments may support Identity API version 2 or 3.
|
||||
|
||||
A full list of variables you can change can be found at [variables.tf](variables.tf).
|
||||
These are examples and may vary depending on your OpenStack cloud provider,
|
||||
for an exhaustive list on how to authenticate on OpenStack with Terraform
|
||||
please read the [OpenStack provider documentation](https://www.terraform.io/docs/providers/openstack/).
|
||||
|
||||
All OpenStack resources will use the Terraform variable `cluster_name` (
|
||||
default `example`) in their name to make it easier to track. For example the
|
||||
first compute resource will be named `example-kubernetes-1`.
|
||||
##### Declarative method (recommended)
|
||||
|
||||
#### Terraform
|
||||
The recommended authentication method is to describe credentials in a YAML file
`clouds.yaml`, which can be stored in:

* the current directory
* `~/.config/openstack`
* `/etc/openstack`

`clouds.yaml`:

```
clouds:
  mycloud:
    auth:
      auth_url: https://openstack:5000/v3
      username: "username"
      project_name: "projectname"
      project_id: projectid
      user_domain_name: "Default"
      password: "password"
    region_name: "RegionOne"
    interface: "public"
    identity_api_version: 3
```

If you have multiple clouds defined in your `clouds.yaml` file, you can choose
the one you want to use with the environment variable `OS_CLOUD`:

```
export OS_CLOUD=mycloud
```

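To check that the file is picked up, you can issue a token with the `openstack`
CLI (a quick sketch; assumes the CLI is installed):

```ShellSession
$ OS_CLOUD=mycloud openstack token issue
```
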
##### Openrc method (deprecated)

When using classic environment variables, Terraform uses the default `OS_*`
environment variables. A script suitable for your environment may be available
from Horizon under *Project* -> *Compute* -> *Access & Security* -> *API Access*.

> Note: make sure the **OS_REGION_NAME** and **OS_TENANT_ID** (or
> **OS_PROJECT_ID**) environment variables are set, even though they are not
> always required by the openstack CLI.

With identity v2:

```
source openrc

env | grep OS

OS_AUTH_URL=https://openstack:5000/v2.0
OS_PROJECT_ID=projectid
OS_PROJECT_NAME=projectname
OS_USERNAME=username
OS_PASSWORD=password
OS_REGION_NAME=RegionOne
OS_INTERFACE=public
OS_IDENTITY_API_VERSION=2
```

With identity v3:

```
source openrc

env | grep OS

OS_AUTH_URL=https://openstack:5000/v3
OS_PROJECT_ID=projectid
OS_PROJECT_NAME=projectname
OS_PROJECT_DOMAIN_ID=default
OS_USERNAME=username
OS_PASSWORD=password
OS_REGION_NAME=RegionOne
OS_INTERFACE=public
OS_IDENTITY_API_VERSION=3
OS_USER_DOMAIN_NAME=Default
```

Terraform does not support a mix of DomainName and DomainID; choose one or the
other:

```
* provider.openstack: You must provide exactly one of DomainID or DomainName to authenticate by Username
```

```
unset OS_USER_DOMAIN_NAME
export OS_USER_DOMAIN_ID=default

or

unset OS_PROJECT_DOMAIN_ID
export OS_PROJECT_DOMAIN_NAME=Default
```

#### Cluster variables

The construction of the cluster is driven by values found in
[variables.tf](variables.tf).

For your cluster, edit `inventory/$CLUSTER/cluster.tf`.

|Variable | Description |
|---------|-------------|
|`cluster_name` | All OpenStack resources will use the Terraform variable `cluster_name` (default `example`) in their name to make it easier to track. For example the first compute resource will be named `example-kubernetes-1`. |
|`network_name` | The name to be given to the internal network that will be generated |
|`dns_nameservers`| An array of DNS name server names to be used by hosts in the internal subnet. |
|`floatingip_pool` | Name of the pool from which floating IPs will be allocated |
|`external_net` | UUID of the external network that will be routed to |
|`flavor_k8s_master`, `flavor_k8s_node`, `flavor_etcd`, `flavor_bastion`, `flavor_gfs_node` | Flavor depends on your OpenStack installation; you can get available flavor IDs through `nova flavor-list` |
|`image`, `image_gfs` | Name of the image to use in provisioning the compute resources. Should already be loaded into Glance. |
|`ssh_user`, `ssh_user_gfs` | The username to ssh into the image with. This usually depends on the image you have selected |
|`public_key_path` | Path on your local workstation to the public key file you wish to use in creating the key pairs |
|`number_of_k8s_masters`, `number_of_k8s_masters_no_floating_ip` | Number of nodes that serve as both master and etcd. These can be provisioned with or without floating IP addresses |
|`number_of_k8s_masters_no_etcd`, `number_of_k8s_masters_no_floating_ip_no_etcd` | Number of nodes that serve as just master with no etcd. These can be provisioned with or without floating IP addresses |
|`number_of_etcd` | Number of pure etcd nodes |
|`number_of_k8s_nodes`, `number_of_k8s_nodes_no_floating_ip` | Kubernetes worker nodes. These can be provisioned with or without floating IP addresses. |
|`number_of_bastions` | Number of bastion hosts to create. Scripts assume this is really just zero or one |
|`number_of_gfs_nodes_no_floating_ip` | Number of Gluster servers to provision. |
| `gfs_volume_size_in_gb` | Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks |

#### Terraform state files

In the cluster's inventory folder, the following files might be created (either by Terraform
or manually). To prevent you from pushing them accidentally, they are listed in a
`.gitignore` file in the `terraform/openstack` directory:

* `.terraform`
* `.tfvars`
* `.tfstate`
* `.tfstate.backup`

You can still add them to version control manually if you want to.

### Initialization

Before Terraform can operate on your cluster you need to install the required
plugins. This is accomplished as follows:

```ShellSession
$ cd inventory/$CLUSTER
$ terraform init ../../contrib/terraform/openstack
```

This should finish fairly quickly, telling you Terraform has successfully initialized and loaded the necessary modules.

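Optionally, you can preview the changes Terraform would make before applying
them, using the same arguments as the apply step below:

```ShellSession
$ terraform plan -var-file=cluster.tf ../../contrib/terraform/openstack
```
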
### Provisioning cluster

You can apply the Terraform configuration to your cluster with the following command
issued from your cluster's inventory directory (`inventory/$CLUSTER`):

```ShellSession
$ terraform apply -var-file=cluster.tf ../../contrib/terraform/openstack
```

If you chose to create a bastion host, or added masters or nodes without
floating IPs, this step will also create
`contrib/terraform/openstack/k8s-cluster.yml` with an ssh command for Ansible to
be able to access your machines, tunneling through the bastion's floating IP
address (or the first floating IP used). If you want to manually handle the ssh
tunneling to these machines, please delete or move that file. If you want to use
it, just leave it there, as Ansible will pick it up automatically.

### Destroying cluster

You can destroy your new cluster with the following command issued from the cluster's inventory directory:

```ShellSession
$ terraform destroy -var-file=cluster.tf ../../contrib/terraform/openstack
```

If you've started the Ansible run, it may also be a good idea to do some manual cleanup:

* remove the destroyed cluster's SSH keys from your `~/.ssh/known_hosts` file (see the sketch below)
* clean up any temporary cache files: `rm /tmp/$CLUSTER-*`

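A minimal cleanup sketch, where `MASTER_IP` is a placeholder for each floating
IP the destroyed cluster used:

```ShellSession
$ ssh-keygen -R MASTER_IP   # repeat for every destroyed node that had a floating IP
$ rm -f /tmp/$CLUSTER-*     # remove temporary cache files for this cluster
```
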
### Debugging

You can enable debugging output from Terraform by setting
`OS_DEBUG` to 1 and `TF_LOG` to `DEBUG` before running the Terraform command.

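For example, to debug a provisioning run (the same variables work for `plan`
and `destroy`):

```ShellSession
$ export OS_DEBUG=1
$ export TF_LOG=DEBUG
$ terraform apply -var-file=cluster.tf ../../contrib/terraform/openstack
```
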
### Terraform output

Terraform can output values that are useful for configuring Neutron/Octavia LBaaS or Cinder persistent volume provisioning as part of your Kubernetes deployment:

- `private_subnet_id`: the subnet where your instances are running; used for `openstack_lbaas_subnet_id`
- `floating_network_id`: the network_id where the floating IPs are provisioned; used for `openstack_lbaas_floating_network_id`

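You can read these values back at any time from the cluster's inventory
directory, for example:

```ShellSession
$ terraform output private_subnet_id
$ terraform output floating_network_id
```
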
## Ansible

### Node access

#### SSH

Ensure your local ssh-agent is running and your ssh key has been added. This
step is required by the terraform provisioner:

```ShellSession
$ eval $(ssh-agent -s)
$ ssh-add ~/.ssh/id_rsa
```

If you have deployed and destroyed a previous iteration of your cluster, you will need to clear out any stale keys from your SSH "known hosts" file (`~/.ssh/known_hosts`).

#### Bastion host

If you are not using a bastion host, but not all of your nodes have floating IPs, create a file `inventory/$CLUSTER/group_vars/no-floating.yml` with the following content. Use one of your nodes with a floating IP (this should have been output at the end of the Terraform step) and the appropriate user for that OS, or if you have another jump host, use that.

```
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@MASTER_IP"'
```

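To check that the jump host works before running Ansible, you can try a one-off
SSH command through it (a sketch; `USER`, `MASTER_IP` and `NODE_PRIVATE_IP` are
placeholders you substitute yourself):

```ShellSession
$ ssh -o ProxyCommand="ssh -W %h:%p -q USER@MASTER_IP" USER@NODE_PRIVATE_IP hostname
```
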
#### Test access

Make sure you can connect to the hosts. Note that Container Linux by CoreOS will have a state `FAILED` due to Python not being present. This is okay, because Python will be installed during bootstrapping, so long as the hosts are not `UNREACHABLE`.

```ShellSession
$ ansible -i inventory/$CLUSTER/hosts -m ping all
example-k8s_node-1 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
example-k8s-master-1 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
```

If it fails, try to connect manually via SSH. It could be something as simple as a stale host key.

#### Alternative: etcd inside masters

If you want to provision master or node VMs that don't use floating IPs, with etcd running inside the masters, set, for example, the following variables in your `cluster.tf` (or a separate `.tfvars` file):

```
number_of_k8s_masters = "1"
number_of_k8s_masters_no_floating_ip = "2"
number_of_k8s_nodes_no_floating_ip = "1"
number_of_k8s_nodes = "0"
```

This will provision one VM as master using a floating IP, two additional masters without floating IPs (these will only have private IPs inside your tenancy) and one VM as node, again without a floating IP.

#### Alternative: etcd on separate machines

If you want to provision master or node VMs that don't use floating IPs and where **etcd is on separate nodes from the Kubernetes masters**, set, for example:

```
number_of_etcd = "3"
number_of_k8s_masters = "0"
number_of_k8s_masters_no_etcd = "1"
number_of_k8s_masters_no_floating_ip = "0"
number_of_k8s_masters_no_floating_ip_no_etcd = "2"
number_of_k8s_nodes_no_floating_ip = "1"
number_of_k8s_nodes = "2"

flavor_k8s_node = "desired-flavor-id"
flavor_k8s_master = "desired-flavor-id"
flavor_etcd = "desired-flavor-id"
```

This will provision one master with a floating IP, two additional masters without floating IPs (private IPs only inside your tenancy), two nodes with floating IPs, one node without a floating IP and three VMs for etcd.

#### Alternative: add GlusterFS

Additionally, the Terraform-based installation supports provisioning a GlusterFS shared file system based on a separate set of VMs, running either a Debian or RedHat based image. To enable this, add the following variables:

```
# Flavor depends on your openstack installation, you can get available flavors through `nova flavor-list`
flavor_gfs_node = "af659280-5b8a-42b5-8865-a703775911da"
# This is the name of an image already available in your openstack installation.
image_gfs = "Ubuntu 15.10"
number_of_gfs_nodes_no_floating_ip = "3"
# This is the size of the non-ephemeral volumes to be attached to store the GlusterFS bricks.
gfs_volume_size_in_gb = "50"
# The user needed for the image chosen for GlusterFS.
ssh_user_gfs = "ubuntu"
```

If these variables are provided, a new Ansible group called `gfs-cluster` is created, and additional Ansible roles are executed for it during the provisioning step. If you are using Container Linux by CoreOS, these GlusterFS VMs still need to be either Debian or RedHat based; Container Linux by CoreOS cannot serve GlusterFS, but can connect to it through binaries available on hyperkube v1.4.3_coreos.0 or higher.

### Configure cluster variables

Edit `inventory/$CLUSTER/group_vars/all.yml`:
- Set variable **bootstrap_os** appropriately for your desired image:
```
# Valid bootstrap options (required): ubuntu, coreos, centos, none
bootstrap_os: coreos
```
- **bin_dir**:
```
# Directory where the binaries will be installed
# Default:
# bin_dir: /usr/local/bin
# For Container Linux by CoreOS:
bin_dir: /opt/bin
```
- and **cloud_provider**:
```
cloud_provider: openstack
```

Edit `inventory/$CLUSTER/group_vars/k8s-cluster.yml`:
- Set variable **kube_network_plugin** to your desired networking plugin.
  - **flannel** works out-of-the-box
  - **calico** requires [configuring OpenStack Neutron ports](/docs/openstack.md) to allow service and pod subnets
```
# Choose network plugin (calico, weave or flannel)
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
kube_network_plugin: flannel
```
- Set variable **resolvconf_mode**:
```
# Can be docker_dns, host_resolvconf or none
# Default:
# resolvconf_mode: docker_dns
# For Container Linux by CoreOS:
resolvconf_mode: host_resolvconf
```

### Deploy Kubernetes

```ShellSession
$ ansible-playbook --become -i inventory/$CLUSTER/hosts cluster.yml
```

This will take some time as there are many tasks to run.

## Kubernetes

### Set up kubectl

1. [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your workstation
2. Add a route to the internal IP of a master node (if needed):
```
sudo route add [master-internal-ip] gw [router-ip]
```
or
```
sudo route add -net [internal-subnet]/24 gw [router-ip]
```
3. List Kubernetes certificates & keys:
```
ssh [os-user]@[master-ip] sudo ls /etc/kubernetes/ssl/
```
4. Get `admin`'s certificates and keys:
```
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1-key.pem > admin-key.pem
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1.pem > admin.pem
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/ca.pem > ca.pem
```
5. Edit the OpenStack Neutron master's Security Group to allow TCP connections to port 6443
6. Configure kubectl:
```ShellSession
$ kubectl config set-cluster default-cluster --server=https://[master-internal-ip]:6443 \
    --certificate-authority=ca.pem

$ kubectl config set-credentials default-admin \
    --certificate-authority=ca.pem \
    --client-key=admin-key.pem \
    --client-certificate=admin.pem

$ kubectl config set-context default-system --cluster=default-cluster --user=default-admin
$ kubectl config use-context default-system
```
7. Check it:
```
kubectl version
```

If you are using floating IP addresses, you may get the following error, because
the API server certificate does not include the floating IP:

```
Unable to connect to the server: x509: certificate is valid for 10.0.0.6, 10.0.0.6, 10.233.0.1, 127.0.0.1, not 132.249.238.25
```

You can tell kubectl to ignore this condition by adding the
`--insecure-skip-tls-verify` option.

## GlusterFS

GlusterFS is not deployed by the standard `cluster.yml` playbook, see the
[GlusterFS playbook documentation](../../network-storage/glusterfs/README.md)
for instructions.

Basically you will install Gluster as:

```ShellSession
$ ansible-playbook --become -i inventory/$CLUSTER/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
```

## What's next

Try out your new Kubernetes cluster with the [Hello Kubernetes service](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster/).

@@ -1 +1 @@
|
||||
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@BASTION_ADDRESS"'
|
||||
ansible_ssh_common_args: "-o ProxyCommand='ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p -q USER@BASTION_ADDRESS {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}'"
|
||||
|
||||
@@ -1,226 +1,77 @@
|
||||
resource "openstack_networking_floatingip_v2" "k8s_master" {
|
||||
count = "${var.number_of_k8s_masters + var.number_of_k8s_masters_no_etcd}"
|
||||
pool = "${var.floatingip_pool}"
|
||||
module "network" {
|
||||
source = "modules/network"
|
||||
|
||||
external_net = "${var.external_net}"
|
||||
network_name = "${var.network_name}"
|
||||
cluster_name = "${var.cluster_name}"
|
||||
dns_nameservers = "${var.dns_nameservers}"
|
||||
}
|
||||
|
||||
resource "openstack_networking_floatingip_v2" "k8s_node" {
|
||||
count = "${var.number_of_k8s_nodes}"
|
||||
pool = "${var.floatingip_pool}"
|
||||
module "ips" {
|
||||
source = "modules/ips"
|
||||
|
||||
number_of_k8s_masters = "${var.number_of_k8s_masters}"
|
||||
number_of_k8s_masters_no_etcd = "${var.number_of_k8s_masters_no_etcd}"
|
||||
number_of_k8s_nodes = "${var.number_of_k8s_nodes}"
|
||||
floatingip_pool = "${var.floatingip_pool}"
|
||||
number_of_bastions = "${var.number_of_bastions}"
|
||||
external_net = "${var.external_net}"
|
||||
network_name = "${var.network_name}"
|
||||
router_id = "${module.network.router_id}"
|
||||
}
|
||||
|
||||
module "compute" {
|
||||
source = "modules/compute"
|
||||
|
||||
resource "openstack_compute_keypair_v2" "k8s" {
|
||||
name = "kubernetes-${var.cluster_name}"
|
||||
public_key = "${file(var.public_key_path)}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_secgroup_v2" "k8s_master" {
|
||||
name = "${var.cluster_name}-k8s-master"
|
||||
description = "${var.cluster_name} - Kubernetes Master"
|
||||
}
|
||||
|
||||
resource "openstack_compute_secgroup_v2" "k8s" {
|
||||
name = "${var.cluster_name}-k8s"
|
||||
description = "${var.cluster_name} - Kubernetes"
|
||||
rule {
|
||||
ip_protocol = "tcp"
|
||||
from_port = "22"
|
||||
to_port = "22"
|
||||
cidr = "0.0.0.0/0"
|
||||
}
|
||||
rule {
|
||||
ip_protocol = "icmp"
|
||||
from_port = "-1"
|
||||
to_port = "-1"
|
||||
cidr = "0.0.0.0/0"
|
||||
}
|
||||
rule {
|
||||
ip_protocol = "tcp"
|
||||
from_port = "1"
|
||||
to_port = "65535"
|
||||
self = true
|
||||
}
|
||||
rule {
|
||||
ip_protocol = "udp"
|
||||
from_port = "1"
|
||||
to_port = "65535"
|
||||
self = true
|
||||
}
|
||||
rule {
|
||||
ip_protocol = "icmp"
|
||||
from_port = "-1"
|
||||
to_port = "-1"
|
||||
self = true
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master" {
|
||||
name = "${var.cluster_name}-k8s-master-${count.index+1}"
|
||||
count = "${var.number_of_k8s_masters}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
floating_ip = "${element(openstack_networking_floatingip_v2.k8s_master.*.address, count.index)}"
|
||||
metadata = {
|
||||
cluster_name = "${var.cluster_name}"
|
||||
number_of_k8s_masters = "${var.number_of_k8s_masters}"
|
||||
number_of_k8s_masters_no_etcd = "${var.number_of_k8s_masters_no_etcd}"
|
||||
number_of_etcd = "${var.number_of_etcd}"
|
||||
number_of_k8s_masters_no_floating_ip = "${var.number_of_k8s_masters_no_floating_ip}"
|
||||
number_of_k8s_masters_no_floating_ip_no_etcd = "${var.number_of_k8s_masters_no_floating_ip_no_etcd}"
|
||||
number_of_k8s_nodes = "${var.number_of_k8s_nodes}"
|
||||
number_of_bastions = "${var.number_of_bastions}"
|
||||
number_of_k8s_nodes_no_floating_ip = "${var.number_of_k8s_nodes_no_floating_ip}"
|
||||
number_of_gfs_nodes_no_floating_ip = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||
gfs_volume_size_in_gb = "${var.gfs_volume_size_in_gb}"
|
||||
public_key_path = "${var.public_key_path}"
|
||||
image = "${var.image}"
|
||||
image_gfs = "${var.image_gfs}"
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster,vault"
|
||||
ssh_user_gfs = "${var.ssh_user_gfs}"
|
||||
flavor_k8s_master = "${var.flavor_k8s_master}"
|
||||
flavor_k8s_node = "${var.flavor_k8s_node}"
|
||||
flavor_etcd = "${var.flavor_etcd}"
|
||||
flavor_gfs_node = "${var.flavor_gfs_node}"
|
||||
network_name = "${var.network_name}"
|
||||
flavor_bastion = "${var.flavor_bastion}"
|
||||
k8s_master_fips = "${module.ips.k8s_master_fips}"
|
||||
k8s_node_fips = "${module.ips.k8s_node_fips}"
|
||||
bastion_fips = "${module.ips.bastion_fips}"
|
||||
|
||||
network_id = "${module.network.router_id}"
|
||||
}
|
||||
|
||||
output "private_subnet_id" {
|
||||
value = "${module.network.subnet_id}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
|
||||
name = "${var.cluster_name}-k8s-master-ne-${count.index+1}"
|
||||
count = "${var.number_of_k8s_masters_no_etcd}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
floating_ip = "${element(openstack_networking_floatingip_v2.k8s_master.*.address, count.index + var.number_of_k8s_masters)}"
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-master,kube-node,k8s-cluster,vault"
|
||||
output "floating_network_id" {
|
||||
value = "${var.external_net}"
|
||||
}
|
||||
|
||||
output "router_id" {
|
||||
value = "${module.network.router_id}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "etcd" {
|
||||
name = "${var.cluster_name}-etcd-${count.index+1}"
|
||||
count = "${var.number_of_etcd}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_etcd}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "etcd,vault,no-floating"
|
||||
}
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
|
||||
}
|
||||
output "k8s_master_fips" {
|
||||
value = "${module.ips.k8s_master_fips}"
|
||||
}
|
||||
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
||||
name = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
|
||||
count = "${var.number_of_k8s_masters_no_floating_ip}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster,vault,no-floating"
|
||||
}
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
|
||||
}
|
||||
output "k8s_node_fips" {
|
||||
value = "${module.ips.k8s_node_fips}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
|
||||
name = "${var.cluster_name}-k8s-master-ne-nf-${count.index+1}"
|
||||
count = "${var.number_of_k8s_masters_no_floating_ip_no_etcd}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
output "bastion_fips" {
|
||||
value = "${module.ips.bastion_fips}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-master,kube-node,k8s-cluster,vault,no-floating"
|
||||
}
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_node" {
|
||||
name = "${var.cluster_name}-k8s-node-${count.index+1}"
|
||||
count = "${var.number_of_k8s_nodes}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_node}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
floating_ip = "${element(openstack_networking_floatingip_v2.k8s_node.*.address, count.index)}"
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-node,k8s-cluster,vault"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
name = "${var.cluster_name}-k8s-node-nf-${count.index+1}"
|
||||
count = "${var.number_of_k8s_nodes_no_floating_ip}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_node}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-node,k8s-cluster,vault,no-floating"
|
||||
}
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
|
||||
name = "${var.cluster_name}-gfs-nephe-vol-${count.index+1}"
|
||||
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||
description = "Non-ephemeral volume for GlusterFS"
|
||||
size = "${var.gfs_volume_size_in_gb}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
||||
name = "${var.cluster_name}-gfs-node-nf-${count.index+1}"
|
||||
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||
image_name = "${var.image_gfs}"
|
||||
flavor_id = "${var.flavor_gfs_node}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user_gfs}"
|
||||
kubespray_groups = "gfs-cluster,network-storage"
|
||||
}
|
||||
volume {
|
||||
volume_id = "${element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index)}"
|
||||
}
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/gfs-cluster.yml"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
#output "msg" {
|
||||
# value = "Your hosts are ready to go!\nYour ssh hosts are: ${join(", ", openstack_networking_floatingip_v2.k8s_master.*.address )}"
|
||||
#}
|
||||
|
||||
contrib/terraform/openstack/modules/compute/main.tf (new file, 306 lines)
@@ -0,0 +1,306 @@
|
||||
resource "openstack_compute_keypair_v2" "k8s" {
|
||||
name = "kubernetes-${var.cluster_name}"
|
||||
public_key = "${chomp(file(var.public_key_path))}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_secgroup_v2" "k8s_master" {
|
||||
name = "${var.cluster_name}-k8s-master"
|
||||
description = "${var.cluster_name} - Kubernetes Master"
|
||||
|
||||
rule {
|
||||
ip_protocol = "tcp"
|
||||
from_port = "6443"
|
||||
to_port = "6443"
|
||||
cidr = "0.0.0.0/0"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_secgroup_v2" "bastion" {
|
||||
name = "${var.cluster_name}-bastion"
|
||||
description = "${var.cluster_name} - Bastion Server"
|
||||
|
||||
rule {
|
||||
ip_protocol = "tcp"
|
||||
from_port = "22"
|
||||
to_port = "22"
|
||||
cidr = "0.0.0.0/0"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_secgroup_v2" "k8s" {
|
||||
name = "${var.cluster_name}-k8s"
|
||||
description = "${var.cluster_name} - Kubernetes"
|
||||
|
||||
rule {
|
||||
ip_protocol = "icmp"
|
||||
from_port = "-1"
|
||||
to_port = "-1"
|
||||
cidr = "0.0.0.0/0"
|
||||
}
|
||||
|
||||
rule {
|
||||
ip_protocol = "tcp"
|
||||
from_port = "1"
|
||||
to_port = "65535"
|
||||
self = true
|
||||
}
|
||||
|
||||
rule {
|
||||
ip_protocol = "udp"
|
||||
from_port = "1"
|
||||
to_port = "65535"
|
||||
self = true
|
||||
}
|
||||
|
||||
rule {
|
||||
ip_protocol = "icmp"
|
||||
from_port = "-1"
|
||||
to_port = "-1"
|
||||
self = true
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "bastion" {
|
||||
name = "${var.cluster_name}-bastion-${count.index+1}"
|
||||
count = "${var.number_of_bastions}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_bastion}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
|
||||
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
|
||||
"${openstack_compute_secgroup_v2.bastion.name}",
|
||||
"default",
|
||||
]
|
||||
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "bastion"
|
||||
depends_on = "${var.network_id}"
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master" {
|
||||
name = "${var.cluster_name}-k8s-master-${count.index+1}"
|
||||
count = "${var.number_of_k8s_masters}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
|
||||
security_groups = ["${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_compute_secgroup_v2.bastion.name}",
|
||||
"${openstack_compute_secgroup_v2.k8s.name}",
|
||||
"default",
|
||||
]
|
||||
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "etcd,kube-master,k8s-cluster,vault"
|
||||
depends_on = "${var.network_id}"
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
|
||||
name = "${var.cluster_name}-k8s-master-ne-${count.index+1}"
|
||||
count = "${var.number_of_k8s_masters_no_etcd}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
|
||||
security_groups = ["${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_compute_secgroup_v2.k8s.name}",
|
||||
]
|
||||
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-master,k8s-cluster,vault"
|
||||
depends_on = "${var.network_id}"
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "etcd" {
|
||||
name = "${var.cluster_name}-etcd-${count.index+1}"
|
||||
count = "${var.number_of_etcd}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_etcd}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
|
||||
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}"]
|
||||
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "etcd,vault,no-floating"
|
||||
depends_on = "${var.network_id}"
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
||||
name = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
|
||||
count = "${var.number_of_k8s_masters_no_floating_ip}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
|
||||
security_groups = ["${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_compute_secgroup_v2.k8s.name}",
|
||||
"default",
|
||||
]
|
||||
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "etcd,kube-master,k8s-cluster,vault,no-floating"
|
||||
depends_on = "${var.network_id}"
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
|
||||
name = "${var.cluster_name}-k8s-master-ne-nf-${count.index+1}"
|
||||
count = "${var.number_of_k8s_masters_no_floating_ip_no_etcd}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
|
||||
security_groups = ["${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_compute_secgroup_v2.k8s.name}",
|
||||
]
|
||||
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-master,k8s-cluster,vault,no-floating"
|
||||
depends_on = "${var.network_id}"
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_node" {
|
||||
name = "${var.cluster_name}-k8s-node-${count.index+1}"
|
||||
count = "${var.number_of_k8s_nodes}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_node}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
|
||||
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
|
||||
"${openstack_compute_secgroup_v2.bastion.name}",
|
||||
"default",
|
||||
]
|
||||
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-node,k8s-cluster"
|
||||
depends_on = "${var.network_id}"
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
name = "${var.cluster_name}-k8s-node-nf-${count.index+1}"
|
||||
count = "${var.number_of_k8s_nodes_no_floating_ip}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_node}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
|
||||
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
|
||||
"default",
|
||||
]
|
||||
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-node,k8s-cluster,no-floating"
|
||||
depends_on = "${var.network_id}"
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "bastion" {
|
||||
count = "${var.number_of_bastions}"
|
||||
floating_ip = "${var.bastion_fips[count.index]}"
|
||||
instance_id = "${element(openstack_compute_instance_v2.bastion.*.id, count.index)}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "k8s_master" {
|
||||
count = "${var.number_of_k8s_masters}"
|
||||
instance_id = "${element(openstack_compute_instance_v2.k8s_master.*.id, count.index)}"
|
||||
floating_ip = "${var.k8s_master_fips[count.index]}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "k8s_node" {
|
||||
count = "${var.number_of_k8s_nodes}"
|
||||
floating_ip = "${var.k8s_node_fips[count.index]}"
|
||||
instance_id = "${element(openstack_compute_instance_v2.k8s_node.*.id, count.index)}"
|
||||
}
|
||||
|
||||
resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
|
||||
name = "${var.cluster_name}-glusterfs_volume-${count.index+1}"
|
||||
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||
description = "Non-ephemeral volume for GlusterFS"
|
||||
size = "${var.gfs_volume_size_in_gb}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
||||
name = "${var.cluster_name}-gfs-node-nf-${count.index+1}"
|
||||
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||
image_name = "${var.image_gfs}"
|
||||
flavor_id = "${var.flavor_gfs_node}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
|
||||
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
|
||||
"default",
|
||||
]
|
||||
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user_gfs}"
|
||||
kubespray_groups = "gfs-cluster,network-storage,no-floating"
|
||||
depends_on = "${var.network_id}"
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
resource "openstack_compute_volume_attach_v2" "glusterfs_volume" {
|
||||
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||
instance_id = "${element(openstack_compute_instance_v2.glusterfs_node_no_floating_ip.*.id, count.index)}"
|
||||
volume_id = "${element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index)}"
|
||||
}
|
||||
contrib/terraform/openstack/modules/compute/variables.tf (new file, 57 lines)
@@ -0,0 +1,57 @@
|
||||
variable "cluster_name" {}
|
||||
|
||||
variable "number_of_k8s_masters" {}
|
||||
|
||||
variable "number_of_k8s_masters_no_etcd" {}
|
||||
|
||||
variable "number_of_etcd" {}
|
||||
|
||||
variable "number_of_k8s_masters_no_floating_ip" {}
|
||||
|
||||
variable "number_of_k8s_masters_no_floating_ip_no_etcd" {}
|
||||
|
||||
variable "number_of_k8s_nodes" {}
|
||||
|
||||
variable "number_of_k8s_nodes_no_floating_ip" {}
|
||||
|
||||
variable "number_of_bastions" {}
|
||||
|
||||
variable "number_of_gfs_nodes_no_floating_ip" {}
|
||||
|
||||
variable "gfs_volume_size_in_gb" {}
|
||||
|
||||
variable "public_key_path" {}
|
||||
|
||||
variable "image" {}
|
||||
|
||||
variable "image_gfs" {}
|
||||
|
||||
variable "ssh_user" {}
|
||||
|
||||
variable "ssh_user_gfs" {}
|
||||
|
||||
variable "flavor_k8s_master" {}
|
||||
|
||||
variable "flavor_k8s_node" {}
|
||||
|
||||
variable "flavor_etcd" {}
|
||||
|
||||
variable "flavor_gfs_node" {}
|
||||
|
||||
variable "network_name" {}
|
||||
|
||||
variable "flavor_bastion" {}
|
||||
|
||||
variable "network_id" {}
|
||||
|
||||
variable "k8s_master_fips" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "k8s_node_fips" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "bastion_fips" {
|
||||
type = "list"
|
||||
}
|
||||
contrib/terraform/openstack/modules/ips/main.tf (new file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
resource "null_resource" "dummy_dependency" {
|
||||
triggers {
|
||||
dependency_id = "${var.router_id}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_floatingip_v2" "k8s_master" {
|
||||
count = "${var.number_of_k8s_masters}"
|
||||
pool = "${var.floatingip_pool}"
|
||||
depends_on = ["null_resource.dummy_dependency"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_floatingip_v2" "k8s_node" {
|
||||
count = "${var.number_of_k8s_nodes}"
|
||||
pool = "${var.floatingip_pool}"
|
||||
depends_on = ["null_resource.dummy_dependency"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_floatingip_v2" "bastion" {
|
||||
count = "${var.number_of_bastions}"
|
||||
pool = "${var.floatingip_pool}"
|
||||
depends_on = ["null_resource.dummy_dependency"]
|
||||
}
|
||||
contrib/terraform/openstack/modules/ips/outputs.tf (new file, 11 lines)
@@ -0,0 +1,11 @@
|
||||
output "k8s_master_fips" {
|
||||
value = ["${openstack_networking_floatingip_v2.k8s_master.*.address}"]
|
||||
}
|
||||
|
||||
output "k8s_node_fips" {
|
||||
value = ["${openstack_networking_floatingip_v2.k8s_node.*.address}"]
|
||||
}
|
||||
|
||||
output "bastion_fips" {
|
||||
value = ["${openstack_networking_floatingip_v2.bastion.*.address}"]
|
||||
}
|
||||
contrib/terraform/openstack/modules/ips/variables.tf (new file, 15 lines)
@@ -0,0 +1,15 @@
|
||||
variable "number_of_k8s_masters" {}
|
||||
|
||||
variable "number_of_k8s_masters_no_etcd" {}
|
||||
|
||||
variable "number_of_k8s_nodes" {}
|
||||
|
||||
variable "floatingip_pool" {}
|
||||
|
||||
variable "number_of_bastions" {}
|
||||
|
||||
variable "external_net" {}
|
||||
|
||||
variable "network_name" {}
|
||||
|
||||
variable "router_id" {}
|
||||
contrib/terraform/openstack/modules/network/main.tf (new file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
resource "openstack_networking_router_v2" "k8s" {
|
||||
name = "${var.cluster_name}-router"
|
||||
admin_state_up = "true"
|
||||
external_network_id = "${var.external_net}"
|
||||
}
|
||||
|
||||
resource "openstack_networking_network_v2" "k8s" {
|
||||
name = "${var.network_name}"
|
||||
admin_state_up = "true"
|
||||
}
|
||||
|
||||
resource "openstack_networking_subnet_v2" "k8s" {
|
||||
name = "${var.cluster_name}-internal-network"
|
||||
network_id = "${openstack_networking_network_v2.k8s.id}"
|
||||
cidr = "10.0.0.0/24"
|
||||
ip_version = 4
|
||||
dns_nameservers = "${var.dns_nameservers}"
|
||||
}
|
||||
|
||||
resource "openstack_networking_router_interface_v2" "k8s" {
|
||||
router_id = "${openstack_networking_router_v2.k8s.id}"
|
||||
subnet_id = "${openstack_networking_subnet_v2.k8s.id}"
|
||||
}
|
||||
contrib/terraform/openstack/modules/network/outputs.tf (new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
output "router_id" {
|
||||
value = "${openstack_networking_router_interface_v2.k8s.id}"
|
||||
}
|
||||
|
||||
output "subnet_id" {
|
||||
value = "${openstack_networking_subnet_v2.k8s.id}"
|
||||
}
|
||||
contrib/terraform/openstack/modules/network/variables.tf (new file, 9 lines)
@@ -0,0 +1,9 @@
|
||||
variable "external_net" {}
|
||||
|
||||
variable "network_name" {}
|
||||
|
||||
variable "cluster_name" {}
|
||||
|
||||
variable "dns_nameservers" {
|
||||
type = "list"
|
||||
}
|
||||
contrib/terraform/openstack/sample-inventory/cluster.tf (new file, 45 lines)
@@ -0,0 +1,45 @@
|
||||
# your Kubernetes cluster name here
|
||||
cluster_name = "i-didnt-read-the-docs"
|
||||
|
||||
# SSH key to use for access to nodes
|
||||
public_key_path = "~/.ssh/id_rsa.pub"
|
||||
|
||||
# image to use for bastion, masters, standalone etcd instances, and nodes
|
||||
image = "<image name>"
|
||||
# user on the node (ex. core on Container Linux, ubuntu on Ubuntu, etc.)
|
||||
ssh_user = "<cloud-provisioned user>"
|
||||
|
||||
# 0|1 bastion nodes
|
||||
number_of_bastions = 0
|
||||
#flavor_bastion = "<UUID>"
|
||||
|
||||
# standalone etcds
|
||||
number_of_etcd = 0
|
||||
|
||||
# masters
|
||||
number_of_k8s_masters = 1
|
||||
number_of_k8s_masters_no_etcd = 0
|
||||
number_of_k8s_masters_no_floating_ip = 0
|
||||
number_of_k8s_masters_no_floating_ip_no_etcd = 0
|
||||
flavor_k8s_master = "<UUID>"
|
||||
|
||||
# nodes
|
||||
number_of_k8s_nodes = 2
|
||||
number_of_k8s_nodes_no_floating_ip = 4
|
||||
#flavor_k8s_node = "<UUID>"
|
||||
|
||||
# GlusterFS
|
||||
# either 0 or more than one
|
||||
#number_of_gfs_nodes_no_floating_ip = 0
|
||||
#gfs_volume_size_in_gb = 150
|
||||
# Container Linux does not support GlusterFS
|
||||
#image_gfs = "<image name>"
|
||||
# May be different from other nodes
|
||||
#ssh_user_gfs = "ubuntu"
|
||||
#flavor_gfs_node = "<UUID>"
|
||||
|
||||
# networking
|
||||
network_name = "<network>"
|
||||
external_net = "<UUID>"
|
||||
floatingip_pool = "<pool>"
|
||||
|
||||
contrib/terraform/openstack/sample-inventory/group_vars (new symbolic link)
@@ -0,0 +1 @@
|
||||
../../../../inventory/sample/group_vars
|
||||
@@ -2,6 +2,10 @@ variable "cluster_name" {
|
||||
default = "example"
|
||||
}
|
||||
|
||||
variable "number_of_bastions" {
|
||||
default = 1
|
||||
}
|
||||
|
||||
variable "number_of_k8s_masters" {
|
||||
default = 2
|
||||
}
|
||||
@@ -63,19 +67,28 @@ variable "ssh_user_gfs" {
|
||||
default = "ubuntu"
|
||||
}
|
||||
|
||||
variable "flavor_bastion" {
|
||||
description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
|
||||
default = 3
|
||||
}
|
||||
|
||||
variable "flavor_k8s_master" {
|
||||
description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
|
||||
default = 3
|
||||
}
|
||||
|
||||
variable "flavor_k8s_node" {
|
||||
description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
|
||||
default = 3
|
||||
}
|
||||
|
||||
variable "flavor_etcd" {
|
||||
description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
|
||||
default = 3
|
||||
}
|
||||
|
||||
variable "flavor_gfs_node" {
|
||||
description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
|
||||
default = 3
|
||||
}
|
||||
|
||||
@@ -84,23 +97,17 @@ variable "network_name" {
|
||||
default = "internal"
|
||||
}
|
||||
|
||||
variable "dns_nameservers" {
|
||||
description = "An array of DNS name server names used by hosts in this subnet."
|
||||
type = "list"
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "floatingip_pool" {
|
||||
description = "name of the floating ip pool to use"
|
||||
default = "external"
|
||||
}
|
||||
|
||||
variable "username" {
|
||||
description = "Your openstack username"
|
||||
}
|
||||
|
||||
variable "password" {
|
||||
description = "Your openstack password"
|
||||
}
|
||||
|
||||
variable "tenant" {
|
||||
description = "Your openstack tenant/project"
|
||||
}
|
||||
|
||||
variable "auth_url" {
|
||||
description = "Your openstack auth URL"
|
||||
variable "external_net" {
|
||||
description = "uuid of the external/public network"
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env python
|
||||
#!/usr/bin/env python2
|
||||
#
|
||||
# Copyright 2015 Cisco Systems, Inc.
|
||||
#
|
||||
@@ -70,6 +70,14 @@ def iterhosts(resources):
|
||||
yield parser(resource, module_name)
|
||||
|
||||
|
||||
def iterips(resources):
|
||||
'''yield ip tuples of (instance_id, ip)'''
|
||||
for module_name, key, resource in resources:
|
||||
resource_type, name = key.split('.', 1)
|
||||
if resource_type == 'openstack_compute_floatingip_associate_v2':
|
||||
yield openstack_floating_ips(resource)
|
||||
|
||||
|
||||
def parses(prefix):
|
||||
def inner(func):
|
||||
PARSERS[prefix] = func
|
||||
@@ -298,6 +306,17 @@ def softlayer_host(resource, module_name):
|
||||
|
||||
return name, attrs, groups
|
||||
|
||||
def openstack_floating_ips(resource):
|
||||
raw_attrs = resource['primary']['attributes']
|
||||
attrs = {
|
||||
'ip': raw_attrs['floating_ip'],
|
||||
'instance_id': raw_attrs['instance_id'],
|
||||
}
|
||||
return attrs
|
||||
|
||||
def openstack_floating_ips(resource):
|
||||
raw_attrs = resource['primary']['attributes']
|
||||
return raw_attrs['instance_id'], raw_attrs['floating_ip']
|
||||
|
||||
@parses('openstack_compute_instance_v2')
|
||||
@calculate_mantl_vars
|
||||
@@ -343,6 +362,8 @@ def openstack_host(resource, module_name):
|
||||
except (KeyError, ValueError):
|
||||
attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})
|
||||
|
||||
# Handling of floating IPs has changed: https://github.com/terraform-providers/terraform-provider-openstack/blob/master/CHANGELOG.md#010-june-21-2017
|
||||
|
||||
# attrs specific to Ansible
|
||||
if 'metadata.ssh_user' in raw_attrs:
|
||||
attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user']
|
||||
@@ -656,6 +677,19 @@ def clc_server(resource, module_name):
|
||||
return name, attrs, groups
|
||||
|
||||
|
||||
def iter_host_ips(hosts, ips):
|
||||
'''Update hosts that have an entry in the floating IP list'''
|
||||
for host in hosts:
|
||||
host_id = host[1]['id']
|
||||
if host_id in ips:
|
||||
ip = ips[host_id]
|
||||
host[1].update({
|
||||
'access_ip_v4': ip,
|
||||
'public_ipv4': ip,
|
||||
'ansible_ssh_host': ip,
|
||||
})
|
||||
yield host
|
||||
|
||||
|
||||
## QUERY TYPES
|
||||
def query_host(hosts, target):
|
||||
@@ -727,6 +761,13 @@ def main():
|
||||
parser.exit()
|
||||
|
||||
hosts = iterhosts(iterresources(tfstates(args.root)))
|
||||
|
||||
# Perform a second pass on the file to pick up floating_ip entries to update the ip address of referenced hosts
|
||||
ips = dict(iterips(iterresources(tfstates(args.root))))
|
||||
|
||||
if ips:
|
||||
hosts = iter_host_ips(hosts, ips)
|
||||
|
||||
if args.list:
|
||||
output = query_list(hosts)
|
||||
if args.nometa:
|
||||
|
||||
@@ -66,10 +66,10 @@ kube-master
|
||||
Group vars and overriding variables precedence
|
||||
----------------------------------------------
|
||||
|
||||
The group variables to control main deployment options are located in the directory ``inventory/group_vars``.
|
||||
Optional variables are located in the `inventory/group_vars/all.yml`.
|
||||
The group variables to control main deployment options are located in the directory ``inventory/sample/group_vars``.
|
||||
Optional variables are located in the `inventory/sample/group_vars/all.yml`.
|
||||
Mandatory variables that are common for at least one role (or a node group) can be found in the
|
||||
`inventory/group_vars/k8s-cluster.yml`.
|
||||
`inventory/sample/group_vars/k8s-cluster.yml`.
|
||||
There are also role vars for docker, rkt, kubernetes preinstall and master roles.
|
||||
According to the [ansible docs](http://docs.ansible.com/ansible/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable),
|
||||
those cannot be overridden from the group vars. In order to override, one should use
|
||||
@@ -153,16 +153,16 @@ Example command to filter and apply only DNS configuration tasks and skip
|
||||
everything else related to host OS configuration and downloading images of containers:
|
||||
|
||||
```
|
||||
ansible-playbook -i inventory/inventory.ini cluster.yml --tags preinstall,dnsmasq,facts --skip-tags=download,bootstrap-os
|
||||
ansible-playbook -i inventory/sample/hosts.ini cluster.yml --tags preinstall,dnsmasq,facts --skip-tags=download,bootstrap-os
|
||||
```
|
||||
And this play only removes the K8s cluster DNS resolver IP from hosts' /etc/resolv.conf files:
|
||||
```
|
||||
ansible-playbook -i inventory/inventory.ini -e dns_server='' cluster.yml --tags resolvconf
|
||||
ansible-playbook -i inventory/sample/hosts.ini -e dnsmasq_dns_server='' cluster.yml --tags resolvconf
|
||||
```
|
||||
And this prepares all container images locally (at the ansible runner node) without installing
|
||||
or upgrading related stuff or trying to upload containers to K8s cluster nodes:
|
||||
```
|
||||
ansible-playbook -i inventory/inventory.ini cluster.yml \
|
||||
ansible-playbook -i inventory/sample/hosts.ini cluster.yml \
|
||||
-e download_run_once=true -e download_localhost=true \
|
||||
--tags download --skip-tags upload,upgrade
|
||||
```
|
||||
|
||||
@@ -7,7 +7,7 @@ Note: Flannel is the only plugin that has currently been tested with atomic
|
||||
|
||||
### Vagrant
|
||||
|
||||
* For bootstrapping with Vagrant, use box centos/atomic-host
|
||||
* For bootstrapping with Vagrant, use box centos/atomic-host or fedora/atomic-host
|
||||
* Update VagrantFile variable `local_release_dir` to `/var/vagrant/temp`.
|
||||
* Update `vm_memory = 2048` and `vm_cpus = 2`
|
||||
* Networking on vagrant hosts has to be brought up manually once they are booted.
|
||||
@@ -17,6 +17,7 @@ Note: Flannel is the only plugin that has currently been tested with atomic
|
||||
sudo /sbin/ifup enp0s8
|
||||
```
|
||||
|
||||
* For users of vagrant-libvirt download qcow2 format from https://wiki.centos.org/SpecialInterestGroup/Atomic/Download/
|
||||
* For users of vagrant-libvirt download centos/atomic-host qcow2 format from https://wiki.centos.org/SpecialInterestGroup/Atomic/Download/
|
||||
* For users of vagrant-libvirt download fedora/atomic-host qcow2 format from https://getfedora.org/en/atomic/download/
|
||||
|
||||
Then you can proceed to [cluster deployment](#run-deployment)
|
||||
@@ -5,7 +5,7 @@ To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provi
|
||||
|
||||
Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.
|
||||
|
||||
You would also need to tag the resources in your VPC accordingly for the aws provider to utilize them. Tag the subnets and all instances that kubernetes will be run on with key `kuberentes.io/cluster/$cluster_name` (`$cluster_name` must be a unique identifier for the cluster). Tag the subnets that must be targetted by external ELBs with the key `kubernetes.io/role/elb` and internal ELBs with the key `kubernetes.io/role/internal-elb`.
|
||||
You would also need to tag the resources in your VPC accordingly for the aws provider to utilize them. Tag the subnets and all instances that kubernetes will be run on with key `kubernetes.io/cluster/$cluster_name` (`$cluster_name` must be a unique identifier for the cluster). Tag the subnets that must be targetted by external ELBs with the key `kubernetes.io/role/elb` and internal ELBs with the key `kubernetes.io/role/internal-elb`.
|
||||
|
||||
Make sure your VPC has both DNS Hostnames support and Private DNS enabled.
|
||||
|
||||
|
||||
@@ -3,20 +3,11 @@ Cloud providers
|
||||
|
||||
#### Provisioning
|
||||
|
||||
You can use kubespray-cli to start new instances on cloud providers
|
||||
here's an example
|
||||
```
|
||||
kubespray [aws|gce] --nodes 2 --etcd 3 --cluster-name test-smana
|
||||
```
|
||||
You can deploy instances in your cloud environment in several different ways. Examples include Terraform, Ansible (ec2 and gce modules), and manual creation.
|
||||
|
||||
#### Deploy kubernetes
|
||||
|
||||
With kubespray-cli
|
||||
```
|
||||
kubespray deploy [--aws|--gce] -u admin
|
||||
```
|
||||
|
||||
Or ansible-playbook command
|
||||
With ansible-playbook command
|
||||
```
|
||||
ansible-playbook -u smana -e ansible_ssh_user=admin -e cloud_provider=[aws|gce] -b --become-user=root -i inventory/single.cfg cluster.yml
|
||||
```
|
||||
|
||||
74
docs/contiv.md
Normal file
@@ -0,0 +1,74 @@
|
||||
Contiv
|
||||
======
|
||||
|
||||
Here is the [Contiv documentation](http://contiv.github.io/documents/).
|
||||
|
||||
## Administrate Contiv
|
||||
|
||||
There are two ways to manage Contiv:
|
||||
|
||||
* a web UI managed by the api proxy service
|
||||
* a CLI named `netctl`
|
||||
|
||||
|
||||
### Interfaces
|
||||
|
||||
#### The Web Interface
|
||||
|
||||
This UI is hosted on all kubernetes master nodes. The service is available at `https://<one of your master nodes>:10000`.
|
||||
|
||||
You can configure the api proxy by overriding the following variables:
|
||||
|
||||
```yaml
|
||||
contiv_enable_api_proxy: true
|
||||
contiv_api_proxy_port: 10000
|
||||
contiv_generate_certificate: true
|
||||
```
|
||||
|
||||
The default credentials to log in are: admin/admin.
|
||||
|
||||
|
||||
#### The Command Line Interface
|
||||
|
||||
The second way to modify the Contiv configuration is to use the CLI. To do this, you have to connect to the server and export an environment variable to tell netctl how to connect to the cluster:
|
||||
|
||||
```bash
|
||||
export NETMASTER=http://127.0.0.1:9999
|
||||
```
|
||||
|
||||
The port can be changed by overriding the following variable:
|
||||
|
||||
```yaml
|
||||
contiv_netmaster_port: 9999
|
||||
```
|
||||
|
||||
The CLI doesn't use the authentication process needed by the web interface.
|
||||
|
||||
|
||||
### Network configuration
|
||||
|
||||
The default configuration uses VXLAN to create an overlay. Two networks are created by default:
|
||||
|
||||
* `contivh1`: an infrastructure network. It allows nodes to access the pod IPs. It is mandatory in a Kubernetes environment that uses VXLAN.
|
||||
* `default-net`: the default network that hosts pods.
|
||||
|
||||
You can change the default network configuration by overriding the `contiv_networks` variable.
|
||||
|
||||
The default forward mode is set to routing:
|
||||
|
||||
```yaml
|
||||
contiv_fwd_mode: routing
|
||||
```
|
||||
|
||||
The following is an example of how you can use VLAN instead of VXLAN:
|
||||
|
||||
```yaml
|
||||
contiv_fwd_mode: bridge
|
||||
contiv_vlan_interface: eth0
|
||||
contiv_networks:
|
||||
- name: default-net
|
||||
subnet: "{{ kube_pods_subnet }}"
|
||||
gateway: "{{ kube_pods_subnet|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
|
||||
encap: vlan
|
||||
pkt_tag: 10
|
||||
```
|
||||
@@ -1,13 +1,7 @@
|
||||
CoreOS bootstrap
|
||||
===============
|
||||
|
||||
Example with **kubespray-cli**:
|
||||
|
||||
```
|
||||
kubespray deploy --gce --coreos
|
||||
```
|
||||
|
||||
Or with Ansible:
|
||||
Example with Ansible:
|
||||
|
||||
Before running the cluster playbook you must satisfy the following requirements:
|
||||
|
||||
|
||||
38
docs/debian.md
Normal file
@@ -0,0 +1,38 @@
|
||||
Debian Jessie
|
||||
===============
|
||||
|
||||
Debian Jessie installation Notes:
|
||||
|
||||
- Add
|
||||
|
||||
```GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"```
|
||||
|
||||
to /etc/default/grub. Then update with
|
||||
|
||||
```
|
||||
sudo update-grub
|
||||
sudo update-grub2
|
||||
sudo reboot
|
||||
```
|
||||
|
||||
- Add the [backports](https://backports.debian.org/Instructions/) which contain Systemd 2.30 and update Systemd.
|
||||
|
||||
```apt-get -t jessie-backports install systemd```
|
||||
|
||||
(Necessary because the default Systemd version (2.15) does not support the "Delegate" directive in service files)
|
||||
|
||||
- Add the Ansible repository and install Ansible to get a proper version
|
||||
|
||||
```
|
||||
sudo add-apt-repository ppa:ansible/ansible
|
||||
sudo apt-get update
|
||||
sudo apt-get install ansible
|
||||
|
||||
```
|
||||
|
||||
- Install Jinja2 and Python-Netaddr
|
||||
|
||||
```sudo apt-get install python-jinja2=2.8-1~bpo8+1 python-netaddr```
|
||||
|
||||
|
||||
Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment)
|
||||
@@ -50,7 +50,7 @@ DNS modes supported by Kubespray
|
||||
You can modify how Kubespray sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``.
|
||||
|
||||
## dns_mode
|
||||
``dns_mode`` configures how Kubespray will setup cluster DNS. There are three modes available:
|
||||
``dns_mode`` configures how Kubespray will setup cluster DNS. There are four modes available:
|
||||
|
||||
#### dnsmasq_kubedns (default)
|
||||
This installs an additional dnsmasq DaemonSet which gives more flexibility and lifts some
|
||||
@@ -62,6 +62,20 @@ other queries are forwarded to the nameservers found in ``upstream_dns_servers``
|
||||
This does not install the dnsmasq DaemonSet and instructs kubelet to directly use kubedns/skydns for
|
||||
all queries.
|
||||
|
||||
#### coredns
|
||||
This does not install the dnsmasq DaemonSet and instructs kubelet to directly use CoreDNS for
|
||||
all queries.
|
||||
|
||||
#### coredns_dual
|
||||
This does not install the dnsmasq DaemonSet and instructs kubelet to directly use CoreDNS for
|
||||
all queries. It will also deploy a secondary CoreDNS stack.
|
||||
|
||||
#### manual
|
||||
This does not install dnsmasq or kubedns, but allows you to specify
|
||||
`manual_dns_server`, which will be configured on nodes for handling Pod DNS.
|
||||
Use this method if you plan to install your own DNS server in the cluster after
|
||||
initial deployment.
|
||||
|
||||
#### none
|
||||
This does not install either dnsmasq or kubedns/skydns. This basically disables cluster DNS completely and
|
||||
leaves you with a non-functional cluster.
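As an illustration, the mode is chosen with the `dns_mode` variable in the cluster group vars (a minimal sketch; the values shown are examples only, `dns_mode` and `manual_dns_server` are the variables referenced above):

```yaml
# inventory/sample/group_vars/k8s-cluster.yml (illustrative)
dns_mode: coredns            # one of: dnsmasq_kubedns, kubedns, coredns, coredns_dual, manual, none
#manual_dns_server: 10.x.x.x # only used with dns_mode: manual
```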
|
||||
|
||||
@@ -1,29 +1,12 @@
|
||||
Getting started
|
||||
===============
|
||||
|
||||
The easiest way to run the deployement is to use the **kubespray-cli** tool.
|
||||
A complete documentation can be found in its [github repository](https://github.com/kubespray/kubespray-cli).
|
||||
|
||||
Here is a simple example on AWS:
|
||||
|
||||
* Create instances and generate the inventory
|
||||
|
||||
```
|
||||
kubespray aws --instances 3
|
||||
```
|
||||
|
||||
* Run the deployment
|
||||
|
||||
```
|
||||
kubespray deploy --aws -u centos -n calico
|
||||
```
|
||||
|
||||
Building your own inventory
|
||||
---------------------------
|
||||
|
||||
Ansible inventory can be stored in 3 formats: YAML, JSON, or INI-like. There is
|
||||
an example inventory located
|
||||
[here](https://github.com/kubernetes-incubator/kubespray/blob/master/inventory/inventory.example).
|
||||
[here](https://github.com/kubernetes-incubator/kubespray/blob/master/inventory/sample/hosts.ini).
|
||||
|
||||
You can use an
|
||||
[inventory generator](https://github.com/kubernetes-incubator/kubespray/blob/master/contrib/inventory_builder/inventory.py)
|
||||
@@ -35,11 +18,9 @@ certain threshold. Run `python3 contrib/inventory_builder/inventory.py help` hel
|
||||
|
||||
Example inventory generator usage:
|
||||
|
||||
```
|
||||
cp -r inventory my_inventory
|
||||
cp -r inventory/sample inventory/mycluster
|
||||
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
|
||||
CONFIG_FILE=my_inventory/inventory.cfg python3 contrib/inventory_builder/inventory.py ${IPS[@]}
|
||||
```
|
||||
CONFIG_FILE=inventory/mycluster/hosts.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]}
|
||||
|
||||
Starting custom deployment
|
||||
--------------------------
|
||||
@@ -47,12 +28,10 @@ Starting custom deployment
|
||||
Once you have an inventory, you may want to customize deployment data vars
|
||||
and start the deployment:
|
||||
|
||||
**IMPORTANT: Edit my_inventory/groups_vars/*.yaml to override data vars**
|
||||
**IMPORTANT**: Edit my\_inventory/groups\_vars/\*.yaml to override data vars:
|
||||
|
||||
```
|
||||
ansible-playbook -i my_inventory/inventory.cfg cluster.yml -b -v \
|
||||
ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b -v \
|
||||
--private-key=~/.ssh/private_key
|
||||
```
|
||||
|
||||
See more details in the [ansible guide](ansible.md).
|
||||
|
||||
@@ -63,19 +42,31 @@ You may want to add **worker** nodes to your existing cluster. This can be done
|
||||
|
||||
- Add the new worker node to your inventory under kube-node (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
|
||||
- Run the ansible-playbook command, substituting `scale.yml` for `cluster.yml`:
|
||||
|
||||
ansible-playbook -i inventory/mycluster/hosts.ini scale.yml -b -v \
|
||||
--private-key=~/.ssh/private_key
|
||||
|
||||
Remove nodes
|
||||
------------
|
||||
|
||||
You may want to remove **worker** nodes from your existing cluster. This can be done by re-running the `remove-node.yml` playbook. First, the nodes will be drained, then some Kubernetes services will be stopped and some certificates deleted, and finally the kubectl command to delete these nodes will be executed. This can be combined with the add node function; this is generally helpful when doing something like autoscaling your clusters. Of course, if a node is not working, you can remove the node and install it again.
|
||||
|
||||
- Add worker nodes to the list under kube-node if you want to delete them (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
|
||||
- Run the ansible-playbook command, substituting `remove-node.yml`:
|
||||
```
|
||||
ansible-playbook -i my_inventory/inventory.cfg scale.yml -b -v \
|
||||
ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b -v \
|
||||
--private-key=~/.ssh/private_key
|
||||
```
|
||||
|
||||
Connecting to Kubernetes
|
||||
------------------------
|
||||
|
||||
By default, Kubespray configures kube-master hosts with insecure access to
|
||||
kube-apiserver via port 8080. A kubeconfig file is not necessary in this case,
|
||||
because kubectl will use http://localhost:8080 to connect. The kubeconfig files
|
||||
because kubectl will use <http://localhost:8080> to connect. The kubeconfig files
|
||||
generated will point to localhost (on kube-masters) and kube-node hosts will
|
||||
connect either to a localhost nginx proxy or to a loadbalancer if configured.
|
||||
More details on this process are in the [HA guide](ha.md).
|
||||
More details on this process are in the [HA guide](ha-mode.md).
|
||||
|
||||
Kubespray permits connecting to the cluster remotely on any IP of any
|
||||
kube-master host on port 6443 by default. However, this requires
|
||||
@@ -83,7 +74,7 @@ authentication. One could generate a kubeconfig based on one installed
|
||||
kube-master hosts (needs improvement) or connect with a username and password.
|
||||
By default, a user with admin rights is created, named `kube`.
|
||||
The password can be viewed after deployment by looking at the file
|
||||
`PATH_TO_KUBESPRAY/credentials/kube_user`. This contains a randomly generated
|
||||
`PATH_TO_KUBESPRAY/credentials/kube_user.creds`. This contains a randomly generated
|
||||
password. If you wish to set your own password, just precreate/modify this
|
||||
file yourself.
|
||||
|
||||
@@ -93,25 +84,34 @@ the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-applicati
|
||||
Accessing Kubernetes Dashboard
|
||||
------------------------------
|
||||
|
||||
If the variable `dashboard_enabled` is set (default is true), then you can
|
||||
access the Kubernetes Dashboard at the following URL:
|
||||
As of kubernetes-dashboard v1.7.x:
|
||||
|
||||
https://kube:_kube-password_@_host_:6443/ui/
|
||||
- New login options that use apiserver auth proxying of token/basic/kubeconfig by default
|
||||
- Requires RBAC in authorization\_modes
|
||||
- Only serves over https
|
||||
- No longer available at <https://first_master:6443/ui> until apiserver is updated with the https proxy URL
|
||||
|
||||
To see the password, refer to the section above, titled *Connecting to
|
||||
Kubernetes*. The host can be any kube-master or kube-node or loadbalancer
|
||||
(when enabled).
|
||||
If the variable `dashboard_enabled` is set (default is true), then you can access the Kubernetes Dashboard at the following URL; you will be prompted for credentials:
|
||||
<https://first_master:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login>
|
||||
|
||||
Or you can run `kubectl proxy` from your local machine to access the dashboard in your browser at:
|
||||
<http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login>
|
||||
|
||||
It is recommended to access the dashboard from behind a gateway (like an Ingress Controller) that enforces an authentication token. Details and other access options here: <https://github.com/kubernetes/dashboard/wiki/Accessing-Dashboard---1.7.X-and-above>
|
||||
|
||||
Accessing Kubernetes API
|
||||
------------------------
|
||||
|
||||
The main client of Kubernetes is `kubectl`. It is installed on each kube-master
|
||||
host and can optionally be configured on your ansible host by setting
|
||||
`kubeconfig_localhost: true` in the configuration. If enabled, kubectl and
|
||||
admin.conf will appear in the artifacts/ directory after deployment. You can
|
||||
see a list of nodes by running the following commands:
|
||||
`kubectl_localhost: true` and `kubeconfig_localhost: true` in the configuration:
|
||||
|
||||
cd artifacts/
|
||||
./kubectl --kubeconfig admin.conf get nodes
|
||||
- If `kubectl_localhost` is enabled, `kubectl` will be downloaded to `/usr/local/bin/` and set up with bash completion. A helper script `inventory/mycluster/artifacts/kubectl.sh` is also created for use with the `admin.conf` below.
|
||||
- If `kubeconfig_localhost` is enabled, `admin.conf` will appear in the `inventory/mycluster/artifacts/` directory after deployment.
|
||||
|
||||
If desired, copy kubectl to your bin dir and admin.conf to ~/.kube/config.
|
||||
You can see a list of nodes by running the following commands:
|
||||
|
||||
cd inventory/mycluster/artifacts
|
||||
./kubectl.sh get nodes
|
||||
|
||||
If desired, copy admin.conf to ~/.kube/config.
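A minimal sketch of the inventory group vars that enable this behaviour (variable names as referenced above):

```yaml
# Fetch kubectl and admin.conf to the Ansible runner after deployment (illustrative)
kubectl_localhost: true
kubeconfig_localhost: true
```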
|
||||
|
||||
@@ -27,19 +27,21 @@ non-master Kubernetes node. This is referred to as localhost loadbalancing. It
|
||||
is less efficient than a dedicated load balancer because it creates extra
|
||||
health checks on the Kubernetes apiserver, but is more practical for scenarios
|
||||
where an external LB or virtual IP management is inconvenient. This option is
|
||||
configured by the variable `loadbalancer_apiserver_localhost` (defaults to `True`).
|
||||
configured by the variable `loadbalancer_apiserver_localhost` (defaults to
|
||||
`True`. Or `False`, if there is an external `loadbalancer_apiserver` defined).
|
||||
You may also define the port the local internal loadbalancer uses by changing,
|
||||
`nginx_kube_apiserver_port`. This defaults to the value of `kube_apiserver_port`.
|
||||
It is also important to note that Kubespray will only configure kubelet and kube-proxy
|
||||
on non-master nodes to use the local internal loadbalancer.
|
||||
`nginx_kube_apiserver_port`. This defaults to the value of
|
||||
`kube_apiserver_port`. It is also important to note that Kubespray will only
|
||||
configure kubelet and kube-proxy on non-master nodes to use the local internal
|
||||
loadbalancer.
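For instance, a minimal group vars sketch for the localhost loadbalancer (the port value is only an assumed example; it follows `kube_apiserver_port` by default):

```yaml
# Localhost loadbalancer settings (illustrative values)
loadbalancer_apiserver_localhost: true
nginx_kube_apiserver_port: 8443   # example override; defaults to kube_apiserver_port
```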
|
||||
|
||||
If you choose to NOT use the local internal loadbalancer, you will need to configure
|
||||
your own loadbalancer to achieve HA. Note that deploying a loadbalancer is up to
|
||||
a user and is not covered by ansible roles in Kubespray. By default, it only configures
|
||||
a non-HA endpoint, which points to the `access_ip` or IP address of the first server
|
||||
node in the `kube-master` group. It can also configure clients to use endpoints
|
||||
for a given loadbalancer type. The following diagram shows how traffic to the
|
||||
apiserver is directed.
|
||||
If you choose to NOT use the local internal loadbalancer, you will need to
|
||||
configure your own loadbalancer to achieve HA. Note that deploying a
|
||||
loadbalancer is up to a user and is not covered by ansible roles in Kubespray.
|
||||
By default, it only configures a non-HA endpoint, which points to the
|
||||
`access_ip` or IP address of the first server node in the `kube-master` group.
|
||||
It can also configure clients to use endpoints for a given loadbalancer type.
|
||||
The following diagram shows how traffic to the apiserver is directed.
|
||||
|
||||

|
||||
|
||||
@@ -66,40 +68,72 @@ listen kubernetes-apiserver-https
|
||||
balance roundrobin
|
||||
```
|
||||
|
||||
And the corresponding example global vars config:
|
||||
Note: That's an example config managed elsewhere outside of Kubespray.
|
||||
|
||||
And the corresponding example global vars for such a "cluster-aware"
|
||||
external LB with the cluster API access modes configured in Kubespray:
|
||||
```
|
||||
apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
|
||||
apiserver_loadbalancer_domain_name: "my-apiserver-lb.example.com"
|
||||
loadbalancer_apiserver:
|
||||
address: <VIP>
|
||||
port: 8383
|
||||
```
|
||||
|
||||
Note: The default kubernetes apiserver configuration binds to all interfaces,
|
||||
so you will need to use a different port for the VIP than the port the API is
|
||||
listening on, or set the `kube_apiserver_bind_address` so that the API only
|
||||
listens on a specific interface (to avoid conflict with haproxy binding the
|
||||
port on the VIP address).
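A sketch of the two options from the note above (the addresses are placeholders):

```yaml
# Option 1: keep the default bind address and use a VIP port that differs from kube_apiserver_port
loadbalancer_apiserver:
  address: 192.0.2.10   # placeholder VIP
  port: 8383
# Option 2: bind the apiserver to a specific interface instead
#kube_apiserver_bind_address: 192.0.2.21   # placeholder node address
```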
|
||||
|
||||
This domain name, or default "lb-apiserver.kubernetes.local", will be inserted
|
||||
into the `/etc/hosts` file of all servers in the `k8s-cluster` group. Note that
|
||||
into the `/etc/hosts` file of all servers in the `k8s-cluster` group and wired
|
||||
into the generated self-signed TLS/SSL certificates as well. Note that
|
||||
the HAProxy service should as well be HA and requires a VIP management, which
|
||||
is out of scope of this doc. Specifying an external LB overrides any internal
|
||||
localhost LB configuration.
|
||||
is out of scope of this doc.
|
||||
|
||||
Note: In order to achieve HA for HAProxy instances, those must be running on
|
||||
the each node in the `k8s-cluster` group as well, but require no VIP, thus
|
||||
no VIP management.
|
||||
There is a special case for an internal and an externally configured (not with
|
||||
Kubespray) LB used simultaneously. Keep in mind that the cluster is not aware
|
||||
of such an external LB and you do not need to specify any configuration variables
|
||||
for it.
|
||||
|
||||
Access endpoints are evaluated automagically, as the following:
|
||||
Note: TLS/SSL termination for externally accessed API endpoints will **not**
|
||||
be covered by Kubespray for that case. Make sure your external LB provides it.
|
||||
Alternatively, you may specify externally load balanced VIPs in the
|
||||
`supplementary_addresses_in_ssl_keys` list. Then, kubespray will add them into
|
||||
the generated cluster certificates as well.
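For example (a sketch; the address below is a placeholder for your externally managed VIP):

```yaml
supplementary_addresses_in_ssl_keys:
  - 192.0.2.50   # placeholder externally load balanced VIP
```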
|
||||
|
||||
| Endpoint type | kube-master | non-master |
|
||||
|------------------------------|---------------|---------------------|
|
||||
| Local LB (default) | http://lc:p | https://lc:nsp |
|
||||
| External LB, no internal | https://lb:lp | https://lb:lp |
|
||||
| No ext/int LB | http://lc:p | https://m[0].aip:sp |
|
||||
Aside from that specific case, `loadbalancer_apiserver` is considered mutually
|
||||
exclusive to `loadbalancer_apiserver_localhost`.
|
||||
|
||||
Access API endpoints are evaluated automagically, as the following:
|
||||
|
||||
| Endpoint type | kube-master | non-master | external |
|
||||
|------------------------------|----------------|---------------------|---------------------|
|
||||
| Local LB (default) | https://bip:sp | https://lc:nsp | https://m[0].aip:sp |
|
||||
| Local LB + Unmanaged here LB | https://bip:sp | https://lc:nsp | https://ext |
|
||||
| External LB, no internal | https://bip:sp | https://lb:lp | https://lb:lp |
|
||||
| No ext/int LB | https://bip:sp | https://m[0].aip:sp | https://m[0].aip:sp |
|
||||
|
||||
Where:
|
||||
* `m[0]` - the first node in the `kube-master` group;
|
||||
* `lb` - LB FQDN, `apiserver_loadbalancer_domain_name`;
|
||||
* `ext` - Externally load balanced VIP:port and FQDN, not managed by Kubespray;
|
||||
* `lc` - localhost;
|
||||
* `p` - insecure port, `kube_apiserver_insecure_port`
|
||||
* `nsp` - nginx secure port, `nginx_kube_apiserver_port`;
|
||||
* `bip` - a custom bind IP or localhost for the default bind IP '0.0.0.0';
|
||||
* `nsp` - nginx secure port, `nginx_kube_apiserver_port`, defers to `sp`;
|
||||
* `sp` - secure port, `kube_apiserver_port`;
|
||||
* `lp` - LB port, `loadbalancer_apiserver.port`, defers to the secure port;
|
||||
* `ip` - the node IP, defers to the ansible IP;
|
||||
* `aip` - `access_ip`, defers to the ip.
|
||||
|
||||
The second and third columns represent internal cluster access modes. The last
|
||||
column illustrates an example URI to access the cluster APIs externally.
|
||||
Kubespray has nothing to do with it; this is informational only.
|
||||
|
||||
As you can see, the masters' internal API endpoints are always
|
||||
contacted via the local bind IP, which is `https://bip:sp`.
|
||||
|
||||
**Note** that for some cases, like healthchecks of applications deployed by
|
||||
Kubespray, the masters' APIs are accessed via the insecure endpoint, which
|
||||
consists of the local `kube_apiserver_insecure_bind_address` and
|
||||
`kube_apiserver_insecure_port`.
|
||||
|
||||
@@ -84,7 +84,7 @@ Other members of your team should use ```git submodule sync```, ```git submodule
|
||||
# Contributing
|
||||
If you made useful changes or fixed a bug in existent kubespray repo, use this flow for PRs to original kubespray repo.
|
||||
|
||||
0. Sign the [CNCF CLA](https://github.com/kubernetes/kubernetes/wiki/CLA-FAQ).
|
||||
0. Sign the [CNCF CLA](https://git.k8s.io/community/CLA.md).
|
||||
|
||||
1. Change working directory to git submodule directory (3d/kubespray).
|
||||
|
||||
|
||||
@@ -3,8 +3,7 @@ Large deployments of K8s
|
||||
|
||||
For large scale deployments, consider the following configuration changes:
|
||||
|
||||
* Tune [ansible settings]
|
||||
(http://docs.ansible.com/ansible/intro_configuration.html)
|
||||
* Tune [ansible settings](http://docs.ansible.com/ansible/intro_configuration.html)
|
||||
for `forks` and `timeout` vars to fit large numbers of nodes being deployed.
|
||||
|
||||
* Override containers' `foo_image_repo` vars to point to intranet registry.
|
||||
@@ -34,6 +33,9 @@ For a large scaled deployments, consider the following configuration changes:
|
||||
``kube_controller_pod_eviction_timeout`` for better Kubernetes reliability.
|
||||
Check out [Kubernetes Reliability](kubernetes-reliability.md)
|
||||
|
||||
* Tune network prefix sizes. Those are ``kube_network_node_prefix``,
|
||||
``kube_service_addresses`` and ``kube_pods_subnet``.
|
||||
|
||||
* Add calico-rr nodes if you are deploying with Calico or Canal. Nodes recover
|
||||
from host/network interruption much quicker with calico-rr. Note that
|
||||
calico-rr role must be on a host without kube-master or kube-node role (but
|
||||
@@ -44,5 +46,8 @@ For a large scaled deployments, consider the following configuration changes:
|
||||
section of the Getting started guide for tips on creating a large scale
|
||||
Ansible inventory.
|
||||
|
||||
* Override ``etcd_events_cluster_setup: true`` to store events in a separate
|
||||
dedicated etcd instance.
|
||||
|
||||
For example, when deploying 200 nodes, you may want to run ansible with
|
||||
``--forks=50``, ``--timeout=600`` and define the ``retry_stagger: 60``.
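The inventory-level part of that tuning might look like the following sketch (``--forks`` and ``--timeout`` are passed on the ansible-playbook command line, not set here):

```yaml
# Large-deployment tuning (illustrative, values taken from the text above)
retry_stagger: 60
etcd_events_cluster_setup: true
```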
|
||||
|
||||
19
docs/opensuse.md
Normal file
@@ -0,0 +1,19 @@
|
||||
openSUSE Leap 42.3 and Tumbleweed
|
||||
===============
|
||||
|
||||
openSUSE Leap installation Notes:
|
||||
|
||||
- Install Ansible
|
||||
|
||||
```
|
||||
sudo zypper ref
|
||||
sudo zypper -n install ansible
|
||||
|
||||
```
|
||||
|
||||
- Install Jinja2 and Python-Netaddr
|
||||
|
||||
```sudo zypper -n install python-Jinja2 python-netaddr```
|
||||
|
||||
|
||||
Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment)
|
||||
@@ -2,8 +2,9 @@ Kubespray's roadmap
|
||||
=================
|
||||
|
||||
### Kubeadm
|
||||
- Propose kubeadm as an option in order to setup the kubernetes cluster.
|
||||
That would probably improve deployment speed and certs management [#553](https://github.com/kubespray/kubespray/issues/553)
|
||||
- Switch to kubeadm deployment as the default method after some bugs are fixed:
|
||||
* Support for basic auth
|
||||
* cloudprovider cloud-config mount [#484](https://github.com/kubernetes/kubeadm/issues/484)
|
||||
|
||||
### Self deployment (pull-mode) [#320](https://github.com/kubespray/kubespray/issues/320)
|
||||
- the playbook would install and configure docker/rkt and the etcd cluster
|
||||
@@ -12,60 +13,35 @@ That would probably improve deployment speed and certs management [#553](https:/
|
||||
- to be discussed, a way to provide the inventory
|
||||
- **self deployment** of the node from inside a container [#321](https://github.com/kubespray/kubespray/issues/321)
|
||||
|
||||
### Provisionning and cloud providers
|
||||
### Provisioning and cloud providers
|
||||
- [ ] Terraform to provision instances on **GCE, AWS, Openstack, Digital Ocean, Azure**
|
||||
- [ ] On AWS autoscaling, multi AZ
|
||||
- [ ] On Azure autoscaling, create loadbalancer [#297](https://github.com/kubespray/kubespray/issues/297)
|
||||
- [ ] On GCE be able to create a loadbalancer automatically (IAM ?) [#280](https://github.com/kubespray/kubespray/issues/280)
|
||||
- [x] **TLS boostrap** support for kubelet [#234](https://github.com/kubespray/kubespray/issues/234)
|
||||
- [x] **TLS bootstrap** support for kubelet (covered by kubeadm, but not in standard deployment) [#234](https://github.com/kubespray/kubespray/issues/234)
|
||||
(related issues: https://github.com/kubernetes/kubernetes/pull/20439 <br>
|
||||
https://github.com/kubernetes/kubernetes/issues/18112)
|
||||
|
||||
### Tests
|
||||
- [x] Run kubernetes e2e tests
|
||||
- [x] migrate to jenkins
|
||||
(a test is currently a deployment on a 3 node cluster, testing k8s api, ping between 2 pods)
|
||||
- [x] Full tests on GCE per day (All OS's, all network plugins)
|
||||
- [x] trigger a single test per pull request
|
||||
- [ ] ~~single test with the Ansible version n-1 per day~~
|
||||
- [x] Test idempotency on on single OS but for all network plugins/container engines
|
||||
- [ ] Run kubernetes e2e tests
|
||||
- [ ] Test idempotency on a single OS but for all network plugins/container engines
|
||||
- [ ] single test on AWS per day
|
||||
- [x] test different architectures:
|
||||
- 3 instances, 3 are members of the etcd cluster, 2 of them acting as master and node, 1 as node
|
||||
- 5 instances, 3 are etcd and nodes, 2 are masters only
|
||||
- 7 instances, 3 etcd only, 2 masters, 2 nodes
|
||||
- [ ] test scale up cluster: +1 etcd, +1 master, +1 node
|
||||
- [ ] Reorganize CI test vars into group var files
|
||||
|
||||
### Lifecycle
|
||||
- [ ] Adopt the kubeadm tool by delegating CM tasks it is capable to accomplish well [#553](https://github.com/kubespray/kubespray/issues/553)
|
||||
- [x] Drain worker node when upgrading k8s components in a worker node. [#154](https://github.com/kubespray/kubespray/issues/154)
|
||||
- [ ] Drain worker node when shutting down/deleting an instance
|
||||
- [ ] Upgrade granularity: select components to upgrade and skip others
|
||||
|
||||
### Networking
|
||||
- [ ] romana.io support [#160](https://github.com/kubespray/kubespray/issues/160)
|
||||
- [ ] Configure network policy for Calico. [#159](https://github.com/kubespray/kubespray/issues/159)
|
||||
- [ ] Opencontrail
|
||||
- [x] Canal
|
||||
- [x] Cloud Provider native networking (instead of our network plugins)
|
||||
|
||||
### High availability
|
||||
- (to be discussed) option to set a loadbalancer for the apiservers like ucarp/pacemaker/keepalived
|
||||
While waiting for the issue [kubernetes/kubernetes#18174](https://github.com/kubernetes/kubernetes/issues/18174) to be fixed.
|
||||
|
||||
### Kubespray-cli
|
||||
- Delete instances
|
||||
- `kubespray vagrant` to setup a test cluster locally
|
||||
- `kubespray azure` for Microsoft Azure support
|
||||
- switch to Terraform instead of Ansible for provisionning
|
||||
- update $HOME/.kube/config when a cluster is deployed. Optionally switch to this context
|
||||
- [ ] Consolidate network_plugins and kubernetes-apps/network_plugins
|
||||
|
||||
### Kubespray API
|
||||
- Perform all actions through an **API**
|
||||
- Store inventories / configurations of multiple clusters
|
||||
- make sure that state of cluster is completely saved in no more than one config file beyond hosts inventory
|
||||
|
||||
### Addons (with kpm)
|
||||
### Addons (helm or native ansible)
|
||||
Include optional deployments to init the cluster:
|
||||
##### Monitoring
|
||||
- Heapster / Grafana ....
|
||||
@@ -86,9 +62,9 @@ Include optionals deployments to init the cluster:
|
||||
|
||||
### Others
|
||||
- remove nodes (adding is already supported)
|
||||
- being able to choose any k8s version (almost done)
|
||||
- **rkt** support [#59](https://github.com/kubespray/kubespray/issues/59)
|
||||
- Review documentation (split in categories)
|
||||
- Organize and update documentation (split in categories)
|
||||
- Refactor downloads so it all runs in the beginning of deployment
|
||||
- Make bootstrapping OS more consistent
|
||||
- **consul** -> if officially supported by k8s
|
||||
- flex volumes options (e.g. **torrus** support) [#312](https://github.com/kubespray/kubespray/issues/312)
|
||||
- Clusters federation option (aka **ubernetes**) [#329](https://github.com/kubespray/kubespray/issues/329)
|
||||
|
||||
@@ -24,13 +24,13 @@ If you wanted to upgrade just kube_version from v1.4.3 to v1.4.6, you could
|
||||
deploy the following way:
|
||||
|
||||
```
|
||||
ansible-playbook cluster.yml -i inventory/inventory.cfg -e kube_version=v1.4.3
|
||||
ansible-playbook cluster.yml -i inventory/sample/hosts.ini -e kube_version=v1.4.3
|
||||
```
|
||||
|
||||
And then repeat with v1.4.6 as kube_version:
|
||||
|
||||
```
|
||||
ansible-playbook cluster.yml -i inventory/inventory.cfg -e kube_version=v1.4.6
|
||||
ansible-playbook cluster.yml -i inventory/sample/hosts.ini -e kube_version=v1.4.6
|
||||
```
|
||||
|
||||
#### Graceful upgrade
|
||||
@@ -44,7 +44,7 @@ deployed.
|
||||
```
|
||||
git fetch origin
|
||||
git checkout origin/master
|
||||
ansible-playbook upgrade-cluster.yml -b -i inventory/inventory.cfg -e kube_version=v1.6.0
|
||||
ansible-playbook upgrade-cluster.yml -b -i inventory/sample/hosts.ini -e kube_version=v1.6.0
|
||||
```
|
||||
|
||||
After a successful upgrade, the Server Version should be updated:
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
Vagrant Install
|
||||
=================
|
||||
|
||||
Assuming you have Vagrant (1.8+) installed with virtualbox (it may work
|
||||
Assuming you have Vagrant (2.0+) installed with virtualbox (it may work
|
||||
with vmware, but is untested) you should be able to launch a 3 node
|
||||
Kubernetes cluster by simply running `$ vagrant up`.<br />
|
||||
|
||||
|
||||
28
docs/vars.md
@@ -28,6 +28,7 @@ Some variables of note include:
|
||||
* *kube_version* - Specify a given Kubernetes hyperkube version
|
||||
* *searchdomains* - Array of DNS domains to search when looking up hostnames
|
||||
* *nameservers* - Array of nameservers to use for DNS lookup
|
||||
* *preinstall_selinux_state* - Set selinux state, permitted values are permissive and disabled.
|
||||
|
||||
#### Addressing variables
|
||||
|
||||
@@ -61,8 +62,9 @@ following default cluster paramters:
|
||||
* *kube_network_node_prefix* - Subnet allocated per-node for pod IPs. Remaining
|
||||
bits in kube_pods_subnet dictate how many kube-nodes can be in the cluster.
|
||||
* *dns_setup* - Enables dnsmasq
|
||||
* *dns_server* - Cluster IP for dnsmasq (default is 10.233.0.2)
|
||||
* *skydns_server* - Cluster IP for KubeDNS (default is 10.233.0.3)
|
||||
* *dnsmasq_dns_server* - Cluster IP for dnsmasq (default is 10.233.0.2)
|
||||
* *skydns_server* - Cluster IP for DNS (default is 10.233.0.3)
|
||||
* *skydns_server_secondary* - Secondary Cluster IP for CoreDNS used with coredns_dual deployment (default is 10.233.0.4)
|
||||
* *cloud_provider* - Enable extra Kubelet option if operating inside GCE or
|
||||
OpenStack (default is unset)
|
||||
* *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in
|
||||
@@ -71,9 +73,12 @@ following default cluster paramters:
|
||||
alpha/experimental Kubernetes features. (defaults is `[]`)
|
||||
* *authorization_modes* - A list of [authorization mode](
|
||||
https://kubernetes.io/docs/admin/authorization/#using-flags-for-your-authorization-module)
|
||||
that the cluster should be configured for. Defaults to `[]` (i.e. no authorization).
|
||||
Note: `RBAC` is currently in experimental phase, and do not support either calico or
|
||||
vault. Upgrade from non-RBAC to RBAC is not tested.
|
||||
that the cluster should be configured for. Defaults to `['Node', 'RBAC']`
|
||||
(Node and RBAC authorizers).
|
||||
Note: `Node` and `RBAC` are enabled by default. Previously deployed clusters can be
|
||||
converted to RBAC mode. However, your apps which rely on the Kubernetes API will
|
||||
require a service account and cluster role bindings. You can override this
|
||||
setting by setting authorization_modes to `[]`.
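For example (a sketch; the first value is the documented default, the second a possible override):

```yaml
# Default: Node and RBAC authorizers enabled
authorization_modes: ['Node', 'RBAC']
# To disable authorization entirely (generally not recommended):
#authorization_modes: []
```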
|
||||
|
||||
Note, if cloud providers have any use of the ``10.233.0.0/16``, like instances'
|
||||
private addresses, make sure to pick other values for ``kube_service_addresses``
|
||||
@@ -99,7 +104,8 @@ Stack](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/dns-st
|
||||
* *docker_options* - Commonly used to set
|
||||
``--insecure-registry=myregistry.mydomain:5000``
|
||||
* *http_proxy/https_proxy/no_proxy* - Proxy variables for deploying behind a
|
||||
proxy
|
||||
proxy. Note that no_proxy defaults to all internal cluster IPs and hostnames
|
||||
that correspond to each node.
|
||||
* *kubelet_deployment_type* - Controls which platform to deploy kubelet on.
|
||||
Available options are ``host``, ``rkt``, and ``docker``. ``docker`` mode
|
||||
is unlikely to work on newer releases. Starting with Kubernetes v1.7
|
||||
@@ -112,6 +118,14 @@ Stack](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/dns-st
|
||||
* *kubelet_cgroup_driver* - Allows manual override of the
|
||||
cgroup-driver option for Kubelet. By default autodetection is used
|
||||
to match Docker configuration.
|
||||
* *node_labels* - Labels applied to nodes via kubelet --node-labels parameter.
|
||||
For example, labels can be set in the inventory as variables or more widely in group_vars.
|
||||
*node_labels* must be defined as a dict:
|
||||
```
|
||||
node_labels:
|
||||
label1_name: label1_value
|
||||
label2_name: label2_value
|
||||
```
|
||||
|
||||
##### Custom flags for Kube Components
|
||||
For all kube components, custom flags can be passed in. This allows for edge cases where users need changes to the default deployment that may not be applicable to all deployments. This can be done by providing a list of flags. Example:
|
||||
@@ -131,6 +145,6 @@ The possible vars are:
|
||||
|
||||
By default, a user with admin rights is created, named `kube`.
|
||||
The password can be viewed after deployment by looking at the file
|
||||
`PATH_TO_KUBESPRAY/credentials/kube_user`. This contains a randomly generated
|
||||
`PATH_TO_KUBESPRAY/credentials/kube_user.creds`. This contains a randomly generated
|
||||
password. If you wish to set your own password, just precreate/modify this
|
||||
file yourself or change `kube_api_pwd` var.
|
||||
|
||||
@@ -24,7 +24,7 @@ hardcoded to only create a Vault role for Etcd.
|
||||
This step is where the long-term Vault cluster is started and configured. Its
|
||||
first task is to stop any temporary instances of Vault, to free the port for
|
||||
the long-term instance. At the end of this task, the entire Vault cluster should be up
|
||||
and read to go.
|
||||
and ready to go.
|
||||
|
||||
Keys to the Kingdom
|
||||
-------------------
|
||||
|
||||
@@ -16,7 +16,7 @@ After this step you should have:
|
||||
|
||||
## Kubespray configuration
|
||||
|
||||
Fist you must define the cloud provider in `inventory/group_vars/all.yml` and set it to `vsphere`.
|
||||
First you must define the cloud provider in `inventory/sample/group_vars/all.yml` and set it to `vsphere`.
|
||||
```yml
|
||||
cloud_provider: vsphere
|
||||
```
|
||||
@@ -34,10 +34,12 @@ Then, in the same file, you need to declare your vCenter credential following th
|
||||
| vsphere_datastore | TRUE | string | | | Datastore name to use |
|
||||
| vsphere_working_dir | TRUE | string | | | Working directory from the view "VMs and template" in the vCenter where VM are placed |
|
||||
| vsphere_scsi_controller_type | TRUE | string | buslogic, pvscsi, parallel | pvscsi | SCSI controller name. Commonly "pvscsi". |
|
||||
| vsphere_vm_uuid | FALSE | string | | | VM Instance UUID of virtual machine that host K8s master. Can be retrieved from instanceUuid property in VmConfigInfo, or as vc.uuid in VMX file or in `/sys/class/dmi/id/product_serial` |
|
||||
| vsphere_vm_uuid | FALSE | string | | | VM Instance UUID of virtual machine that host K8s master. Can be retrieved from instanceUuid property in VmConfigInfo, or as vc.uuid in VMX file or in `/sys/class/dmi/id/product_serial` (Optional, only used for Kubernetes <= 1.9.2) |
|
||||
| vsphere_public_network | FALSE | string | | Blank | Name of the network the VMs are joined to |
|
||||
| vsphere_resource_pool | FALSE | string | | Blank | Name of the Resource pool where the VMs are located (Optional, only used for Kubernetes >= 1.9.2) |
|
||||
|
||||
Example configuration
|
||||
|
||||
```yml
|
||||
vsphere_vcenter_ip: "myvcenter.domain.com"
|
||||
vsphere_vcenter_port: 443
|
||||
@@ -48,6 +50,7 @@ vsphere_datacenter: "DATACENTER_name"
|
||||
vsphere_datastore: "DATASTORE_name"
|
||||
vsphere_working_dir: "Docker_hosts"
|
||||
vsphere_scsi_controller_type: "pvscsi"
|
||||
vsphere_resource_pool: "K8s-Pool"
|
||||
```
|
||||
|
||||
## Deployment
|
||||
@@ -55,7 +58,7 @@ vsphere_scsi_controller_type: "pvscsi"
|
||||
Once the configuration is set, you can execute the playbook again to apply the new configuration
|
||||
```
|
||||
cd kubespray
|
||||
ansible-playbook -i inventory/inventory.cfg -b -v cluster.yml
|
||||
ansible-playbook -i inventory/sample/hosts.ini -b -v cluster.yml
|
||||
```
|
||||
|
||||
You'll find some usefull examples [here](https://github.com/kubernetes/kubernetes/tree/master/examples/volumes/vsphere) to test your configuration.
|
||||
|
||||
@@ -12,7 +12,7 @@ Weave encryption is supported for all communication
|
||||
* To use Weave encryption, specify a strong password (if no password, no encryption)
|
||||
|
||||
```
|
||||
# In file ./inventory/group_vars/k8s-cluster.yml
|
||||
# In file ./inventory/sample/group_vars/k8s-cluster.yml
|
||||
weave_password: EnterPasswordHere
|
||||
```
|
||||
|
||||
@@ -77,21 +77,21 @@ The seed mode also allows multi-clouds and hybrid on-premise/cloud clusters depl
|
||||
* Switch from consensus mode to seed mode
|
||||
|
||||
```
|
||||
# In file ./inventory/group_vars/k8s-cluster.yml
|
||||
# In file ./inventory/sample/group_vars/k8s-cluster.yml
|
||||
weave_mode_seed: true
|
||||
```
|
||||
|
||||
These two variables are only used when `weave_mode_seed` is set to `true` (**/!\ do not manually change these values**)
|
||||
|
||||
```
|
||||
# In file ./inventory/group_vars/k8s-cluster.yml
|
||||
# In file ./inventory/sample/group_vars/k8s-cluster.yml
|
||||
weave_seed: uninitialized
|
||||
weave_peers: uninitialized
|
||||
```
|
||||
|
||||
The first variable, `weave_seed`, contains the initial nodes of the weave network
|
||||
|
||||
The seconde variable, `weave_peers`, saves the IPs of all nodes joined to the weave network
|
||||
The second variable, `weave_peers`, saves the IPs of all nodes joined to the weave network
|
||||
|
||||
These two variables are used to connect a new node to the weave network. The new node needs to know the first nodes (seed) and the list of IPs of all nodes.
|
||||
|
||||
|
||||
54
extra_playbooks/build-cephfs-provisioner.yml
Normal file
@@ -0,0 +1,54 @@
|
||||
---
|
||||
|
||||
- hosts: localhost
|
||||
tasks:
|
||||
- name: CephFS Provisioner | Install pip packages
|
||||
pip:
|
||||
name: "{{ item.name }}"
|
||||
version: "{{ item.version }}"
|
||||
state: "{{ item.state }}"
|
||||
with_items:
|
||||
- { state: "present", name: "docker", version: "2.7.0" }
|
||||
- { state: "present", name: "docker-compose", version: "1.18.0" }
|
||||
|
||||
- name: CephFS Provisioner | Check Go version
|
||||
shell: |
|
||||
go version
|
||||
ignore_errors: yes
|
||||
register: go_version_result
|
||||
|
||||
- name: CephFS Provisioner | Install Go 1.9
|
||||
shell: |
|
||||
add-apt-repository -y ppa:gophers/archive
|
||||
apt-get update
|
||||
apt-get install -y golang-1.9
|
||||
ln -fs /usr/lib/go-1.9/bin/* /usr/local/bin/
|
||||
when: 'go_version_result.rc != 0 or "go version go1.9" not in go_version_result.stdout'
|
||||
|
||||
- name: CephFS Provisioner | Check if image exists
|
||||
shell: |
|
||||
docker image list | grep 'cephfs-provisioner'
|
||||
ignore_errors: yes
|
||||
register: check_image_result
|
||||
|
||||
- block:
|
||||
- name: CephFS Provisioner | Clone repo
|
||||
git:
|
||||
repo: https://github.com/kubernetes-incubator/external-storage.git
|
||||
dest: "~/go/src/github.com/kubernetes-incubator"
|
||||
version: 92295a30
|
||||
clone: no
|
||||
update: yes
|
||||
|
||||
- name: CephFS Provisioner | Build image
|
||||
shell: |
|
||||
cd ~/go/src/github.com/kubernetes-incubator/external-storage
|
||||
REGISTRY=quay.io/kubespray/ VERSION=92295a30 make ceph/cephfs
|
||||
|
||||
- name: CephFS Provisioner | Push image
|
||||
docker_image:
|
||||
name: quay.io/kubespray/cephfs-provisioner:92295a30
|
||||
push: yes
|
||||
retries: 10
|
||||
|
||||
when: check_image_result.rc != 0
|
||||
@@ -3,6 +3,7 @@
|
||||
### * Will not upgrade etcd
|
||||
### * Will not upgrade network plugins
|
||||
### * Will not upgrade Docker
|
||||
### * Will not pre-download containers or kubeadm
|
||||
### * Currently does not support Vault deployment.
|
||||
###
|
||||
### In most cases, you probably want to use upgrade-cluster.yml playbook and
|
||||
@@ -46,6 +47,8 @@
|
||||
- { role: upgrade/pre-upgrade, tags: pre-upgrade }
|
||||
- { role: kubernetes/node, tags: node }
|
||||
- { role: kubernetes/master, tags: master }
|
||||
- { role: kubernetes/client, tags: client }
|
||||
- { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
|
||||
- { role: upgrade/post-upgrade, tags: post-upgrade }
|
||||
|
||||
#Finally handle worker upgrades, based on given batch size
|
||||
|
||||
1
inventory/local/group_vars
Symbolic link
@@ -0,0 +1 @@
|
||||
../sample/group_vars
|
||||
@@ -20,6 +20,10 @@ bin_dir: /usr/local/bin
|
||||
## This may be the case if clients support and loadbalance multiple etcd servers natively.
|
||||
#etcd_multiaccess: true
|
||||
|
||||
### ETCD: disable peer client cert authentication.
|
||||
# This affects ETCD_PEER_CLIENT_CERT_AUTH variable
|
||||
#etcd_peer_client_auth: true
|
||||
|
||||
## External LB example config
|
||||
## apiserver_loadbalancer_domain_name: "elb.some.domain"
|
||||
#loadbalancer_apiserver:
|
||||
@@ -56,7 +60,7 @@ bin_dir: /usr/local/bin
|
||||
|
||||
## There are some changes specific to the cloud providers
|
||||
## for instance we need to encapsulate packets with some network plugins
|
||||
## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', or 'vsphere'
|
||||
## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'
|
||||
## When openstack is used make sure to source in the openstack credentials
|
||||
## like you would do when using nova-client before starting the playbook.
|
||||
#cloud_provider:
|
||||
@@ -72,14 +76,20 @@ bin_dir: /usr/local/bin
|
||||
#azure_subnet_name:
|
||||
#azure_security_group_name:
|
||||
#azure_vnet_name:
|
||||
#azure_vnet_resource_group:
|
||||
#azure_route_table_name:
|
||||
|
||||
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
|
||||
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
|
||||
#openstack_blockstorage_version: "v1/v2/auto (default)"
|
||||
## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
|
||||
## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
|
||||
#openstack_lbaas_enabled: True
|
||||
#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
|
||||
## To enable automatic floating ip provisioning, specify a subnet.
|
||||
#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
|
||||
## Override default LBaaS behavior
|
||||
#openstack_lbaas_use_octavia: False
|
||||
#openstack_lbaas_method: "ROUND_ROBIN"
|
||||
#openstack_lbaas_provider: "haproxy"
|
||||
#openstack_lbaas_create_monitor: "yes"
|
||||
#openstack_lbaas_monitor_delay: "1m"
|
||||
#openstack_lbaas_monitor_timeout: "30s"
|
||||
@@ -87,13 +97,10 @@ bin_dir: /usr/local/bin
|
||||
|
||||
## Uncomment to enable experimental kubeadm deployment mode
|
||||
#kubeadm_enabled: false
|
||||
#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
|
||||
#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
|
||||
#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
|
||||
#
|
||||
## Set these proxy values in order to update docker daemon to use proxies
|
||||
## Set these proxy values in order to update package manager and docker daemon to use proxies
|
||||
#http_proxy: ""
|
||||
#https_proxy: ""
|
||||
## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
|
||||
#no_proxy: ""
|
||||
|
||||
## Uncomment this if you want to force overlay/overlay2 as docker storage driver
|
||||
@@ -113,9 +120,6 @@ bin_dir: /usr/local/bin
|
||||
## as a backend). Options are "script" or "vault"
|
||||
#cert_management: script
|
||||
|
||||
## Please specify true if you want to perform a kernel upgrade
|
||||
kernel_upgrade: false
|
||||
|
||||
# Set to true to allow pre-checks to fail and continue deployment
|
||||
#ignore_assert_errors: false
|
||||
|
||||
@@ -124,3 +128,10 @@ kernel_upgrade: false
|
||||
|
||||
## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
|
||||
#etcd_metrics: basic
|
||||
|
||||
## Etcd is restricted by default to 512M on systems under 4GB RAM; 512MB is not enough for much more than testing.
|
||||
## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM.
|
||||
#etcd_memory_limit: "512M"
|
||||
|
||||
# The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
|
||||
#kube_read_only_port: 10255
|
||||
@@ -1,15 +1,11 @@
|
||||
# Kubernetes configuration dirs and system namespace.
|
||||
# Those are where all the additional config stuff goes
|
||||
# the kubernetes normally puts in /srv/kubernets.
|
||||
# the kubernetes normally puts in /srv/kubernetes.
|
||||
# This puts them in a sane location and namespace.
|
||||
# Editting those values will almost surely break something.
|
||||
# Editing those values will almost surely break something.
|
||||
kube_config_dir: /etc/kubernetes
|
||||
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
|
||||
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
|
||||
system_namespace: kube-system
|
||||
|
||||
# Logging directory (sysvinit systems)
|
||||
kube_log_dir: "/var/log/kubernetes"
|
||||
|
||||
# This is where all the cert scripts and certs will be located
|
||||
kube_cert_dir: "{{ kube_config_dir }}/ssl"
|
||||
@@ -20,10 +16,10 @@ kube_token_dir: "{{ kube_config_dir }}/tokens"
|
||||
# This is where to save basic auth file
|
||||
kube_users_dir: "{{ kube_config_dir }}/users"
|
||||
|
||||
kube_api_anonymous_auth: false
|
||||
kube_api_anonymous_auth: true
|
||||
|
||||
## Change this to use another Kubernetes version, e.g. a current beta release
|
||||
kube_version: v1.7.5
|
||||
kube_version: v1.9.5
|
||||
|
||||
# Where the binaries will be downloaded.
|
||||
# Note: ensure that you've enough disk space (about 1G)
|
||||
@@ -32,7 +28,7 @@ local_release_dir: "/tmp/releases"
|
||||
retry_stagger: 5
|
||||
|
||||
# This is the group that the cert creation scripts chgrp the
|
||||
# cert files to. Not really changable...
|
||||
# cert files to. Not really changeable...
|
||||
kube_cert_group: kube-cert
|
||||
|
||||
# Cluster Loglevel configuration
|
||||
@@ -40,7 +36,7 @@ kube_log_level: 2
|
||||
|
||||
# Users to create for basic auth in Kubernetes API via HTTP
|
||||
# Optionally add groups for user
|
||||
kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
|
||||
kube_api_pwd: "{{ lookup('password', inventory_dir + '/credentials/kube_user.creds length=15 chars=ascii_letters,digits') }}"
|
||||
kube_users:
|
||||
kube:
|
||||
pass: "{{kube_api_pwd}}"
|
||||
@@ -50,8 +46,8 @@ kube_users:
|
||||
|
||||
## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
|
||||
#kube_oidc_auth: false
|
||||
#kube_basic_auth: true
|
||||
#kube_token_auth: true
|
||||
#kube_basic_auth: false
|
||||
#kube_token_auth: false
|
||||
|
||||
|
||||
## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
|
||||
@@ -65,7 +61,7 @@ kube_users:
|
||||
# kube_oidc_groups_claim: groups
|
||||
|
||||
|
||||
# Choose network plugin (calico, weave or flannel)
|
||||
# Choose network plugin (cilium, calico, contiv, weave or flannel)
|
||||
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
|
||||
kube_network_plugin: calico
|
||||
|
||||
@@ -86,6 +82,9 @@ weave_mode_seed: false
|
||||
weave_seed: uninitialized
|
||||
weave_peers: uninitialized
|
||||
|
||||
# Set the MTU of Weave (default 1376, Jumbo Frames: 8916)
|
||||
weave_mtu: 1376
|
||||
|
||||
# Enable kubernetes network policies
|
||||
enable_network_policy: false
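Tying the plugin choice and the policy switch together, a hedged example override (cilium is simply one of the options listed in the plugin comment above):

```yaml
# Illustrative: run cilium instead of the calico default and turn on
# NetworkPolicy enforcement
kube_network_plugin: cilium
enable_network_policy: true
```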
|
||||
|
||||
@@ -106,21 +105,34 @@ kube_network_node_prefix: 24
|
||||
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
|
||||
kube_apiserver_port: 6443 # (https)
|
||||
kube_apiserver_insecure_port: 8080 # (http)
|
||||
# Set to 0 to disable insecure port - Requires RBAC in authorization_modes and kube_api_anonymous_auth: true
|
||||
#kube_apiserver_insecure_port: 0 # (disabled)
|
||||
|
||||
# Kube-proxy proxyMode configuration.
|
||||
# Can be ipvs, iptables
|
||||
kube_proxy_mode: iptables
|
||||
|
||||
## Encrypting Secret Data at Rest (experimental)
|
||||
kube_encrypt_secret_data: false
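A hedged hardening sketch based on the comments above; note that `authorization_modes` does not appear in this hunk and is assumed from memory of the rest of the repository:

```yaml
# Illustrative: close the insecure API port, keep RBAC, switch kube-proxy
# to ipvs and enable (experimental) secret encryption at rest
kube_api_anonymous_auth: true
authorization_modes: ['Node', 'RBAC']   # assumed variable, not shown in this hunk
kube_apiserver_insecure_port: 0
kube_proxy_mode: ipvs
kube_encrypt_secret_data: true
```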
|
||||
|
||||
# DNS configuration.
|
||||
# Kubernetes cluster name, also will be used as DNS domain
|
||||
cluster_name: cluster.local
|
||||
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
|
||||
ndots: 2
|
||||
# Can be dnsmasq_kubedns, kubedns or none
|
||||
# Can be dnsmasq_kubedns, kubedns, coredns, coredns_dual, manual or none
|
||||
dns_mode: kubedns
|
||||
# Set manual server if using a custom cluster DNS server
|
||||
#manual_dns_server: 10.x.x.x
|
||||
|
||||
# Can be docker_dns, host_resolvconf or none
|
||||
resolvconf_mode: docker_dns
|
||||
# Deploy netchecker app to verify DNS resolve as an HTTP service
|
||||
deploy_netchecker: false
|
||||
# Ip address of the kubernetes skydns service
|
||||
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
|
||||
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
|
||||
skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}"
|
||||
dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
|
||||
dns_domain: "{{ cluster_name }}"
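For the DNS block, a short sketch of two common deviations from the defaults above (the server address is a placeholder):

```yaml
# Illustrative: switch the in-cluster resolver to CoreDNS ...
dns_mode: coredns

# ... or hand DNS over to an existing server instead
# dns_mode: manual
# manual_dns_server: 192.0.2.53
```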
|
||||
|
||||
# Path used to store Docker data
|
||||
@@ -137,13 +149,14 @@ docker_bin_dir: "/usr/bin"
|
||||
# Settings for containerized control plane (etcd/kubelet/secrets)
|
||||
etcd_deployment_type: docker
|
||||
kubelet_deployment_type: host
|
||||
cert_management: script
|
||||
vault_deployment_type: docker
|
||||
helm_deployment_type: host
|
||||
|
||||
# K8s image pull policy (imagePullPolicy)
|
||||
k8s_image_pull_policy: IfNotPresent
|
||||
|
||||
# Kubernetes dashboard (available at http://first_master:6443/ui by default)
|
||||
# Kubernetes dashboard
|
||||
# RBAC required. see docs/getting-started.md for access details.
|
||||
dashboard_enabled: true
|
||||
|
||||
# Monitoring apps for k8s
|
||||
@@ -152,9 +165,58 @@ efk_enabled: false
|
||||
# Helm deployment
|
||||
helm_enabled: false
|
||||
|
||||
# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
|
||||
# Istio deployment
|
||||
istio_enabled: false
|
||||
|
||||
# Registry deployment
|
||||
registry_enabled: false
|
||||
# registry_namespace: "{{ system_namespace }}"
|
||||
# registry_storage_class: ""
|
||||
# registry_disk_size: "10Gi"
|
||||
|
||||
# Local volume provisioner deployment
|
||||
local_volume_provisioner_enabled: false
|
||||
# local_volume_provisioner_namespace: "{{ system_namespace }}"
|
||||
# local_volume_provisioner_base_dir: /mnt/disks
|
||||
# local_volume_provisioner_mount_dir: /mnt/disks
|
||||
# local_volume_provisioner_storage_class: local-storage
|
||||
|
||||
# CephFS provisioner deployment
|
||||
cephfs_provisioner_enabled: false
|
||||
# cephfs_provisioner_namespace: "{{ system_namespace }}"
|
||||
# cephfs_provisioner_cluster: ceph
|
||||
# cephfs_provisioner_monitors:
|
||||
# - 172.24.0.1:6789
|
||||
# - 172.24.0.2:6789
|
||||
# - 172.24.0.3:6789
|
||||
# cephfs_provisioner_admin_id: admin
|
||||
# cephfs_provisioner_secret: secret
|
||||
# cephfs_provisioner_storage_class: cephfs
|
||||
|
||||
# Nginx ingress controller deployment
|
||||
ingress_nginx_enabled: false
|
||||
# ingress_nginx_host_network: false
|
||||
# ingress_nginx_namespace: "ingress-nginx"
|
||||
# ingress_nginx_insecure_port: 80
|
||||
# ingress_nginx_secure_port: 443
|
||||
# ingress_nginx_configmap:
|
||||
# map-hash-bucket-size: "128"
|
||||
# ssl-protocols: "SSLv2"
|
||||
# ingress_nginx_configmap_tcp_services:
|
||||
# 9000: "default/example-go:8080"
|
||||
# ingress_nginx_configmap_udp_services:
|
||||
# 53: "kube-system/kube-dns:53"
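A hedged example of how these knobs combine, say to expose a TCP service through the ingress controller (the namespace, service name and port are placeholders):

```yaml
ingress_nginx_enabled: true
ingress_nginx_host_network: true
ingress_nginx_configmap_tcp_services:
  5432: "default/postgresql:5432"   # node port 5432 -> service postgresql:5432
```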
|
||||
|
||||
# Cert manager deployment
|
||||
cert_manager_enabled: false
|
||||
# cert_manager_namespace: "cert-manager"
|
||||
|
||||
# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
|
||||
persistent_volumes_enabled: false
|
||||
|
||||
# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
|
||||
# kubeconfig_localhost: false
|
||||
# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
|
||||
# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
|
||||
# kubectl_localhost: false
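If you want both artifacts on the machine running Ansible, the two switches are simply flipped in the inventory (a sketch; the destinations follow the comments above):

```yaml
# Illustrative: copy the admin kubeconfig to {{ inventory_dir }}/artifacts and
# download kubectl to {{ bin_dir }} on the Ansible host
kubeconfig_localhost: true
kubectl_localhost: true
```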
|
||||
|
||||
# dnsmasq
|
||||
@@ -166,5 +228,14 @@ helm_enabled: false
|
||||
# kubelet_cgroups_per_qos: true
|
||||
|
||||
# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
|
||||
# Acceptible options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
|
||||
# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
|
||||
# kubelet_enforce_node_allocatable: pods
|
||||
|
||||
## Supplementary addresses that can be added in kubernetes ssl keys.
|
||||
## That can be useful for example to setup a keepalived virtual IP
|
||||
# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
|
||||
|
||||
## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler.
|
||||
## See https://github.com/kubernetes-incubator/kubespray/issues/2141
|
||||
## Set this variable to true to get rid of this issue
|
||||
volume_cross_zone_attachment: false
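Per the note above, OpenStack users hitting the NoVolumeZoneConflict restriction would flip the flag (a sketch):

```yaml
# Illustrative: allow cross-zone Cinder volume attachment
volume_cross_zone_attachment: true
```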
|
||||
@@ -26,6 +26,11 @@
|
||||
# node5
|
||||
# node6
|
||||
|
||||
# [kube-ingress]
|
||||
# node2
|
||||
# node3
|
||||
|
||||
# [k8s-cluster:children]
|
||||
# kube-node
|
||||
# kube-master
|
||||
# kube-node
|
||||
# kube-ingress
|
||||
@@ -18,7 +18,9 @@ options:
|
||||
required: false
|
||||
default: null
|
||||
description:
|
||||
- The path and filename of the resource(s) definition file.
|
||||
- The path and filename of the resource(s) definition file(s).
|
||||
- To operate on several files this can accept a comma separated list of files or a list of files.
|
||||
aliases: [ 'files', 'file', 'filenames' ]
|
||||
kubectl:
|
||||
required: false
|
||||
default: null
|
||||
@@ -86,6 +88,15 @@ EXAMPLES = """
|
||||
|
||||
- name: test nginx is present
|
||||
kube: filename=/tmp/nginx.yml
|
||||
|
||||
- name: test nginx and postgresql are present
|
||||
kube: files=/tmp/nginx.yml,/tmp/postgresql.yml
|
||||
|
||||
- name: test nginx and postgresql are present
|
||||
kube:
|
||||
files:
|
||||
- /tmp/nginx.yml
|
||||
- /tmp/postgresql.yml
|
||||
"""
|
||||
|
||||
|
||||
@@ -112,7 +123,7 @@ class KubeManager(object):
|
||||
self.all = module.params.get('all')
|
||||
self.force = module.params.get('force')
|
||||
self.name = module.params.get('name')
|
||||
self.filename = module.params.get('filename')
|
||||
self.filename = [f.strip() for f in module.params.get('filename') or []]
|
||||
self.resource = module.params.get('resource')
|
||||
self.label = module.params.get('label')
|
||||
|
||||
@@ -122,7 +133,7 @@ class KubeManager(object):
|
||||
rc, out, err = self.module.run_command(args)
|
||||
if rc != 0:
|
||||
self.module.fail_json(
|
||||
msg='error running kubectl (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err))
|
||||
msg='error running kubectl (%s) command (rc=%d), out=\'%s\', err=\'%s\'' % (' '.join(args), rc, out, err))
|
||||
except Exception as exc:
|
||||
self.module.fail_json(
|
||||
msg='error running kubectl (%s) command: %s' % (' '.join(args), str(exc)))
|
||||
@@ -147,7 +158,7 @@ class KubeManager(object):
|
||||
if not self.filename:
|
||||
self.module.fail_json(msg='filename required to create')
|
||||
|
||||
cmd.append('--filename=' + self.filename)
|
||||
cmd.append('--filename=' + ','.join(self.filename))
|
||||
|
||||
return self._execute(cmd)
|
||||
|
||||
@@ -161,7 +172,7 @@ class KubeManager(object):
|
||||
if not self.filename:
|
||||
self.module.fail_json(msg='filename required to reload')
|
||||
|
||||
cmd.append('--filename=' + self.filename)
|
||||
cmd.append('--filename=' + ','.join(self.filename))
|
||||
|
||||
return self._execute(cmd)
|
||||
|
||||
@@ -173,7 +184,7 @@ class KubeManager(object):
|
||||
cmd = ['delete']
|
||||
|
||||
if self.filename:
|
||||
cmd.append('--filename=' + self.filename)
|
||||
cmd.append('--filename=' + ','.join(self.filename))
|
||||
else:
|
||||
if not self.resource:
|
||||
self.module.fail_json(msg='resource required to delete without filename')
|
||||
@@ -197,27 +208,31 @@ class KubeManager(object):
|
||||
def exists(self):
|
||||
cmd = ['get']
|
||||
|
||||
if self.filename:
|
||||
cmd.append('--filename=' + ','.join(self.filename))
|
||||
else:
|
||||
if not self.resource:
|
||||
return False
|
||||
self.module.fail_json(msg='resource required without filename')
|
||||
|
||||
cmd.append(self.resource)
|
||||
|
||||
if self.name:
|
||||
cmd.append(self.name)
|
||||
|
||||
cmd.append('--no-headers')
|
||||
|
||||
if self.label:
|
||||
cmd.append('--selector=' + self.label)
|
||||
|
||||
if self.all:
|
||||
cmd.append('--all-namespaces')
|
||||
|
||||
cmd.append('--no-headers')
|
||||
|
||||
result = self._execute_nofail(cmd)
|
||||
if not result:
|
||||
return False
|
||||
return True
|
||||
|
||||
# TODO: This is currently unused, perhaps convert to 'scale' with a replicas param?
|
||||
def stop(self):
|
||||
|
||||
if not self.force and not self.exists():
|
||||
@@ -226,7 +241,7 @@ class KubeManager(object):
|
||||
cmd = ['stop']
|
||||
|
||||
if self.filename:
|
||||
cmd.append('--filename=' + self.filename)
|
||||
cmd.append('--filename=' + ','.join(self.filename))
|
||||
else:
|
||||
if not self.resource:
|
||||
self.module.fail_json(msg='resource required to stop without filename')
|
||||
@@ -253,7 +268,7 @@ def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
name=dict(),
|
||||
filename=dict(),
|
||||
filename=dict(type='list', aliases=['files', 'file', 'filenames']),
|
||||
namespace=dict(),
|
||||
resource=dict(),
|
||||
label=dict(),
|
||||
@@ -263,7 +278,8 @@ def main():
|
||||
all=dict(default=False, type='bool'),
|
||||
log_level=dict(default=0, type='int'),
|
||||
state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped']),
|
||||
)
|
||||
),
|
||||
mutually_exclusive=[['filename', 'list']]
|
||||
)
|
||||
|
||||
changed = False
|
||||
@@ -288,8 +304,6 @@ def main():
|
||||
else:
|
||||
module.fail_json(msg='Unrecognized state %s.' % state)
|
||||
|
||||
if result:
|
||||
changed = True
|
||||
module.exit_json(changed=changed,
|
||||
msg='success: %s' % (' '.join(result))
|
||||
)
|
||||
|
||||
remove-node.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
|
||||
---
|
||||
|
||||
- hosts: all
|
||||
gather_facts: true
|
||||
|
||||
- hosts: etcd:k8s-cluster:vault:calico-rr
|
||||
vars_prompt:
|
||||
name: "delete_nodes_confirmation"
|
||||
prompt: "Are you sure you want to delete nodes state? Type 'yes' to delete nodes."
|
||||
default: "no"
|
||||
private: no
|
||||
|
||||
pre_tasks:
|
||||
- name: check confirmation
|
||||
fail:
|
||||
msg: "Delete nodes confirmation failed"
|
||||
when: delete_nodes_confirmation != "yes"
|
||||
|
||||
- hosts: kube-master
|
||||
roles:
|
||||
- { role: remove-node/pre-remove, tags: pre-remove }
|
||||
|
||||
- hosts: kube-node
|
||||
roles:
|
||||
- { role: reset, tags: reset }
|
||||
|
||||
- hosts: kube-master
|
||||
roles:
|
||||
- { role: remove-node/post-remove, tags: post-remove }
|
||||
@@ -1,4 +1,4 @@
|
||||
pbr>=1.6
|
||||
ansible>=2.3.2
|
||||
ansible>=2.4.0
|
||||
netaddr
|
||||
jinja2>=2.9.6
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
---
|
||||
|
||||
- hosts: all
|
||||
gather_facts: true
|
||||
|
||||
- hosts: etcd:k8s-cluster:vault:calico-rr
|
||||
vars_prompt:
|
||||
name: "reset_confirmation"
|
||||
prompt: "Are you sure you want to reset cluster state? Type 'yes' to reset your cluster."
|
||||
|
||||
@@ -3,13 +3,13 @@
|
||||
has_bastion: "{{ 'bastion' in groups['all'] }}"
|
||||
|
||||
- set_fact:
|
||||
bastion_ip: "{{ hostvars['bastion']['ansible_ssh_host'] }}"
|
||||
bastion_ip: "{{ hostvars['bastion']['ansible_host'] }}"
|
||||
when: has_bastion
|
||||
|
||||
# As we are actually running on localhost, the ansible_ssh_user is your local user when you try to use it directly
|
||||
# To figure out the real ssh user, we delegate this task to the bastion and store the ansible_ssh_user in real_user
|
||||
# To figure out the real ssh user, we delegate this task to the bastion and store the ansible_user in real_user
|
||||
- set_fact:
|
||||
real_user: "{{ ansible_ssh_user }}"
|
||||
real_user: "{{ ansible_user }}"
|
||||
delegate_to: bastion
|
||||
when: has_bastion
|
||||
|
||||
@@ -18,3 +18,4 @@
|
||||
template:
|
||||
src: ssh-bastion.conf
|
||||
dest: "{{ playbook_dir }}/ssh-bastion.conf"
|
||||
when: has_bastion
|
||||
|
||||
@@ -16,6 +16,5 @@ Host {{ bastion_ip }}
|
||||
ControlPersist 5m
|
||||
|
||||
Host {{ vars['hosts'] }}
|
||||
ProxyCommand ssh -W %h:%p {{ real_user }}@{{ bastion_ip }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}
|
||||
StrictHostKeyChecking no
|
||||
ProxyCommand ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p {{ real_user }}@{{ bastion_ip }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}
|
||||
{% endif %}
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
---
|
||||
pypy_version: 2.4.0
|
||||
pip_python_modules:
|
||||
pip_python_coreos_modules:
|
||||
- httplib2
|
||||
- six
|
||||
|
||||
override_system_hostname: true
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#/bin/bash
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
BINDIR="/opt/bin"
|
||||
|
||||
@@ -15,4 +15,6 @@
|
||||
when: fastestmirror.stat.exists
|
||||
|
||||
- name: Install packages requirements for bootstrap
|
||||
raw: yum -y install libselinux-python
|
||||
yum:
|
||||
name: libselinux-python
|
||||
state: present
|
||||
|
||||
@@ -3,7 +3,9 @@
|
||||
raw: stat /opt/bin/.bootstrapped
|
||||
register: need_bootstrap
|
||||
failed_when: false
|
||||
tags: facts
|
||||
changed_when: false
|
||||
tags:
|
||||
- facts
|
||||
|
||||
- name: Bootstrap | Run bootstrap.sh
|
||||
script: bootstrap.sh
|
||||
@@ -11,7 +13,8 @@
|
||||
|
||||
- set_fact:
|
||||
ansible_python_interpreter: "/opt/bin/python"
|
||||
tags: facts
|
||||
tags:
|
||||
- facts
|
||||
|
||||
- name: Bootstrap | Check if we need to install pip
|
||||
shell: "{{ansible_python_interpreter}} -m pip --version"
|
||||
@@ -20,7 +23,8 @@
|
||||
changed_when: false
|
||||
check_mode: no
|
||||
when: need_bootstrap.rc != 0
|
||||
tags: facts
|
||||
tags:
|
||||
- facts
|
||||
|
||||
- name: Bootstrap | Copy get-pip.py
|
||||
copy:
|
||||
@@ -48,4 +52,4 @@
|
||||
- name: Install required python modules
|
||||
pip:
|
||||
name: "{{ item }}"
|
||||
with_items: "{{pip_python_modules}}"
|
||||
with_items: "{{pip_python_coreos_modules}}"
|
||||
|
||||
roles/bootstrap-os/tasks/bootstrap-debian.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
|
||||
---
|
||||
# raw: cat /etc/issue.net | grep '{{ bootstrap_versions }}'
|
||||
|
||||
- name: Bootstrap | Check if bootstrap is needed
|
||||
raw: which "{{ item }}"
|
||||
register: need_bootstrap
|
||||
failed_when: false
|
||||
changed_when: false
|
||||
with_items:
|
||||
- python
|
||||
- pip
|
||||
- dbus-daemon
|
||||
tags: facts
|
||||
|
||||
- name: Bootstrap | Install python 2.x, pip, and dbus
|
||||
raw:
|
||||
apt-get update && \
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y python-minimal python-pip dbus
|
||||
when:
|
||||
need_bootstrap.results | map(attribute='rc') | sort | last | bool
|
||||
|
||||
- set_fact:
|
||||
ansible_python_interpreter: "/usr/bin/python"
|
||||
tags: facts
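This code path is selected from roles/bootstrap-os/tasks/main.yml (shown further below); a hedged inventory sketch for a minimal Debian image:

```yaml
# Illustrative group_vars for hosts that need this bootstrap
bootstrap_os: debian
ansible_user: admin   # placeholder remote user on the stripped-down image
```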
|
||||
roles/bootstrap-os/tasks/bootstrap-opensuse.yml (new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
---
|
||||
- name: Install required packages (SUSE)
|
||||
package:
|
||||
name: "{{ item }}"
|
||||
state: present
|
||||
with_items:
|
||||
- python-cryptography
|
||||
@@ -5,18 +5,22 @@
|
||||
raw: which "{{ item }}"
|
||||
register: need_bootstrap
|
||||
failed_when: false
|
||||
changed_when: false
|
||||
with_items:
|
||||
- python
|
||||
- pip
|
||||
tags: facts
|
||||
- dbus-daemon
|
||||
tags:
|
||||
- facts
|
||||
|
||||
- name: Bootstrap | Install python 2.x and pip
|
||||
raw:
|
||||
apt-get update && \
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y python-minimal python-pip
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y python-minimal python-pip dbus
|
||||
when:
|
||||
"{{ need_bootstrap.results | map(attribute='rc') | sort | last | bool }}"
|
||||
|
||||
- set_fact:
|
||||
ansible_python_interpreter: "/usr/bin/python"
|
||||
tags: facts
|
||||
tags:
|
||||
- facts
|
||||
|
||||
@@ -1,14 +1,20 @@
|
||||
---
|
||||
- include: bootstrap-ubuntu.yml
|
||||
- import_tasks: bootstrap-ubuntu.yml
|
||||
when: bootstrap_os == "ubuntu"
|
||||
|
||||
- include: bootstrap-coreos.yml
|
||||
- import_tasks: bootstrap-debian.yml
|
||||
when: bootstrap_os == "debian"
|
||||
|
||||
- import_tasks: bootstrap-coreos.yml
|
||||
when: bootstrap_os == "coreos"
|
||||
|
||||
- include: bootstrap-centos.yml
|
||||
- import_tasks: bootstrap-centos.yml
|
||||
when: bootstrap_os == "centos"
|
||||
|
||||
- include: setup-pipelining.yml
|
||||
- import_tasks: bootstrap-opensuse.yml
|
||||
when: bootstrap_os == "opensuse"
|
||||
|
||||
- import_tasks: setup-pipelining.yml
|
||||
|
||||
- name: check if atomic host
|
||||
stat:
|
||||
@@ -23,18 +29,25 @@
|
||||
gather_subset: '!all'
|
||||
filter: ansible_*
|
||||
|
||||
- name: Assign inventory name to unconfigured hostnames (non-CoreOS)
|
||||
- name: Assign inventory name to unconfigured hostnames (non-CoreOS and Tumbleweed)
|
||||
hostname:
|
||||
name: "{{inventory_hostname}}"
|
||||
when: ansible_os_family not in ['CoreOS', 'Container Linux by CoreOS']
|
||||
when:
|
||||
- override_system_hostname
|
||||
- ansible_distribution not in ['openSUSE Tumbleweed']
|
||||
- ansible_os_family not in ['CoreOS', 'Container Linux by CoreOS']
|
||||
|
||||
- name: Assign inventory name to unconfigured hostnames (CoreOS only)
|
||||
- name: Assign inventory name to unconfigured hostnames (CoreOS and Tumbleweed only)
|
||||
command: "hostnamectl set-hostname {{inventory_hostname}}"
|
||||
register: hostname_changed
|
||||
when: ansible_hostname == 'localhost' and ansible_os_family in ['CoreOS', 'Container Linux by CoreOS']
|
||||
when:
|
||||
- ansible_hostname == 'localhost'
|
||||
- ansible_distribution in ['openSUSE Tumbleweed'] or ansible_os_family in ['CoreOS', 'Container Linux by CoreOS']
|
||||
- override_system_hostname
|
||||
|
||||
- name: Update hostname fact (CoreOS only)
|
||||
- name: Update hostname fact (CoreOS and Tumbleweed only)
|
||||
setup:
|
||||
gather_subset: '!all'
|
||||
filter: ansible_hostname
|
||||
when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] and hostname_changed.changed
|
||||
when:
|
||||
- hostname_changed.changed
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
---
|
||||
dependencies:
|
||||
- role: download
|
||||
file: "{{ downloads.dnsmasq }}"
|
||||
when: dns_mode == 'dnsmasq_kubedns' and download_localhost|default(false)
|
||||
tags: [download, dnsmasq]
|
||||
@@ -3,13 +3,15 @@
|
||||
file:
|
||||
path: /etc/dnsmasq.d
|
||||
state: directory
|
||||
tags: bootstrap-os
|
||||
tags:
|
||||
- bootstrap-os
|
||||
|
||||
- name: ensure dnsmasq.d-available directory exists
|
||||
file:
|
||||
path: /etc/dnsmasq.d-available
|
||||
state: directory
|
||||
tags: bootstrap-os
|
||||
tags:
|
||||
- bootstrap-os
|
||||
|
||||
- name: check system nameservers
|
||||
shell: awk '/^nameserver/ {print $NF}' /etc/resolv.conf
|
||||
@@ -89,7 +91,7 @@
|
||||
- name: Start Resources
|
||||
kube:
|
||||
name: "{{item.item.name}}"
|
||||
namespace: "{{system_namespace}}"
|
||||
namespace: "kube-system"
|
||||
kubectl: "{{bin_dir}}/kubectl"
|
||||
resource: "{{item.item.type}}"
|
||||
filename: "{{kube_config_dir}}/{{item.item.file}}"
|
||||
@@ -100,7 +102,7 @@
|
||||
|
||||
- name: Check for dnsmasq port (pulling image and running container)
|
||||
wait_for:
|
||||
host: "{{dns_server}}"
|
||||
host: "{{dnsmasq_dns_server}}"
|
||||
port: 53
|
||||
timeout: 180
|
||||
when: inventory_hostname == groups['kube-node'][0] and groups['kube-node'][0] in ansible_play_hosts
|
||||
|
||||
@@ -22,7 +22,7 @@ server={{ srv }}
|
||||
{% endfor %}
|
||||
{% elif resolvconf_mode == 'host_resolvconf' %}
|
||||
{# The default resolver is only needed when the hosts resolv.conf was modified by us. If it was not modified, we can rely on dnsmasq to reuse the systems resolv.conf #}
|
||||
server={{ default_resolver }}
|
||||
server={{ cloud_resolver }}
|
||||
{% endif %}
|
||||
|
||||
{% if kube_log_level == '4' %}
|
||||
|
||||
@@ -39,7 +39,7 @@ spec:
|
||||
operator: Exists
|
||||
containers:
|
||||
- name: autoscaler
|
||||
image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.1
|
||||
image: "{{ dnsmasqautoscaler_image_repo }}:{{ dnsmasqautoscaler_image_tag }}"
|
||||
resources:
|
||||
requests:
|
||||
cpu: "20m"
|
||||
|
||||
@@ -3,11 +3,11 @@ kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: dnsmasq
|
||||
namespace: "{{ system_namespace }}"
|
||||
namespace: "kube-system"
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: dnsmasq
|
||||
namespace: "{{ system_namespace}}"
|
||||
namespace: "kube-system"
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: cluster-admin
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: dnsmasq
|
||||
namespace: "{{system_namespace}}"
|
||||
namespace: "kube-system"
|
||||
labels:
|
||||
k8s-app: dnsmasq
|
||||
kubernetes.io/cluster-service: "true"
|
||||
|
||||
@@ -3,6 +3,6 @@ apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: dnsmasq
|
||||
namespace: "{{ system_namespace }}"
|
||||
namespace: "kube-system"
|
||||
labels:
|
||||
kubernetes.io/cluster-service: "true"
|
||||
|
||||
@@ -6,7 +6,7 @@ metadata:
|
||||
kubernetes.io/cluster-service: 'true'
|
||||
k8s-app: dnsmasq
|
||||
name: dnsmasq
|
||||
namespace: {{system_namespace}}
|
||||
namespace: kube-system
|
||||
spec:
|
||||
ports:
|
||||
- port: 53
|
||||
@@ -18,6 +18,6 @@ spec:
|
||||
targetPort: 53
|
||||
protocol: UDP
|
||||
type: ClusterIP
|
||||
clusterIP: {{dns_server}}
|
||||
clusterIP: {{dnsmasq_dns_server}}
|
||||
selector:
|
||||
k8s-app: dnsmasq
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
---
|
||||
docker_version: '1.13'
|
||||
docker_version: '17.03'
|
||||
docker_selinux_version: '17.03'
|
||||
|
||||
docker_package_info:
|
||||
pkgs:
|
||||
@@ -10,9 +11,31 @@ docker_repo_key_info:
|
||||
docker_repo_info:
|
||||
repos:
|
||||
|
||||
dockerproject_repo_key_info:
|
||||
repo_keys:
|
||||
|
||||
dockerproject_repo_info:
|
||||
repos:
|
||||
|
||||
docker_dns_servers_strict: yes
|
||||
|
||||
docker_container_storage_setup: false
|
||||
|
||||
docker_rh_repo_base_url: 'https://yum.dockerproject.org/repo/main/centos/7'
|
||||
docker_rh_repo_gpgkey: 'https://yum.dockerproject.org/gpg'
|
||||
# Used to override obsoletes=0
|
||||
yum_conf: /etc/yum.conf
|
||||
docker_yum_conf: /etc/yum_docker.conf
|
||||
|
||||
# CentOS/RedHat docker-ce repo
|
||||
docker_rh_repo_base_url: 'https://download.docker.com/linux/centos/7/$basearch/stable'
|
||||
docker_rh_repo_gpgkey: 'https://download.docker.com/linux/centos/gpg'
|
||||
# Ubuntu docker-ce repo
|
||||
docker_ubuntu_repo_base_url: "https://download.docker.com/linux/ubuntu"
|
||||
docker_ubuntu_repo_gpgkey: 'https://download.docker.com/linux/ubuntu/gpg'
|
||||
# Debian docker-ce repo
|
||||
docker_debian_repo_base_url: "https://download.docker.com/linux/debian"
|
||||
docker_debian_repo_gpgkey: 'https://download.docker.com/linux/debian/gpg'
|
||||
# dockerproject repo
|
||||
dockerproject_rh_repo_base_url: 'https://yum.dockerproject.org/repo/main/centos/7'
|
||||
dockerproject_rh_repo_gpgkey: 'https://yum.dockerproject.org/gpg'
|
||||
dockerproject_apt_repo_base_url: 'https://apt.dockerproject.org/repo'
|
||||
dockerproject_apt_repo_gpgkey: 'https://apt.dockerproject.org/gpg'
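These URL variables make it possible to point the role at an internal mirror; a hedged sketch (the mirror host is a placeholder):

```yaml
# Illustrative overrides for an air-gapped CentOS mirror of docker-ce
docker_rh_repo_base_url: 'https://mirror.example.internal/docker-ce/linux/centos/7/$basearch/stable'
docker_rh_repo_gpgkey: 'https://mirror.example.internal/docker-ce/linux/centos/gpg'
```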
|
||||
|
||||
@@ -3,6 +3,9 @@ docker_container_storage_setup_version: v0.6.0
|
||||
docker_container_storage_setup_profile_name: kubespray
|
||||
docker_container_storage_setup_storage_driver: devicemapper
|
||||
docker_container_storage_setup_container_thinpool: docker-pool
|
||||
# It must be define a disk path for docker_container_storage_setup_devs.
|
||||
# Otherwise docker-storage-setup will be executed incorrectly.
|
||||
# docker_container_storage_setup_devs: /dev/vdb
|
||||
docker_container_storage_setup_data_size: 40%FREE
|
||||
docker_container_storage_setup_min_data_size: 2G
|
||||
docker_container_storage_setup_chunk_size: 512K
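As the comment above says, the `devs` variable has to be set for container-storage-setup to do anything useful; a hedged sketch (the device path is a placeholder):

```yaml
# Illustrative: enable container-storage-setup against a dedicated disk
docker_container_storage_setup: true
docker_container_storage_setup_devs: /dev/vdb
```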
|
||||
|
||||
@@ -23,7 +23,7 @@
|
||||
copy:
|
||||
dest: /etc/systemd/system/docker.service.d/override.conf
|
||||
content: |-
|
||||
### Thie file is managed by Ansible
|
||||
### This file is managed by Ansible
|
||||
[Service]
|
||||
EnvironmentFile=-/etc/sysconfig/docker-storage
|
||||
|
||||
@@ -31,6 +31,12 @@
|
||||
group: root
|
||||
mode: 0644
|
||||
|
||||
# https://docs.docker.com/engine/installation/linux/docker-ce/centos/#install-using-the-repository
|
||||
- name: docker-storage-setup | install lvm2
|
||||
yum:
|
||||
name: lvm2
|
||||
state: present
|
||||
|
||||
- name: docker-storage-setup | install and run container-storage-setup
|
||||
become: yes
|
||||
script: install_container_storage_setup.sh {{ docker_container_storage_setup_version }} {{ docker_container_storage_setup_profile_name }}
|
||||
|
||||
@@ -12,11 +12,21 @@
|
||||
paths:
|
||||
- ../vars
|
||||
skip: true
|
||||
tags: facts
|
||||
tags:
|
||||
- facts
|
||||
|
||||
- include: set_facts_dns.yml
|
||||
# https://yum.dockerproject.org/repo/main/opensuse/ contains packages for an EOL
|
||||
# openSUSE version so we can't use it. The only alternative is to use the docker
|
||||
# packages from the distribution repositories.
|
||||
- name: Warn about Docker version on SUSE
|
||||
debug:
|
||||
msg: "SUSE distributions always install Docker from the distro repos"
|
||||
when: ansible_pkg_mgr == 'zypper'
|
||||
|
||||
- include_tasks: set_facts_dns.yml
|
||||
when: dns_mode != 'none' and resolvconf_mode == 'docker_dns'
|
||||
tags: facts
|
||||
tags:
|
||||
- facts
|
||||
|
||||
- name: check for minimum kernel version
|
||||
fail:
|
||||
@@ -25,28 +35,52 @@
|
||||
{{ docker_kernel_min_version }} on
|
||||
{{ ansible_distribution }}-{{ ansible_distribution_version }}
|
||||
when: (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]) and (ansible_kernel|version_compare(docker_kernel_min_version, "<"))
|
||||
tags: facts
|
||||
tags:
|
||||
- facts
|
||||
|
||||
- name: ensure docker repository public key is installed
|
||||
- import_tasks: pre-upgrade.yml
|
||||
|
||||
- name: ensure docker-ce repository public key is installed
|
||||
action: "{{ docker_repo_key_info.pkg_key }}"
|
||||
args:
|
||||
id: "{{item}}"
|
||||
keyserver: "{{docker_repo_key_info.keyserver}}"
|
||||
url: "{{docker_repo_key_info.url}}"
|
||||
state: present
|
||||
register: keyserver_task_result
|
||||
until: keyserver_task_result|succeeded
|
||||
retries: 4
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
with_items: "{{ docker_repo_key_info.repo_keys }}"
|
||||
when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic)
|
||||
when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic)
|
||||
|
||||
- name: ensure docker repository is enabled
|
||||
- name: ensure docker-ce repository is enabled
|
||||
action: "{{ docker_repo_info.pkg_repo }}"
|
||||
args:
|
||||
repo: "{{item}}"
|
||||
state: present
|
||||
with_items: "{{ docker_repo_info.repos }}"
|
||||
when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic) and (docker_repo_info.repos|length > 0)
|
||||
when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic) and (docker_repo_info.repos|length > 0)
|
||||
|
||||
- name: ensure docker-engine repository public key is installed
|
||||
action: "{{ dockerproject_repo_key_info.pkg_key }}"
|
||||
args:
|
||||
id: "{{item}}"
|
||||
url: "{{dockerproject_repo_key_info.url}}"
|
||||
state: present
|
||||
register: keyserver_task_result
|
||||
until: keyserver_task_result|succeeded
|
||||
retries: 4
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
with_items: "{{ dockerproject_repo_key_info.repo_keys }}"
|
||||
when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic)
|
||||
|
||||
- name: ensure docker-engine repository is enabled
|
||||
action: "{{ dockerproject_repo_info.pkg_repo }}"
|
||||
args:
|
||||
repo: "{{item}}"
|
||||
state: present
|
||||
with_items: "{{ dockerproject_repo_info.repos }}"
|
||||
when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic) and (dockerproject_repo_info.repos|length > 0)
|
||||
|
||||
- name: Configure docker repository on RedHat/CentOS
|
||||
template:
|
||||
@@ -54,11 +88,27 @@
|
||||
dest: "/etc/yum.repos.d/docker.repo"
|
||||
when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
|
||||
|
||||
- name: Copy yum.conf for editing
|
||||
copy:
|
||||
src: "{{ yum_conf }}"
|
||||
dest: "{{ docker_yum_conf }}"
|
||||
remote_src: yes
|
||||
when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
|
||||
|
||||
- name: Edit copy of yum.conf to set obsoletes=0
|
||||
lineinfile:
|
||||
path: "{{ docker_yum_conf }}"
|
||||
state: present
|
||||
regexp: '^obsoletes='
|
||||
line: 'obsoletes=0'
|
||||
when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
|
||||
|
||||
- name: ensure docker packages are installed
|
||||
action: "{{ docker_package_info.pkg_mgr }}"
|
||||
args:
|
||||
pkg: "{{item.name}}"
|
||||
force: "{{item.force|default(omit)}}"
|
||||
conf_file: "{{item.yum_conf|default(omit)}}"
|
||||
state: present
|
||||
register: docker_task_result
|
||||
until: docker_task_result|succeeded
|
||||
@@ -68,15 +118,30 @@
|
||||
notify: restart docker
|
||||
when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic) and (docker_package_info.pkgs|length > 0)
|
||||
|
||||
- name: check minimum docker version for docker_dns mode. You need at least docker version >= 1.12 for resolvconf_mode=docker_dns
|
||||
- name: ensure service is started if docker packages are already present
|
||||
service:
|
||||
name: docker
|
||||
state: started
|
||||
when: docker_task_result is not changed
|
||||
|
||||
- name: flush handlers so we can wait for docker to come up
|
||||
meta: flush_handlers
|
||||
|
||||
- name: set fact for docker_version
|
||||
command: "docker version -f '{{ '{{' }}.Client.Version{{ '}}' }}'"
|
||||
register: docker_version
|
||||
failed_when: docker_version.stdout|version_compare('1.12', '<')
|
||||
register: installed_docker_version
|
||||
changed_when: false
|
||||
when: dns_mode != 'none' and resolvconf_mode == 'docker_dns'
|
||||
|
||||
- name: check minimum docker version for docker_dns mode. You need at least docker version >= 1.12 for resolvconf_mode=docker_dns
|
||||
fail:
|
||||
msg: "You need at least docker version >= 1.12 for resolvconf_mode=docker_dns"
|
||||
when: >
|
||||
dns_mode != 'none' and
|
||||
resolvconf_mode == 'docker_dns' and
|
||||
installed_docker_version.stdout|version_compare('1.12', '<')
|
||||
|
||||
- name: Set docker systemd config
|
||||
include: systemd.yml
|
||||
import_tasks: systemd.yml
|
||||
|
||||
- name: ensure docker service is started and enabled
|
||||
service:
|
Some files were not shown because too many files have changed in this diff.