Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2025-12-14 13:54:37 +03:00)
Compare commits
850 Commits
| Author | SHA1 | Date |
|---|---|---|

(850 commit rows follow in the comparison, from abe9b40602 through 83838b7fbc; only the abbreviated SHA1 column is populated in this listing, and the author, date, and message cells are empty.)
.ansible-lint (new file, 25 lines)
@@ -0,0 +1,25 @@
---
parseable: true
skip_list:
  # see https://docs.ansible.com/ansible-lint/rules/default_rules.html for a list of all default rules
  # The following rules throw errors.
  # These either still need to be corrected in the repository and the rules re-enabled or documented why they are skipped on purpose.
  - '301'
  - '305'
  - '306'
  - '404'
  - '503'

  # These rules are intentionally skipped:
  #
  # [E204]: "Lines should be no longer than 160 chars"
  # This could be re-enabled with a major rewrite in the future.
  # For now, there's not enough value gain from strictly limiting line length.
  # (Disabled in May 2019)
  - '204'

  # [E701]: "meta/main.yml should contain relevant info"
  # Roles in Kubespray are not intended to be used/imported by Ansible Galaxy.
  # While it can be useful to have these metadata available, they are also available in the existing documentation.
  # (Disabled in May 2019)
  - '701'
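ansible-lint normally picks this configuration up from the repository root, which is where the CI lint job later in this change runs it. As a minimal sketch of the same skip_list format, here is how one more rule ID would be suppressed; the ID '602' is purely illustrative and is not part of the file above:

```yaml
# Illustrative only: the skip_list format shown above, extended by one entry.
# '602' is a hypothetical additional rule ID, not part of the Kubespray config.
skip_list:
  - '301'
  - '602'
```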
@@ -1,16 +1,11 @@
-<!-- Thanks for filing an issue! Before hitting the button, please answer these questions.-->
-
-**Is this a BUG REPORT or FEATURE REQUEST?** (choose one):
-
+---
+name: Bug Report
+about: Report a bug encountered while operating Kubernetes
+labels: kind/bug
+
+---
 <!--
-If this is a BUG REPORT, please:
-  - Fill in as much of the template below as you can. If you leave out
-    information, we can't help you as well.
-
-If this is a FEATURE REQUEST, please:
-  - Describe *in detail* the feature/behavior/change you'd like to see.
-
-In both cases, be ready for followup questions, and please respond in a timely
+Please, be ready for followup questions, and please respond in a timely
 manner. If we can't reproduce a bug or think a feature already exists, we
 might close your issue. If we're wrong, PLEASE feel free to reopen it and
 explain why.
.github/ISSUE_TEMPLATE/enhancement.md (new file, vendored, 11 lines)
@@ -0,0 +1,11 @@
---
name: Enhancement Request
about: Suggest an enhancement to the Kubespray project
labels: kind/feature

---
<!-- Please only use this template for submitting enhancement requests -->

**What would you like to be added**:

**Why is this needed**:
.github/ISSUE_TEMPLATE/failing-test.md (new file, vendored, 20 lines)
@@ -0,0 +1,20 @@
---
name: Failing Test
about: Report test failures in Kubespray CI jobs
labels: kind/failing-test

---

<!-- Please only use this template for submitting reports about failing tests in Kubespray CI jobs -->

**Which jobs are failing**:

**Which test(s) are failing**:

**Since when has it been failing**:

**Testgrid link**:

**Reason for failure**:

**Anything else we need to know**:
.github/ISSUE_TEMPLATE/support.md (new file, vendored, 18 lines)
@@ -0,0 +1,18 @@
---
name: Support Request
about: Support request or question relating to Kubespray
labels: triage/support

---

<!--
STOP -- PLEASE READ!

GitHub is not the right place for support requests.

If you're looking for help, check [Stack Overflow](https://stackoverflow.com/questions/tagged/kubespray) and the [troubleshooting guide](https://kubernetes.io/docs/tasks/debug-application-cluster/troubleshooting/).

You can also post your question on the [Kubernetes Slack](http://slack.k8s.io/) or the [Discuss Kubernetes](https://discuss.kubernetes.io/) forum.

If the matter is security related, please disclose it privately via https://kubernetes.io/security/.
-->
.github/PULL_REQUEST_TEMPLATE.md (new file, vendored, 44 lines)
@@ -0,0 +1,44 @@
<!-- Thanks for sending a pull request! Here are some tips for you:

1. If this is your first time, please read our contributor guidelines: https://git.k8s.io/community/contributors/guide#your-first-contribution and developer guide https://git.k8s.io/community/contributors/devel/development.md#development-guide
2. Please label this pull request according to what type of issue you are addressing, especially if this is a release targeted pull request. For reference on required PR/issue labels, read here:
https://git.k8s.io/community/contributors/devel/release.md#issue-kind-label
3. Ensure you have added or ran the appropriate tests for your PR: https://git.k8s.io/community/contributors/devel/testing.md
4. If you want *faster* PR reviews, read how: https://git.k8s.io/community/contributors/guide/pull-requests.md#best-practices-for-faster-reviews
5. Follow the instructions for writing a release note: https://git.k8s.io/community/contributors/guide/release-notes.md
6. If the PR is unfinished, see how to mark it: https://git.k8s.io/community/contributors/guide/pull-requests.md#marking-unfinished-pull-requests
-->

**What type of PR is this?**
> Uncomment only one ` /kind <>` line, hit enter to put that in a new line, and remove leading whitespaces from that line:
>
> /kind api-change
> /kind bug
> /kind cleanup
> /kind design
> /kind documentation
> /kind failing-test
> /kind feature
> /kind flake

**What this PR does / why we need it**:

**Which issue(s) this PR fixes**:
<!--
*Automatically closes linked issue when PR is merged.
Usage: `Fixes #<issue number>`, or `Fixes (paste link of issue)`.
_If PR is about `failing-tests or flakes`, please post the related issues/tests in a comment and do not use `Fixes`_*
-->
Fixes #

**Special notes for your reviewer**:

**Does this PR introduce a user-facing change?**:
<!--
If no, just write "NONE" in the release-note block below.
If yes, a release note is required:
Enter your extended release note in the block below. If the PR requires additional action from users switching to the new release, include the string "action required".
-->
```release-note

```
.gitignore (vendored, 11 lines changed)
@@ -1,9 +1,6 @@
 .vagrant
 *.retry
 **/vagrant_ansible_inventory
-inventory/credentials/
-inventory/group_vars/fake_hosts.yml
-inventory/host_vars/
 temp
 .idea
 .tox
@@ -11,12 +8,19 @@ temp
 *.bak
 *.tfstate
 *.tfstate.backup
+.terraform/
 contrib/terraform/aws/credentials.tfvars
 /ssh-bastion.conf
 **/*.sw[pon]
 *~
 vagrant/
+
+# Ansible inventory
+inventory/*
+!inventory/local
+!inventory/sample
+inventory/*/artifacts/

 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
@@ -24,7 +28,6 @@ __pycache__/

 # Distribution / packaging
 .Python
-inventory/*/artifacts/
 env/
 build/
 credentials/
.gitlab-ci.yml (702 lines changed)
@@ -1,14 +1,16 @@
+---
 stages:
   - unit-tests
-  - moderator
   - deploy-part1
+  - moderator
   - deploy-part2
+  - deploy-gce
   - deploy-special

 variables:
   FAILFASTCI_NAMESPACE: 'kargo-ci'
-  GITLAB_REPOSITORY: 'kargo-ci/kubernetes-incubator__kubespray'
+  GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
   # DOCKER_HOST: tcp://localhost:2375
   ANSIBLE_FORCE_COLOR: "true"
   MAGIC: "ci check this"
   TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
@@ -24,694 +26,46 @@ variables:
   IDEMPOT_CHECK: "false"
   RESET_CHECK: "false"
   UPGRADE_TEST: "false"
-  KUBEADM_ENABLED: "false"
   LOG_LEVEL: "-vv"

-# asia-east1-a
-# asia-northeast1-a
-# europe-west1-b
-# us-central1-a
-# us-east1-b
-# us-west1-a
-
 before_script:
-  - /usr/bin/python -m pip install -r tests/requirements.txt
-  - mkdir -p /.ssh
+  - ./tests/scripts/rebase.sh
+  - /usr/bin/python -m pip install -r tests/requirements.txt
+  - mkdir -p /.ssh

 .job: &job
   tags:
-    - kubernetes
-    - docker
-  image: quay.io/kubespray/kubespray:latest
-
-.docker_service: &docker_service
-  services:
-    - docker:dind
-
-.create_cluster: &create_cluster
-  <<: *job
-  <<: *docker_service
-
-.gce_variables: &gce_variables
-  GCE_USER: travis
-  SSH_USER: $GCE_USER
-  CLOUD_MACHINE_TYPE: "g1-small"
-  CI_PLATFORM: "gce"
-  PRIVATE_KEY: $GCE_PRIVATE_KEY
-
-.do_variables: &do_variables
-  PRIVATE_KEY: $DO_PRIVATE_KEY
-  CI_PLATFORM: "do"
-  SSH_USER: root
+    - packet
+  variables:
+    KUBESPRAY_VERSION: v2.10.0
+  image: quay.io/kubespray/kubespray:$KUBESPRAY_VERSION

 .testcases: &testcases
   <<: *job
-  <<: *docker_service
-  cache:
-    key: "$CI_BUILD_REF_NAME"
-    paths:
-      - downloads/
-      - $HOME/.cache
+  services:
+    - docker:dind
   before_script:
-    - docker info
-    - /usr/bin/python -m pip install -r requirements.txt
-    - /usr/bin/python -m pip install -r tests/requirements.txt
-    - mkdir -p /.ssh
-    - mkdir -p $HOME/.ssh
-    - ansible-playbook --version
-    - export PYPATH=$([[ ! "$CI_JOB_NAME" =~ "coreos" ]] && echo /usr/bin/python || echo /opt/bin/python)
-    - echo "CI_JOB_NAME is $CI_JOB_NAME"
-    - echo "PYPATH is $PYPATH"
+    - ./tests/scripts/rebase.sh
+    - ./tests/scripts/testcases_prepare.sh
   script:
-    - pwd
-    - ls
-    - echo ${PWD}
-    - echo "${STARTUP_SCRIPT}"
-    - cd tests && make create-${CI_PLATFORM} -s ; cd -
-    [... remaining removed lines of the old inline test script: the upgrade-test checkout steps, the ansible-playbook cluster deployment, the Master API / pod-to-pod / advanced DNS test cases, and the idempotency and reset checks ...]
+    - ./tests/scripts/testcases_run.sh
   after_script:
-    - cd tests && make delete-${CI_PLATFORM} -s ; cd -
+    - ./tests/scripts/testcases_cleanup.sh

-[... remaining removed lines: the old .gce/.do deployment templates, the per-OS and per-network-plugin variable anchors, and the full inline gce_*/do_* job matrix; their replacements now live in the included .gitlab-ci/*.yml files ...]
+# For failfast, at least 1 job must be defined in .gitlab-ci.yml
 # Premoderated with manual actions
 ci-authorized:
-  <<: *job
+  extends: .job
   stage: moderator
-  before_script:
-    - apt-get -y install jq
   script:
     - /bin/sh scripts/premoderator.sh
   except: ['triggers', 'master']
+  # Disable ci moderator
+  only: []

-syntax-check:
-  <<: *job
-  stage: unit-tests
-  script:
-    - ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root cluster.yml -vvv --syntax-check
-    - ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root upgrade-cluster.yml -vvv --syntax-check
-    - ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root reset.yml -vvv --syntax-check
-    - ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root extra_playbooks/upgrade-only-k8s.yml -vvv --syntax-check
-  except: ['triggers', 'master']
-
-yamllint:
-  <<: *job
-  stage: unit-tests
-  script:
-    - yamllint roles
-  except: ['triggers', 'master']
-
-tox-inventory-builder:
-  stage: unit-tests
-  <<: *job
-  script:
-    - pip install tox
-    - cd contrib/inventory_builder && tox
-  when: manual
-  except: ['triggers', 'master']
+include:
+  - .gitlab-ci/lint.yml
+  - .gitlab-ci/shellcheck.yml
+  - .gitlab-ci/digital-ocean.yml
+  - .gitlab-ci/terraform.yml
+  - .gitlab-ci/packet.yml
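After this change the main file only carries the shared `.job`/`.testcases` templates, the `ci-authorized` moderator job, and the `include:` list that pulls the per-platform definitions back in from `.gitlab-ci/*.yml`. As a rough sketch (an illustration, not a job taken from the diff), a child-file job composed this way would look roughly like:

```yaml
# Hypothetical child-file job; it relies on the .testcases template and the stages
# declared in .gitlab-ci.yml above. The job name and CI_PLATFORM value are examples only.
example_platform-deploy:
  stage: deploy-part2
  extends: .testcases          # inherits the image, tags, before_script and script wrappers
  variables:
    CI_PLATFORM: "example"     # platform selector, as in .gitlab-ci/packet.yml or gce.yml
  when: manual
  except: ['triggers']
```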
.gitlab-ci/digital-ocean.yml (new file, 19 lines)
@@ -0,0 +1,19 @@
---
.do_variables: &do_variables
  PRIVATE_KEY: $DO_PRIVATE_KEY
  CI_PLATFORM: "do"
  SSH_USER: root

.do: &do
  extends: .testcases
  tags:
    - do

do_ubuntu-canal-ha:
  stage: deploy-part2
  extends: .do
  variables:
    <<: *do_variables
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]
247
.gitlab-ci/gce.yml
Normal file
247
.gitlab-ci/gce.yml
Normal file
@@ -0,0 +1,247 @@
|
|||||||
|
---
|
||||||
|
.gce_variables: &gce_variables
|
||||||
|
GCE_USER: travis
|
||||||
|
SSH_USER: $GCE_USER
|
||||||
|
CLOUD_MACHINE_TYPE: "g1-small"
|
||||||
|
CI_PLATFORM: "gce"
|
||||||
|
PRIVATE_KEY: $GCE_PRIVATE_KEY
|
||||||
|
|
||||||
|
.cache: &cache
|
||||||
|
cache:
|
||||||
|
key: "$CI_BUILD_REF_NAME"
|
||||||
|
paths:
|
||||||
|
- downloads/
|
||||||
|
- $HOME/.cache
|
||||||
|
|
||||||
|
.gce: &gce
|
||||||
|
extends: .testcases
|
||||||
|
<<: *cache
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
tags:
|
||||||
|
- gce
|
||||||
|
except: ['triggers']
|
||||||
|
only: [/^pr-.*$/]
|
||||||
|
|
||||||
|
.centos_weave_kubeadm_variables: ¢os_weave_kubeadm_variables
|
||||||
|
# stage: deploy-part1
|
||||||
|
UPGRADE_TEST: "graceful"
|
||||||
|
|
||||||
|
.centos7_multus_calico_variables: ¢os7_multus_calico_variables
|
||||||
|
# stage: deploy-gce
|
||||||
|
UPGRADE_TEST: "graceful"
|
||||||
|
|
||||||
|
# Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
|
||||||
|
### PR JOBS PART1
|
||||||
|
|
||||||
|
gce_ubuntu18-flannel-aio:
|
||||||
|
stage: deploy-part1
|
||||||
|
<<: *gce
|
||||||
|
when: manual
|
||||||
|
|
||||||
|
### PR JOBS PART2
|
||||||
|
|
||||||
|
gce_coreos-calico-aio:
|
||||||
|
stage: deploy-gce
|
||||||
|
<<: *gce
|
||||||
|
when: on_success
|
||||||
|
|
||||||
|
gce_centos7-flannel-addons:
|
||||||
|
stage: deploy-gce
|
||||||
|
<<: *gce
|
||||||
|
when: manual
|
||||||
|
|
||||||
|
### MANUAL JOBS
|
||||||
|
|
||||||
|
gce_centos-weave-kubeadm-sep:
|
||||||
|
stage: deploy-gce
|
||||||
|
extends: .gce
|
||||||
|
variables:
|
||||||
|
<<: *centos_weave_kubeadm_variables
|
||||||
|
when: on_success
|
||||||
|
only: ['triggers']
|
||||||
|
except: []
|
||||||
|
|
||||||
|
gce_ubuntu-weave-sep:
|
||||||
|
stage: deploy-gce
|
||||||
|
<<: *gce
|
||||||
|
when: manual
|
||||||
|
only: ['triggers']
|
||||||
|
except: []
|
||||||
|
|
||||||
|
gce_coreos-calico-sep-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_ubuntu-canal-ha-triggers:
  stage: deploy-special
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_centos7-flannel-addons-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_ubuntu-weave-sep-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

# More builds for PRs/merges (manual) and triggers (auto)

gce_ubuntu-canal-ha:
  stage: deploy-special
  <<: *gce
  when: manual

gce_ubuntu-canal-kubeadm:
  stage: deploy-gce
  <<: *gce
  when: manual

gce_ubuntu-canal-kubeadm-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_ubuntu-flannel-ha:
  stage: deploy-gce
  <<: *gce
  when: manual

gce_centos-weave-kubeadm-triggers:
  stage: deploy-gce
  extends: .gce
  variables:
    <<: *centos_weave_kubeadm_variables
  when: on_success
  only: ['triggers']
  except: []

gce_ubuntu-contiv-sep:
  stage: deploy-special
  <<: *gce
  when: manual

gce_coreos-cilium:
  stage: deploy-special
  <<: *gce
  when: manual

gce_ubuntu18-cilium-sep:
  stage: deploy-special
  <<: *gce
  when: manual

gce_rhel7-weave:
  stage: deploy-gce
  <<: *gce
  when: manual

gce_rhel7-weave-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_debian9-calico-upgrade:
  stage: deploy-gce
  <<: *gce
  when: manual

gce_debian9-calico-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_coreos-canal:
  stage: deploy-gce
  <<: *gce
  when: manual

gce_coreos-canal-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_rhel7-canal-sep:
  stage: deploy-special
  <<: *gce
  when: manual

gce_rhel7-canal-sep-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_centos7-calico-ha:
  stage: deploy-special
  <<: *gce
  when: manual

gce_centos7-calico-ha-triggers:
  stage: deploy-gce
  <<: *gce
  when: on_success
  only: ['triggers']
  except: []

gce_centos7-kube-router:
  stage: deploy-special
  <<: *gce
  when: manual

gce_centos7-multus-calico:
  stage: deploy-gce
  extends: .gce
  variables:
    <<: *centos7_multus_calico_variables
  when: manual

gce_oracle-canal:
  stage: deploy-gce
  <<: *gce
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

gce_opensuse-canal:
  stage: deploy-gce
  <<: *gce
  when: manual

# no triggers yet https://github.com/kubernetes-incubator/kargo/issues/613
gce_coreos-alpha-weave-ha:
  stage: deploy-special
  <<: *gce
  when: manual

gce_coreos-kube-router:
  stage: deploy-special
  <<: *gce
  when: manual

gce_ubuntu-kube-router-sep:
  stage: deploy-special
  <<: *gce
  when: manual
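The jobs above are split between a manual PR flow and an automated trigger flow: jobs ending in `-triggers` carry `only: ['triggers']` with `when: on_success`, so they run only in pipelines started from a trigger token, while the rest stay `when: manual`. A triggered pipeline of that kind can be started with GitLab's generic trigger API; the project ID, token and branch below are placeholders, not values from this repository:

```sh
# Start a trigger pipeline so the *-triggers jobs run automatically
curl -X POST \
  -F "token=<TRIGGER_TOKEN>" \
  -F "ref=master" \
  "https://gitlab.com/api/v4/projects/<PROJECT_ID>/trigger/pipeline"
```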
49  .gitlab-ci/lint.yml  Normal file
@@ -0,0 +1,49 @@
---
yamllint:
  extends: .job
  stage: unit-tests
  script:
    - yamllint --strict .
  except: ['triggers', 'master']

vagrant-validate:
  extends: .job
  stage: unit-tests
  script:
    - curl -sL https://releases.hashicorp.com/vagrant/2.2.4/vagrant_2.2.4_x86_64.deb -o /tmp/vagrant_2.2.4_x86_64.deb
    - dpkg -i /tmp/vagrant_2.2.4_x86_64.deb
    - vagrant validate --ignore-provider
  except: ['triggers', 'master']

ansible-lint:
  extends: .job
  stage: unit-tests
  # lint every yml/yaml file that looks like it contains Ansible plays
  script: |-
    grep -Rl '^- hosts: \|^ hosts: ' --include \*.yml --include \*.yaml . | xargs -P 4 -n 25 ansible-lint -v
  except: ['triggers', 'master']

syntax-check:
  extends: .job
  stage: unit-tests
  variables:
    ANSIBLE_INVENTORY: inventory/local-tests.cfg
    ANSIBLE_REMOTE_USER: root
    ANSIBLE_BECOME: "true"
    ANSIBLE_BECOME_USER: root
    ANSIBLE_VERBOSITY: "3"
  script:
    - ansible-playbook --syntax-check cluster.yml
    - ansible-playbook --syntax-check upgrade-cluster.yml
    - ansible-playbook --syntax-check reset.yml
    - ansible-playbook --syntax-check extra_playbooks/upgrade-only-k8s.yml
  except: ['triggers', 'master']

tox-inventory-builder:
  stage: unit-tests
  extends: .job
  script:
    - pip install tox
    - cd contrib/inventory_builder && tox
  when: manual
  except: ['triggers', 'master']
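These lint jobs are plain commands over the checked-out tree, so they can be reproduced before pushing; this is a local sketch that assumes `yamllint`, `ansible-lint` and `vagrant` are already installed on the workstation (the CI job images provide them):

```sh
yamllint --strict .
grep -Rl '^- hosts: \|^ hosts: ' --include \*.yml --include \*.yaml . | xargs -P 4 -n 25 ansible-lint -v
vagrant validate --ignore-provider
ANSIBLE_INVENTORY=inventory/local-tests.cfg ansible-playbook --syntax-check cluster.yml
```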
122  .gitlab-ci/packet.yml  Normal file
@@ -0,0 +1,122 @@
---
.packet_variables: &packet_variables
  CI_PLATFORM: "packet"
  SSH_USER: "kubespray"

.packet: &packet
  extends: .testcases
  variables:
    <<: *packet_variables
  tags:
    - packet
  only: [/^pr-.*$/]
  except: ['triggers']

.test-upgrade: &test-upgrade
  variables:
    UPGRADE_TEST: "graceful"

packet_ubuntu18-calico-aio:
  stage: deploy-part1
  <<: *packet
  when: on_success

# ### PR JOBS PART2

packet_centos7-flannel-addons:
  stage: deploy-part2
  <<: *packet
  when: on_success

# ### MANUAL JOBS

packet_centos-weave-kubeadm-sep:
  stage: deploy-part2
  <<: *packet
  when: on_success
  only: ['triggers']
  except: []

packet_ubuntu-weave-sep:
  stage: deploy-part2
  <<: *packet
  when: manual
  only: ['triggers']
  except: []

# # More builds for PRs/merges (manual) and triggers (auto)

packet_ubuntu-canal-ha:
  stage: deploy-special
  <<: *packet
  when: manual

packet_ubuntu-canal-kubeadm:
  stage: deploy-part2
  <<: *packet
  when: on_success

packet_ubuntu-flannel-ha:
  stage: deploy-part2
  <<: *packet
  when: manual

packet_ubuntu-contiv-sep:
  stage: deploy-part2
  <<: *packet
  when: on_success

packet_ubuntu18-cilium-sep:
  stage: deploy-special
  <<: *packet
  when: manual

packet_ubuntu18-flannel-containerd:
  stage: deploy-part2
  <<: *packet
  when: manual

packet_debian9-macvlan-sep:
  stage: deploy-part2
  <<: *packet
  when: on_success

packet_debian9-calico-upgrade:
  stage: deploy-part2
  <<: *packet
  when: on_success

packet_centos7-calico-ha:
  stage: deploy-part2
  <<: *packet
  when: manual

packet_centos7-kube-ovn:
  stage: deploy-part2
  <<: *packet
  when: on_success

packet_centos7-kube-router:
  stage: deploy-part2
  <<: *packet
  when: on_success

packet_centos7-multus-calico:
  stage: deploy-part2
  <<: *packet
  when: manual

packet_opensuse-canal:
  stage: deploy-part2
  <<: *packet
  when: manual

packet_oracle-7-canal:
  stage: deploy-part2
  <<: *packet
  when: manual

packet_ubuntu-kube-router-sep:
  stage: deploy-part2
  <<: *packet
  when: manual
15  .gitlab-ci/shellcheck.yml  Normal file
@@ -0,0 +1,15 @@
---
shellcheck:
  extends: .job
  stage: unit-tests
  variables:
    SHELLCHECK_VERSION: v0.6.0
  before_script:
    - ./tests/scripts/rebase.sh
    - curl --silent "https://storage.googleapis.com/shellcheck/shellcheck-"${SHELLCHECK_VERSION}".linux.x86_64.tar.xz" | tar -xJv
    - cp shellcheck-"${SHELLCHECK_VERSION}"/shellcheck /usr/bin/
    - shellcheck --version
  script:
    # Run shellcheck for all *.sh except contrib/
    - find . -name '*.sh' -not -path './contrib/*' | xargs shellcheck --severity error
  except: ['triggers', 'master']
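The same check can be run locally with any shellcheck of at least the pinned version; the path filter mirrors the job's script line:

```sh
# contrib/ is excluded, exactly as in the CI job
find . -name '*.sh' -not -path './contrib/*' | xargs shellcheck --severity error
```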
162  .gitlab-ci/terraform.yml  Normal file
@@ -0,0 +1,162 @@
---
# Tests for contrib/terraform/
.terraform_install:
  extends: .job
  before_script:
    - ./tests/scripts/rebase.sh
    - ./tests/scripts/testcases_prepare.sh
    - ./tests/scripts/terraform_install.sh
    # Set Ansible config
    - cp ansible.cfg ~/.ansible.cfg
    # Prepare inventory
    - if [ "$PROVIDER" == "openstack" ]; then VARIABLEFILE="cluster.tfvars"; else VARIABLEFILE="cluster.tf"; fi
    - cp contrib/terraform/$PROVIDER/sample-inventory/$VARIABLEFILE .
    - ln -s contrib/terraform/$PROVIDER/hosts
    - terraform init contrib/terraform/$PROVIDER
    # Copy SSH keypair
    - mkdir -p ~/.ssh
    - echo "$PACKET_PRIVATE_KEY" | base64 -d > ~/.ssh/id_rsa
    - chmod 400 ~/.ssh/id_rsa
    - echo "$PACKET_PUBLIC_KEY" | base64 -d > ~/.ssh/id_rsa.pub

.terraform_validate:
  extends: .terraform_install
  stage: unit-tests
  only: ['master', /^pr-.*$/]
  script:
    - if [ "$PROVIDER" == "openstack" ]; then VARIABLEFILE="cluster.tfvars"; else VARIABLEFILE="cluster.tf"; fi
    - terraform validate -var-file=$VARIABLEFILE contrib/terraform/$PROVIDER
    - terraform fmt -check -diff contrib/terraform/$PROVIDER

.terraform_apply:
  extends: .terraform_install
  stage: deploy-part2
  when: manual
  only: [/^pr-.*$/]
  variables:
    ANSIBLE_INVENTORY_UNPARSED_FAILED: "true"
    ANSIBLE_INVENTORY: hosts
    CI_PLATFORM: tf
    TF_VAR_ssh_user: $SSH_USER
    TF_VAR_cluster_name: $CI_JOB_ID
  script:
    - tests/scripts/testcases_run.sh
  after_script:
    # Cleanup regardless of exit code
    - ./tests/scripts/testcases_cleanup.sh

tf-validate-openstack:
  extends: .terraform_validate
  variables:
    TF_VERSION: 0.12.6
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-packet:
  extends: .terraform_validate
  variables:
    TF_VERSION: 0.11.11
    PROVIDER: packet
    CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-aws:
  extends: .terraform_validate
  variables:
    TF_VERSION: 0.11.11
    PROVIDER: aws
    CLUSTER: $CI_COMMIT_REF_NAME

tf-packet-ubuntu16-default:
  extends: .terraform_apply
  variables:
    TF_VERSION: 0.11.11
    PROVIDER: packet
    CLUSTER: $CI_COMMIT_REF_NAME
    TF_VAR_number_of_k8s_masters: "1"
    TF_VAR_number_of_k8s_nodes: "1"
    TF_VAR_plan_k8s_masters: t1.small.x86
    TF_VAR_plan_k8s_nodes: t1.small.x86
    TF_VAR_facility: ewr1
    TF_VAR_public_key_path: ""
    TF_VAR_operating_system: ubuntu_16_04

tf-packet-ubuntu18-default:
  extends: .terraform_apply
  variables:
    TF_VERSION: 0.11.11
    PROVIDER: packet
    CLUSTER: $CI_COMMIT_REF_NAME
    TF_VAR_number_of_k8s_masters: "1"
    TF_VAR_number_of_k8s_nodes: "1"
    TF_VAR_plan_k8s_masters: t1.small.x86
    TF_VAR_plan_k8s_nodes: t1.small.x86
    TF_VAR_facility: ams1
    TF_VAR_public_key_path: ""
    TF_VAR_operating_system: ubuntu_18_04

.ovh_variables: &ovh_variables
  OS_AUTH_URL: https://auth.cloud.ovh.net/v3
  OS_PROJECT_ID: 8d3cd5d737d74227ace462dee0b903fe
  OS_PROJECT_NAME: "9361447987648822"
  OS_USER_DOMAIN_NAME: Default
  OS_PROJECT_DOMAIN_ID: default
  OS_USERNAME: 8XuhBMfkKVrk
  OS_REGION_NAME: UK1
  OS_INTERFACE: public
  OS_IDENTITY_API_VERSION: "3"

tf-ovh_ubuntu18-calico:
  extends: .terraform_apply
  when: on_success
  variables:
    <<: *ovh_variables
    TF_VERSION: 0.12.6
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME
    ANSIBLE_TIMEOUT: "60"
    SSH_USER: ubuntu
    TF_VAR_number_of_k8s_masters: "0"
    TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
    TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
    TF_VAR_number_of_etcd: "0"
    TF_VAR_number_of_k8s_nodes: "0"
    TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
    TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
    TF_VAR_number_of_bastions: "0"
    TF_VAR_number_of_k8s_masters_no_etcd: "0"
    TF_VAR_use_neutron: "0"
    TF_VAR_floatingip_pool: "Ext-Net"
    TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
    TF_VAR_network_name: "Ext-Net"
    TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
    TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
    TF_VAR_image: "Ubuntu 18.04"
    TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

tf-ovh_coreos-calico:
  extends: .terraform_apply
  when: on_success
  variables:
    <<: *ovh_variables
    TF_VERSION: 0.12.6
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME
    ANSIBLE_TIMEOUT: "60"
    SSH_USER: core
    TF_VAR_number_of_k8s_masters: "0"
    TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
    TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
    TF_VAR_number_of_etcd: "0"
    TF_VAR_number_of_k8s_nodes: "0"
    TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
    TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
    TF_VAR_number_of_bastions: "0"
    TF_VAR_number_of_k8s_masters_no_etcd: "0"
    TF_VAR_use_neutron: "0"
    TF_VAR_floatingip_pool: "Ext-Net"
    TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
    TF_VAR_network_name: "Ext-Net"
    TF_VAR_flavor_k8s_master: "4d4fd037-9493-4f2b-9afe-b542b5248eac" # b2-7
    TF_VAR_flavor_k8s_node: "4d4fd037-9493-4f2b-9afe-b542b5248eac" # b2-7
    TF_VAR_image: "CoreOS Stable"
    TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
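The `.terraform_validate` template boils down to an init/validate/fmt pass over one provider directory, so it can also be run locally; this sketch assumes a Terraform binary matching the job's `TF_VERSION` is on `PATH` and uses the packet provider as the example:

```sh
PROVIDER=packet
VARIABLEFILE=cluster.tf   # cluster.tfvars when PROVIDER=openstack
cp "contrib/terraform/$PROVIDER/sample-inventory/$VARIABLEFILE" .
terraform init "contrib/terraform/$PROVIDER"
terraform validate -var-file="$VARIABLEFILE" "contrib/terraform/$PROVIDER"
terraform fmt -check -diff "contrib/terraform/$PROVIDER"
```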
@@ -7,4 +7,5 @@
 1. Submit an issue describing your proposed change to the repo in question.
 2. The [repo owners](OWNERS) will respond to your issue promptly.
 3. Fork the desired repo, develop and test your code changes.
-4. Submit a pull request.
+4. Sign the CNCF CLA (https://git.k8s.io/community/CLA.md#the-contributor-license-agreement)
+5. Submit a pull request.
10  Dockerfile
@@ -1,11 +1,11 @@
-FROM ubuntu:16.04
+FROM ubuntu:18.04
 
 RUN mkdir /kubespray
 WORKDIR /kubespray
 RUN apt update -y && \
     apt install -y \
-    libssl-dev python-dev sshpass apt-transport-https \
-    ca-certificates curl gnupg2 software-properties-common python-pip
+    libssl-dev python3-dev sshpass apt-transport-https jq \
+    ca-certificates curl gnupg2 software-properties-common python3-pip rsync
 RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
     add-apt-repository \
     "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
@@ -13,4 +13,6 @@ RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - &&
     stable" \
     && apt update -y && apt-get install docker-ce -y
 COPY . .
-RUN /usr/bin/python -m pip install pip -U && /usr/bin/python -m pip install -r tests/requirements.txt && python -m pip install -r requirements.txt
+RUN /usr/bin/python3 -m pip install pip -U && /usr/bin/python3 -m pip install -r tests/requirements.txt && python3 -m pip install -r requirements.txt
+RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.14.4/bin/linux/amd64/kubectl \
+    && chmod a+x kubectl && cp kubectl /usr/local/bin/kubectl
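With the base image moved to Ubuntu 18.04 and Python 3, the image still builds with a plain `docker build`; the run command below is only an illustration of using it as a throwaway deploy host, and the bind mounts are not a documented interface:

```sh
docker build -t kubespray .
docker run --rm -it \
  -v "$(pwd)/inventory:/kubespray/inventory" \
  -v "$HOME/.ssh/id_rsa:/root/.ssh/id_rsa:ro" \
  kubespray bash
```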
5  Makefile  Normal file
@@ -0,0 +1,5 @@
mitogen:
	ansible-playbook -c local mitogen.yaml -vv
clean:
	rm -rf dist/
	rm *.retry
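The new Makefile wraps two housekeeping commands; `make mitogen` runs a local playbook that presumably unpacks Mitogen under `plugins/mitogen/`, the path that the `strategy_plugins` line added to ansible.cfg later in this change points at:

```sh
make mitogen   # ansible-playbook -c local mitogen.yaml -vv
make clean     # rm -rf dist/ && rm *.retry
```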
3  OWNERS
@@ -1,5 +1,4 @@
-# See the OWNERS file documentation:
-# https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md
+# See the OWNERS docs at https://go.k8s.io/owners
 
 approvers:
   - kubespray-approvers
@@ -11,8 +11,11 @@ aliases:
   - riverzhang
   - holser
   - smana
+  - verwilst
 kubespray-reviewers:
   - jjungnickel
   - archifleks
   - chapsuk
   - mirwan
+  - miouge1
+  - holmsten
91  README.md
@@ -1,12 +1,12 @@
 
 
 Deploy a Production Ready Kubernetes Cluster
 ============================================
 
-If you have questions, join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
+If you have questions, check the [documentation](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
 You can get your invite [here](http://slack.k8s.io/)
 
-- Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere, Oracle Cloud Infrastructure (Experimental), or Baremetal**
+- Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere, Packet (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal**
 - **Highly available** cluster
 - **Composable** (Choice of the network plugin for instance)
 - Supports most popular **Linux distributions**
@@ -19,22 +19,27 @@ To deploy the cluster you can use :
 
 ### Ansible
 
+#### Usage
+
 # Install dependencies from ``requirements.txt``
 sudo pip install -r requirements.txt
 
 # Copy ``inventory/sample`` as ``inventory/mycluster``
-cp -rfp inventory/sample/* inventory/mycluster
+cp -rfp inventory/sample inventory/mycluster
 
 # Update Ansible inventory file with inventory builder
 declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
-CONFIG_FILE=inventory/mycluster/hosts.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]}
+CONFIG_FILE=inventory/mycluster/hosts.yml python3 contrib/inventory_builder/inventory.py ${IPS[@]}
 
 # Review and change parameters under ``inventory/mycluster/group_vars``
 cat inventory/mycluster/group_vars/all/all.yml
 cat inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml
 
-# Deploy Kubespray with Ansible Playbook
-ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml
+# Deploy Kubespray with Ansible Playbook - run the playbook as root
+# The option `--become` is required, as for example writing SSL keys in /etc/,
+# installing packages and interacting with various systemd daemons.
+# Without --become the playbook will fail to run!
+ansible-playbook -i inventory/mycluster/hosts.yml --become --become-user=root cluster.yml
 
 Note: When Ansible is already installed via system packages on the control machine, other python packages installed via `sudo pip install -r requirements.txt` will go to a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on Ubuntu).
 As a consequence, `ansible-playbook` command will fail with:
@@ -81,6 +86,7 @@ Documents
 - [AWS](docs/aws.md)
 - [Azure](docs/azure.md)
 - [vSphere](docs/vsphere.md)
+- [Packet Host](docs/packet.md)
 - [Large deployments](docs/large-deployments.md)
 - [Upgrades basics](docs/upgrades.md)
 - [Roadmap](docs/roadmap.md)
@@ -89,12 +95,13 @@ Supported Linux Distributions
 -----------------------------
 
 - **Container Linux by CoreOS**
-- **Debian** Jessie, Stretch, Wheezy
+- **Debian** Buster, Jessie, Stretch, Wheezy
 - **Ubuntu** 16.04, 18.04
 - **CentOS/RHEL** 7
 - **Fedora** 28
 - **Fedora/CentOS** Atomic
 - **openSUSE** Leap 42.3/Tumbleweed
+- **Oracle Linux** 7
 
 Note: Upstart/SysV init based OS types are not supported.
 
@@ -102,38 +109,36 @@ Supported Components
 --------------------
 
 - Core
-  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.11.3
-  - [etcd](https://github.com/coreos/etcd) v3.2.18
-  - [docker](https://www.docker.com/) v17.03 (see note)
-  - [rkt](https://github.com/rkt/rkt) v1.21.0 (see Note 2)
+  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.15.11
+  - [etcd](https://github.com/coreos/etcd) v3.3.10
+  - [docker](https://www.docker.com/) v18.06 (see note)
   - [cri-o](http://cri-o.io/) v1.11.5 (experimental: see [CRI-O Note](docs/cri-o.md). Only on centos based OS)
 - Network Plugin
-  - [calico](https://github.com/projectcalico/calico) v3.1.3
+  - [cni-plugins](https://github.com/containernetworking/plugins) v0.8.1
+  - [calico](https://github.com/projectcalico/calico) v3.7.3
   - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
-  - [cilium](https://github.com/cilium/cilium) v1.2.0
-  - [contiv](https://github.com/contiv/install) v1.1.7
-  - [flanneld](https://github.com/coreos/flannel) v0.10.0
-  - [weave](https://github.com/weaveworks/weave) v2.4.1
+  - [cilium](https://github.com/cilium/cilium) v1.5.5
+  - [contiv](https://github.com/contiv/install) v1.2.1
+  - [flanneld](https://github.com/coreos/flannel) v0.11.0
+  - [kube-router](https://github.com/cloudnativelabs/kube-router) v0.2.5
+  - [multus](https://github.com/intel/multus-cni) v3.2.1
+  - [weave](https://github.com/weaveworks/weave) v2.5.2
 - Application
   - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
-  - [cert-manager](https://github.com/jetstack/cert-manager) v0.5.0
-  - [coredns](https://github.com/coredns/coredns) v1.2.2
-  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.19.0
+  - [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
+  - [cert-manager](https://github.com/jetstack/cert-manager) v0.5.2
+  - [coredns](https://github.com/coredns/coredns) v1.6.0
+  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.25.1
 
-Note: kubernetes doesn't support newer docker versions ("Version 17.03 is recommended... Versions 17.06+ might work, but have not yet been tested and verified by the Kubernetes node team" cf. [Bootstrapping Clusters with kubeadm](https://kubernetes.io/docs/setup/independent/install-kubeadm/#installing-docker)). Among other things kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
+Note: The list of validated [docker versions](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.13.md) was updated to 1.11.1, 1.12.1, 1.13.1, 17.03, 17.06, 17.09, 18.06. kubeadm now properly recognizes Docker 18.09.0 and newer, but still treats 18.06 as the default supported version. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
 
-Note 2: rkt support as docker alternative is limited to control plane (etcd and
-kubelet). Docker is still used for Kubernetes cluster workloads and network
-plugins' related OS services. Also note, only one of the supported network
-plugins can be deployed for a given single cluster.
-
 Requirements
 ------------
+- **Minimum required version of Kubernetes is v1.14**
-- **Ansible v2.4 (or newer) and python-netaddr is installed on the machine
+- **Ansible v2.7.8 (or newer, but [not 2.8.x](https://github.com/kubernetes-sigs/kubespray/issues/4778)) and python-netaddr is installed on the machine
   that will run Ansible commands**
 - **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
-- The target servers must have **access to the Internet** in order to pull docker images.
+- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/downloads.md#offline-environment))
 - The target servers are configured to allow **IPv4 forwarding**.
 - **Your ssh key must be copied** to all the servers part of your inventory.
 - The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
@@ -142,10 +147,18 @@ Requirements
   should be configured in the target servers. Then the `ansible_become` flag
   or command parameters `--become or -b` should be specified.
 
+Hardware:
+These limits are safe guarded by Kubespray. Actual requirements for your workload can differ. For a sizing guide go to the [Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/#size-of-master-and-master-components) guide.
+
+- Master
+  - Memory: 1500 MB
+- Node
+  - Memory: 1024 MB
+
 Network Plugins
 ---------------
 
-You can choose between 6 network plugins. (default: `calico`, except Vagrant uses `flannel`)
+You can choose between 10 network plugins. (default: `calico`, except Vagrant uses `flannel`)
 
 - [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.
 
@@ -159,7 +172,18 @@ You can choose between 6 network plugins. (default: `calico`, except Vagrant use
   apply firewall policies, segregate containers in multiple network and bridging pods onto physical networks.
 
 - [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
-  (Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html)).
+  (Please refer to `weave` [troubleshooting documentation](https://www.weave.works/docs/net/latest/troubleshooting/)).
+
+- [kube-ovn](docs/kube-ovn.md): Kube-OVN integrates the OVN-based Network Virtualization with Kubernetes. It offers an advanced Container Network Fabric for Enterprises.
+
+- [kube-router](docs/kube-router.md): Kube-router is a L3 CNI for Kubernetes networking aiming to provide operational
+  simplicity and high performance: it uses IPVS to provide Kube Services Proxy (if setup to replace kube-proxy),
+  iptables for network policies, and BGP for pods L3 networking (with optionally BGP peering with out-of-cluster BGP peers).
+  It can also optionally advertise routes to Kubernetes cluster Pods CIDRs, ClusterIPs, ExternalIPs and LoadBalancerIPs.
+
+- [macvlan](docs/macvlan.md): Macvlan is a Linux network driver. Pods have their own unique Mac and IP address, connected directly to the physical (layer 2) network.
+
+- [multus](docs/multus.md): Multus is a meta CNI plugin that provides multiple network interface support to pods. For each interface Multus delegates CNI calls to secondary CNI plugins such as Calico, macvlan, etc.
 
 The choice is defined with the variable `kube_network_plugin`. There is also an
 option to leverage built-in cloud provider networking instead.
@@ -177,13 +201,12 @@ Tools and projects on top of Kubespray
 --------------------------------------
 
 - [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/master/doc/integrations/ansible.rst)
-- [Fuel-ccp-installer](https://github.com/openstack/fuel-ccp-installer)
-- [Terraform Contrib](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/terraform)
+- [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform)
 
 CI Tests
 --------
 
-[![Build graphs](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/badges/master/build.svg)](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/pipelines)
+[![Build graphs](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/badges/master/build.svg)](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/pipelines)
 
 CI/end-to-end tests sponsored by Google (GCE)
 See the [test matrix](docs/test_cases.md) for details.
181  Vagrantfile  (vendored)
@@ -1,6 +1,8 @@
 # -*- mode: ruby -*-
 # # vi: set ft=ruby :
 
+# For help on using kubespray with vagrant, check out docs/vagrant.md
+
 require 'fileutils'
 
 Vagrant.require_version ">= 2.0.0"
@@ -13,14 +15,17 @@ COREOS_URL_TEMPLATE = "https://storage.googleapis.com/%s.release.core-os.net/amd
 DISK_UUID = Time.now.utc.to_i
 
 SUPPORTED_OS = {
-  "coreos-stable" => {box: "coreos-stable", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["stable"]},
-  "coreos-alpha" => {box: "coreos-alpha", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["alpha"]},
-  "coreos-beta" => {box: "coreos-beta", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
-  "ubuntu" => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "vagrant"},
-  "centos" => {box: "centos/7", bootstrap_os: "centos", user: "vagrant"},
-  "fedora" => {box: "fedora/28-cloud-base", bootstrap_os: "fedora", user: "vagrant"},
-  "opensuse" => {box: "opensuse/openSUSE-42.3-x86_64", bootstrap_os: "opensuse", use: "vagrant"},
-  "opensuse-tumbleweed" => {box: "opensuse/openSUSE-Tumbleweed-x86_64", bootstrap_os: "opensuse", use: "vagrant"},
+  "coreos-stable" => {box: "coreos-stable", user: "core", box_url: COREOS_URL_TEMPLATE % ["stable"]},
+  "coreos-alpha" => {box: "coreos-alpha", user: "core", box_url: COREOS_URL_TEMPLATE % ["alpha"]},
+  "coreos-beta" => {box: "coreos-beta", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
+  "ubuntu1604" => {box: "generic/ubuntu1604", user: "vagrant"},
+  "ubuntu1804" => {box: "generic/ubuntu1804", user: "vagrant"},
+  "centos" => {box: "centos/7", user: "vagrant"},
+  "centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
+  "fedora" => {box: "fedora/28-cloud-base", user: "vagrant"},
+  "opensuse" => {box: "opensuse/openSUSE-15.0-x86_64", user: "vagrant"},
+  "opensuse-tumbleweed" => {box: "opensuse/openSUSE-Tumbleweed-x86_64", user: "vagrant"},
+  "oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
 }
 
 # Defaults for config options defined in CONFIG
@@ -32,8 +37,10 @@ $vm_cpus = 1
 $shared_folders = {}
 $forwarded_ports = {}
 $subnet = "172.17.8"
-$os = "ubuntu"
+$os = "ubuntu1804"
 $network_plugin = "flannel"
+# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
+$multi_networking = false
 # The first three nodes are etcd servers
 $etcd_instances = $num_instances
 # The first two nodes are kube masters
@@ -44,11 +51,13 @@ $kube_node_instances = $num_instances
 $kube_node_instances_with_disks = false
 $kube_node_instances_with_disks_size = "20G"
 $kube_node_instances_with_disks_number = 2
+$override_disk_size = false
+$disk_size = "20GB"
+$local_path_provisioner_enabled = false
+$local_path_provisioner_claim_root = "/opt/local-path-provisioner/"
 
 $playbook = "cluster.yml"
 
-$local_release_dir = "/vagrant/temp"
-
 host_vars = {}
 
 if File.exist?(CONFIG)
@@ -57,13 +66,13 @@ end
 
 $box = SUPPORTED_OS[$os][:box]
 # if $inventory is not set, try to use example
-$inventory = File.join(File.dirname(__FILE__), "inventory", "sample") if ! $inventory
+$inventory = "inventory/sample" if ! $inventory
+$inventory = File.absolute_path($inventory, File.dirname(__FILE__))
 
-# if $inventory has a hosts file use it, otherwise copy over vars etc
-# to where vagrant expects dynamic inventory to be.
-if ! File.exist?(File.join(File.dirname($inventory), "hosts"))
-  $vagrant_ansible = File.join(File.dirname(__FILE__), ".vagrant",
-                               "provisioners", "ansible")
+# if $inventory has a hosts.ini file use it, otherwise copy over
+# vars etc to where vagrant expects dynamic inventory to be
+if ! File.exist?(File.join(File.dirname($inventory), "hosts.ini"))
+  $vagrant_ansible = File.join(File.dirname(__FILE__), ".vagrant", "provisioners", "ansible")
   FileUtils.mkdir_p($vagrant_ansible) if ! File.exist?($vagrant_ansible)
   if ! File.exist?(File.join($vagrant_ansible,"inventory"))
     FileUtils.ln_s($inventory, File.join($vagrant_ansible,"inventory"))
@@ -78,80 +87,68 @@ if Vagrant.has_plugin?("vagrant-proxyconf")
 end
 
 Vagrant.configure("2") do |config|
-  # always use Vagrants insecure key
-  config.ssh.insert_key = false
   config.vm.box = $box
   if SUPPORTED_OS[$os].has_key? :box_url
     config.vm.box_url = SUPPORTED_OS[$os][:box_url]
   end
   config.ssh.username = SUPPORTED_OS[$os][:user]
 
   # plugin conflict
   if Vagrant.has_plugin?("vagrant-vbguest") then
     config.vbguest.auto_update = false
   end
 
+  # always use Vagrants insecure key
+  config.ssh.insert_key = false
+
+  if ($override_disk_size)
+    unless Vagrant.has_plugin?("vagrant-disksize")
+      system "vagrant plugin install vagrant-disksize"
+    end
+    config.disksize.size = $disk_size
+  end
+
   (1..$num_instances).each do |i|
-    config.vm.define vm_name = "%s-%02d" % [$instance_name_prefix, i] do |config|
-      config.vm.hostname = vm_name
+    config.vm.define vm_name = "%s-%01d" % [$instance_name_prefix, i] do |node|
+      node.vm.hostname = vm_name
 
       if Vagrant.has_plugin?("vagrant-proxyconf")
-        config.proxy.http = ENV['HTTP_PROXY'] || ENV['http_proxy'] || ""
-        config.proxy.https = ENV['HTTPS_PROXY'] || ENV['https_proxy'] || ""
-        config.proxy.no_proxy = $no_proxy
-      end
-
-      if $expose_docker_tcp
-        config.vm.network "forwarded_port", guest: 2375, host: ($expose_docker_tcp + i - 1), auto_correct: true
-      end
-
-      $forwarded_ports.each do |guest, host|
-        config.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true
+        node.proxy.http = ENV['HTTP_PROXY'] || ENV['http_proxy'] || ""
+        node.proxy.https = ENV['HTTPS_PROXY'] || ENV['https_proxy'] || ""
+        node.proxy.no_proxy = $no_proxy
       end
 
       ["vmware_fusion", "vmware_workstation"].each do |vmware|
-        config.vm.provider vmware do |v|
+        node.vm.provider vmware do |v|
           v.vmx['memsize'] = $vm_memory
           v.vmx['numvcpus'] = $vm_cpus
         end
       end
 
-      config.vm.synced_folder ".", "/vagrant", type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
-
-      $shared_folders.each do |src, dst|
-        config.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
-      end
-
-      config.vm.provider :virtualbox do |vb|
-        vb.gui = $vm_gui
+      node.vm.provider :virtualbox do |vb|
        vb.memory = $vm_memory
        vb.cpus = $vm_cpus
+        vb.gui = $vm_gui
+        vb.linked_clone = true
+        vb.customize ["modifyvm", :id, "--vram", "8"] # ubuntu defaults to 256 MB which is a waste of precious RAM
      end
 
-      config.vm.provider :libvirt do |lv|
+      node.vm.provider :libvirt do |lv|
        lv.memory = $vm_memory
+        lv.cpus = $vm_cpus
+        lv.default_prefix = 'kubespray'
        # Fix kernel panic on fedora 28
        if $os == "fedora"
          lv.cpu_mode = "host-passthrough"
        end
      end
 
-      ip = "#{$subnet}.#{i+100}"
-      host_vars[vm_name] = {
-        "ip": ip,
-        "bootstrap_os": SUPPORTED_OS[$os][:bootstrap_os],
-        "local_release_dir" => $local_release_dir,
-        "download_run_once": "False",
-        "kube_network_plugin": $network_plugin
-      }
-
-      config.vm.network :private_network, ip: ip
-
-      # Disable swap for each vm
-      config.vm.provision "shell", inline: "swapoff -a"
-
      if $kube_node_instances_with_disks
        # Libvirt
        driverletters = ('a'..'z').to_a
-        config.vm.provider :libvirt do |lv|
+        node.vm.provider :libvirt do |lv|
          # always make /dev/sd{a/b/c} so that CI can ensure that
          # virtualbox and libvirt will have the same devices to use for OSDs
          (1..$kube_node_instances_with_disks_number).each do |d|
@@ -160,24 +157,64 @@ Vagrant.configure("2") do |config|
          end
        end
      end
 
-      # Only execute once the Ansible provisioner,
-      # when all the machines are up and ready.
+      if $expose_docker_tcp
+        node.vm.network "forwarded_port", guest: 2375, host: ($expose_docker_tcp + i - 1), auto_correct: true
+      end
+
+      $forwarded_ports.each do |guest, host|
+        node.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true
+      end
+
+      node.vm.synced_folder ".", "/vagrant", disabled: false, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] , rsync__exclude: ['.git','venv']
+      $shared_folders.each do |src, dst|
+        node.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
+      end
+
+      ip = "#{$subnet}.#{i+100}"
+      node.vm.network :private_network, ip: ip
+
+      # Disable swap for each vm
+      node.vm.provision "shell", inline: "swapoff -a"
+
+      host_vars[vm_name] = {
+        "ip": ip,
+        "flannel_interface": "eth1",
+        "kube_network_plugin": $network_plugin,
+        "kube_network_plugin_multus": $multi_networking,
+        "download_run_once": "True",
+        "download_localhost": "False",
+        "download_cache_dir": ENV['HOME'] + "/kubespray_cache",
+        # Make kubespray cache even when download_run_once is false
+        "download_force_cache": "True",
+        # Keeping the cache on the nodes can improve provisioning speed while debugging kubespray
+        "download_keep_remote_cache": "False",
+        "docker_keepcache": "1",
+        # These two settings will put kubectl and admin.config in $inventory/artifacts
+        "kubeconfig_localhost": "True",
+        "kubectl_localhost": "True",
+        "local_path_provisioner_enabled": "#{$local_path_provisioner_enabled}",
+        "local_path_provisioner_claim_root": "#{$local_path_provisioner_claim_root}",
+        "ansible_ssh_user": SUPPORTED_OS[$os][:user]
+      }
+
+      # Only execute the Ansible provisioner once, when all the machines are up and ready.
      if i == $num_instances
-        config.vm.provision "ansible" do |ansible|
+        node.vm.provision "ansible" do |ansible|
          ansible.playbook = $playbook
-          if File.exist?(File.join(File.dirname($inventory), "hosts"))
-            ansible.inventory_path = $inventory
+          $ansible_inventory_path = File.join( $inventory, "hosts.ini")
+          if File.exist?($ansible_inventory_path)
+            ansible.inventory_path = $ansible_inventory_path
          end
          ansible.become = true
          ansible.limit = "all"
          ansible.host_key_checking = false
-          ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache"]
+          ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache", "-e ansible_become_pass=vagrant"]
          ansible.host_vars = host_vars
          #ansible.tags = ['download']
          ansible.groups = {
-            "etcd" => ["#{$instance_name_prefix}-0[1:#{$etcd_instances}]"],
-            "kube-master" => ["#{$instance_name_prefix}-0[1:#{$kube_master_instances}]"],
-            "kube-node" => ["#{$instance_name_prefix}-0[1:#{$kube_node_instances}]"],
+            "etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
+            "kube-master" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
+            "kube-node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
            "k8s-cluster:children" => ["kube-master", "kube-node"],
          }
        end
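After this rework the defaults come from `$os = "ubuntu1804"` and `$network_plugin = "flannel"`, and every per-node option is handed to Ansible through `host_vars`. A typical local run then looks like the sketch below; overriding the defaults happens through the CONFIG file the Vagrantfile loads, whose path is defined outside the hunks shown here:

```sh
vagrant up --provider=virtualbox   # or --provider=libvirt
vagrant provision                  # re-run only the Ansible provisioner
vagrant destroy -f                 # tear the VMs down
```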
2  _config.yml  Normal file
@@ -0,0 +1,2 @@
---
theme: jekyll-theme-slate
@@ -3,6 +3,10 @@ pipelining=True
 ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
 #control_path = ~/.ssh/ansible-%%r@%%h:%%p
 [defaults]
+strategy_plugins = plugins/mitogen/ansible_mitogen/plugins/strategy
+# https://github.com/ansible/ansible/issues/56930 (to ignore group names with - and .)
+force_valid_group_names = ignore
+
 host_key_checking=False
 gathering = smart
 fact_caching = jsonfile
105
cluster.yml
105
cluster.yml
@@ -1,78 +1,68 @@
|
|||||||
---
|
---
|
||||||
- hosts: localhost
|
- hosts: localhost
|
||||||
|
gather_facts: false
|
||||||
|
become: no
|
||||||
|
tasks:
|
||||||
|
- name: "Check ansible version >=2.7.8"
|
||||||
|
assert:
|
||||||
|
msg: "Ansible must be v2.7.8 or higher"
|
||||||
|
that:
|
||||||
|
- ansible_version.string is version("2.7.8", ">=")
|
||||||
|
tags:
|
||||||
|
- check
|
||||||
|
vars:
|
||||||
|
ansible_connection: local
|
||||||
|
|
||||||
|
- hosts: bastion[0]
|
||||||
gather_facts: False
|
gather_facts: False
|
||||||
roles:
|
roles:
|
||||||
- { role: kubespray-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
|
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
|
||||||
|
|
||||||
- hosts: k8s-cluster:etcd:calico-rr
|
- hosts: k8s-cluster:etcd
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
gather_facts: false
|
gather_facts: false
|
||||||
vars:
|
|
||||||
# Need to disable pipelining for bootstrap-os as some systems have requiretty in sudoers set, which makes pipelining
|
|
||||||
# fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
|
|
||||||
ansible_ssh_pipelining: false
|
|
||||||
roles:
|
roles:
|
||||||
- { role: kubespray-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: bootstrap-os, tags: bootstrap-os}
|
- { role: bootstrap-os, tags: bootstrap-os}
|
||||||
|
|
||||||
- hosts: k8s-cluster:etcd:calico-rr
|
- hosts: k8s-cluster:etcd
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
|
||||||
vars:
|
|
||||||
ansible_ssh_pipelining: true
|
|
||||||
gather_facts: true
|
|
||||||
pre_tasks:
|
|
||||||
- name: gather facts from all instances
|
|
||||||
setup:
|
|
||||||
delegate_to: "{{item}}"
|
|
||||||
delegate_facts: True
|
|
||||||
with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]) }}"
|
|
||||||
|
|
||||||
- hosts: k8s-cluster:etcd:calico-rr
|
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kubespray-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: kubernetes/preinstall, tags: preinstall }
|
- { role: kubernetes/preinstall, tags: preinstall }
|
||||||
- { role: docker, tags: docker, when: container_manager == 'docker' }
|
- { role: "container-engine", tags: "container-engine", when: deploy_container_engine|default(true) }
|
||||||
- { role: cri-o, tags: crio, when: container_manager == 'crio' }
|
|
||||||
- role: rkt
|
|
||||||
tags: rkt
|
|
||||||
when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"
|
|
||||||
- { role: download, tags: download, when: "not skip_downloads" }
|
- { role: download, tags: download, when: "not skip_downloads" }
|
||||||
environment: "{{proxy_env}}"
|
environment: "{{ proxy_env }}"
|
||||||
|
|
||||||
- hosts: etcd:k8s-cluster:vault:calico-rr
|
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
|
||||||
roles:
|
|
||||||
- { role: kubespray-defaults, when: "cert_management == 'vault'" }
|
|
||||||
- { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" }
|
|
||||||
environment: "{{proxy_env}}"
|
|
||||||
|
|
||||||
- hosts: etcd
|
- hosts: etcd
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kubespray-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: etcd, tags: etcd, etcd_cluster_setup: true, etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}" }
|
- role: etcd
|
||||||
|
tags: etcd
|
||||||
|
vars:
|
||||||
|
etcd_cluster_setup: true
|
||||||
|
etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
|
||||||
|
when: not etcd_kubeadm_enabled| default(false)
|
||||||
|
|
||||||
- hosts: k8s-cluster:calico-rr
|
- hosts: k8s-cluster
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kubespray-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: etcd, tags: etcd, etcd_cluster_setup: false, etcd_events_cluster_setup: false }
|
- role: etcd
|
||||||
|
tags: etcd
|
||||||
- hosts: etcd:k8s-cluster:vault:calico-rr
|
vars:
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
etcd_cluster_setup: false
|
||||||
roles:
|
etcd_events_cluster_setup: false
|
||||||
- { role: kubespray-defaults}
|
when: not etcd_kubeadm_enabled| default(false)
|
||||||
- { role: vault, tags: vault, when: "cert_management == 'vault'"}
|
|
||||||
environment: "{{proxy_env}}"
|
|
||||||
|
|
||||||
- hosts: k8s-cluster
|
- hosts: k8s-cluster
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kubespray-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: kubernetes/node, tags: node }
|
- { role: kubernetes/node, tags: node }
|
||||||
environment: "{{proxy_env}}"
|
environment: "{{ proxy_env }}"
|
||||||
|
|
||||||
- hosts: kube-master
|
- hosts: kube-master
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
@@ -86,15 +76,21 @@
|
|||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kubespray-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }
|
- { role: kubernetes/kubeadm, tags: kubeadm}
|
||||||
- { role: network_plugin, tags: network }
|
- { role: network_plugin, tags: network }
|
||||||
|
|
||||||
|
- hosts: calico-rr
|
||||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
|
roles:
|
||||||
|
- { role: kubespray-defaults}
|
||||||
|
- { role: network_plugin/calico/rr, tags: ['network', 'calico_rr']}
|
||||||
|
|
||||||
- hosts: kube-master[0]
|
- hosts: kube-master[0]
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kubespray-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
|
- { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
|
||||||
- { role: win_nodes/kubernetes_patch, tags: win_nodes, when: "kubeadm_enabled" }
|
- { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"]}
|
||||||
|
|
||||||
- hosts: kube-master
|
- hosts: kube-master
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
@@ -105,22 +101,15 @@
|
|||||||
- { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
|
- { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
|
||||||
- { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
|
- { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
|
||||||
|
|
||||||
- hosts: calico-rr
|
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
|
||||||
roles:
|
|
||||||
- { role: kubespray-defaults}
|
|
||||||
- { role: network_plugin/calico/rr, tags: network }
|
|
||||||
|
|
||||||
- hosts: k8s-cluster
|
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
|
||||||
roles:
|
|
||||||
- { role: kubespray-defaults}
|
|
||||||
- { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq }
|
|
||||||
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf }
|
|
||||||
environment: "{{proxy_env}}"
|
|
||||||
|
|
||||||
- hosts: kube-master
|
- hosts: kube-master
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kubespray-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: kubernetes-apps, tags: apps }
|
- { role: kubernetes-apps, tags: apps }
|
||||||
|
environment: "{{ proxy_env }}"
|
||||||
|
|
||||||
|
- hosts: k8s-cluster
|
||||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
|
roles:
|
||||||
|
- { role: kubespray-defaults}
|
||||||
|
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
#!/usr/bin/env python
|
#!/usr/bin/env python
|
||||||
|
|
||||||
|
from __future__ import print_function
|
||||||
import boto3
|
import boto3
|
||||||
import os
|
import os
|
||||||
import argparse
|
import argparse
|
||||||
@@ -13,7 +14,7 @@ class SearchEC2Tags(object):
|
|||||||
self.search_tags()
|
self.search_tags()
|
||||||
if self.args.host:
|
if self.args.host:
|
||||||
data = {}
|
data = {}
|
||||||
print json.dumps(data, indent=2)
|
print(json.dumps(data, indent=2))
|
||||||
|
|
||||||
def parse_args(self):
|
def parse_args(self):
|
||||||
|
|
||||||
@@ -41,21 +42,35 @@ class SearchEC2Tags(object):
|
|||||||
region = os.environ['REGION']
|
region = os.environ['REGION']
|
||||||
|
|
||||||
ec2 = boto3.resource('ec2', region)
|
ec2 = boto3.resource('ec2', region)
|
||||||
|
filters = [{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}]
|
||||||
instances = ec2.instances.filter(Filters=[{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}])
|
cluster_name = os.getenv('CLUSTER_NAME')
|
||||||
|
if cluster_name:
|
||||||
|
filters.append({'Name': 'tag-key', 'Values': ['kubernetes.io/cluster/'+cluster_name]})
|
||||||
|
instances = ec2.instances.filter(Filters=filters)
|
||||||
for instance in instances:
|
for instance in instances:
|
||||||
|
|
||||||
|
##Suppose default vpc_visibility is private
|
||||||
|
dns_name = instance.private_dns_name
|
||||||
|
ansible_host = {
|
||||||
|
'ansible_ssh_host': instance.private_ip_address
|
||||||
|
}
|
||||||
|
|
||||||
|
##Override when vpc_visibility actually is public
|
||||||
if self.vpc_visibility == "public":
|
if self.vpc_visibility == "public":
|
||||||
hosts[group].append(instance.public_dns_name)
|
dns_name = instance.public_dns_name
|
||||||
hosts['_meta']['hostvars'][instance.public_dns_name] = {
|
ansible_host = {
|
||||||
'ansible_ssh_host': instance.public_ip_address
|
'ansible_ssh_host': instance.public_ip_address
|
||||||
}
|
|
||||||
else:
|
|
||||||
hosts[group].append(instance.private_dns_name)
|
|
||||||
hosts['_meta']['hostvars'][instance.private_dns_name] = {
|
|
||||||
'ansible_ssh_host': instance.private_ip_address
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
##Set when instance actually has node_labels
|
||||||
|
node_labels_tag = list(filter(lambda t: t['Key'] == 'kubespray-node-labels', instance.tags))
|
||||||
|
if node_labels_tag:
|
||||||
|
ansible_host['node_labels'] = dict([ label.strip().split('=') for label in node_labels_tag[0]['Value'].split(',') ])
|
||||||
|
|
||||||
|
hosts[group].append(dns_name)
|
||||||
|
hosts['_meta']['hostvars'][dns_name] = ansible_host
|
||||||
|
|
||||||
hosts['k8s-cluster'] = {'children':['kube-master', 'kube-node']}
|
hosts['k8s-cluster'] = {'children':['kube-master', 'kube-node']}
|
||||||
print json.dumps(hosts, sort_keys=True, indent=2)
|
print(json.dumps(hosts, sort_keys=True, indent=2))
|
||||||
|
|
||||||
SearchEC2Tags()
|
SearchEC2Tags()
|
||||||
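For reference, the `kubespray-node-labels` handling added above splits a comma-separated tag value into a `node_labels` dict for the host. A minimal standalone sketch of that parsing, with a made-up tag value:

```python
# Hedged sketch of the node-labels parsing used in the EC2 inventory above.
# The tag value is assumed to look like "env=staging,tier=frontend".
def parse_node_labels(tag_value):
    # Split "k=v" pairs on commas, strip whitespace, and build a dict.
    return dict(label.strip().split('=') for label in tag_value.split(','))

tags = [{'Key': 'kubespray-node-labels', 'Value': 'env=staging,tier=frontend'}]
node_labels_tag = [t for t in tags if t['Key'] == 'kubespray-node-labels']
if node_labels_tag:
    print(parse_node_labels(node_labels_tag[0]['Value']))
    # {'env': 'staging', 'tier': 'frontend'}
```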
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
|
|
||||||
# Due to some Azure limitations (ex:- Storage Account's name must be unique),
|
# Due to some Azure limitations (ex:- Storage Account's name must be unique),
|
||||||
# this name must be globally unique - it will be used as a prefix for azure components
|
# this name must be globally unique - it will be used as a prefix for azure components
|
||||||
cluster_name: example
|
cluster_name: example
|
||||||
|
|
||||||
@@ -7,6 +7,10 @@ cluster_name: example
|
|||||||
# node that can be used to access the masters and minions
|
# node that can be used to access the masters and minions
|
||||||
use_bastion: false
|
use_bastion: false
|
||||||
|
|
||||||
|
# Set this to a preferred name that will be used as the first part of the DNS name for your bastion host. For example: k8s-bastion.<azureregion>.cloudapp.azure.com.
|
||||||
|
# This is convenient when exceptions have to be configured on a firewall to allow ssh to the given bastion host.
|
||||||
|
# bastion_domain_prefix: k8s-bastion
|
||||||
|
|
||||||
number_of_k8s_masters: 3
|
number_of_k8s_masters: 3
|
||||||
number_of_k8s_nodes: 3
|
number_of_k8s_nodes: 3
|
||||||
|
|
||||||
@@ -20,7 +24,8 @@ admin_username: devops
|
|||||||
admin_password: changeme
|
admin_password: changeme
|
||||||
|
|
||||||
# MAKE SURE TO CHANGE THIS TO YOUR PUBLIC KEY to access your azure machines
|
# MAKE SURE TO CHANGE THIS TO YOUR PUBLIC KEY to access your azure machines
|
||||||
ssh_public_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLRzcxbsFDdEibiyXCSdIFh7bKbXso1NqlKjEyPTptf3aBXHEhVil0lJRjGpTlpfTy7PHvXFbXIOCdv9tOmeH1uxWDDeZawgPFV6VSZ1QneCL+8bxzhjiCn8133wBSPZkN8rbFKd9eEUUBfx8ipCblYblF9FcidylwtMt5TeEmXk8yRVkPiCuEYuDplhc2H0f4PsK3pFb5aDVdaDT3VeIypnOQZZoUxHWqm6ThyHrzLJd3SrZf+RROFWW1uInIDf/SZlXojczUYoffxgT1lERfOJCHJXsqbZWugbxQBwqsVsX59+KPxFFo6nV88h3UQr63wbFx52/MXkX4WrCkAHzN ablock-vwfs@dell-lappy"
|
ssh_public_keys:
|
||||||
|
- "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLRzcxbsFDdEibiyXCSdIFh7bKbXso1NqlKjEyPTptf3aBXHEhVil0lJRjGpTlpfTy7PHvXFbXIOCdv9tOmeH1uxWDDeZawgPFV6VSZ1QneCL+8bxzhjiCn8133wBSPZkN8rbFKd9eEUUBfx8ipCblYblF9FcidylwtMt5TeEmXk8yRVkPiCuEYuDplhc2H0f4PsK3pFb5aDVdaDT3VeIypnOQZZoUxHWqm6ThyHrzLJd3SrZf+RROFWW1uInIDf/SZlXojczUYoffxgT1lERfOJCHJXsqbZWugbxQBwqsVsX59+KPxFFo6nV88h3UQr63wbFx52/MXkX4WrCkAHzN ablock-vwfs@dell-lappy"
|
||||||
|
|
||||||
# Disable SSH password authentication. Change it to false to allow connecting to SSH with a password
|
# Disable SSH password authentication. Change it to false to allow connecting to SSH with a password
|
||||||
disablePasswordAuthentication: true
|
disablePasswordAuthentication: true
|
||||||
|
|||||||
@@ -4,8 +4,11 @@
|
|||||||
command: azure vm list-ip-address --json {{ azure_resource_group }}
|
command: azure vm list-ip-address --json {{ azure_resource_group }}
|
||||||
register: vm_list_cmd
|
register: vm_list_cmd
|
||||||
|
|
||||||
- set_fact:
|
- name: Set vm_list
|
||||||
|
set_fact:
|
||||||
vm_list: "{{ vm_list_cmd.stdout }}"
|
vm_list: "{{ vm_list_cmd.stdout }}"
|
||||||
|
|
||||||
- name: Generate inventory
|
- name: Generate inventory
|
||||||
template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
|
template:
|
||||||
|
src: inventory.j2
|
||||||
|
dest: "{{ playbook_dir }}/inventory"
|
||||||
|
|||||||
@@ -8,9 +8,22 @@
|
|||||||
command: az vm list -o json --resource-group {{ azure_resource_group }}
|
command: az vm list -o json --resource-group {{ azure_resource_group }}
|
||||||
register: vm_list_cmd
|
register: vm_list_cmd
|
||||||
|
|
||||||
- set_fact:
|
- name: Query Azure Load Balancer Public IP
|
||||||
|
command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
|
||||||
|
register: lb_pubip_cmd
|
||||||
|
|
||||||
|
- name: Set VM IP, roles lists and load balancer public IP
|
||||||
|
set_fact:
|
||||||
vm_ip_list: "{{ vm_ip_list_cmd.stdout }}"
|
vm_ip_list: "{{ vm_ip_list_cmd.stdout }}"
|
||||||
vm_roles_list: "{{ vm_list_cmd.stdout }}"
|
vm_roles_list: "{{ vm_list_cmd.stdout }}"
|
||||||
|
lb_pubip: "{{ lb_pubip_cmd.stdout }}"
|
||||||
|
|
||||||
- name: Generate inventory
|
- name: Generate inventory
|
||||||
template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
|
template:
|
||||||
|
src: inventory.j2
|
||||||
|
dest: "{{ playbook_dir }}/inventory"
|
||||||
|
|
||||||
|
- name: Generate Load Balancer variables
|
||||||
|
template:
|
||||||
|
src: loadbalancer_vars.j2
|
||||||
|
dest: "{{ playbook_dir }}/loadbalancer_vars.yml"
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
|
|
||||||
{% for vm in vm_ip_list %}
|
{% for vm in vm_ip_list %}
|
||||||
{% if not use_bastion or vm.virtualMachine.name == 'bastion' %}
|
{% if not use_bastion or vm.virtualMachine.name == 'bastion' %}
|
||||||
{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.publicIpAddresses[0].ipAddress }} ip={{ vm.virtualMachine.network.privateIpAddresses[0] }}
|
{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.publicIpAddresses[0].ipAddress }} ip={{ vm.virtualMachine.network.privateIpAddresses[0] }}
|
||||||
{% else %}
|
{% else %}
|
||||||
|
|||||||
@@ -0,0 +1,8 @@
|
|||||||
|
## External LB example config
|
||||||
|
apiserver_loadbalancer_domain_name: {{ lb_pubip.dnsSettings.fqdn }}
|
||||||
|
loadbalancer_apiserver:
|
||||||
|
address: {{ lb_pubip.ipAddress }}
|
||||||
|
port: 6443
|
||||||
|
|
||||||
|
## Internal loadbalancers for apiservers
|
||||||
|
loadbalancer_apiserver_localhost: false
|
||||||
@@ -1,3 +1,4 @@
|
|||||||
|
---
|
||||||
apiVersion: "2015-06-15"
|
apiVersion: "2015-06-15"
|
||||||
|
|
||||||
virtualNetworkName: "{{ azure_virtual_network_name | default('KubeVNET') }}"
|
virtualNetworkName: "{{ azure_virtual_network_name | default('KubeVNET') }}"
|
||||||
@@ -28,10 +29,9 @@ sshKeyPath: "/home/{{admin_username}}/.ssh/authorized_keys"
|
|||||||
imageReference:
|
imageReference:
|
||||||
publisher: "OpenLogic"
|
publisher: "OpenLogic"
|
||||||
offer: "CentOS"
|
offer: "CentOS"
|
||||||
sku: "7.2"
|
sku: "7.5"
|
||||||
version: "latest"
|
version: "latest"
|
||||||
imageReferenceJson: "{{imageReference|to_json}}"
|
imageReferenceJson: "{{imageReference|to_json}}"
|
||||||
|
|
||||||
storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
|
storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
|
||||||
storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}"
|
storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}"
|
||||||
|
|
||||||
|
|||||||
@@ -1,9 +1,18 @@
|
|||||||
- set_fact:
|
---
|
||||||
base_dir: "{{playbook_dir}}/.generated/"
|
- name: Set base_dir
|
||||||
|
set_fact:
|
||||||
|
base_dir: "{{ playbook_dir }}/.generated/"
|
||||||
|
|
||||||
- file: path={{base_dir}} state=directory recurse=true
|
- name: Create base_dir
|
||||||
|
file:
|
||||||
|
path: "{{ base_dir }}"
|
||||||
|
state: directory
|
||||||
|
recurse: true
|
||||||
|
|
||||||
- template: src={{item}} dest="{{base_dir}}/{{item}}"
|
- name: Store json files in base_dir
|
||||||
|
template:
|
||||||
|
src: "{{ item }}"
|
||||||
|
dest: "{{ base_dir }}/{{ item }}"
|
||||||
with_items:
|
with_items:
|
||||||
- network.json
|
- network.json
|
||||||
- storage.json
|
- storage.json
|
||||||
|
|||||||
@@ -15,7 +15,12 @@
|
|||||||
"name": "{{bastionIPAddressName}}",
|
"name": "{{bastionIPAddressName}}",
|
||||||
"location": "[resourceGroup().location]",
|
"location": "[resourceGroup().location]",
|
||||||
"properties": {
|
"properties": {
|
||||||
"publicIPAllocationMethod": "Static"
|
"publicIPAllocationMethod": "Static",
|
||||||
|
"dnsSettings": {
|
||||||
|
{% if bastion_domain_prefix %}
|
||||||
|
"domainNameLabel": "{{ bastion_domain_prefix }}"
|
||||||
|
{% endif %}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -66,10 +71,12 @@
|
|||||||
"disablePasswordAuthentication": "true",
|
"disablePasswordAuthentication": "true",
|
||||||
"ssh": {
|
"ssh": {
|
||||||
"publicKeys": [
|
"publicKeys": [
|
||||||
|
{% for key in ssh_public_keys %}
|
||||||
{
|
{
|
||||||
"path": "{{sshKeyPath}}",
|
"path": "{{sshKeyPath}}",
|
||||||
"keyData": "{{ssh_public_key}}"
|
"keyData": "{{key}}"
|
||||||
}
|
}{% if loop.index < ssh_public_keys | length %},{% endif %}
|
||||||
|
{% endfor %}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -162,10 +162,12 @@
|
|||||||
"disablePasswordAuthentication": "{{disablePasswordAuthentication}}",
|
"disablePasswordAuthentication": "{{disablePasswordAuthentication}}",
|
||||||
"ssh": {
|
"ssh": {
|
||||||
"publicKeys": [
|
"publicKeys": [
|
||||||
|
{% for key in ssh_public_keys %}
|
||||||
{
|
{
|
||||||
"path": "{{sshKeyPath}}",
|
"path": "{{sshKeyPath}}",
|
||||||
"keyData": "{{ssh_public_key}}"
|
"keyData": "{{key}}"
|
||||||
}
|
}{% if loop.index < ssh_public_keys | length %},{% endif %}
|
||||||
|
{% endfor %}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -79,10 +79,12 @@
|
|||||||
"disablePasswordAuthentication": "{{disablePasswordAuthentication}}",
|
"disablePasswordAuthentication": "{{disablePasswordAuthentication}}",
|
||||||
"ssh": {
|
"ssh": {
|
||||||
"publicKeys": [
|
"publicKeys": [
|
||||||
|
{% for key in ssh_public_keys %}
|
||||||
{
|
{
|
||||||
"path": "{{sshKeyPath}}",
|
"path": "{{sshKeyPath}}",
|
||||||
"keyData": "{{ssh_public_key}}"
|
"keyData": "{{key}}"
|
||||||
}
|
}{% if loop.index < ssh_public_keys | length %},{% endif %}
|
||||||
|
{% endfor %}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
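The three template hunks above apply the same pattern: loop over `ssh_public_keys` and emit a comma after every entry except the last, so the generated JSON stays valid. A hedged standalone check of that loop using jinja2 (key strings and path are placeholders):

```python
# Render the publicKeys loop from the ARM templates and confirm the output
# parses as JSON: a comma follows every entry except the last one.
import json
from jinja2 import Template

TEMPLATE = """
[
{% for key in ssh_public_keys %}
  {"path": "{{ sshKeyPath }}", "keyData": "{{ key }}"}{% if loop.index < ssh_public_keys | length %},{% endif %}
{% endfor %}
]
"""

rendered = Template(TEMPLATE).render(
    sshKeyPath="/home/devops/.ssh/authorized_keys",
    ssh_public_keys=["ssh-rsa AAAA...one", "ssh-rsa AAAA...two"],
)
print(json.loads(rendered))  # parses cleanly -> list of two key objects
```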
|
|||||||
176 contrib/dind/README.md (new file)
@@ -0,0 +1,176 @@
|
|||||||
|
# Kubespray DIND experimental setup
|
||||||
|
|
||||||
|
This ansible playbook creates local docker containers
|
||||||
|
to serve as Kubernetes "nodes", which in turn will run
|
||||||
|
"normal" Kubernetes docker containers, a mode usually
|
||||||
|
called DIND (Docker-IN-Docker).
|
||||||
|
|
||||||
|
The playbook has two roles:
|
||||||
|
- dind-host: creates the "nodes" as containers in localhost, with
|
||||||
|
appropriate settings for DIND (privileged, volume mapping for dind
|
||||||
|
storage, etc).
|
||||||
|
- dind-cluster: customizes each node container to have required
|
||||||
|
system packages installed, and some utils (swapoff, lsattr)
|
||||||
|
symlinked to /bin/true to ease mimicking a real node.
|
||||||
|
|
||||||
|
This playbook has been tested with Ubuntu 16.04 as host and ubuntu:16.04
|
||||||
|
as docker images (note that dind-cluster has specific customization
|
||||||
|
for these images).
|
||||||
|
|
||||||
|
The playbook also creates a `/tmp/kubespray.dind.inventory_builder.sh`
|
||||||
|
helper (wraps up running `contrib/inventory_builder/inventory.py` with
|
||||||
|
node containers IPs and prefix).
|
||||||
|
|
||||||
|
## Deploying
|
||||||
|
|
||||||
|
See below for a complete successful run:
|
||||||
|
|
||||||
|
1. Create the node containers
|
||||||
|
|
||||||
|
~~~~
|
||||||
|
# From the kubespray root dir
|
||||||
|
cd contrib/dind
|
||||||
|
pip install -r requirements.txt
|
||||||
|
|
||||||
|
ansible-playbook -i hosts dind-cluster.yaml
|
||||||
|
|
||||||
|
# Back to kubespray root
|
||||||
|
cd ../..
|
||||||
|
~~~~
|
||||||
|
|
||||||
|
NOTE: if the playbook run fails with an error message like the one below,
|
||||||
|
you may need to explicitly set `ansible_python_interpreter`,
|
||||||
|
see `./hosts` file for an example expanded localhost entry.
|
||||||
|
|
||||||
|
~~~
|
||||||
|
failed: [localhost] (item=kube-node1) => {"changed": false, "item": "kube-node1", "msg": "Failed to import docker or docker-py - No module named requests.exceptions. Try `pip install docker` or `pip install docker-py` (Python 2.6)"}
|
||||||
|
~~~
|
||||||
|
|
||||||
|
2. Customize kubespray-dind.yaml
|
||||||
|
|
||||||
|
Note that there's coupling between the node containers created above
|
||||||
|
and `kubespray-dind.yaml` settings, in particular regarding selected `node_distro`
|
||||||
|
(as set in `group_vars/all/all.yaml`), and docker settings.
|
||||||
|
|
||||||
|
~~~
|
||||||
|
$EDITOR contrib/dind/kubespray-dind.yaml
|
||||||
|
~~~
|
||||||
|
|
||||||
|
3. Prepare the inventory and run the playbook
|
||||||
|
|
||||||
|
~~~
|
||||||
|
INVENTORY_DIR=inventory/local-dind
|
||||||
|
mkdir -p ${INVENTORY_DIR}
|
||||||
|
rm -f ${INVENTORY_DIR}/hosts.ini
|
||||||
|
CONFIG_FILE=${INVENTORY_DIR}/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
|
||||||
|
|
||||||
|
ansible-playbook --become -e ansible_ssh_user=debian -i ${INVENTORY_DIR}/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml
|
||||||
|
~~~
|
||||||
|
|
||||||
|
NOTE: You could also test other distros without editing files by
|
||||||
|
passing `--extra-vars` on the command line as shown below,
|
||||||
|
replacing `DISTRO` with one of `debian`, `ubuntu`, `centos`, or `fedora`:
|
||||||
|
|
||||||
|
~~~
|
||||||
|
cd contrib/dind
|
||||||
|
ansible-playbook -i hosts dind-cluster.yaml --extra-vars node_distro=DISTRO
|
||||||
|
|
||||||
|
cd ../..
|
||||||
|
CONFIG_FILE=inventory/local-dind/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
|
||||||
|
ansible-playbook --become -e ansible_ssh_user=DISTRO -i inventory/local-dind/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml --extra-vars bootstrap_os=DISTRO
|
||||||
|
~~~
|
||||||
|
|
||||||
|
## Resulting deployment
|
||||||
|
|
||||||
|
See below to get an idea of what a completed deployment looks like,
|
||||||
|
from the host where you ran kubespray playbooks.
|
||||||
|
|
||||||
|
### node_distro: debian
|
||||||
|
|
||||||
|
Running from an Ubuntu Xenial host:
|
||||||
|
|
||||||
|
~~~
|
||||||
|
$ uname -a
|
||||||
|
Linux ip-xx-xx-xx-xx 4.4.0-1069-aws #79-Ubuntu SMP Mon Sep 24
|
||||||
|
15:01:41 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
|
||||||
|
|
||||||
|
$ docker ps
|
||||||
|
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||||
|
1835dd183b75 debian:9.5 "sh -c 'apt-get -qy …" 43 minutes ago Up 43 minutes kube-node5
|
||||||
|
30b0af8d2924 debian:9.5 "sh -c 'apt-get -qy …" 43 minutes ago Up 43 minutes kube-node4
|
||||||
|
3e0d1510c62f debian:9.5 "sh -c 'apt-get -qy …" 43 minutes ago Up 43 minutes kube-node3
|
||||||
|
738993566f94 debian:9.5 "sh -c 'apt-get -qy …" 44 minutes ago Up 44 minutes kube-node2
|
||||||
|
c581ef662ed2 debian:9.5 "sh -c 'apt-get -qy …" 44 minutes ago Up 44 minutes kube-node1
|
||||||
|
|
||||||
|
$ docker exec kube-node1 kubectl get node
|
||||||
|
NAME STATUS ROLES AGE VERSION
|
||||||
|
kube-node1 Ready master,node 18m v1.12.1
|
||||||
|
kube-node2 Ready master,node 17m v1.12.1
|
||||||
|
kube-node3 Ready node 17m v1.12.1
|
||||||
|
kube-node4 Ready node 17m v1.12.1
|
||||||
|
kube-node5 Ready node 17m v1.12.1
|
||||||
|
|
||||||
|
$ docker exec kube-node1 kubectl get pod --all-namespaces
|
||||||
|
NAMESPACE NAME READY STATUS RESTARTS AGE
|
||||||
|
default netchecker-agent-67489 1/1 Running 0 2m51s
|
||||||
|
default netchecker-agent-6qq6s 1/1 Running 0 2m51s
|
||||||
|
default netchecker-agent-fsw92 1/1 Running 0 2m51s
|
||||||
|
default netchecker-agent-fw6tl 1/1 Running 0 2m51s
|
||||||
|
default netchecker-agent-hostnet-8f2zb 1/1 Running 0 3m
|
||||||
|
default netchecker-agent-hostnet-gq7ml 1/1 Running 0 3m
|
||||||
|
default netchecker-agent-hostnet-jfkgv 1/1 Running 0 3m
|
||||||
|
default netchecker-agent-hostnet-kwfwx 1/1 Running 0 3m
|
||||||
|
default netchecker-agent-hostnet-r46nm 1/1 Running 0 3m
|
||||||
|
default netchecker-agent-lxdrn 1/1 Running 0 2m51s
|
||||||
|
default netchecker-server-864bd4c897-9vstl 1/1 Running 0 2m40s
|
||||||
|
default sh-68fcc6db45-qf55h 1/1 Running 1 12m
|
||||||
|
kube-system coredns-7598f59475-6vknq 1/1 Running 0 14m
|
||||||
|
kube-system coredns-7598f59475-l5q5x 1/1 Running 0 14m
|
||||||
|
kube-system kube-apiserver-kube-node1 1/1 Running 0 17m
|
||||||
|
kube-system kube-apiserver-kube-node2 1/1 Running 0 18m
|
||||||
|
kube-system kube-controller-manager-kube-node1 1/1 Running 0 18m
|
||||||
|
kube-system kube-controller-manager-kube-node2 1/1 Running 0 18m
|
||||||
|
kube-system kube-proxy-5xx9d 1/1 Running 0 17m
|
||||||
|
kube-system kube-proxy-cdqq4 1/1 Running 0 17m
|
||||||
|
kube-system kube-proxy-n64ls 1/1 Running 0 17m
|
||||||
|
kube-system kube-proxy-pswmj 1/1 Running 0 18m
|
||||||
|
kube-system kube-proxy-x89qw 1/1 Running 0 18m
|
||||||
|
kube-system kube-scheduler-kube-node1 1/1 Running 4 17m
|
||||||
|
kube-system kube-scheduler-kube-node2 1/1 Running 4 18m
|
||||||
|
kube-system kubernetes-dashboard-5db4d9f45f-548rl 1/1 Running 0 14m
|
||||||
|
kube-system nginx-proxy-kube-node3 1/1 Running 4 17m
|
||||||
|
kube-system nginx-proxy-kube-node4 1/1 Running 4 17m
|
||||||
|
kube-system nginx-proxy-kube-node5 1/1 Running 4 17m
|
||||||
|
kube-system weave-net-42bfr 2/2 Running 0 16m
|
||||||
|
kube-system weave-net-6gt8m 2/2 Running 0 16m
|
||||||
|
kube-system weave-net-88nnc 2/2 Running 0 16m
|
||||||
|
kube-system weave-net-shckr 2/2 Running 0 16m
|
||||||
|
kube-system weave-net-xr46t 2/2 Running 0 16m
|
||||||
|
|
||||||
|
$ docker exec kube-node1 curl -s http://localhost:31081/api/v1/connectivity_check
|
||||||
|
{"Message":"All 10 pods successfully reported back to the server","Absent":null,"Outdated":null}
|
||||||
|
~~~
|
||||||
|
|
||||||
|
## Using ./run-test-distros.sh
|
||||||
|
|
||||||
|
You can use `./run-test-distros.sh` to run a set of tests via DIND,
|
||||||
|
and here is an excerpt from the script, to give you an idea:
|
||||||
|
|
||||||
|
~~~
|
||||||
|
# The SPEC file(s) must have two arrays as e.g.
|
||||||
|
# DISTROS=(debian centos)
|
||||||
|
# EXTRAS=(
|
||||||
|
# 'kube_network_plugin=calico'
|
||||||
|
# 'kube_network_plugin=flannel'
|
||||||
|
# 'kube_network_plugin=weave'
|
||||||
|
# )
|
||||||
|
# that will be tested in a "combinatory" way (e.g. from above there'll be
|
||||||
|
# 6 test runs), creating a sequenced <spec_filename>-nn.out with each output.
|
||||||
|
#
|
||||||
|
# Each $EXTRAS element will be whitespace split, and passed as --extra-vars
|
||||||
|
# to main kubespray ansible-playbook run.
|
||||||
|
~~~
|
||||||
|
|
||||||
|
See e.g. `test-some_distros-most_CNIs.env` and
|
||||||
|
`test-some_distros-kube_router_combo.env` in particular for a richer
|
||||||
|
set of CNI-specific `--extra-vars` combinations.
|
||||||
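The spec-file format described above is a plain cross product: every distro in `DISTROS` is run against every entry in `EXTRAS`. A small sketch of how that expands into numbered runs (values copied from the comment, output naming illustrative):

```python
# Sketch of the DISTROS x EXTRAS expansion described in run-test-distros.sh.
from itertools import product

DISTROS = ["debian", "centos"]
EXTRAS = [
    "kube_network_plugin=calico",
    "kube_network_plugin=flannel",
    "kube_network_plugin=weave",
]

for n, (distro, extra) in enumerate(product(DISTROS, EXTRAS), start=1):
    # Each combination becomes one test run and one <spec_filename>-nn.out file.
    print("run %02d: distro=%s extra-vars=%s" % (n, distro, extra))
# 2 distros x 3 extras -> 6 runs, matching the comment in the spec file format.
```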
9 contrib/dind/dind-cluster.yaml (new file)
@@ -0,0 +1,9 @@
|
|||||||
|
---
|
||||||
|
- hosts: localhost
|
||||||
|
gather_facts: False
|
||||||
|
roles:
|
||||||
|
- { role: dind-host }
|
||||||
|
|
||||||
|
- hosts: containers
|
||||||
|
roles:
|
||||||
|
- { role: dind-cluster }
|
||||||
3 contrib/dind/group_vars/all/all.yaml (new file)
@@ -0,0 +1,3 @@
|
|||||||
|
---
|
||||||
|
# See distro.yaml for supported node_distro images
|
||||||
|
node_distro: debian
|
||||||
41 contrib/dind/group_vars/all/distro.yaml (new file)
@@ -0,0 +1,41 @@
|
|||||||
|
---
|
||||||
|
distro_settings:
|
||||||
|
debian: &DEBIAN
|
||||||
|
image: "debian:9.5"
|
||||||
|
user: "debian"
|
||||||
|
pid1_exe: /lib/systemd/systemd
|
||||||
|
init: |
|
||||||
|
sh -c "apt-get -qy update && apt-get -qy install systemd-sysv dbus && exec /sbin/init"
|
||||||
|
raw_setup: apt-get -qy update && apt-get -qy install dbus python sudo iproute2
|
||||||
|
raw_setup_done: test -x /usr/bin/sudo
|
||||||
|
agetty_svc: getty@*
|
||||||
|
ssh_service: ssh
|
||||||
|
extra_packages: []
|
||||||
|
ubuntu:
|
||||||
|
<<: *DEBIAN
|
||||||
|
image: "ubuntu:16.04"
|
||||||
|
user: "ubuntu"
|
||||||
|
init: |
|
||||||
|
/sbin/init
|
||||||
|
centos: &CENTOS
|
||||||
|
image: "centos:7"
|
||||||
|
user: "centos"
|
||||||
|
pid1_exe: /usr/lib/systemd/systemd
|
||||||
|
init: |
|
||||||
|
/sbin/init
|
||||||
|
raw_setup: yum -qy install policycoreutils dbus python sudo iproute iptables
|
||||||
|
raw_setup_done: test -x /usr/bin/sudo
|
||||||
|
agetty_svc: getty@* serial-getty@*
|
||||||
|
ssh_service: sshd
|
||||||
|
extra_packages: []
|
||||||
|
fedora:
|
||||||
|
<<: *CENTOS
|
||||||
|
image: "fedora:latest"
|
||||||
|
user: "fedora"
|
||||||
|
raw_setup: yum -qy install policycoreutils dbus python sudo iproute iptables; mkdir -p /etc/modules-load.d
|
||||||
|
extra_packages:
|
||||||
|
- hostname
|
||||||
|
- procps
|
||||||
|
- findutils
|
||||||
|
- kmod
|
||||||
|
- iputils
|
||||||
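The `&DEBIAN`/`&CENTOS` anchors and `<<:` merge keys above let `ubuntu` and `fedora` inherit their parent's settings and override only a few fields. A minimal sketch of how the merge resolves, trimmed to a couple of keys:

```python
# Show how the "&DEBIAN" anchor and "<<: *DEBIAN" merge key resolve:
# ubuntu inherits every debian setting and only overrides image/user here.
import yaml

SNIPPET = """
distro_settings:
  debian: &DEBIAN
    image: "debian:9.5"
    user: "debian"
    ssh_service: ssh
  ubuntu:
    <<: *DEBIAN
    image: "ubuntu:16.04"
    user: "ubuntu"
"""

settings = yaml.safe_load(SNIPPET)["distro_settings"]
print(settings["ubuntu"]["ssh_service"])  # "ssh", inherited from the debian anchor
```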
15 contrib/dind/hosts (new file)
@@ -0,0 +1,15 @@
|
|||||||
|
[local]
|
||||||
|
# If you created a virtualenv for ansible, you may need to specify running the
|
||||||
|
# python binary from there instead:
|
||||||
|
#localhost ansible_connection=local ansible_python_interpreter=/home/user/kubespray/.venv/bin/python
|
||||||
|
localhost ansible_connection=local
|
||||||
|
|
||||||
|
[containers]
|
||||||
|
kube-node1
|
||||||
|
kube-node2
|
||||||
|
kube-node3
|
||||||
|
kube-node4
|
||||||
|
kube-node5
|
||||||
|
|
||||||
|
[containers:vars]
|
||||||
|
ansible_connection=docker
|
||||||
22 contrib/dind/kubespray-dind.yaml (new file)
@@ -0,0 +1,22 @@
|
|||||||
|
---
|
||||||
|
# kubespray-dind.yaml: minimal kubespray ansible playbook usable for DIND
|
||||||
|
# See contrib/dind/README.md
|
||||||
|
kube_api_anonymous_auth: true
|
||||||
|
|
||||||
|
kubelet_fail_swap_on: false
|
||||||
|
|
||||||
|
# Docker nodes need to have been created with same "node_distro: debian"
|
||||||
|
# at contrib/dind/group_vars/all/all.yaml
|
||||||
|
bootstrap_os: debian
|
||||||
|
|
||||||
|
docker_version: latest
|
||||||
|
|
||||||
|
docker_storage_options: -s overlay2 --storage-opt overlay2.override_kernel_check=true -g /dind/docker
|
||||||
|
|
||||||
|
dns_mode: coredns
|
||||||
|
|
||||||
|
deploy_netchecker: True
|
||||||
|
netcheck_agent_image_repo: quay.io/l23network/k8s-netchecker-agent
|
||||||
|
netcheck_server_image_repo: quay.io/l23network/k8s-netchecker-server
|
||||||
|
netcheck_agent_image_tag: v1.0
|
||||||
|
netcheck_server_image_tag: v1.0
|
||||||
1 contrib/dind/requirements.txt (new file)
@@ -0,0 +1 @@
|
|||||||
|
docker
|
||||||
71 contrib/dind/roles/dind-cluster/tasks/main.yaml (new file)
@@ -0,0 +1,71 @@
|
|||||||
|
---
|
||||||
|
- name: set_fact distro_setup
|
||||||
|
set_fact:
|
||||||
|
distro_setup: "{{ distro_settings[node_distro] }}"
|
||||||
|
|
||||||
|
- name: set_fact other distro settings
|
||||||
|
set_fact:
|
||||||
|
distro_user: "{{ distro_setup['user'] }}"
|
||||||
|
distro_ssh_service: "{{ distro_setup['ssh_service'] }}"
|
||||||
|
distro_extra_packages: "{{ distro_setup['extra_packages'] }}"
|
||||||
|
|
||||||
|
- name: Null-ify some linux tools to ease DIND
|
||||||
|
file:
|
||||||
|
src: "/bin/true"
|
||||||
|
dest: "{{ item }}"
|
||||||
|
state: link
|
||||||
|
force: yes
|
||||||
|
with_items:
|
||||||
|
# DIND box may have swap enable, don't bother
|
||||||
|
- /sbin/swapoff
|
||||||
|
# /etc/hosts handling would fail on trying to copy file attributes on edit,
|
||||||
|
# void it by successfully returning nil output
|
||||||
|
- /usr/bin/lsattr
|
||||||
|
# disable selinux-isms, especially needed if running on a non-SELinux host
|
||||||
|
- /usr/sbin/semodule
|
||||||
|
|
||||||
|
- name: Avoid installing dpkg docs and man pages on Debian based distros
|
||||||
|
copy:
|
||||||
|
content: |
|
||||||
|
# Delete locales
|
||||||
|
path-exclude=/usr/share/locale/*
|
||||||
|
# Delete man pages
|
||||||
|
path-exclude=/usr/share/man/*
|
||||||
|
# Delete docs
|
||||||
|
path-exclude=/usr/share/doc/*
|
||||||
|
path-include=/usr/share/doc/*/copyright
|
||||||
|
dest: /etc/dpkg/dpkg.cfg.d/01_nodoc
|
||||||
|
when:
|
||||||
|
- ansible_os_family == 'Debian'
|
||||||
|
|
||||||
|
- name: Install system packages to better match a full-fledged node
|
||||||
|
package:
|
||||||
|
name: "{{ item }}"
|
||||||
|
state: present
|
||||||
|
with_items: "{{ distro_extra_packages }} + [ 'rsyslog', 'openssh-server' ]"
|
||||||
|
|
||||||
|
- name: Start needed services
|
||||||
|
service:
|
||||||
|
name: "{{ item }}"
|
||||||
|
state: started
|
||||||
|
with_items:
|
||||||
|
- rsyslog
|
||||||
|
- "{{ distro_ssh_service }}"
|
||||||
|
|
||||||
|
- name: Create distro user "{{ distro_user }}"
|
||||||
|
user:
|
||||||
|
name: "{{ distro_user }}"
|
||||||
|
uid: 1000
|
||||||
|
# groups: sudo
|
||||||
|
append: yes
|
||||||
|
|
||||||
|
- name: Allow password-less sudo to "{{ distro_user }}"
|
||||||
|
copy:
|
||||||
|
content: "{{ distro_user }} ALL=(ALL) NOPASSWD:ALL"
|
||||||
|
dest: "/etc/sudoers.d/{{ distro_user }}"
|
||||||
|
|
||||||
|
- name: Add my pubkey to "{{ distro_user }}" user authorized keys
|
||||||
|
authorized_key:
|
||||||
|
user: "{{ distro_user }}"
|
||||||
|
state: present
|
||||||
|
key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}"
|
||||||
88 contrib/dind/roles/dind-host/tasks/main.yaml (new file)
@@ -0,0 +1,88 @@
|
|||||||
|
---
|
||||||
|
- name: set_fact distro_setup
|
||||||
|
set_fact:
|
||||||
|
distro_setup: "{{ distro_settings[node_distro] }}"
|
||||||
|
|
||||||
|
- name: set_fact other distro settings
|
||||||
|
set_fact:
|
||||||
|
distro_image: "{{ distro_setup['image'] }}"
|
||||||
|
distro_init: "{{ distro_setup['init'] }}"
|
||||||
|
distro_pid1_exe: "{{ distro_setup['pid1_exe'] }}"
|
||||||
|
distro_raw_setup: "{{ distro_setup['raw_setup'] }}"
|
||||||
|
distro_raw_setup_done: "{{ distro_setup['raw_setup_done'] }}"
|
||||||
|
distro_agetty_svc: "{{ distro_setup['agetty_svc'] }}"
|
||||||
|
|
||||||
|
- name: Create dind node containers from "containers" inventory section
|
||||||
|
docker_container:
|
||||||
|
image: "{{ distro_image }}"
|
||||||
|
name: "{{ item }}"
|
||||||
|
state: started
|
||||||
|
hostname: "{{ item }}"
|
||||||
|
command: "{{ distro_init }}"
|
||||||
|
# recreate: yes
|
||||||
|
privileged: true
|
||||||
|
tmpfs:
|
||||||
|
- /sys/module/nf_conntrack/parameters
|
||||||
|
volumes:
|
||||||
|
- /boot:/boot
|
||||||
|
- /lib/modules:/lib/modules
|
||||||
|
- "{{ item }}:/dind/docker"
|
||||||
|
register: containers
|
||||||
|
with_items: "{{ groups.containers }}"
|
||||||
|
tags:
|
||||||
|
- addresses
|
||||||
|
|
||||||
|
- name: Gather list of containers IPs
|
||||||
|
set_fact:
|
||||||
|
addresses: "{{ containers.results | map(attribute='ansible_facts') | map(attribute='docker_container') | map(attribute='NetworkSettings') | map(attribute='IPAddress') | list }}"
|
||||||
|
tags:
|
||||||
|
- addresses
|
||||||
|
|
||||||
|
- name: Create inventory_builder helper already set with the list of node containers' IPs
|
||||||
|
template:
|
||||||
|
src: inventory_builder.sh.j2
|
||||||
|
dest: /tmp/kubespray.dind.inventory_builder.sh
|
||||||
|
mode: 0755
|
||||||
|
tags:
|
||||||
|
- addresses
|
||||||
|
|
||||||
|
- name: Install needed packages into node containers via raw, need to wait for possible systemd packages to finish installing
|
||||||
|
raw: |
|
||||||
|
# agetty processes churn a lot of CPU time failing on nonexistent ttys; STOP them early so they can be removed in the task below
|
||||||
|
pkill -STOP agetty || true
|
||||||
|
{{ distro_raw_setup_done }} && echo SKIPPED && exit 0
|
||||||
|
until [ "$(readlink /proc/1/exe)" = "{{ distro_pid1_exe }}" ] ; do sleep 1; done
|
||||||
|
{{ distro_raw_setup }}
|
||||||
|
delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
|
||||||
|
with_items: "{{ containers.results }}"
|
||||||
|
register: result
|
||||||
|
changed_when: result.stdout.find("SKIPPED") < 0
|
||||||
|
|
||||||
|
- name: Remove gettys from node containers
|
||||||
|
raw: |
|
||||||
|
until test -S /var/run/dbus/system_bus_socket; do sleep 1; done
|
||||||
|
systemctl disable {{ distro_agetty_svc }}
|
||||||
|
systemctl stop {{ distro_agetty_svc }}
|
||||||
|
delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
|
||||||
|
with_items: "{{ containers.results }}"
|
||||||
|
changed_when: false
|
||||||
|
|
||||||
|
# Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
|
||||||
|
# handle manually
|
||||||
|
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)
|
||||||
|
raw: |
|
||||||
|
echo {{ item | hash('sha1') }} > /etc/machine-id.new
|
||||||
|
mv -b /etc/machine-id.new /etc/machine-id
|
||||||
|
cmp /etc/machine-id /etc/machine-id~ || true
|
||||||
|
systemctl daemon-reload
|
||||||
|
delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
|
||||||
|
with_items: "{{ containers.results }}"
|
||||||
|
|
||||||
|
- name: Early hack image install to adapt for DIND
|
||||||
|
# noqa 302 - this task uses the raw module intentionally
|
||||||
|
raw: |
|
||||||
|
rm -fv /usr/bin/udevadm /usr/sbin/udevadm
|
||||||
|
delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
|
||||||
|
with_items: "{{ containers.results }}"
|
||||||
|
register: result
|
||||||
|
changed_when: result.stdout.find("removed") >= 0
|
||||||
@@ -0,0 +1,3 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
# NOTE: if you change HOST_PREFIX, you also need to edit ./hosts [containers] section
|
||||||
|
HOST_PREFIX=kube-node python3 contrib/inventory_builder/inventory.py {% for ip in addresses %} {{ ip }} {% endfor %}
|
||||||
93 contrib/dind/run-test-distros.sh (new executable file)
@@ -0,0 +1,93 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
# Q&D test'em all: creates full DIND kubespray deploys
|
||||||
|
# for each distro, verifying it via netchecker.
|
||||||
|
|
||||||
|
info() {
|
||||||
|
local msg="$*"
|
||||||
|
local date="$(date -Isec)"
|
||||||
|
echo "INFO: [$date] $msg"
|
||||||
|
}
|
||||||
|
pass_or_fail() {
|
||||||
|
local rc="$?"
|
||||||
|
local msg="$*"
|
||||||
|
local date="$(date -Isec)"
|
||||||
|
[ $rc -eq 0 ] && echo "PASS: [$date] $msg" || echo "FAIL: [$date] $msg"
|
||||||
|
return $rc
|
||||||
|
}
|
||||||
|
test_distro() {
|
||||||
|
local distro=${1:?};shift
|
||||||
|
local extra="${*:-}"
|
||||||
|
local prefix="${distro}[${extra}]"
|
||||||
|
ansible-playbook -i hosts dind-cluster.yaml -e node_distro=$distro
|
||||||
|
pass_or_fail "$prefix: dind-nodes" || return 1
|
||||||
|
(cd ../..
|
||||||
|
INVENTORY_DIR=inventory/local-dind
|
||||||
|
mkdir -p ${INVENTORY_DIR}
|
||||||
|
rm -f ${INVENTORY_DIR}/hosts.ini
|
||||||
|
CONFIG_FILE=${INVENTORY_DIR}/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
|
||||||
|
# expand $extra with -e in front of each word
|
||||||
|
extra_args=""; for extra_arg in $extra; do extra_args="$extra_args -e $extra_arg"; done
|
||||||
|
ansible-playbook --become -e ansible_ssh_user=$distro -i \
|
||||||
|
${INVENTORY_DIR}/hosts.ini cluster.yml \
|
||||||
|
-e @contrib/dind/kubespray-dind.yaml -e bootstrap_os=$distro ${extra_args}
|
||||||
|
pass_or_fail "$prefix: kubespray"
|
||||||
|
) || return 1
|
||||||
|
local node0=${NODES[0]}
|
||||||
|
docker exec ${node0} kubectl get pod --all-namespaces
|
||||||
|
pass_or_fail "$prefix: kube-api" || return 1
|
||||||
|
let retries=60
|
||||||
|
while ((retries--)); do
|
||||||
|
# Some CNI may set NodePort on "main" node interface address (thus no localhost NodePort)
|
||||||
|
# e.g. kube-router: https://github.com/cloudnativelabs/kube-router/pull/217
|
||||||
|
docker exec ${node0} curl -m2 -s http://${NETCHECKER_HOST:?}:31081/api/v1/connectivity_check | grep successfully && break
|
||||||
|
sleep 2
|
||||||
|
done
|
||||||
|
[ $retries -ge 0 ]
|
||||||
|
pass_or_fail "$prefix: netcheck" || return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
NODES=($(egrep ^kube-node hosts))
|
||||||
|
NETCHECKER_HOST=localhost
|
||||||
|
|
||||||
|
: ${OUTPUT_DIR:=./out}
|
||||||
|
mkdir -p ${OUTPUT_DIR}
|
||||||
|
|
||||||
|
# The SPEC file(s) must have two arrays as e.g.
|
||||||
|
# DISTROS=(debian centos)
|
||||||
|
# EXTRAS=(
|
||||||
|
# 'kube_network_plugin=calico'
|
||||||
|
# 'kube_network_plugin=flannel'
|
||||||
|
# 'kube_network_plugin=weave'
|
||||||
|
# )
|
||||||
|
# that will be tested in a "combinatory" way (e.g. from above there'll be
|
||||||
|
# 6 test runs), creating a sequenced <spec_filename>-nn.out with each output.
|
||||||
|
#
|
||||||
|
# Each $EXTRAS element will be whitespace split, and passed as --extra-vars
|
||||||
|
# to main kubespray ansible-playbook run.
|
||||||
|
|
||||||
|
SPECS=${*:?Missing SPEC files, e.g. test-most_distros-some_CNIs.env}
|
||||||
|
for spec in ${SPECS}; do
|
||||||
|
unset DISTROS EXTRAS
|
||||||
|
echo "Loading file=${spec} ..."
|
||||||
|
. ${spec} || continue
|
||||||
|
: ${DISTROS:?} || continue
|
||||||
|
echo "DISTROS=${DISTROS[@]}"
|
||||||
|
echo "EXTRAS->"
|
||||||
|
printf " %s\n" "${EXTRAS[@]}"
|
||||||
|
let n=1
|
||||||
|
for distro in ${DISTROS[@]}; do
|
||||||
|
for extra in "${EXTRAS[@]:-NULL}"; do
|
||||||
|
# Magic value to let this for run once:
|
||||||
|
[[ ${extra} == NULL ]] && unset extra
|
||||||
|
docker rm -f ${NODES[@]}
|
||||||
|
printf -v file_out "%s/%s-%02d.out" ${OUTPUT_DIR} ${spec} $((n++))
|
||||||
|
{
|
||||||
|
info "${distro}[${extra}] START: file_out=${file_out}"
|
||||||
|
time test_distro ${distro} ${extra}
|
||||||
|
} |& tee ${file_out}
|
||||||
|
# sleeping for the sake of the human to verify if they want
|
||||||
|
sleep 2m
|
||||||
|
done
|
||||||
|
done
|
||||||
|
done
|
||||||
|
egrep -H '^(....:|real)' $(ls -tr ${OUTPUT_DIR}/*.out)
|
||||||
11 contrib/dind/test-most_distros-some_CNIs.env (new file)
@@ -0,0 +1,11 @@
|
|||||||
|
# Test spec file: used from ./run-test-distros.sh, will run
|
||||||
|
# each distro in $DISTROS overloading main kubespray ansible-playbook run
|
||||||
|
# Get all DISTROS from distro.yaml (shame no yaml parsing, but nuff anyway)
|
||||||
|
# DISTROS="${*:-$(egrep -o '^ \w+' group_vars/all/distro.yaml|paste -s)}"
|
||||||
|
DISTROS=(debian ubuntu centos fedora)
|
||||||
|
|
||||||
|
# Each line below will be added as --extra-vars to main playbook run
|
||||||
|
EXTRAS=(
|
||||||
|
'kube_network_plugin=calico'
|
||||||
|
'kube_network_plugin=weave'
|
||||||
|
)
|
||||||
6 contrib/dind/test-some_distros-kube_router_combo.env (new file)
@@ -0,0 +1,6 @@
|
|||||||
|
DISTROS=(debian centos)
|
||||||
|
NETCHECKER_HOST=${NODES[0]}
|
||||||
|
EXTRAS=(
|
||||||
|
'kube_network_plugin=kube-router {"kube_router_run_service_proxy":false}'
|
||||||
|
'kube_network_plugin=kube-router {"kube_router_run_service_proxy":true}'
|
||||||
|
)
|
||||||
8 contrib/dind/test-some_distros-most_CNIs.env (new file)
@@ -0,0 +1,8 @@
|
|||||||
|
DISTROS=(debian centos)
|
||||||
|
EXTRAS=(
|
||||||
|
'kube_network_plugin=calico {}'
|
||||||
|
'kube_network_plugin=canal {}'
|
||||||
|
'kube_network_plugin=cilium {}'
|
||||||
|
'kube_network_plugin=flannel {}'
|
||||||
|
'kube_network_plugin=weave {}'
|
||||||
|
)
|
||||||
@@ -17,6 +17,9 @@
|
|||||||
#
|
#
|
||||||
# Advanced usage:
|
# Advanced usage:
|
||||||
# Add another host after initial creation: inventory.py 10.10.1.5
|
# Add another host after initial creation: inventory.py 10.10.1.5
|
||||||
|
# Add range of hosts: inventory.py 10.10.1.3-10.10.1.5
|
||||||
|
# Add hosts with different ip and access ip:
|
||||||
|
# inventory.py 10.0.0.1,192.168.10.1 10.0.0.2,192.168.10.2 10.0.0.3,192.168.1.3
|
||||||
# Delete a host: inventory.py -10.10.1.3
|
# Delete a host: inventory.py -10.10.1.3
|
||||||
# Delete a host by id: inventory.py -node1
|
# Delete a host by id: inventory.py -node1
|
||||||
#
|
#
|
||||||
@@ -31,21 +34,21 @@
|
|||||||
# ip: X.X.X.X
|
# ip: X.X.X.X
|
||||||
|
|
||||||
from collections import OrderedDict
|
from collections import OrderedDict
|
||||||
try:
|
from ipaddress import ip_address
|
||||||
import configparser
|
from ruamel.yaml import YAML
|
||||||
except ImportError:
|
|
||||||
import ConfigParser as configparser
|
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster:children',
|
ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster',
|
||||||
'calico-rr', 'vault']
|
'calico-rr']
|
||||||
PROTECTED_NAMES = ROLES
|
PROTECTED_NAMES = ROLES
|
||||||
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'load']
|
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'load']
|
||||||
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
|
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
|
||||||
'0': False, 'no': False, 'false': False, 'off': False}
|
'0': False, 'no': False, 'false': False, 'off': False}
|
||||||
|
yaml = YAML()
|
||||||
|
yaml.Representer.add_representer(OrderedDict, yaml.Representer.represent_dict)
|
||||||
|
|
||||||
|
|
||||||
def get_var_as_bool(name, default):
|
def get_var_as_bool(name, default):
|
||||||
@@ -54,7 +57,9 @@ def get_var_as_bool(name, default):
|
|||||||
|
|
||||||
# Configurable as shell vars start
|
# Configurable as shell vars start
|
||||||
|
|
||||||
CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory/sample/hosts.ini")
|
|
||||||
|
CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory/sample/hosts.yaml")
|
||||||
|
KUBE_MASTERS = int(os.environ.get("KUBE_MASTERS_MASTERS", 2))
|
||||||
# Reconfigures cluster distribution at scale
|
# Reconfigures cluster distribution at scale
|
||||||
SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
|
SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
|
||||||
MASSIVE_SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 200))
|
MASSIVE_SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 200))
|
||||||
@@ -68,11 +73,14 @@ HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")
|
|||||||
class KubesprayInventory(object):
|
class KubesprayInventory(object):
|
||||||
|
|
||||||
def __init__(self, changed_hosts=None, config_file=None):
|
def __init__(self, changed_hosts=None, config_file=None):
|
||||||
self.config = configparser.ConfigParser(allow_no_value=True,
|
|
||||||
delimiters=('\t', ' '))
|
|
||||||
self.config_file = config_file
|
self.config_file = config_file
|
||||||
|
self.yaml_config = {}
|
||||||
if self.config_file:
|
if self.config_file:
|
||||||
self.config.read(self.config_file)
|
try:
|
||||||
|
self.hosts_file = open(config_file, 'r')
|
||||||
|
self.yaml_config = yaml.load(self.hosts_file)
|
||||||
|
except FileNotFoundError:
|
||||||
|
pass
|
||||||
|
|
||||||
if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS:
|
if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS:
|
||||||
self.parse_command(changed_hosts[0], changed_hosts[1:])
|
self.parse_command(changed_hosts[0], changed_hosts[1:])
|
||||||
@@ -81,18 +89,21 @@ class KubesprayInventory(object):
|
|||||||
self.ensure_required_groups(ROLES)
|
self.ensure_required_groups(ROLES)
|
||||||
|
|
||||||
if changed_hosts:
|
if changed_hosts:
|
||||||
|
changed_hosts = self.range2ips(changed_hosts)
|
||||||
self.hosts = self.build_hostnames(changed_hosts)
|
self.hosts = self.build_hostnames(changed_hosts)
|
||||||
self.purge_invalid_hosts(self.hosts.keys(), PROTECTED_NAMES)
|
self.purge_invalid_hosts(self.hosts.keys(), PROTECTED_NAMES)
|
||||||
self.set_all(self.hosts)
|
self.set_all(self.hosts)
|
||||||
self.set_k8s_cluster()
|
self.set_k8s_cluster()
|
||||||
self.set_etcd(list(self.hosts.keys())[:3])
|
etcd_hosts_count = 3 if len(self.hosts.keys()) >= 3 else 1
|
||||||
|
self.set_etcd(list(self.hosts.keys())[:etcd_hosts_count])
|
||||||
if len(self.hosts) >= SCALE_THRESHOLD:
|
if len(self.hosts) >= SCALE_THRESHOLD:
|
||||||
self.set_kube_master(list(self.hosts.keys())[3:5])
|
self.set_kube_master(list(self.hosts.keys())[
|
||||||
|
etcd_hosts_count:(etcd_hosts_count + KUBE_MASTERS)])
|
||||||
else:
|
else:
|
||||||
self.set_kube_master(list(self.hosts.keys())[:2])
|
self.set_kube_master(list(self.hosts.keys())[:KUBE_MASTERS])
|
||||||
self.set_kube_node(self.hosts.keys())
|
self.set_kube_node(self.hosts.keys())
|
||||||
if len(self.hosts) >= SCALE_THRESHOLD:
|
if len(self.hosts) >= SCALE_THRESHOLD:
|
||||||
self.set_calico_rr(list(self.hosts.keys())[:3])
|
self.set_calico_rr(list(self.hosts.keys())[:etcd_hosts_count])
|
||||||
else: # Show help if no options
|
else: # Show help if no options
|
||||||
self.show_help()
|
self.show_help()
|
||||||
sys.exit(0)
|
sys.exit(0)
|
||||||
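The `etcd_hosts_count` logic introduced above gives clusters with fewer than three hosts a single etcd member, and at scale takes the masters from the hosts that follow the etcd slice. A hedged sketch of that selection (function name is illustrative; defaults follow `KUBE_MASTERS` and `SCALE_THRESHOLD`):

```python
# Minimal sketch (assumed helper name) of the node-role split introduced above:
# clusters with fewer than 3 hosts get a single etcd member, otherwise 3,
# and at scale the first KUBE_MASTERS hosts after the etcd slice become masters.
def split_roles(hosts, kube_masters=2, scale_threshold=50):
    etcd_count = 3 if len(hosts) >= 3 else 1
    etcd = hosts[:etcd_count]
    if len(hosts) >= scale_threshold:
        masters = hosts[etcd_count:etcd_count + kube_masters]
    else:
        masters = hosts[:kube_masters]
    return etcd, masters

print(split_roles(["node1", "node2"]))
# (['node1'], ['node1', 'node2'])
print(split_roles(["node%d" % i for i in range(1, 6)]))
# (['node1', 'node2', 'node3'], ['node1', 'node2'])
```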
@@ -101,8 +112,9 @@ class KubesprayInventory(object):
|
|||||||
|
|
||||||
def write_config(self, config_file):
|
def write_config(self, config_file):
|
||||||
if config_file:
|
if config_file:
|
||||||
with open(config_file, 'w') as f:
|
with open(self.config_file, 'w') as f:
|
||||||
self.config.write(f)
|
yaml.dump(self.yaml_config, f)
|
||||||
|
|
||||||
else:
|
else:
|
||||||
print("WARNING: Unable to save config. Make sure you set "
|
print("WARNING: Unable to save config. Make sure you set "
|
||||||
"CONFIG_FILE env var.")
|
"CONFIG_FILE env var.")
|
||||||
@@ -112,28 +124,29 @@ class KubesprayInventory(object):
|
|||||||
print("DEBUG: {0}".format(msg))
|
print("DEBUG: {0}".format(msg))
|
||||||
|
|
||||||
def get_ip_from_opts(self, optstring):
|
def get_ip_from_opts(self, optstring):
|
||||||
opts = optstring.split(' ')
|
if 'ip' in optstring:
|
||||||
for opt in opts:
|
return optstring['ip']
|
||||||
if '=' not in opt:
|
else:
|
||||||
continue
|
raise ValueError("IP parameter not found in options")
|
||||||
k, v = opt.split('=')
|
|
||||||
if k == "ip":
|
|
||||||
return v
|
|
||||||
raise ValueError("IP parameter not found in options")
|
|
||||||
|
|
||||||
def ensure_required_groups(self, groups):
|
def ensure_required_groups(self, groups):
|
||||||
for group in groups:
|
for group in groups:
|
||||||
try:
|
if group == 'all':
|
||||||
self.debug("Adding group {0}".format(group))
|
self.debug("Adding group {0}".format(group))
|
||||||
self.config.add_section(group)
|
if group not in self.yaml_config:
|
||||||
except configparser.DuplicateSectionError:
|
all_dict = OrderedDict([('hosts', OrderedDict({})),
|
||||||
pass
|
('children', OrderedDict({}))])
|
||||||
|
self.yaml_config = {'all': all_dict}
|
||||||
|
else:
|
||||||
|
self.debug("Adding group {0}".format(group))
|
||||||
|
if group not in self.yaml_config['all']['children']:
|
||||||
|
self.yaml_config['all']['children'][group] = {'hosts': {}}
|
||||||
|
|
||||||
def get_host_id(self, host):
|
def get_host_id(self, host):
|
||||||
'''Returns integer host ID (without padding) from a given hostname.'''
|
'''Returns integer host ID (without padding) from a given hostname.'''
|
||||||
try:
|
try:
|
||||||
short_hostname = host.split('.')[0]
|
short_hostname = host.split('.')[0]
|
||||||
return int(re.findall("\d+$", short_hostname)[-1])
|
return int(re.findall("\\d+$", short_hostname)[-1])
|
||||||
except IndexError:
|
except IndexError:
|
||||||
raise ValueError("Host name must end in an integer")
|
raise ValueError("Host name must end in an integer")
|
||||||
|
|
||||||
@@ -141,12 +154,12 @@ class KubesprayInventory(object):
|
|||||||
existing_hosts = OrderedDict()
|
existing_hosts = OrderedDict()
|
||||||
highest_host_id = 0
|
highest_host_id = 0
|
||||||
try:
|
try:
|
||||||
for host, opts in self.config.items('all'):
|
for host in self.yaml_config['all']['hosts']:
|
||||||
existing_hosts[host] = opts
|
existing_hosts[host] = self.yaml_config['all']['hosts'][host]
|
||||||
host_id = self.get_host_id(host)
|
host_id = self.get_host_id(host)
|
||||||
if host_id > highest_host_id:
|
if host_id > highest_host_id:
|
||||||
highest_host_id = host_id
|
highest_host_id = host_id
|
||||||
except configparser.NoSectionError:
|
except Exception:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
# FIXME(mattymo): Fix condition where delete then add reuses highest id
|
# FIXME(mattymo): Fix condition where delete then add reuses highest id
|
||||||
@@ -163,22 +176,53 @@ class KubesprayInventory(object):
|
|||||||
self.debug("Marked {0} for deletion.".format(realhost))
|
self.debug("Marked {0} for deletion.".format(realhost))
|
||||||
self.delete_host_by_ip(all_hosts, realhost)
|
self.delete_host_by_ip(all_hosts, realhost)
|
||||||
elif host[0].isdigit():
|
elif host[0].isdigit():
|
||||||
|
if ',' in host:
|
||||||
|
ip, access_ip = host.split(',')
|
||||||
|
else:
|
||||||
|
ip = host
|
||||||
|
access_ip = host
|
||||||
if self.exists_hostname(all_hosts, host):
|
if self.exists_hostname(all_hosts, host):
|
||||||
self.debug("Skipping existing host {0}.".format(host))
|
self.debug("Skipping existing host {0}.".format(host))
|
||||||
continue
|
continue
|
||||||
elif self.exists_ip(all_hosts, host):
|
elif self.exists_ip(all_hosts, ip):
|
||||||
self.debug("Skipping existing host {0}.".format(host))
|
self.debug("Skipping existing host {0}.".format(ip))
|
||||||
continue
|
continue
|
||||||
|
|
||||||
next_host = "{0}{1}".format(HOST_PREFIX, next_host_id)
|
next_host = "{0}{1}".format(HOST_PREFIX, next_host_id)
|
||||||
next_host_id += 1
|
next_host_id += 1
|
||||||
all_hosts[next_host] = "ansible_host={0} ip={1}".format(
|
all_hosts[next_host] = {'ansible_host': access_ip,
|
||||||
host, host)
|
'ip': ip,
|
||||||
|
'access_ip': access_ip}
|
||||||
elif host[0].isalpha():
|
elif host[0].isalpha():
|
||||||
raise Exception("Adding hosts by hostname is not supported.")
|
raise Exception("Adding hosts by hostname is not supported.")
|
||||||
|
|
||||||
return all_hosts
|
return all_hosts
|
||||||
|
|
||||||
|
def range2ips(self, hosts):
|
||||||
|
reworked_hosts = []
|
||||||
|
|
||||||
|
def ips(start_address, end_address):
|
||||||
|
try:
|
||||||
|
# Python 3.x
|
||||||
|
start = int(ip_address(start_address))
|
||||||
|
end = int(ip_address(end_address))
|
||||||
|
except:
|
||||||
|
# Python 2.7
|
||||||
|
start = int(ip_address(unicode(start_address)))
|
||||||
|
end = int(ip_address(unicode(end_address)))
|
||||||
|
return [ip_address(ip).exploded for ip in range(start, end + 1)]
|
||||||
|
|
||||||
|
for host in hosts:
|
||||||
|
if '-' in host and not host.startswith('-'):
|
||||||
|
start, end = host.strip().split('-')
|
||||||
|
try:
|
||||||
|
reworked_hosts.extend(ips(start, end))
|
||||||
|
except ValueError:
|
||||||
|
raise Exception("Range of ip_addresses isn't valid")
|
||||||
|
else:
|
||||||
|
reworked_hosts.append(host)
|
||||||
|
return reworked_hosts
|
||||||
|
|
||||||
def exists_hostname(self, existing_hosts, hostname):
|
def exists_hostname(self, existing_hosts, hostname):
|
||||||
return hostname in existing_hosts.keys()
|
return hostname in existing_hosts.keys()
|
||||||
|
|
||||||
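The new `range2ips` helper expands dashed ranges such as `10.10.1.3-10.10.1.5` before hostnames are assigned. A standalone sketch of the expansion, Python 3 only (the in-tree code also handles Python 2 via `unicode`):

```python
# Hedged sketch of the range expansion added in range2ips (Python 3 only).
from ipaddress import ip_address

def expand_range(host):
    if '-' in host and not host.startswith('-'):
        start_address, end_address = host.strip().split('-')
        start = int(ip_address(start_address))
        end = int(ip_address(end_address))
        return [ip_address(ip).exploded for ip in range(start, end + 1)]
    return [host]

print(expand_range("10.10.1.3-10.10.1.5"))
# ['10.10.1.3', '10.10.1.4', '10.10.1.5']
```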
@@ -196,16 +240,34 @@ class KubesprayInventory(object):
|
|||||||
raise ValueError("Unable to find host by IP: {0}".format(ip))
|
raise ValueError("Unable to find host by IP: {0}".format(ip))
|
||||||
|
|
||||||
def purge_invalid_hosts(self, hostnames, protected_names=[]):
|
def purge_invalid_hosts(self, hostnames, protected_names=[]):
|
||||||
for role in self.config.sections():
|
for role in self.yaml_config['all']['children']:
|
||||||
for host, _ in self.config.items(role):
|
if role != 'k8s-cluster' and self.yaml_config['all']['children'][role]['hosts']: # noqa
|
||||||
|
all_hosts = self.yaml_config['all']['children'][role]['hosts'].copy() # noqa
|
||||||
|
for host in all_hosts.keys():
|
||||||
|
if host not in hostnames and host not in protected_names:
|
||||||
|
self.debug(
|
||||||
|
"Host {0} removed from role {1}".format(host, role)) # noqa
|
||||||
|
del self.yaml_config['all']['children'][role]['hosts'][host] # noqa
|
||||||
|
# purge from all
|
||||||
|
if self.yaml_config['all']['hosts']:
|
||||||
|
all_hosts = self.yaml_config['all']['hosts'].copy()
|
||||||
|
for host in all_hosts.keys():
|
||||||
if host not in hostnames and host not in protected_names:
|
if host not in hostnames and host not in protected_names:
|
||||||
self.debug("Host {0} removed from role {1}".format(host,
|
self.debug("Host {0} removed from role all".format(host))
|
||||||
role))
|
del self.yaml_config['all']['hosts'][host]
|
||||||
self.config.remove_option(role, host)
|
|
||||||
|
|
||||||
def add_host_to_group(self, group, host, opts=""):
|
def add_host_to_group(self, group, host, opts=""):
|
||||||
self.debug("adding host {0} to group {1}".format(host, group))
|
self.debug("adding host {0} to group {1}".format(host, group))
|
||||||
self.config.set(group, host, opts)
|
if group == 'all':
|
||||||
|
if self.yaml_config['all']['hosts'] is None:
|
||||||
|
self.yaml_config['all']['hosts'] = {host: None}
|
||||||
|
self.yaml_config['all']['hosts'][host] = opts
|
||||||
|
elif group != 'k8s-cluster:children':
|
||||||
|
if self.yaml_config['all']['children'][group]['hosts'] is None:
|
||||||
|
self.yaml_config['all']['children'][group]['hosts'] = {
|
||||||
|
host: None}
|
||||||
|
else:
|
||||||
|
self.yaml_config['all']['children'][group]['hosts'][host] = None # noqa
|
||||||
|
|
||||||
def set_kube_master(self, hosts):
|
def set_kube_master(self, hosts):
|
||||||
for host in hosts:
|
for host in hosts:
|
||||||
@@ -216,31 +278,31 @@ class KubesprayInventory(object):
|
|||||||
self.add_host_to_group('all', host, opts)
|
self.add_host_to_group('all', host, opts)
|
||||||
|
|
||||||
def set_k8s_cluster(self):
|
def set_k8s_cluster(self):
|
||||||
self.add_host_to_group('k8s-cluster:children', 'kube-node')
|
k8s_cluster = {'children': {'kube-master': None, 'kube-node': None}}
|
||||||
self.add_host_to_group('k8s-cluster:children', 'kube-master')
|
self.yaml_config['all']['children']['k8s-cluster'] = k8s_cluster
|
||||||
|
|
||||||
def set_calico_rr(self, hosts):
|
def set_calico_rr(self, hosts):
|
||||||
for host in hosts:
|
for host in hosts:
|
||||||
if host in self.config.items('kube-master'):
|
+            if host in self.yaml_config['all']['children']['kube-master']:
                 self.debug("Not adding {0} to calico-rr group because it "
                            "conflicts with kube-master group".format(host))
                 continue
-            if host in self.config.items('kube-node'):
+            if host in self.yaml_config['all']['children']['kube-node']:
                 self.debug("Not adding {0} to calico-rr group because it "
                            "conflicts with kube-node group".format(host))
                 continue
             self.add_host_to_group('calico-rr', host)

     def set_kube_node(self, hosts):
         for host in hosts:
-            if len(self.config['all']) >= SCALE_THRESHOLD:
-                if self.config.has_option('etcd', host):
+            if len(self.yaml_config['all']['hosts']) >= SCALE_THRESHOLD:
+                if host in self.yaml_config['all']['children']['etcd']['hosts']:  # noqa
                     self.debug("Not adding {0} to kube-node group because of "
                                "scale deployment and host is in etcd "
                                "group.".format(host))
                     continue
-            if len(self.config['all']) >= MASSIVE_SCALE_THRESHOLD:
-                if self.config.has_option('kube-master', host):
+            if len(self.yaml_config['all']['hosts']) >= MASSIVE_SCALE_THRESHOLD:  # noqa
+                if host in self.yaml_config['all']['children']['kube-master']['hosts']:  # noqa
                     self.debug("Not adding {0} to kube-node group because of "
                                "scale deployment and host is in kube-master "
                                "group.".format(host))
@@ -250,42 +312,31 @@ class KubesprayInventory(object):
     def set_etcd(self, hosts):
         for host in hosts:
             self.add_host_to_group('etcd', host)
-            self.add_host_to_group('vault', host)

     def load_file(self, files=None):
-        '''Directly loads JSON, or YAML file to inventory.'''
+        '''Directly loads JSON to inventory.'''

         if not files:
             raise Exception("No input file specified.")

         import json
-        import yaml

         for filename in list(files):
-            # Try JSON, then YAML
+            # Try JSON
             try:
                 with open(filename, 'r') as f:
                     data = json.load(f)
             except ValueError:
-                try:
-                    with open(filename, 'r') as f:
-                        data = yaml.load(f)
-                        print("yaml")
-                except ValueError:
-                    raise Exception("Cannot read %s as JSON, YAML, or CSV",
-                                    filename)
+                raise Exception("Cannot read %s as JSON, or CSV", filename)

             self.ensure_required_groups(ROLES)
             self.set_k8s_cluster()
             for group, hosts in data.items():
                 self.ensure_required_groups([group])
                 for host, opts in hosts.items():
-                    optstring = "ansible_host={0} ip={0}".format(opts['ip'])
-                    for key, val in opts.items():
-                        if key == "ip":
-                            continue
-                        optstring += " {0}={1}".format(key, val)
+                    optstring = {'ansible_host': opts['ip'],
+                                 'ip': opts['ip'],
+                                 'access_ip': opts['ip']}
                     self.add_host_to_group('all', host, optstring)
                     self.add_host_to_group(group, host)
             self.write_config(self.config_file)
@@ -313,24 +364,26 @@ print_ips - Write a space-delimited list of IPs from "all" group

 Advanced usage:
 Add another host after initial creation: inventory.py 10.10.1.5
+Add range of hosts: inventory.py 10.10.1.3-10.10.1.5
+Add hosts with different ip and access ip: inventory.py 10.0.0.1,192.168.10.1 10.0.0.2,192.168.10.2 10.0.0.3,192.168.10.3
 Delete a host: inventory.py -10.10.1.3
 Delete a host by id: inventory.py -node1

 Configurable env vars:
 DEBUG                   Enable debug printing. Default: True
-CONFIG_FILE             File to write config to Default: ./inventory/sample/hosts.ini
+CONFIG_FILE             File to write config to Default: ./inventory/sample/hosts.yaml
 HOST_PREFIX             Host prefix for generated hosts. Default: node
 SCALE_THRESHOLD         Separate ETCD role if # of nodes >= 50
 MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
-'''
+'''  # noqa
         print(help_text)

     def print_config(self):
-        self.config.write(sys.stdout)
+        yaml.dump(self.yaml_config, sys.stdout)

     def print_ips(self):
         ips = []
-        for host, opts in self.config.items('all'):
+        for host, opts in self.yaml_config['all']['hosts'].items():
             ips.append(self.get_ip_from_opts(opts))
         print(' '.join(ips))

@@ -340,5 +393,6 @@ def main(argv=None):
         argv = sys.argv[1:]
     KubesprayInventory(argv, CONFIG_FILE)


 if __name__ == "__main__":
     sys.exit(main())
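For orientation, a minimal sketch of the hosts.yaml layout that the converted inventory builder writes, based on the `yaml_config['all']['hosts']` / `['all']['children']` accesses in the diff above. The node name and addresses are placeholders, not output captured from the tool.

```
all:
  hosts:
    node1:
      ansible_host: 10.90.0.2
      ip: 10.90.0.2
      access_ip: 10.90.0.2
  children:
    kube-master:
      hosts:
        node1:
    kube-node:
      hosts:
        node1:
    etcd:
      hosts:
        node1:
    k8s-cluster:
      children:
        kube-master:
        kube-node:
    calico-rr:
      hosts: {}
```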
@@ -1 +1,3 @@
 configparser>=3.3.0
+ruamel.yaml>=0.15.88
+ipaddress
@@ -34,7 +34,9 @@ class TestInventory(unittest.TestCase):
         self.inv = inventory.KubesprayInventory()

     def test_get_ip_from_opts(self):
-        optstring = "ansible_host=10.90.3.2 ip=10.90.3.2"
+        optstring = {'ansible_host': '10.90.3.2',
+                     'ip': '10.90.3.2',
+                     'access_ip': '10.90.3.2'}
         expected = "10.90.3.2"
         result = self.inv.get_ip_from_opts(optstring)
         self.assertEqual(expected, result)
@@ -48,7 +50,7 @@ class TestInventory(unittest.TestCase):
         groups = ['group1', 'group2']
         self.inv.ensure_required_groups(groups)
         for group in groups:
-            self.assertTrue(group in self.inv.config.sections())
+            self.assertTrue(group in self.inv.yaml_config['all']['children'])

     def test_get_host_id(self):
         hostnames = ['node99', 'no99de01', '01node01', 'node1.domain',
@@ -67,35 +69,49 @@ class TestInventory(unittest.TestCase):
     def test_build_hostnames_add_one(self):
         changed_hosts = ['10.90.0.2']
         expected = OrderedDict([('node1',
-                                 'ansible_host=10.90.0.2 ip=10.90.0.2')])
+                                 {'ansible_host': '10.90.0.2',
+                                  'ip': '10.90.0.2',
+                                  'access_ip': '10.90.0.2'})])
         result = self.inv.build_hostnames(changed_hosts)
         self.assertEqual(expected, result)

     def test_build_hostnames_add_duplicate(self):
         changed_hosts = ['10.90.0.2']
         expected = OrderedDict([('node1',
-                                 'ansible_host=10.90.0.2 ip=10.90.0.2')])
-        self.inv.config['all'] = expected
+                                 {'ansible_host': '10.90.0.2',
+                                  'ip': '10.90.0.2',
+                                  'access_ip': '10.90.0.2'})])
+        self.inv.yaml_config['all']['hosts'] = expected
         result = self.inv.build_hostnames(changed_hosts)
         self.assertEqual(expected, result)

     def test_build_hostnames_add_two(self):
         changed_hosts = ['10.90.0.2', '10.90.0.3']
         expected = OrderedDict([
-            ('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
-            ('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
-        self.inv.config['all'] = OrderedDict()
+            ('node1', {'ansible_host': '10.90.0.2',
+                       'ip': '10.90.0.2',
+                       'access_ip': '10.90.0.2'}),
+            ('node2', {'ansible_host': '10.90.0.3',
+                       'ip': '10.90.0.3',
+                       'access_ip': '10.90.0.3'})])
+        self.inv.yaml_config['all']['hosts'] = OrderedDict()
         result = self.inv.build_hostnames(changed_hosts)
         self.assertEqual(expected, result)

     def test_build_hostnames_delete_first(self):
         changed_hosts = ['-10.90.0.2']
         existing_hosts = OrderedDict([
-            ('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
-            ('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
-        self.inv.config['all'] = existing_hosts
+            ('node1', {'ansible_host': '10.90.0.2',
+                       'ip': '10.90.0.2',
+                       'access_ip': '10.90.0.2'}),
+            ('node2', {'ansible_host': '10.90.0.3',
+                       'ip': '10.90.0.3',
+                       'access_ip': '10.90.0.3'})])
+        self.inv.yaml_config['all']['hosts'] = existing_hosts
         expected = OrderedDict([
-            ('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
+            ('node2', {'ansible_host': '10.90.0.3',
+                       'ip': '10.90.0.3',
+                       'access_ip': '10.90.0.3'})])
         result = self.inv.build_hostnames(changed_hosts)
         self.assertEqual(expected, result)
@@ -103,8 +119,12 @@ class TestInventory(unittest.TestCase):
         hostname = 'node1'
         expected = True
         existing_hosts = OrderedDict([
-            ('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
-            ('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
+            ('node1', {'ansible_host': '10.90.0.2',
+                       'ip': '10.90.0.2',
+                       'access_ip': '10.90.0.2'}),
+            ('node2', {'ansible_host': '10.90.0.3',
+                       'ip': '10.90.0.3',
+                       'access_ip': '10.90.0.3'})])
         result = self.inv.exists_hostname(existing_hosts, hostname)
         self.assertEqual(expected, result)
@@ -112,8 +132,12 @@ class TestInventory(unittest.TestCase):
         hostname = 'node99'
         expected = False
         existing_hosts = OrderedDict([
-            ('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
-            ('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
+            ('node1', {'ansible_host': '10.90.0.2',
+                       'ip': '10.90.0.2',
+                       'access_ip': '10.90.0.2'}),
+            ('node2', {'ansible_host': '10.90.0.3',
+                       'ip': '10.90.0.3',
+                       'access_ip': '10.90.0.3'})])
         result = self.inv.exists_hostname(existing_hosts, hostname)
         self.assertEqual(expected, result)
@@ -121,8 +145,12 @@ class TestInventory(unittest.TestCase):
         ip = '10.90.0.2'
         expected = True
         existing_hosts = OrderedDict([
-            ('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
-            ('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
+            ('node1', {'ansible_host': '10.90.0.2',
+                       'ip': '10.90.0.2',
+                       'access_ip': '10.90.0.2'}),
+            ('node2', {'ansible_host': '10.90.0.3',
+                       'ip': '10.90.0.3',
+                       'access_ip': '10.90.0.3'})])
         result = self.inv.exists_ip(existing_hosts, ip)
         self.assertEqual(expected, result)
@@ -130,26 +158,40 @@ class TestInventory(unittest.TestCase):
         ip = '10.90.0.200'
         expected = False
         existing_hosts = OrderedDict([
-            ('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
-            ('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
+            ('node1', {'ansible_host': '10.90.0.2',
+                       'ip': '10.90.0.2',
+                       'access_ip': '10.90.0.2'}),
+            ('node2', {'ansible_host': '10.90.0.3',
+                       'ip': '10.90.0.3',
+                       'access_ip': '10.90.0.3'})])
         result = self.inv.exists_ip(existing_hosts, ip)
         self.assertEqual(expected, result)

     def test_delete_host_by_ip_positive(self):
         ip = '10.90.0.2'
         expected = OrderedDict([
-            ('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
+            ('node2', {'ansible_host': '10.90.0.3',
+                       'ip': '10.90.0.3',
+                       'access_ip': '10.90.0.3'})])
         existing_hosts = OrderedDict([
-            ('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
-            ('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
+            ('node1', {'ansible_host': '10.90.0.2',
+                       'ip': '10.90.0.2',
+                       'access_ip': '10.90.0.2'}),
+            ('node2', {'ansible_host': '10.90.0.3',
+                       'ip': '10.90.0.3',
+                       'access_ip': '10.90.0.3'})])
         self.inv.delete_host_by_ip(existing_hosts, ip)
         self.assertEqual(expected, existing_hosts)

     def test_delete_host_by_ip_negative(self):
         ip = '10.90.0.200'
         existing_hosts = OrderedDict([
-            ('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
-            ('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
+            ('node1', {'ansible_host': '10.90.0.2',
+                       'ip': '10.90.0.2',
+                       'access_ip': '10.90.0.2'}),
+            ('node2', {'ansible_host': '10.90.0.3',
+                       'ip': '10.90.0.3',
+                       'access_ip': '10.90.0.3'})])
         self.assertRaisesRegexp(ValueError, "Unable to find host",
                                 self.inv.delete_host_by_ip, existing_hosts, ip)
@@ -157,59 +199,71 @@ class TestInventory(unittest.TestCase):
         proper_hostnames = ['node1', 'node2']
         bad_host = 'doesnotbelong2'
         existing_hosts = OrderedDict([
-            ('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
-            ('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3'),
-            ('doesnotbelong2', 'whateveropts=ilike')])
-        self.inv.config['all'] = existing_hosts
+            ('node1', {'ansible_host': '10.90.0.2',
+                       'ip': '10.90.0.2',
+                       'access_ip': '10.90.0.2'}),
+            ('node2', {'ansible_host': '10.90.0.3',
+                       'ip': '10.90.0.3',
+                       'access_ip': '10.90.0.3'}),
+            ('doesnotbelong2', {'whateveropts=ilike'})])
+        self.inv.yaml_config['all']['hosts'] = existing_hosts
         self.inv.purge_invalid_hosts(proper_hostnames)
-        self.assertTrue(bad_host not in self.inv.config['all'].keys())
+        self.assertTrue(
+            bad_host not in self.inv.yaml_config['all']['hosts'].keys())

     def test_add_host_to_group(self):
         group = 'etcd'
         host = 'node1'
-        opts = 'ip=10.90.0.2'
+        opts = {'ip': '10.90.0.2'}

         self.inv.add_host_to_group(group, host, opts)
-        self.assertEqual(self.inv.config[group].get(host), opts)
+        self.assertEqual(
+            self.inv.yaml_config['all']['children'][group]['hosts'].get(host),
+            None)

     def test_set_kube_master(self):
         group = 'kube-master'
         host = 'node1'

         self.inv.set_kube_master([host])
-        self.assertTrue(host in self.inv.config[group])
+        self.assertTrue(
+            host in self.inv.yaml_config['all']['children'][group]['hosts'])

     def test_set_all(self):
-        group = 'all'
         hosts = OrderedDict([
             ('node1', 'opt1'),
             ('node2', 'opt2')])

         self.inv.set_all(hosts)
         for host, opt in hosts.items():
-            self.assertEqual(self.inv.config[group].get(host), opt)
+            self.assertEqual(
+                self.inv.yaml_config['all']['hosts'].get(host), opt)

     def test_set_k8s_cluster(self):
-        group = 'k8s-cluster:children'
+        group = 'k8s-cluster'
         expected_hosts = ['kube-node', 'kube-master']

         self.inv.set_k8s_cluster()
         for host in expected_hosts:
-            self.assertTrue(host in self.inv.config[group])
+            self.assertTrue(
+                host in
+                self.inv.yaml_config['all']['children'][group]['children'])

     def test_set_kube_node(self):
         group = 'kube-node'
         host = 'node1'

         self.inv.set_kube_node([host])
-        self.assertTrue(host in self.inv.config[group])
+        self.assertTrue(
+            host in self.inv.yaml_config['all']['children'][group]['hosts'])

     def test_set_etcd(self):
         group = 'etcd'
         host = 'node1'

         self.inv.set_etcd([host])
-        self.assertTrue(host in self.inv.config[group])
+        self.assertTrue(
+            host in self.inv.yaml_config['all']['children'][group]['hosts'])

     def test_scale_scenario_one(self):
         num_nodes = 50
@@ -219,11 +273,13 @@ class TestInventory(unittest.TestCase):
             hosts["node" + str(hostid)] = ""

         self.inv.set_all(hosts)
-        self.inv.set_etcd(hosts.keys()[0:3])
-        self.inv.set_kube_master(hosts.keys()[0:2])
+        self.inv.set_etcd(list(hosts.keys())[0:3])
+        self.inv.set_kube_master(list(hosts.keys())[0:2])
         self.inv.set_kube_node(hosts.keys())
         for h in range(3):
-            self.assertFalse(hosts.keys()[h] in self.inv.config['kube-node'])
+            self.assertFalse(
+                list(hosts.keys())[h] in
+                self.inv.yaml_config['all']['children']['kube-node']['hosts'])

     def test_scale_scenario_two(self):
         num_nodes = 500
@@ -233,8 +289,57 @@ class TestInventory(unittest.TestCase):
             hosts["node" + str(hostid)] = ""

         self.inv.set_all(hosts)
-        self.inv.set_etcd(hosts.keys()[0:3])
-        self.inv.set_kube_master(hosts.keys()[3:5])
+        self.inv.set_etcd(list(hosts.keys())[0:3])
+        self.inv.set_kube_master(list(hosts.keys())[3:5])
         self.inv.set_kube_node(hosts.keys())
         for h in range(5):
-            self.assertFalse(hosts.keys()[h] in self.inv.config['kube-node'])
+            self.assertFalse(
+                list(hosts.keys())[h] in
+                self.inv.yaml_config['all']['children']['kube-node']['hosts'])
+
+    def test_range2ips_range(self):
+        changed_hosts = ['10.90.0.2', '10.90.0.4-10.90.0.6', '10.90.0.8']
+        expected = ['10.90.0.2',
+                    '10.90.0.4',
+                    '10.90.0.5',
+                    '10.90.0.6',
+                    '10.90.0.8']
+        result = self.inv.range2ips(changed_hosts)
+        self.assertEqual(expected, result)
+
+    def test_range2ips_incorrect_range(self):
+        host_range = ['10.90.0.4-a.9b.c.e']
+        self.assertRaisesRegexp(Exception, "Range of ip_addresses isn't valid",
+                                self.inv.range2ips, host_range)
+
+    def test_build_hostnames_different_ips_add_one(self):
+        changed_hosts = ['10.90.0.2,192.168.0.2']
+        expected = OrderedDict([('node1',
+                                 {'ansible_host': '192.168.0.2',
+                                  'ip': '10.90.0.2',
+                                  'access_ip': '192.168.0.2'})])
+        result = self.inv.build_hostnames(changed_hosts)
+        self.assertEqual(expected, result)
+
+    def test_build_hostnames_different_ips_add_duplicate(self):
+        changed_hosts = ['10.90.0.2,192.168.0.2']
+        expected = OrderedDict([('node1',
+                                 {'ansible_host': '192.168.0.2',
+                                  'ip': '10.90.0.2',
+                                  'access_ip': '192.168.0.2'})])
+        self.inv.yaml_config['all']['hosts'] = expected
+        result = self.inv.build_hostnames(changed_hosts)
+        self.assertEqual(expected, result)
+
+    def test_build_hostnames_different_ips_add_two(self):
+        changed_hosts = ['10.90.0.2,192.168.0.2', '10.90.0.3,192.168.0.3']
+        expected = OrderedDict([
+            ('node1', {'ansible_host': '192.168.0.2',
+                       'ip': '10.90.0.2',
+                       'access_ip': '192.168.0.2'}),
+            ('node2', {'ansible_host': '192.168.0.3',
+                       'ip': '10.90.0.3',
+                       'access_ip': '192.168.0.3'})])
+        self.inv.yaml_config['all']['hosts'] = OrderedDict()
+        result = self.inv.build_hostnames(changed_hosts)
+        self.assertEqual(expected, result)
@@ -1,15 +1,9 @@
 ---
-
-- name: Upgrade all packages to the latest version (yum)
-  yum:
-    name: '*'
-    state: latest
-  when: ansible_os_family == "RedHat"

 - name: Install required packages
   yum:
     name: "{{ item }}"
-    state: latest
+    state: present
   with_items:
     - bind-utils
     - ntp
@@ -21,23 +15,13 @@
     update_cache: yes
     cache_valid_time: 3600
     name: "{{ item }}"
-    state: latest
+    state: present
     install_recommends: no
   with_items:
     - dnsutils
     - ntp
   when: ansible_os_family == "Debian"

-- name: Upgrade all packages to the latest version (apt)
-  shell: apt-get -o \
-         Dpkg::Options::=--force-confdef -o \
-         Dpkg::Options::=--force-confold -q -y \
-         dist-upgrade
-  environment:
-    DEBIAN_FRONTEND: noninteractive
-  when: ansible_os_family == "Debian"
-
-
 # Create deployment user if required
 - include: user.yml
   when: k8s_deployment_user is defined
@@ -2,9 +2,11 @@
 ```
 MetalLB hooks into your Kubernetes cluster, and provides a network load-balancer implementation. In short, it allows you to create Kubernetes services of type “LoadBalancer” in clusters that don’t run on a cloud provider, and thus cannot simply hook into paid products to provide load-balancers.
 ```
-This playbook aims to automate [this](https://metallb.universe.tf/tutorial/layer2/). It deploys MetalLB into kubernetes and sets up a layer 2 loadbalancer.
+This playbook aims to automate [this](https://metallb.universe.tf/concepts/layer2/). It deploys MetalLB into kubernetes and sets up a layer 2 loadbalancer.

 ## Install
 ```
-ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/metallb/metallb.yml
+Defaults can be found in contrib/metallb/roles/provision/defaults/main.yml. You can override the defaults by copying the contents of this file to somewhere in inventory/mycluster/group_vars such as inventory/mycluster/groups_vars/k8s-cluster/addons.yml and making any adjustments as required.
+
+ansible-playbook --ask-become -i inventory/sample/hosts.ini contrib/metallb/metallb.yml
 ```
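To illustrate the override workflow described in the README change above, a hedged sketch of what such an addons.yml might contain. The file path and the IP range are illustrative only; the keys mirror the role defaults shown further down.

```
# inventory/mycluster/group_vars/k8s-cluster/addons.yml (example path)
metallb:
  ip_range: "192.0.2.100-192.0.2.120"
  protocol: "layer2"
  limits:
    cpu: "100m"
    memory: "100Mi"
  port: "7472"
  version: v0.7.3
```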
contrib/metallb/library (new symbolic link, 1 line)
@@ -0,0 +1 @@
+../../library
@@ -1,7 +1,14 @@
 ---
 metallb:
   ip_range: "10.5.0.50-10.5.0.99"
+  protocol: "layer2"
+  # additional_address_pools:
+  #   kube_service_pool:
+  #     ip_range: "10.5.1.50-10.5.1.99"
+  #     protocol: "layer2"
+  #     auto_assign: false
   limits:
     cpu: "100m"
     memory: "100Mi"
   port: "7472"
+  version: v0.7.3
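The commented block in the defaults above documents the expected shape of `additional_address_pools`. Uncommented, an override would look roughly like this; the pool name and ranges are taken from the commented example and are placeholders, not required values.

```
metallb:
  ip_range: "10.5.0.50-10.5.0.99"
  protocol: "layer2"
  additional_address_pools:
    kube_service_pool:
      ip_range: "10.5.1.50-10.5.1.99"
      protocol: "layer2"
      auto_assign: false
```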
@@ -9,9 +9,10 @@
 - name: "Kubernetes Apps | Install and configure MetalLB"
   kube:
     name: "MetalLB"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/{{ item.item }}"
     state: "{{ item.changed | ternary('latest','present') }}"
+  become: true
   with_items: "{{ rendering.results }}"
   when:
     - "inventory_hostname == groups['kube-master'][0]"
@@ -8,6 +8,14 @@ data:
   config: |
     address-pools:
       - name: loadbalanced
-        protocol: layer2
+        protocol: {{ metallb.protocol }}
         addresses:
          - {{ metallb.ip_range }}
+{% if metallb.additional_address_pools is defined %}{% for pool in metallb.additional_address_pools %}
+      - name: {{ pool }}
+        protocol: {{ metallb.additional_address_pools[pool].protocol }}
+        addresses:
+          - {{ metallb.additional_address_pools[pool].ip_range }}
+        auto-assign: {{ metallb.additional_address_pools[pool].auto_assign }}
+{% endfor %}
+{% endif %}
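With the role defaults plus one additional pool defined, the template above should render a MetalLB config along these lines. This is a sketch of the expected output, not captured rendering, and the pool names and ranges follow the example values used earlier.

```
address-pools:
  - name: loadbalanced
    protocol: layer2
    addresses:
      - 10.5.0.50-10.5.0.99
  - name: kube_service_pool
    protocol: layer2
    addresses:
      - 10.5.1.50-10.5.1.99
    auto-assign: false
```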
@@ -53,22 +53,6 @@ rules:
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: Role
-metadata:
-  namespace: metallb-system
-  name: leader-election
-  labels:
-    app: metallb
-rules:
-- apiGroups: [""]
-  resources: ["endpoints"]
-  resourceNames: ["metallb-speaker"]
-  verbs: ["get", "update"]
-- apiGroups: [""]
-  resources: ["endpoints"]
-  verbs: ["create"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
 metadata:
   namespace: metallb-system
   name: config-watcher
@@ -131,21 +115,6 @@ roleRef:
   kind: Role
   name: config-watcher
 ---
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  namespace: metallb-system
-  name: leader-election
-  labels:
-    app: metallb
-subjects:
-- kind: ServiceAccount
-  name: speaker
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: leader-election
----
 apiVersion: apps/v1beta2
 kind: DaemonSet
 metadata:
@@ -173,7 +142,7 @@ spec:
       hostNetwork: true
       containers:
       - name: speaker
-        image: metallb/speaker:v0.6.2
+        image: metallb/speaker:{{ metallb.version }}
        imagePullPolicy: IfNotPresent
        args:
        - --port={{ metallb.port }}
@@ -230,7 +199,7 @@ spec:
        runAsUser: 65534 # nobody
       containers:
       - name: controller
-        image: metallb/controller:v0.6.2
+        image: metallb/controller:{{ metallb.version }}
        imagePullPolicy: IfNotPresent
        args:
        - --port={{ metallb.port }}
@@ -250,5 +219,3 @@ spec:
          readOnlyRootFilesystem: true

 ---
-
-
@@ -1,14 +1,15 @@
 ---
-kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
 metadata:
-  name: dnsmasq
-  namespace: "kube-system"
-subjects:
-  - kind: ServiceAccount
-    name: dnsmasq
-    namespace: "kube-system"
+  name: kubernetes-dashboard
+  labels:
+    k8s-app: kubernetes-dashboard
 roleRef:
+  apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
   name: cluster-admin
-  apiGroup: rbac.authorization.k8s.io
+subjects:
+- kind: ServiceAccount
+  name: kubernetes-dashboard
+  namespace: kube-system
@@ -4,7 +4,7 @@
   vars:
     ansible_ssh_pipelining: false
   roles:
     - { role: bootstrap-os, tags: bootstrap-os}

 - hosts: all
   gather_facts: true
@@ -22,4 +22,3 @@
 - hosts: kube-master[0]
   roles:
     - { role: kubernetes-pv }
-
@@ -22,9 +22,9 @@ galaxy_info:
     - wheezy
     - jessie
 galaxy_tags:
   - system
   - networking
   - cloud
   - clustering
   - files
   - sharing
@@ -12,5 +12,5 @@
 - name: Ensure Gluster mount directories exist.
   file: "path={{ item }} state=directory mode=0775"
   with_items:
     - "{{ gluster_mount_dir }}"
   when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined
@@ -22,9 +22,9 @@ galaxy_info:
     - wheezy
     - jessie
 galaxy_tags:
   - system
   - networking
   - cloud
   - clustering
   - files
   - sharing
@@ -33,24 +33,24 @@
 - name: Ensure Gluster brick and mount directories exist.
   file: "path={{ item }} state=directory mode=0775"
   with_items:
     - "{{ gluster_brick_dir }}"
     - "{{ gluster_mount_dir }}"

 - name: Configure Gluster volume.
   gluster_volume:
     state: present
     name: "{{ gluster_brick_name }}"
     brick: "{{ gluster_brick_dir }}"
     replicas: "{{ groups['gfs-cluster'] | length }}"
     cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
     host: "{{ inventory_hostname }}"
     force: yes
   run_once: true

 - name: Mount glusterfs to retrieve disk size
   mount:
     name: "{{ gluster_mount_dir }}"
     src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
     fstype: glusterfs
     opts: "defaults,_netdev"
     state: mounted
@@ -63,13 +63,13 @@

 - name: Set Gluster disk size to variable
   set_fact:
     gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024*1024*1024)) | int }}"
   when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

 - name: Create file on GlusterFS
   template:
     dest: "{{ gluster_mount_dir }}/.test-file.txt"
     src: test-file.txt
   when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

 - name: Unmount glusterfs
@@ -79,4 +79,3 @@
     src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
     state: unmounted
   when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
-
@@ -1,10 +1,12 @@
 ---
 - name: Kubernetes Apps | Lay Down k8s GlusterFS Endpoint and PV
-  template: src={{item.file}} dest={{kube_config_dir}}/{{item.dest}}
+  template:
+    src: "{{ item.file }}"
+    dest: "{{ kube_config_dir }}/{{ item.dest }}"
   with_items:
     - { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
     - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
     - { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json}
   register: gluster_pv
   when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
@@ -12,9 +14,9 @@
   kube:
     name: glusterfs
     namespace: default
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.dest}}"
-    state: "{{item.changed | ternary('latest','present') }}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
+    state: "{{ item.changed | ternary('latest','present') }}"
   with_items: "{{ gluster_pv.results }}"
   when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined
@@ -1,2 +1,3 @@
+---
 dependencies:
   - {role: kubernetes-pv/ansible, tags: apps}
@@ -14,3 +14,5 @@ ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contr
 ```
 ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi-tear-down.yml
 ```
+
+Add `--extra-vars "heketi_remove_lvm=true"` to the command above to remove LVM packages from the system
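Instead of passing `--extra-vars` on the command line, the same toggle can live in the inventory; a minimal sketch is shown below. The file path is an example, not one prescribed by the playbook; the variable itself defaults to false in the new role defaults further down.

```
# inventory/mycluster/group_vars/all/heketi.yml (example path)
heketi_remove_lvm: true
```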
@@ -2,23 +2,23 @@
 - name: "Load lvm kernel modules"
   become: true
   with_items:
     - "dm_snapshot"
     - "dm_mirror"
     - "dm_thin_pool"
   modprobe:
     name: "{{ item }}"
     state: "present"

 - name: "Install glusterfs mount utils (RedHat)"
   become: true
   yum:
     name: "glusterfs-fuse"
     state: "present"
   when: "ansible_os_family == 'RedHat'"

 - name: "Install glusterfs mount utils (Debian)"
   become: true
   apt:
     name: "glusterfs-client"
     state: "present"
   when: "ansible_os_family == 'Debian'"
@@ -1,8 +1,10 @@
+---
 # Bootstrap heketi
 - name: "Get state of heketi service, deployment and pods."
   register: "initial_heketi_state"
   changed_when: false
   command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"

 - name: "Bootstrap heketi."
   when:
     - "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Service']\"))|length == 0"
@@ -15,15 +17,20 @@
   register: "initial_heketi_pod"
   command: "{{ bin_dir }}/kubectl get pods --selector=deploy-heketi=pod,glusterfs=heketi-pod,name=deploy-heketi --output=json"
   changed_when: false

 - name: "Ensure heketi bootstrap pod is up."
   assert:
     that: "(initial_heketi_pod.stdout|from_json|json_query('items[*]'))|length == 1"
-- set_fact:
+
+- name: Store the initial heketi pod name
+  set_fact:
     initial_heketi_pod_name: "{{ initial_heketi_pod.stdout|from_json|json_query(\"items[*].metadata.name|[0]\") }}"

 - name: "Test heketi topology."
   changed_when: false
   register: "heketi_topology"
   command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"

 - name: "Load heketi topology."
   when: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*]\")|flatten|length == 0"
   include_tasks: "bootstrap/topology.yml"
@@ -41,6 +48,7 @@
   command: "{{ bin_dir }}/kubectl get secrets,endpoints,services,jobs --output=json"
   changed_when: false
   register: "heketi_storage_state"

 # ensure endpoints actually exist before trying to move database data to it
 - name: "Create heketi storage."
   include_tasks: "bootstrap/storage.yml"
@@ -6,7 +6,7 @@
 - name: "Kubernetes Apps | Install and configure Heketi Bootstrap"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/heketi-bootstrap.json"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"
 - name: "Wait for heketi bootstrap to complete."
@@ -18,7 +18,7 @@
     deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
   command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
   until:
     - "initial_heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
     - "initial_heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
   retries: 60
   delay: 5
@@ -6,7 +6,7 @@
 - name: "Create heketi storage."
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/heketi-storage-bootstrap.json"
     state: "present"
   vars:
@@ -38,4 +38,4 @@
   vars: { volume: "{{ volume_information.stdout|from_json }}" }
   when: "volume.name == 'heketidbstorage'"
 - name: "Ensure heketi database volume exists."
-  assert: { that: "heketi_database_volume_created is defined" , msg: "Heketi database volume does not exist." }
+  assert: { that: "heketi_database_volume_created is defined", msg: "Heketi database volume does not exist." }
@@ -6,7 +6,7 @@
 - name: "Kubernetes Apps | Install and configure GlusterFS daemonset"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/glusterfs-daemonset.json"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"
 - name: "Kubernetes Apps | Label GlusterFS nodes"
@@ -33,6 +33,6 @@
 - name: "Kubernetes Apps | Install and configure Heketi Service Account"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/heketi-service-account.json"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"
@@ -1,11 +1,19 @@
 ---
-- register: "label_present"
+- name: Get storage nodes
+  register: "label_present"
   command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
   changed_when: false

 - name: "Assign storage label"
   when: "label_present.stdout_lines|length == 0"
   command: "{{ bin_dir }}/kubectl label node {{ node }} storagenode=glusterfs"
-- register: "label_present"
+
+- name: Get storage nodes again
+  register: "label_present"
   command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
   changed_when: false
-- assert: { that: "label_present|length > 0", msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs." }
+
+- name: Ensure the label has been set
+  assert:
+    that: "label_present|length > 0"
+    msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs."
@@ -1,26 +1,33 @@
 ---
 - name: "Kubernetes Apps | Lay Down Heketi"
   become: true
-  template: { src: "heketi-deployment.json.j2", dest: "{{ kube_config_dir }}/heketi-deployment.json" }
+  template:
+    src: "heketi-deployment.json.j2"
+    dest: "{{ kube_config_dir }}/heketi-deployment.json"
   register: "rendering"

 - name: "Kubernetes Apps | Install and configure Heketi"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/heketi-deployment.json"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"

 - name: "Ensure heketi is up and running."
   changed_when: false
   register: "heketi_state"
   vars:
-    heketi_state: { stdout: "{}" }
+    heketi_state:
+      stdout: "{}"
     pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]"
     deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
   command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json"
   until:
     - "heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
     - "heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
   retries: 60
   delay: 5
-- set_fact:
+
+- name: Set the Heketi pod name
+  set_fact:
     heketi_pod_name: "{{ heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"
@@ -7,7 +7,7 @@

 - name: "Kubernetes Apps | Test Heketi"
   register: "heketi_service_state"
-  command: "{{bin_dir}}/kubectl get service heketi-storage-endpoints -o=name --ignore-not-found=true"
+  command: "{{ bin_dir }}/kubectl get service heketi-storage-endpoints -o=name --ignore-not-found=true"
   changed_when: false

 - name: "Kubernetes Apps | Bootstrap Heketi"
@@ -1,27 +1,44 @@
 ---
-- register: "clusterrolebinding_state"
-  command: "{{bin_dir}}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
+- name: Get clusterrolebindings
+  register: "clusterrolebinding_state"
+  command: "{{ bin_dir }}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
   changed_when: false

 - name: "Kubernetes Apps | Deploy cluster role binding."
   when: "clusterrolebinding_state.stdout == \"\""
-  command: "{{bin_dir}}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"
-- register: "clusterrolebinding_state"
-  command: "{{bin_dir}}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
-  changed_when: false
-- assert: { that: "clusterrolebinding_state.stdout != \"\"", message: "Cluster role binding is not present." }
+  command: "{{ bin_dir }}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"

-- register: "secret_state"
-  command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
+- name: Get clusterrolebindings again
+  register: "clusterrolebinding_state"
+  command: "{{ bin_dir }}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
   changed_when: false

+- name: Make sure that clusterrolebindings are present now
+  assert:
+    that: "clusterrolebinding_state.stdout != \"\""
+    msg: "Cluster role binding is not present."
+
+- name: Get the heketi-config-secret secret
+  register: "secret_state"
+  command: "{{ bin_dir }}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
+  changed_when: false
+
 - name: "Render Heketi secret configuration."
   become: true
   template:
     src: "heketi.json.j2"
     dest: "{{ kube_config_dir }}/heketi.json"

 - name: "Deploy Heketi config secret"
   when: "secret_state.stdout == \"\""
-  command: "{{bin_dir}}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"
-- register: "secret_state"
-  command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
+  command: "{{ bin_dir }}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"
+
+- name: Get the heketi-config-secret secret again
+  register: "secret_state"
+  command: "{{ bin_dir }}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
   changed_when: false
-- assert: { that: "secret_state.stdout != \"\"", message: "Heketi config secret is not present." }
+
+- name: Make sure the heketi-config-secret secret exists now
+  assert:
+    that: "secret_state.stdout != \"\""
+    msg: "Heketi config secret is not present."
@@ -7,6 +7,6 @@
 - name: "Kubernetes Apps | Install and configure Heketi Storage"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/heketi-storage.json"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"
@@ -8,7 +8,7 @@
   register: "heketi_service"
   changed_when: false
 - name: "Ensure heketi service is available."
   assert: { that: "heketi_service.stdout != \"\"" }
 - name: "Render storage class configuration."
   become: true
   vars:
@@ -20,6 +20,6 @@
 - name: "Kubernetes Apps | Install and configure Storace Class"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/storageclass.yml"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"
@@ -69,7 +69,7 @@
|
|||||||
},
|
},
|
||||||
"readinessProbe": {
|
"readinessProbe": {
|
||||||
"timeoutSeconds": 3,
|
"timeoutSeconds": 3,
|
||||||
"initialDelaySeconds": 60,
|
"initialDelaySeconds": 3,
|
||||||
"exec": {
|
"exec": {
|
||||||
"command": [
|
"command": [
|
||||||
"/bin/bash",
|
"/bin/bash",
|
||||||
@@ -80,7 +80,7 @@
|
|||||||
},
|
},
|
||||||
"livenessProbe": {
|
"livenessProbe": {
|
||||||
"timeoutSeconds": 3,
|
"timeoutSeconds": 3,
|
||||||
"initialDelaySeconds": 60,
|
"initialDelaySeconds": 10,
|
||||||
"exec": {
|
"exec": {
|
||||||
"command": [
|
"command": [
|
||||||
"/bin/bash",
|
"/bin/bash",
|
||||||
@@ -56,7 +56,7 @@
       "serviceAccountName": "heketi-service-account",
       "containers": [
         {
-          "image": "heketi/heketi:7",
+          "image": "heketi/heketi:9",
           "imagePullPolicy": "Always",
           "name": "deploy-heketi",
           "env": [
@@ -106,7 +106,7 @@
           },
           "livenessProbe": {
             "timeoutSeconds": 3,
-            "initialDelaySeconds": 30,
+            "initialDelaySeconds": 10,
             "httpGet": {
               "path": "/hello",
               "port": 8080
@@ -68,7 +68,7 @@
       "serviceAccountName": "heketi-service-account",
       "containers": [
         {
-          "image": "heketi/heketi:7",
+          "image": "heketi/heketi:9",
           "imagePullPolicy": "Always",
           "name": "heketi",
           "env": [
@@ -122,7 +122,7 @@
           },
           "livenessProbe": {
             "timeoutSeconds": 3,
-            "initialDelaySeconds": 30,
+            "initialDelaySeconds": 10,
             "httpGet": {
               "path": "/hello",
               "port": 8080
@@ -0,0 +1,2 @@
+---
+heketi_remove_lvm: false
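With this new default, lvm2 (and any remaining LVM metadata) stays on the nodes during GlusterFS teardown unless removal is requested explicitly. A hedged example of opting in at run time; the playbook name here is a placeholder, only the extra-var is the point:

```shell
ansible-playbook -i ./inventory/hosts <glusterfs-teardown-playbook>.yml -e heketi_remove_lvm=true
```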
@@ -2,18 +2,20 @@
 - name: "Install lvm utils (RedHat)"
   become: true
   yum:
     name: "lvm2"
     state: "present"
   when: "ansible_os_family == 'RedHat'"

 - name: "Install lvm utils (Debian)"
   become: true
   apt:
     name: "lvm2"
     state: "present"
   when: "ansible_os_family == 'Debian'"

 - name: "Get volume group information."
+  environment:
+    PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can workaround RH / CentOS conservative path management
   become: true
   shell: "pvs {{ disk_volume_device_1 }} --option vg_name | tail -n+2"
   register: "volume_groups"
@@ -21,12 +23,16 @@
   changed_when: false

 - name: "Remove volume groups."
+  environment:
+    PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can workaround RH / CentOS conservative path management
   become: true
   command: "vgremove {{ volume_group }} --yes"
   with_items: "{{ volume_groups.stdout_lines }}"
   loop_control: { loop_var: "volume_group" }

 - name: "Remove physical volume from cluster disks."
+  environment:
+    PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can workaround RH / CentOS conservative path management
   become: true
   command: "pvremove {{ disk_volume_device_1 }} --yes"
   ignore_errors: true
@@ -34,13 +40,13 @@
 - name: "Remove lvm utils (RedHat)"
   become: true
   yum:
     name: "lvm2"
     state: "absent"
-  when: "ansible_os_family == 'RedHat'"
+  when: "ansible_os_family == 'RedHat' and heketi_remove_lvm"

 - name: "Remove lvm utils (Debian)"
   become: true
   apt:
     name: "lvm2"
     state: "absent"
-  when: "ansible_os_family == 'Debian'"
+  when: "ansible_os_family == 'Debian' and heketi_remove_lvm"
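For reference, a hedged shell sketch of the cleanup these tasks perform, assuming `/dev/xvdb` stands in for `disk_volume_device_1`:

```shell
export PATH="$PATH:/sbin"                                                  # same RH/CentOS PATH workaround used by the tasks
pvs /dev/xvdb --option vg_name | tail -n+2 | xargs -r -n1 vgremove --yes   # drop each volume group found on the device
pvremove /dev/xvdb --yes                                                   # then remove the physical volume itself
```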
@@ -20,7 +20,7 @@ BuildRequires: python2-setuptools
 BuildRequires: python-d2to1
 BuildRequires: python2-pbr

-Requires: ansible >= 2.4.0
+Requires: ansible >= 2.5.0
 Requires: python-jinja2 >= 2.10
 Requires: python-netaddr
 Requires: python-pbr
contrib/terraform/OWNERS (new normal file, 5 lines added)
@@ -0,0 +1,5 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+  - holmsten
+  - miouge1
@@ -43,7 +43,7 @@ ssh -F ./ssh-bastion.conf user@$ip

 Example (this one assumes you are using CoreOS)
 ```commandline
-ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache
+ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=core -b --become-user=root --flush-cache
 ```
 ***Using other distrib than CoreOs***
 If you want to use another distribution than CoreOS, you can modify the search filters of the 'data "aws_ami" "distro"' in variables.tf.
@@ -111,9 +111,9 @@ the `AWS CLI` with the following command:
 aws iam delete-instance-profile --region <region_name> --instance-profile-name <profile_name>
 ```

-***Ansible Inventory doesnt get created:***
+***Ansible Inventory doesn't get created:***

-It could happen that Terraform doesnt create an Ansible Inventory file automatically. If this is the case copy the output after `inventory=` and create a file named `hosts`in the directory `inventory` and paste the inventory into the file.
+It could happen that Terraform doesn't create an Ansible Inventory file automatically. If this is the case copy the output after `inventory=` and create a file named `hosts`in the directory `inventory` and paste the inventory into the file.

 **Architecture**

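A hedged sketch of that manual fallback, using the paths from the README above; the pasted block is whatever Terraform printed after `inventory=`:

```shell
mkdir -p inventory
cat > inventory/hosts <<'EOF'
<paste the inventory block printed by Terraform here>
EOF
```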
@@ -1,11 +1,11 @@
 terraform {
   required_version = ">= 0.8.7"
 }

 provider "aws" {
   access_key = "${var.AWS_ACCESS_KEY_ID}"
   secret_key = "${var.AWS_SECRET_ACCESS_KEY}"
   region = "${var.AWS_DEFAULT_REGION}"
 }

 data "aws_availability_zones" "available" {}
@@ -18,33 +18,30 @@ data "aws_availability_zones" "available" {}
 module "aws-vpc" {
   source = "modules/vpc"

   aws_cluster_name = "${var.aws_cluster_name}"
   aws_vpc_cidr_block = "${var.aws_vpc_cidr_block}"
-  aws_avail_zones="${slice(data.aws_availability_zones.available.names,0,2)}"
+  aws_avail_zones = "${slice(data.aws_availability_zones.available.names,0,2)}"
-  aws_cidr_subnets_private="${var.aws_cidr_subnets_private}"
+  aws_cidr_subnets_private = "${var.aws_cidr_subnets_private}"
-  aws_cidr_subnets_public="${var.aws_cidr_subnets_public}"
+  aws_cidr_subnets_public = "${var.aws_cidr_subnets_public}"
-  default_tags="${var.default_tags}"
+  default_tags = "${var.default_tags}"
 }

 module "aws-elb" {
   source = "modules/elb"

-  aws_cluster_name="${var.aws_cluster_name}"
+  aws_cluster_name = "${var.aws_cluster_name}"
-  aws_vpc_id="${module.aws-vpc.aws_vpc_id}"
+  aws_vpc_id = "${module.aws-vpc.aws_vpc_id}"
-  aws_avail_zones="${slice(data.aws_availability_zones.available.names,0,2)}"
+  aws_avail_zones = "${slice(data.aws_availability_zones.available.names,0,2)}"
-  aws_subnet_ids_public="${module.aws-vpc.aws_subnet_ids_public}"
+  aws_subnet_ids_public = "${module.aws-vpc.aws_subnet_ids_public}"
   aws_elb_api_port = "${var.aws_elb_api_port}"
   k8s_secure_api_port = "${var.k8s_secure_api_port}"
-  default_tags="${var.default_tags}"
+  default_tags = "${var.default_tags}"
 }

 module "aws-iam" {
   source = "modules/iam"

-  aws_cluster_name="${var.aws_cluster_name}"
+  aws_cluster_name = "${var.aws_cluster_name}"
 }

 /*
@@ -53,50 +50,44 @@ module "aws-iam" {
 */

 resource "aws_instance" "bastion-server" {
   ami = "${data.aws_ami.distro.id}"
   instance_type = "${var.aws_bastion_size}"
   count = "${length(var.aws_cidr_subnets_public)}"
   associate_public_ip_address = true
   availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
   subnet_id = "${element(module.aws-vpc.aws_subnet_ids_public,count.index)}"

-  vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]
+  vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
   key_name = "${var.AWS_SSH_KEY_NAME}"

   tags = "${merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-bastion-${count.index}",
     "Cluster", "${var.aws_cluster_name}",
     "Role", "bastion-${var.aws_cluster_name}-${count.index}"
   ))}"
 }

 /*
 * Create K8s Master and worker nodes and etcd instances
 *
 */

 resource "aws_instance" "k8s-master" {
   ami = "${data.aws_ami.distro.id}"
   instance_type = "${var.aws_kube_master_size}"

   count = "${var.aws_kube_master_num}"

   availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
   subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"

-  vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]
+  vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
   iam_instance_profile = "${module.aws-iam.kube-master-profile}"
   key_name = "${var.AWS_SSH_KEY_NAME}"

   tags = "${merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-master${count.index}",
     "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
     "Role", "master"
@@ -104,88 +95,77 @@ resource "aws_instance" "k8s-master" {
 }

 resource "aws_elb_attachment" "attach_master_nodes" {
   count = "${var.aws_kube_master_num}"
   elb = "${module.aws-elb.aws_elb_api_id}"
   instance = "${element(aws_instance.k8s-master.*.id,count.index)}"
 }

 resource "aws_instance" "k8s-etcd" {
   ami = "${data.aws_ami.distro.id}"
   instance_type = "${var.aws_etcd_size}"

   count = "${var.aws_etcd_num}"

   availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
   subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"

-  vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]
+  vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
   key_name = "${var.AWS_SSH_KEY_NAME}"

   tags = "${merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-etcd${count.index}",
     "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
     "Role", "etcd"
   ))}"
 }

 resource "aws_instance" "k8s-worker" {
   ami = "${data.aws_ami.distro.id}"
   instance_type = "${var.aws_kube_worker_size}"

   count = "${var.aws_kube_worker_num}"

   availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
   subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"

-  vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]
+  vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]

   iam_instance_profile = "${module.aws-iam.kube-worker-profile}"
   key_name = "${var.AWS_SSH_KEY_NAME}"

   tags = "${merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-worker${count.index}",
     "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
     "Role", "worker"
   ))}"
 }

 /*
 * Create Kubespray Inventory File
 *
 */
 data "template_file" "inventory" {
   template = "${file("${path.module}/templates/inventory.tpl")}"

   vars {
     public_ip_address_bastion = "${join("\n",formatlist("bastion ansible_host=%s" , aws_instance.bastion-server.*.public_ip))}"
     connection_strings_master = "${join("\n",formatlist("%s ansible_host=%s",aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}"
     connection_strings_node = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.tags.Name, aws_instance.k8s-worker.*.private_ip))}"
     connection_strings_etcd = "${join("\n",formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.tags.Name, aws_instance.k8s-etcd.*.private_ip))}"
     list_master = "${join("\n",aws_instance.k8s-master.*.tags.Name)}"
     list_node = "${join("\n",aws_instance.k8s-worker.*.tags.Name)}"
     list_etcd = "${join("\n",aws_instance.k8s-etcd.*.tags.Name)}"
     elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
   }
 }

 resource "null_resource" "inventories" {
   provisioner "local-exec" {
     command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}"
   }

   triggers {
     template = "${data.template_file.inventory.rendered}"
   }
 }
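The remaining churn in these Terraform files is whitespace and alignment only, which looks consistent with what `terraform fmt` produces (an assumption, not stated in the diff). If that holds, re-running it should be a no-op:

```shell
terraform fmt contrib/terraform/aws   # repeat per module directory if your Terraform's fmt is not recursive
```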
@@ -1,55 +1,54 @@
 resource "aws_security_group" "aws-elb" {
   name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
   vpc_id = "${var.aws_vpc_id}"

   tags = "${merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
   ))}"
 }

 resource "aws_security_group_rule" "aws-allow-api-access" {
   type = "ingress"
   from_port = "${var.aws_elb_api_port}"
   to_port = "${var.k8s_secure_api_port}"
   protocol = "TCP"
   cidr_blocks = ["0.0.0.0/0"]
   security_group_id = "${aws_security_group.aws-elb.id}"
 }

 resource "aws_security_group_rule" "aws-allow-api-egress" {
   type = "egress"
   from_port = 0
   to_port = 65535
   protocol = "TCP"
   cidr_blocks = ["0.0.0.0/0"]
   security_group_id = "${aws_security_group.aws-elb.id}"
 }

 # Create a new AWS ELB for K8S API
 resource "aws_elb" "aws-elb-api" {
   name = "kubernetes-elb-${var.aws_cluster_name}"
   subnets = ["${var.aws_subnet_ids_public}"]
   security_groups = ["${aws_security_group.aws-elb.id}"]

   listener {
     instance_port = "${var.k8s_secure_api_port}"
     instance_protocol = "tcp"
     lb_port = "${var.aws_elb_api_port}"
     lb_protocol = "tcp"
   }

   health_check {
     healthy_threshold = 2
     unhealthy_threshold = 2
     timeout = 3
     target = "TCP:${var.k8s_secure_api_port}"
     interval = 30
   }

   cross_zone_load_balancing = true
   idle_timeout = 400
   connection_draining = true
   connection_draining_timeout = 400

   tags = "${merge(var.default_tags, map(
@@ -1,7 +1,7 @@
 output "aws_elb_api_id" {
   value = "${aws_elb.aws-elb-api.id}"
 }

 output "aws_elb_api_fqdn" {
   value = "${aws_elb.aws-elb-api.dns_name}"
 }
@@ -1,33 +1,30 @@
 variable "aws_cluster_name" {
   description = "Name of Cluster"
 }

 variable "aws_vpc_id" {
   description = "AWS VPC ID"
 }

 variable "aws_elb_api_port" {
   description = "Port for AWS ELB"
 }

 variable "k8s_secure_api_port" {
   description = "Secure Port of K8S API Server"
 }

 variable "aws_avail_zones" {
   description = "Availability Zones Used"
   type = "list"
 }

 variable "aws_subnet_ids_public" {
   description = "IDs of Public Subnets"
   type = "list"
 }

 variable "default_tags" {
   description = "Tags for all resources"
   type = "map"
 }
@@ -1,8 +1,9 @@
 #Add AWS Roles for Kubernetes

 resource "aws_iam_role" "kube-master" {
   name = "kubernetes-${var.aws_cluster_name}-master"
   assume_role_policy = <<EOF
 {
   "Version": "2012-10-17",
   "Statement": [
@@ -19,8 +20,9 @@ EOF
 }

 resource "aws_iam_role" "kube-worker" {
   name = "kubernetes-${var.aws_cluster_name}-node"
   assume_role_policy = <<EOF
 {
   "Version": "2012-10-17",
   "Statement": [
@@ -39,9 +41,10 @@ EOF
 #Add AWS Policies for Kubernetes

 resource "aws_iam_role_policy" "kube-master" {
   name = "kubernetes-${var.aws_cluster_name}-master"
   role = "${aws_iam_role.kube-master.id}"
   policy = <<EOF
 {
   "Version": "2012-10-17",
   "Statement": [
@@ -73,9 +76,10 @@ EOF
 }

 resource "aws_iam_role_policy" "kube-worker" {
   name = "kubernetes-${var.aws_cluster_name}-node"
   role = "${aws_iam_role.kube-worker.id}"
   policy = <<EOF
 {
   "Version": "2012-10-17",
   "Statement": [
@@ -124,15 +128,14 @@ resource "aws_iam_role_policy" "kube-worker" {
 EOF
 }

 #Create AWS Instance Profiles

 resource "aws_iam_instance_profile" "kube-master" {
   name = "kube_${var.aws_cluster_name}_master_profile"
   role = "${aws_iam_role.kube-master.name}"
 }

 resource "aws_iam_instance_profile" "kube-worker" {
   name = "kube_${var.aws_cluster_name}_node_profile"
   role = "${aws_iam_role.kube-worker.name}"
 }
@@ -1,7 +1,7 @@
 output "kube-master-profile" {
   value = "${aws_iam_instance_profile.kube-master.name }"
 }

 output "kube-worker-profile" {
   value = "${aws_iam_instance_profile.kube-worker.name }"
 }
@@ -1,3 +1,3 @@
 variable "aws_cluster_name" {
   description = "Name of Cluster"
 }
@@ -1,58 +1,53 @@
 resource "aws_vpc" "cluster-vpc" {
   cidr_block = "${var.aws_vpc_cidr_block}"

   #DNS Related Entries
   enable_dns_support = true
   enable_dns_hostnames = true

   tags = "${merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-vpc"
   ))}"
 }

 resource "aws_eip" "cluster-nat-eip" {
   count = "${length(var.aws_cidr_subnets_public)}"
   vpc = true
 }

 resource "aws_internet_gateway" "cluster-vpc-internetgw" {
   vpc_id = "${aws_vpc.cluster-vpc.id}"

   tags = "${merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-internetgw"
   ))}"
 }

 resource "aws_subnet" "cluster-vpc-subnets-public" {
   vpc_id = "${aws_vpc.cluster-vpc.id}"
-  count="${length(var.aws_avail_zones)}"
+  count = "${length(var.aws_avail_zones)}"
   availability_zone = "${element(var.aws_avail_zones, count.index)}"
   cidr_block = "${element(var.aws_cidr_subnets_public, count.index)}"

   tags = "${merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public",
     "kubernetes.io/cluster/${var.aws_cluster_name}", "member"
   ))}"
 }

 resource "aws_nat_gateway" "cluster-nat-gateway" {
   count = "${length(var.aws_cidr_subnets_public)}"
   allocation_id = "${element(aws_eip.cluster-nat-eip.*.id, count.index)}"
   subnet_id = "${element(aws_subnet.cluster-vpc-subnets-public.*.id, count.index)}"
 }

 resource "aws_subnet" "cluster-vpc-subnets-private" {
   vpc_id = "${aws_vpc.cluster-vpc.id}"
-  count="${length(var.aws_avail_zones)}"
+  count = "${length(var.aws_avail_zones)}"
   availability_zone = "${element(var.aws_avail_zones, count.index)}"
   cidr_block = "${element(var.aws_cidr_subnets_private, count.index)}"

   tags = "${merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
   ))}"
 }
@@ -62,81 +57,78 @@ resource "aws_subnet" "cluster-vpc-subnets-private" {
 #TODO: Do we need two routing tables for each subnet for redundancy or is one enough?

 resource "aws_route_table" "kubernetes-public" {
   vpc_id = "${aws_vpc.cluster-vpc.id}"

   route {
     cidr_block = "0.0.0.0/0"
     gateway_id = "${aws_internet_gateway.cluster-vpc-internetgw.id}"
   }

   tags = "${merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-routetable-public"
   ))}"
 }

 resource "aws_route_table" "kubernetes-private" {
   count = "${length(var.aws_cidr_subnets_private)}"
   vpc_id = "${aws_vpc.cluster-vpc.id}"

   route {
     cidr_block = "0.0.0.0/0"
     nat_gateway_id = "${element(aws_nat_gateway.cluster-nat-gateway.*.id, count.index)}"
   }

   tags = "${merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
   ))}"
 }

 resource "aws_route_table_association" "kubernetes-public" {
   count = "${length(var.aws_cidr_subnets_public)}"
   subnet_id = "${element(aws_subnet.cluster-vpc-subnets-public.*.id,count.index)}"
   route_table_id = "${aws_route_table.kubernetes-public.id}"
 }

 resource "aws_route_table_association" "kubernetes-private" {
   count = "${length(var.aws_cidr_subnets_private)}"
   subnet_id = "${element(aws_subnet.cluster-vpc-subnets-private.*.id,count.index)}"
   route_table_id = "${element(aws_route_table.kubernetes-private.*.id,count.index)}"
 }

 #Kubernetes Security Groups

 resource "aws_security_group" "kubernetes" {
   name = "kubernetes-${var.aws_cluster_name}-securitygroup"
   vpc_id = "${aws_vpc.cluster-vpc.id}"

   tags = "${merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-securitygroup"
   ))}"
 }

 resource "aws_security_group_rule" "allow-all-ingress" {
   type = "ingress"
   from_port = 0
   to_port = 65535
   protocol = "-1"
-  cidr_blocks= ["${var.aws_vpc_cidr_block}"]
+  cidr_blocks = ["${var.aws_vpc_cidr_block}"]
   security_group_id = "${aws_security_group.kubernetes.id}"
 }

 resource "aws_security_group_rule" "allow-all-egress" {
   type = "egress"
   from_port = 0
   to_port = 65535
   protocol = "-1"
   cidr_blocks = ["0.0.0.0/0"]
   security_group_id = "${aws_security_group.kubernetes.id}"
 }

 resource "aws_security_group_rule" "allow-ssh-connections" {
   type = "ingress"
   from_port = 22
   to_port = 22
   protocol = "TCP"
   cidr_blocks = ["0.0.0.0/0"]
   security_group_id = "${aws_security_group.kubernetes.id}"
 }
@@ -1,21 +1,19 @@
 output "aws_vpc_id" {
   value = "${aws_vpc.cluster-vpc.id}"
 }

 output "aws_subnet_ids_private" {
   value = ["${aws_subnet.cluster-vpc-subnets-private.*.id}"]
 }

 output "aws_subnet_ids_public" {
   value = ["${aws_subnet.cluster-vpc-subnets-public.*.id}"]
 }

 output "aws_security_group" {
   value = ["${aws_security_group.kubernetes.*.id}"]
 }

 output "default_tags" {
   value = "${var.default_tags}"
 }
@@ -1,29 +1,27 @@
 variable "aws_vpc_cidr_block" {
   description = "CIDR Blocks for AWS VPC"
 }

 variable "aws_cluster_name" {
   description = "Name of Cluster"
 }

 variable "aws_avail_zones" {
   description = "AWS Availability Zones Used"
   type = "list"
 }

 variable "aws_cidr_subnets_private" {
   description = "CIDR Blocks for private subnets in Availability zones"
   type = "list"
 }

 variable "aws_cidr_subnets_public" {
   description = "CIDR Blocks for public subnets in Availability zones"
   type = "list"
 }

 variable "default_tags" {
   description = "Default tags for all resources"
   type = "map"
 }
Some files were not shown because too many files have changed in this diff.