Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2025-12-14 22:04:43 +03:00)

Compare commits
815 Commits
47  .github/ISSUE_TEMPLATE.md  (vendored, new file)

@@ -0,0 +1,47 @@
+<!-- Thanks for filing an issue! Before hitting the button, please answer these questions.-->
+
+**Is this a BUG REPORT or FEATURE REQUEST?** (choose one):
+
+<!--
+If this is a BUG REPORT, please:
+  - Fill in as much of the template below as you can. If you leave out
+    information, we can't help you as well.
+
+If this is a FEATURE REQUEST, please:
+  - Describe *in detail* the feature/behavior/change you'd like to see.
+
+In both cases, be ready for followup questions, and please respond in a timely
+manner. If we can't reproduce a bug or think a feature already exists, we
+might close your issue. If we're wrong, PLEASE feel free to reopen it and
+explain why.
+-->
+
+**Environment**:
+- **Cloud provider or hardware configuration:**
+
+- **OS (`printf "$(uname -srm)\n$(cat /etc/os-release)\n"`):**
+
+- **Version of Ansible** (`ansible --version`):
+
+
+**Kubespray version (commit) (`git rev-parse --short HEAD`):**
+
+
+**Network plugin used**:
+
+
+**Copy of your inventory file:**
+
+
+**Command used to invoke ansible**:
+
+
+**Output of ansible run**:
+<!-- We recommend using snippets services like https://gist.github.com/ etc. -->
+
+**Anything else do we need to know**:
+<!-- By running scripts/collect-info.yaml you can get a lot of useful informations.
+Script can be started by:
+ansible-playbook -i <inventory_file_path> -u <ssh_user> -e ansible_ssh_user=<ssh_user> -b --become-user=root -e dir=`pwd` scripts/collect-info.yaml
+(If you using CoreOS remember to add '-e ansible_python_interpreter=/opt/bin/python').
+After running this command you can find logs in `pwd`/logs.tar.gz. You can even upload somewhere entire file and paste link here.-->
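The issue template above leaves the inventory path and SSH user as placeholders. A hypothetical invocation of the log-collection playbook it references might look like the sketch below; the inventory path and the `ubuntu`/`core` users are illustrative values, not prescribed by the template.

```sh
# Collect debug info into `pwd`/logs.tar.gz before filing an issue
# (inventory path and SSH user are example values)
ansible-playbook -i inventory/inventory.ini -u ubuntu -e ansible_ssh_user=ubuntu \
  -b --become-user=root -e dir=`pwd` scripts/collect-info.yaml

# On CoreOS hosts, point Ansible at the bootstrapped interpreter, as the template notes
ansible-playbook -i inventory/inventory.ini -u core -e ansible_ssh_user=core \
  -e ansible_python_interpreter=/opt/bin/python \
  -b --become-user=root -e dir=`pwd` scripts/collect-info.yaml
```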
6  .gitignore  (vendored)

@@ -1,13 +1,19 @@
 .vagrant
 *.retry
 inventory/vagrant_ansible_inventory
+inventory/group_vars/fake_hosts.yml
+inventory/host_vars/
 temp
 .idea
 .tox
 .cache
+*.bak
 *.egg-info
 *.pyc
 *.pyo
 *.tfstate
 *.tfstate.backup
+**/*.sw[pon]
 /ssh-bastion.conf
+**/*.sw[pon]
+vagrant/
311  .gitlab-ci.yml

@@ -1,4 +1,5 @@
 stages:
+  - moderator
   - unit-tests
   - deploy-gce-part1
   - deploy-gce-part2
@@ -17,7 +18,7 @@ variables:
 # us-west1-a
 
 before_script:
-  - pip install ansible
+  - pip install ansible==2.3.0
   - pip install netaddr
   - pip install apache-libcloud==0.20.1
   - pip install boto==2.9.0
@@ -46,9 +47,23 @@ before_script:
   PRIVATE_KEY: $GCE_PRIVATE_KEY
   GS_ACCESS_KEY_ID: $GS_KEY
   GS_SECRET_ACCESS_KEY: $GS_SECRET
+  CLOUD_MACHINE_TYPE: "g1-small"
   ANSIBLE_KEEP_REMOTE_FILES: "1"
+  ANSIBLE_CONFIG: ./tests/ansible.cfg
   BOOTSTRAP_OS: none
+  DOWNLOAD_LOCALHOST: "false"
+  DOWNLOAD_RUN_ONCE: "false"
+  IDEMPOT_CHECK: "false"
+  RESET_CHECK: "false"
+  UPGRADE_TEST: "false"
+  RESOLVCONF_MODE: docker_dns
   LOG_LEVEL: "-vv"
+  ETCD_DEPLOYMENT: "docker"
+  KUBELET_DEPLOYMENT: "docker"
+  VAULT_DEPLOYMENT: "docker"
+  WEAVE_CPU_LIMIT: "100m"
+  AUTHORIZATION_MODES: "{ 'authorization_modes': [] }"
+  MAGIC: "ci check this"
 
 .gce: &gce
   <<: *job
@@ -58,62 +73,181 @@ before_script:
     paths:
       - downloads/
      - $HOME/.cache
-  stage: deploy-gce
   before_script:
     - docker info
-    - pip install ansible==2.1.3.0
+    - pip install ansible==2.3.0
     - pip install netaddr
     - pip install apache-libcloud==0.20.1
     - pip install boto==2.9.0
     - mkdir -p /.ssh
-    - cp tests/ansible.cfg .
     - mkdir -p $HOME/.ssh
     - echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
     - echo $GCE_PEM_FILE | base64 -d > $HOME/.ssh/gce
     - echo $GCE_CREDENTIALS > $HOME/.ssh/gce.json
     - chmod 400 $HOME/.ssh/id_rsa
     - ansible-playbook --version
-    - cp tests/ansible.cfg .
     - export PYPATH=$([ $BOOTSTRAP_OS = none ] && echo /usr/bin/python || echo /opt/bin/python)
   script:
     - pwd
     - ls
     - echo ${PWD}
+    - echo "${STARTUP_SCRIPT}"
     - >
-      ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local $LOG_LEVEL
+      ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local
-      -e mode=${CLUSTER_MODE}
+      ${LOG_LEVEL}
-      -e test_id=${TEST_ID}
+      -e cloud_image=${CLOUD_IMAGE}
-      -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
+      -e cloud_region=${CLOUD_REGION}
+      -e gce_credentials_file=${HOME}/.ssh/gce.json
       -e gce_project_id=${GCE_PROJECT_ID}
       -e gce_service_account_email=${GCE_ACCOUNT}
-      -e gce_credentials_file=${HOME}/.ssh/gce.json
+      -e cloud_machine_type=${CLOUD_MACHINE_TYPE}
-      -e cloud_image=${CLOUD_IMAGE}
       -e inventory_path=${PWD}/inventory/inventory.ini
-      -e cloud_region=${CLOUD_REGION}
+      -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
+      -e mode=${CLUSTER_MODE}
+      -e test_id=${TEST_ID}
+      -e startup_script="'${STARTUP_SCRIPT}'"
+
+    # Check out latest tag if testing upgrade
+    # Uncomment when gitlab kargo repo has tags
+    #- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
+    - test "${UPGRADE_TEST}" != "false" && git checkout acae0fe4a36bd1d3cd267e72ad01126a72d1458a
+
 
     # Create cluster
     - >
+      ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER
+      ${SSH_ARGS}
+      ${LOG_LEVEL}
+      -e ansible_python_interpreter=${PYPATH}
+      -e ansible_ssh_user=${SSH_USER}
+      -e bootstrap_os=${BOOTSTRAP_OS}
+      -e cert_management=${CERT_MGMT:-script}
+      -e cloud_provider=gce
+      -e deploy_netchecker=true
+      -e download_localhost=${DOWNLOAD_LOCALHOST}
+      -e download_run_once=${DOWNLOAD_RUN_ONCE}
+      -e etcd_deployment_type=${ETCD_DEPLOYMENT}
+      -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
+      -e kubedns_min_replicas=1
+      -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
+      -e local_release_dir=${PWD}/downloads
+      -e resolvconf_mode=${RESOLVCONF_MODE}
+      -e vault_deployment_type=${VAULT_DEPLOYMENT}
+      -e "${AUTHORIZATION_MODES}"
+      --limit "all:!fake_hosts"
+      cluster.yml
+
+    # Repeat deployment if testing upgrade
+    - >
+      if [ "${UPGRADE_TEST}" != "false" ]; then
+      test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml";
+      test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml";
+      git checkout "${CI_BUILD_REF}";
+      ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER
+      ${SSH_ARGS}
+      ${LOG_LEVEL}
+      -e ansible_python_interpreter=${PYPATH}
+      -e ansible_ssh_user=${SSH_USER}
+      -e bootstrap_os=${BOOTSTRAP_OS}
+      -e cloud_provider=gce
+      -e deploy_netchecker=true
+      -e download_localhost=${DOWNLOAD_LOCALHOST}
+      -e download_run_once=${DOWNLOAD_RUN_ONCE}
+      -e etcd_deployment_type=${ETCD_DEPLOYMENT}
+      -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
+      -e kubedns_min_replicas=1
+      -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
+      -e local_release_dir=${PWD}/downloads
+      -e resolvconf_mode=${RESOLVCONF_MODE}
+      -e weave_cpu_requests=${WEAVE_CPU_LIMIT}
+      -e weave_cpu_limit=${WEAVE_CPU_LIMIT}
+      -e "${AUTHORIZATION_MODES}"
+      --limit "all:!fake_hosts"
+      $PLAYBOOK;
+      fi
+
+    # Tests Cases
+    ## Test Master API
+    - ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
+
+    ## Ping the between 2 pod
+    - ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
+
+    ## Advanced DNS checks
+    - ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL
+
+    ## Idempotency checks 1/5 (repeat deployment)
+    - >
+      if [ "${IDEMPOT_CHECK}" = "true" ]; then
       ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
       -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
       --private-key=${HOME}/.ssh/id_rsa
       -e bootstrap_os=${BOOTSTRAP_OS}
       -e ansible_python_interpreter=${PYPATH}
-      -e download_run_once=true
+      -e download_localhost=${DOWNLOAD_LOCALHOST}
-      -e download_localhost=true
+      -e download_run_once=${DOWNLOAD_RUN_ONCE}
       -e deploy_netchecker=true
+      -e resolvconf_mode=${RESOLVCONF_MODE}
       -e local_release_dir=${PWD}/downloads
-      cluster.yml
+      -e etcd_deployment_type=${ETCD_DEPLOYMENT}
+      -e kubedns_min_replicas=1
+      -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
+      -e "${AUTHORIZATION_MODES}"
+      --limit "all:!fake_hosts"
+      cluster.yml;
+      fi
+
+    ## Idempotency checks 2/5 (Advanced DNS checks)
+    - >
+      if [ "${IDEMPOT_CHECK}" = "true" ]; then
+      ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
+      -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
+      --limit "all:!fake_hosts"
+      tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
+      fi
+
-    # Tests Cases
+    ## Idempotency checks 3/5 (reset deployment)
-    ## Test Master API
+    - >
-    - ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/010_check-apiserver.yml $LOG_LEVEL
+      if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then
+      ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
+      -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
+      --private-key=${HOME}/.ssh/id_rsa
+      -e bootstrap_os=${BOOTSTRAP_OS}
+      -e ansible_python_interpreter=${PYPATH}
+      -e reset_confirmation=yes
+      --limit "all:!fake_hosts"
+      reset.yml;
+      fi
+
-    ## Ping the between 2 pod
+    ## Idempotency checks 4/5 (redeploy after reset)
-    - ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
+    - >
+      if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then
+      ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
+      -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
+      --private-key=${HOME}/.ssh/id_rsa
+      -e bootstrap_os=${BOOTSTRAP_OS}
+      -e ansible_python_interpreter=${PYPATH}
+      -e download_localhost=${DOWNLOAD_LOCALHOST}
+      -e download_run_once=${DOWNLOAD_RUN_ONCE}
+      -e deploy_netchecker=true
+      -e resolvconf_mode=${RESOLVCONF_MODE}
+      -e local_release_dir=${PWD}/downloads
+      -e etcd_deployment_type=${ETCD_DEPLOYMENT}
+      -e kubedns_min_replicas=1
+      -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
+      -e "${AUTHORIZATION_MODES}"
+      --limit "all:!fake_hosts"
+      cluster.yml;
+      fi
+
-    ## Advanced DNS checks
+    ## Idempotency checks 5/5 (Advanced DNS checks)
-    - ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/040_check-network-adv.yml $LOG_LEVEL
+    - >
+      if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then
+      ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
+      -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
+      --limit "all:!fake_hosts"
+      tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
+      fi
+
   after_script:
     - >
@@ -134,15 +268,21 @@ before_script:
   KUBE_NETWORK_PLUGIN: calico
   CLOUD_IMAGE: coreos-stable
   CLOUD_REGION: us-west1-b
-  CLUSTER_MODE: separated
+  CLUSTER_MODE: separate
   BOOTSTRAP_OS: coreos
+  RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
+  ##User-data to simply turn off coreos upgrades
+  STARTUP_SCRIPT: 'systemctl disable locksmithd && systemctl stop locksmithd'
 
-.debian8_canal_ha_variables: &debian8_canal_ha_variables
+.ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables
 # stage: deploy-gce-part1
   KUBE_NETWORK_PLUGIN: canal
-  CLOUD_IMAGE: debian-8-kubespray
+  CLOUD_IMAGE: ubuntu-1604-xenial
-  CLOUD_REGION: us-east1-b
+  CLOUD_REGION: europe-west1-b
+  CLOUD_MACHINE_TYPE: "n1-standard-2"
   CLUSTER_MODE: ha
+  UPGRADE_TEST: "graceful"
+  STARTUP_SCRIPT: ""
 
 .rhel7_weave_variables: &rhel7_weave_variables
 # stage: deploy-gce-part1
@@ -150,6 +290,7 @@ before_script:
   CLOUD_IMAGE: rhel-7
   CLOUD_REGION: europe-west1-b
   CLUSTER_MODE: default
+  STARTUP_SCRIPT: ""
 
 .centos7_flannel_variables: &centos7_flannel_variables
 # stage: deploy-gce-part2
@@ -157,13 +298,15 @@ before_script:
   CLOUD_IMAGE: centos-7
   CLOUD_REGION: us-west1-a
   CLUSTER_MODE: default
+  STARTUP_SCRIPT: ""
 
 .debian8_calico_variables: &debian8_calico_variables
 # stage: deploy-gce-part2
   KUBE_NETWORK_PLUGIN: calico
   CLOUD_IMAGE: debian-8-kubespray
   CLOUD_REGION: us-central1-b
   CLUSTER_MODE: default
+  STARTUP_SCRIPT: ""
 
 .coreos_canal_variables: &coreos_canal_variables
 # stage: deploy-gce-part2
@@ -172,37 +315,78 @@ before_script:
   CLOUD_REGION: us-east1-b
   CLUSTER_MODE: default
   BOOTSTRAP_OS: coreos
+  IDEMPOT_CHECK: "true"
+  RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
+  STARTUP_SCRIPT: 'systemctl disable locksmithd && systemctl stop locksmithd'
 
 .rhel7_canal_sep_variables: &rhel7_canal_sep_variables
 # stage: deploy-gce-special
   KUBE_NETWORK_PLUGIN: canal
   CLOUD_IMAGE: rhel-7
   CLOUD_REGION: us-east1-b
-  CLUSTER_MODE: separated
+  CLUSTER_MODE: separate
+  STARTUP_SCRIPT: ""
 
 .ubuntu_weave_sep_variables: &ubuntu_weave_sep_variables
 # stage: deploy-gce-special
   KUBE_NETWORK_PLUGIN: weave
   CLOUD_IMAGE: ubuntu-1604-xenial
   CLOUD_REGION: us-central1-b
-  CLUSTER_MODE: separated
+  CLUSTER_MODE: separate
+  IDEMPOT_CHECK: "false"
+  STARTUP_SCRIPT: ""
 
 .centos7_calico_ha_variables: &centos7_calico_ha_variables
 # stage: deploy-gce-special
   KUBE_NETWORK_PLUGIN: calico
+  DOWNLOAD_LOCALHOST: "true"
+  DOWNLOAD_RUN_ONCE: "true"
   CLOUD_IMAGE: centos-7
   CLOUD_REGION: europe-west1-b
-  CLUSTER_MODE: ha
+  CLUSTER_MODE: ha-scale
+  IDEMPOT_CHECK: "true"
+  STARTUP_SCRIPT: ""
 
 .coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables
 # stage: deploy-gce-special
   KUBE_NETWORK_PLUGIN: weave
-  CLOUD_IMAGE: coreos-alpha
+  CLOUD_IMAGE: coreos-alpha-1325-0-0-v20170216
   CLOUD_REGION: us-west1-a
-  CLUSTER_MODE: ha
+  CLUSTER_MODE: ha-scale
   BOOTSTRAP_OS: coreos
+  RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
+  STARTUP_SCRIPT: 'systemctl disable locksmithd && systemctl stop locksmithd'
 
-# Builds for PRs only (auto) and triggers (auto)
+.ubuntu_rkt_sep_variables: &ubuntu_rkt_sep_variables
+# stage: deploy-gce-part1
+  KUBE_NETWORK_PLUGIN: flannel
+  CLOUD_IMAGE: ubuntu-1604-xenial
+  CLOUD_REGION: us-central1-b
+  CLUSTER_MODE: separate
+  ETCD_DEPLOYMENT: rkt
+  KUBELET_DEPLOYMENT: rkt
+  STARTUP_SCRIPT: ""
+
+#Note(mattymo): Vault deployment is broken and needs work
+#.ubuntu_vault_sep_variables: &ubuntu_vault_sep_variables
+## stage: deploy-gce-part1
+# KUBE_NETWORK_PLUGIN: canal
+# CERT_MGMT: vault
+# CLOUD_IMAGE: ubuntu-1604-xenial
+# CLOUD_REGION: us-central1-b
+# CLUSTER_MODE: separate
+# STARTUP_SCRIPT: ""
+
+.ubuntu_flannel_rbac_variables: &ubuntu_flannel_rbac_variables
+# stage: deploy-gce-special
+  AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }"
+  KUBE_NETWORK_PLUGIN: flannel
+  CLOUD_IMAGE: ubuntu-1604-xenial
+  CLOUD_REGION: europe-west1-b
+  CLUSTER_MODE: separate
+  STARTUP_SCRIPT: ""
+
+# Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
 coreos-calico-sep:
   stage: deploy-gce-part1
   <<: *job
@@ -267,24 +451,24 @@ ubuntu-weave-sep-triggers:
   only: ['triggers']
 
 # More builds for PRs/merges (manual) and triggers (auto)
-debian8-canal-ha:
+ubuntu-canal-ha:
   stage: deploy-gce-part1
   <<: *job
   <<: *gce
   variables:
     <<: *gce_variables
-    <<: *debian8_canal_ha_variables
+    <<: *ubuntu_canal_ha_variables
   when: manual
   except: ['triggers']
   only: ['master', /^pr-.*$/]
 
-debian8-canal-ha-triggers:
+ubuntu-canal-ha-triggers:
   stage: deploy-gce-part1
   <<: *job
   <<: *gce
   variables:
     <<: *gce_variables
-    <<: *debian8_canal_ha_variables
+    <<: *ubuntu_canal_ha_variables
   when: on_success
   only: ['triggers']
 
@@ -309,7 +493,7 @@ rhel7-weave-triggers:
   when: on_success
   only: ['triggers']
 
-debian8-calico:
+debian8-calico-upgrade:
   stage: deploy-gce-part2
   <<: *job
   <<: *gce
@@ -405,12 +589,59 @@ coreos-alpha-weave-ha:
   except: ['triggers']
   only: ['master', /^pr-.*$/]
 
+ubuntu-rkt-sep:
+  stage: deploy-gce-part1
+  <<: *job
+  <<: *gce
+  variables:
+    <<: *gce_variables
+    <<: *ubuntu_rkt_sep_variables
+  when: manual
+  except: ['triggers']
+  only: ['master', /^pr-.*$/]
+
+#Note(mattymo): Vault deployment is broken (https://github.com/kubernetes-incubator/kubespray/issues/1545)
+#ubuntu-vault-sep:
+# stage: deploy-gce-part1
+# <<: *job
+# <<: *gce
+# variables:
+# <<: *gce_variables
+# <<: *ubuntu_vault_sep_variables
+# when: manual
+# except: ['triggers']
+# only: ['master', /^pr-.*$/]
+
+ubuntu-flannel-rbac-sep:
+  stage: deploy-gce-special
+  <<: *job
+  <<: *gce
+  variables:
+    <<: *gce_variables
+    <<: *ubuntu_flannel_rbac_variables
+  when: manual
+  except: ['triggers']
+  only: ['master', /^pr-.*$/]
+
+# Premoderated with manual actions
+ci-authorized:
+  <<: *job
+  stage: moderator
+  before_script:
+    - apt-get -y install jq
+  script:
+    - /bin/sh scripts/premoderator.sh
+  except: ['triggers', 'master']
+
 syntax-check:
   <<: *job
   stage: unit-tests
   script:
     - ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root cluster.yml -vvv --syntax-check
-  except: ['triggers']
+    - ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root upgrade-cluster.yml -vvv --syntax-check
+    - ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root reset.yml -vvv --syntax-check
+    - ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root extra_playbooks/upgrade-only-k8s.yml -vvv --syntax-check
+  except: ['triggers', 'master']
 
 tox-inventory-builder:
   stage: unit-tests
@@ -419,4 +650,4 @@ tox-inventory-builder:
     - pip install tox
     - cd contrib/inventory_builder && tox
   when: manual
-  except: ['triggers']
+  except: ['triggers', 'master']
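Several of the new idempotency and reset steps in the CI file above run only when two pipeline variables are enabled at once (for example `IDEMPOT_CHECK` together with `RESET_CHECK`). A minimal sketch of that kind of gating in plain POSIX shell follows; the variable values are illustrative, and note that the `[` test builtin combines two conditions with `&&` between separate tests (or `-a` inside one test), not with a literal `AND`.

```sh
#!/bin/sh
# Example values; in the pipeline these come from GitLab CI variables.
IDEMPOT_CHECK="true"
RESET_CHECK="true"

# Run the guarded step only when both flags are enabled.
if [ "${IDEMPOT_CHECK}" = "true" ] && [ "${RESET_CHECK}" = "true" ]; then
  echo "would run reset.yml and then redeploy cluster.yml here"
fi
```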
161  .travis.yml.bak

@@ -1,161 +0,0 @@
-sudo: required
-
-services:
-  - docker
-
-git:
-  depth: 5
-
-env:
-  global:
-    GCE_USER=travis
-    SSH_USER=$GCE_USER
-    TEST_ID=$TRAVIS_JOB_NUMBER
-    CONTAINER_ENGINE=docker
-    PRIVATE_KEY=$GCE_PRIVATE_KEY
-    GS_ACCESS_KEY_ID=$GS_KEY
-    GS_SECRET_ACCESS_KEY=$GS_SECRET
-    ANSIBLE_KEEP_REMOTE_FILES=1
-    CLUSTER_MODE=default
-    BOOTSTRAP_OS=none
-  matrix:
-    # Debian Jessie
-    - >-
-      KUBE_NETWORK_PLUGIN=canal
-      CLOUD_IMAGE=debian-8-kubespray
-      CLOUD_REGION=asia-east1-a
-      CLUSTER_MODE=ha
-    - >-
-      KUBE_NETWORK_PLUGIN=calico
-      CLOUD_IMAGE=debian-8-kubespray
-      CLOUD_REGION=europe-west1-c
-      CLUSTER_MODE=default
-
-    # Centos 7
-    - >-
-      KUBE_NETWORK_PLUGIN=flannel
-      CLOUD_IMAGE=centos-7
-      CLOUD_REGION=asia-northeast1-c
-      CLUSTER_MODE=default
-    - >-
-      KUBE_NETWORK_PLUGIN=calico
-      CLOUD_IMAGE=centos-7
-      CLOUD_REGION=us-central1-b
-      CLUSTER_MODE=ha
-
-    # Redhat 7
-    - >-
-      KUBE_NETWORK_PLUGIN=weave
-      CLOUD_IMAGE=rhel-7
-      CLOUD_REGION=us-east1-c
-      CLUSTER_MODE=default
-
-    # CoreOS stable
-    #- >-
-    #  KUBE_NETWORK_PLUGIN=weave
-    #  CLOUD_IMAGE=coreos-stable
-    #  CLOUD_REGION=europe-west1-b
-    #  CLUSTER_MODE=ha
-    #  BOOTSTRAP_OS=coreos
-    - >-
-      KUBE_NETWORK_PLUGIN=canal
-      CLOUD_IMAGE=coreos-stable
-      CLOUD_REGION=us-west1-b
-      CLUSTER_MODE=default
-      BOOTSTRAP_OS=coreos
-
-    # Extra cases for separated roles
-    - >-
-      KUBE_NETWORK_PLUGIN=canal
-      CLOUD_IMAGE=rhel-7
-      CLOUD_REGION=asia-northeast1-b
-      CLUSTER_MODE=separate
-    - >-
-      KUBE_NETWORK_PLUGIN=weave
-      CLOUD_IMAGE=ubuntu-1604-xenial
-      CLOUD_REGION=europe-west1-d
-      CLUSTER_MODE=separate
-    - >-
-      KUBE_NETWORK_PLUGIN=calico
-      CLOUD_IMAGE=coreos-stable
-      CLOUD_REGION=us-central1-f
-      CLUSTER_MODE=separate
-      BOOTSTRAP_OS=coreos
-
-matrix:
-  allow_failures:
-    - env: KUBE_NETWORK_PLUGIN=weave CLOUD_IMAGE=coreos-stable CLOUD_REGION=europe-west1-b CLUSTER_MODE=ha BOOTSTRAP_OS=coreos
-
-before_install:
-  # Install Ansible.
-  - pip install --user ansible
-  - pip install --user netaddr
-  # W/A https://github.com/ansible/ansible-modules-core/issues/5196#issuecomment-253766186
-  - pip install --user apache-libcloud==0.20.1
-  - pip install --user boto==2.9.0 -U
-  # Load cached docker images
-  - if [ -d /var/tmp/releases ]; then find /var/tmp/releases -type f -name "*.tar" | xargs -I {} sh -c "zcat {} | docker load"; fi
-
-cache:
-  - directories:
-    - $HOME/.cache/pip
-    - $HOME/.local
-    - /var/tmp/releases
-
-before_script:
-  - echo "RUN $TRAVIS_JOB_NUMBER $KUBE_NETWORK_PLUGIN $CONTAINER_ENGINE "
-  - mkdir -p $HOME/.ssh
-  - echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
-  - echo $GCE_PEM_FILE | base64 -d > $HOME/.ssh/gce
-  - chmod 400 $HOME/.ssh/id_rsa
-  - chmod 755 $HOME/.local/bin/ansible-playbook
-  - $HOME/.local/bin/ansible-playbook --version
-  - cp tests/ansible.cfg .
-  - export PYPATH=$([ $BOOTSTRAP_OS = none ] && echo /usr/bin/python || echo /opt/bin/python)
-  # - "echo $HOME/.local/bin/ansible-playbook -i inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e '{\"cloud_provider\": true}' $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} setup-kubernetes/cluster.yml"
-
-script:
-  - >
-    $HOME/.local/bin/ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local $LOG_LEVEL
-    -e mode=${CLUSTER_MODE}
-    -e test_id=${TEST_ID}
-    -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-    -e gce_project_id=${GCE_PROJECT_ID}
-    -e gce_service_account_email=${GCE_ACCOUNT}
-    -e gce_pem_file=${HOME}/.ssh/gce
-    -e cloud_image=${CLOUD_IMAGE}
-    -e inventory_path=${PWD}/inventory/inventory.ini
-    -e cloud_region=${CLOUD_REGION}
-
-  # Create cluster with netchecker app deployed
-  - >
-    $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-    -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-    -e bootstrap_os=${BOOTSTRAP_OS}
-    -e ansible_python_interpreter=${PYPATH}
-    -e download_run_once=true
-    -e download_localhost=true
-    -e local_release_dir=/var/tmp/releases
-    -e deploy_netchecker=true
-    cluster.yml
-
-  # Tests Cases
-  ## Test Master API
-  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/010_check-apiserver.yml $LOG_LEVEL
-  ## Ping the between 2 pod
-  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
-  ## Advanced DNS checks
-  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/040_check-network-adv.yml $LOG_LEVEL
-
-after_script:
-  - >
-    $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
-    -e mode=${CLUSTER_MODE}
-    -e test_id=${TEST_ID}
-    -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-    -e gce_project_id=${GCE_PROJECT_ID}
-    -e gce_service_account_email=${GCE_ACCOUNT}
-    -e gce_pem_file=${HOME}/.ssh/gce
-    -e cloud_image=${CLOUD_IMAGE}
-    -e inventory_path=${PWD}/inventory/inventory.ini
-    -e cloud_region=${CLOUD_REGION}
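The removed Travis configuration warmed each build by re-loading previously saved container images (`zcat {} | docker load` over a cache directory). A generic sketch of that save-and-restore pattern, independent of Travis, is shown below; the image name and cache path are placeholders, only the restore loop mirrors the removed `before_install` step.

```sh
# Save an image into the cache directory (run once after pulling or building it).
mkdir -p /var/tmp/releases
docker save example/image:tag | gzip > /var/tmp/releases/example-image.tar

# Restore every cached image on the next run, as the removed before_install step did.
if [ -d /var/tmp/releases ]; then
  find /var/tmp/releases -type f -name "*.tar" | xargs -I {} sh -c "zcat {} | docker load"
fi
```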
103
README.md
103
README.md
@@ -1,8 +1,8 @@
|
|||||||

|

|
||||||
|
|
||||||
##Deploy a production ready kubernetes cluster
|
## Deploy a production ready kubernetes cluster
|
||||||
|
|
||||||
If you have questions, join us on the [kubernetes slack](https://slack.k8s.io), channel **#kargo**.
|
If you have questions, join us on the [kubernetes slack](https://slack.k8s.io), channel **#kubespray**.
|
||||||
|
|
||||||
- Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal**
|
- Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal**
|
||||||
- **High available** cluster
|
- **High available** cluster
|
||||||
@@ -13,76 +13,109 @@ If you have questions, join us on the [kubernetes slack](https://slack.k8s.io),
|
|||||||
|
|
||||||
To deploy the cluster you can use :
|
To deploy the cluster you can use :
|
||||||
|
|
||||||
[**kargo-cli**](https://github.com/kubespray/kargo-cli) <br>
|
[**kubespray-cli**](https://github.com/kubespray/kubespray-cli) <br>
|
||||||
**Ansible** usual commands <br>
|
**Ansible** usual commands and [**inventory builder**](https://github.com/kubernetes-incubator/kubespray/blob/master/contrib/inventory_builder/inventory.py) <br>
|
||||||
**vagrant** by simply running `vagrant up` (for tests purposes) <br>
|
**vagrant** by simply running `vagrant up` (for tests purposes) <br>
|
||||||
|
|
||||||
|
|
||||||
* [Requirements](#requirements)
|
* [Requirements](#requirements)
|
||||||
|
* [Kubespray vs ...](docs/comparisons.md)
|
||||||
* [Getting started](docs/getting-started.md)
|
* [Getting started](docs/getting-started.md)
|
||||||
|
* [Ansible inventory and tags](docs/ansible.md)
|
||||||
|
* [Integration with existing ansible repo](docs/integration.md)
|
||||||
|
* [Deployment data variables](docs/vars.md)
|
||||||
|
* [DNS stack](docs/dns-stack.md)
|
||||||
|
* [HA mode](docs/ha-mode.md)
|
||||||
|
* [Network plugins](#network-plugins)
|
||||||
* [Vagrant install](docs/vagrant.md)
|
* [Vagrant install](docs/vagrant.md)
|
||||||
* [CoreOS bootstrap](docs/coreos.md)
|
* [CoreOS bootstrap](docs/coreos.md)
|
||||||
* [Ansible variables](docs/ansible.md)
|
* [Downloaded artifacts](docs/downloads.md)
|
||||||
* [Cloud providers](docs/cloud.md)
|
* [Cloud providers](docs/cloud.md)
|
||||||
* [OpenStack](docs/openstack.md)
|
* [OpenStack](docs/openstack.md)
|
||||||
* [AWS](docs/aws.md)
|
* [AWS](docs/aws.md)
|
||||||
* [Azure](docs/azure.md)
|
* [Azure](docs/azure.md)
|
||||||
* [Network plugins](#network-plugins)
|
* [vSphere](docs/vsphere.md)
|
||||||
|
* [Large deployments](docs/large-deployments.md)
|
||||||
|
* [Upgrades basics](docs/upgrades.md)
|
||||||
* [Roadmap](docs/roadmap.md)
|
* [Roadmap](docs/roadmap.md)
|
||||||
|
|
||||||
Supported Linux distributions
|
Supported Linux distributions
|
||||||
===============
|
===============
|
||||||
|
|
||||||
* **CoreOS**
|
* **Container Linux by CoreOS**
|
||||||
* **Debian** Wheezy, Jessie
|
* **Debian** Jessie
|
||||||
* **Ubuntu** 14.10, 15.04, 15.10, 16.04
|
* **Ubuntu** 16.04
|
||||||
* **Fedora** 23
|
|
||||||
* **CentOS/RHEL** 7
|
* **CentOS/RHEL** 7
|
||||||
|
|
||||||
Versions
|
Note: Upstart/SysV init based OS types are not supported.
|
||||||
--------------
|
|
||||||
|
|
||||||
[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.4.6 <br>
|
Versions of supported components
|
||||||
[etcd](https://github.com/coreos/etcd/releases) v3.0.6 <br>
|
--------------------------------
|
||||||
[flanneld](https://github.com/coreos/flannel/releases) v0.6.2 <br>
|
|
||||||
[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.22.0 <br>
|
|
||||||
[weave](http://weave.works/) v1.6.1 <br>
|
|
||||||
[docker](https://www.docker.com/) v1.10.3 <br>
|
|
||||||
|
|
||||||
|
|
||||||
|
[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.6.7 <br>
|
||||||
|
[etcd](https://github.com/coreos/etcd/releases) v3.0.17 <br>
|
||||||
|
[flanneld](https://github.com/coreos/flannel/releases) v0.8.0 <br>
|
||||||
|
[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.23.0 <br>
|
||||||
|
[canal](https://github.com/projectcalico/canal) (given calico/flannel versions) <br>
|
||||||
|
[weave](http://weave.works/) v2.0.1 <br>
|
||||||
|
[docker](https://www.docker.com/) v1.13.1 (see note)<br>
|
||||||
|
[rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)<br>
|
||||||
|
|
||||||
|
Note: kubernetes doesn't support newer docker versions. Among other things kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
|
||||||
|
|
||||||
|
Note 2: rkt support as docker alternative is limited to control plane (etcd and
|
||||||
|
kubelet). Docker is still used for Kubernetes cluster workloads and network
|
||||||
|
plugins' related OS services. Also note, only one of the supported network
|
||||||
|
plugins can be deployed for a given single cluster.
|
||||||
|
|
||||||
Requirements
|
Requirements
|
||||||
--------------
|
--------------
|
||||||
|
|
||||||
|
* **Ansible v2.3 (or newer) and python-netaddr is installed on the machine
|
||||||
|
that will run Ansible commands**
|
||||||
|
* **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
|
||||||
* The target servers must have **access to the Internet** in order to pull docker images.
|
* The target servers must have **access to the Internet** in order to pull docker images.
|
||||||
|
* The target servers are configured to allow **IPv4 forwarding**.
|
||||||
|
* **Your ssh key must be copied** to all the servers part of your inventory.
|
||||||
* The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
|
* The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
|
||||||
in order to avoid any issue during deployment you should disable your firewall
|
in order to avoid any issue during deployment you should disable your firewall.
|
||||||
* **Copy your ssh keys** to all the servers part of your inventory.
|
|
||||||
* **Ansible v2.x and python-netaddr**
|
|
||||||
|
|
||||||
|
|
||||||
## Network plugins
|
## Network plugins
|
||||||
You can choose between 3 network plugins. (default: `flannel` with vxlan backend)
|
|
||||||
|
You can choose between 4 network plugins. (default: `calico`, except Vagrant uses `flannel`)
|
||||||
|
|
||||||
* [**flannel**](docs/flannel.md): gre/vxlan (layer 2) networking.
|
* [**flannel**](docs/flannel.md): gre/vxlan (layer 2) networking.
|
||||||
|
|
||||||
* [**calico**](docs/calico.md): bgp (layer 3) networking.
|
* [**calico**](docs/calico.md): bgp (layer 3) networking.
|
||||||
|
|
||||||
* **weave**: Weave is a lightweight container overlay network that doesn't require an external K/V database cluster. <br>
|
* [**canal**](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.
|
||||||
(Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html))
|
|
||||||
|
|
||||||
The choice is defined with the variable `kube_network_plugin`
|
* [**weave**](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster. <br>
|
||||||
|
(Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html)).
|
||||||
|
|
||||||
|
The choice is defined with the variable `kube_network_plugin`. There is also an
|
||||||
|
option to leverage built-in cloud provider networking instead.
|
||||||
|
See also [Network checker](docs/netcheck.md).
|
||||||
|
|
||||||
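A rough sketch of how the `kube_network_plugin` choice is typically made; the inventory paths are assumptions based on the sample layout, not something this section mandates:

```shell
# Inspect the current setting in the cluster group vars.
grep kube_network_plugin inventory/group_vars/k8s-cluster.yml
# kube_network_plugin: calico

# Or override it for a single run from the command line.
ansible-playbook -i inventory/inventory.cfg -b -e kube_network_plugin=weave cluster.yml
```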
|
## Community docs and resources
|
||||||
|
- [kubernetes.io/docs/getting-started-guides/kubespray/](https://kubernetes.io/docs/getting-started-guides/kubespray/)
|
||||||
|
- [kubespray, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr
|
||||||
|
- [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty
|
||||||
|
- [Deploy a Kubernetes Cluster with Kubespray (video)](https://www.youtube.com/watch?v=N9q51JgbWu8)
|
||||||
|
|
||||||
|
## Tools and projects on top of Kubespray
|
||||||
|
- [Digital Rebar](https://github.com/digitalrebar/digitalrebar)
|
||||||
|
- [Kubespray-cli](https://github.com/kubespray/kubespray-cli)
|
||||||
|
- [Fuel-ccp-installer](https://github.com/openstack/fuel-ccp-installer)
|
||||||
|
- [Terraform Contrib](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/terraform)
|
||||||
|
|
||||||
## CI Tests
|
## CI Tests
|
||||||
|
|
||||||
[](https://travis-ci.org/kubernetes-incubator/kargo) </br>
|

|
||||||
|
|
||||||
### Google Compute Engine
|
[](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/pipelines) </br>
|
||||||
|
|
||||||
| Calico | Flannel | Weave |
|
CI/end-to-end tests sponsored by Google (GCE), DigitalOcean, [teuto.net](https://teuto.net/) (openstack).
|
||||||
------------- | ------------- | ------------- | ------------- |
|
See the [test matrix](docs/test_cases.md) for details.
|
||||||
Ubuntu Xenial |[](https://ci.kubespray.io/job/kargo-gce-xenial-calico/)|[](https://ci.kubespray.io/job/kargo-gce-xenial-flannel/)|[](https://ci.kubespray.io/job/kargo-gce-xenial-weave)|
|
|
||||||
CentOS 7 |[](https://ci.kubespray.io/job/kargo-gce-centos7-calico/)|[](https://ci.kubespray.io/job/kargo-gce-centos7-flannel/)|[](https://ci.kubespray.io/job/kargo-gce-centos7-weave/)|
|
|
||||||
CoreOS (stable) |[](https://ci.kubespray.io/job/kargo-gce-coreos-calico/)|[](https://ci.kubespray.io/job/kargo-gce-coreos-flannel/)|[](https://ci.kubespray.io/job/kargo-gce-coreos-weave/)|
|
|
||||||
|
|
||||||
CI tests sponsored by Google (GCE), and [teuto.net](https://teuto.net/) for OpenStack.
|
|
||||||
|
|||||||
37
RELEASE.md
37
RELEASE.md
@@ -1,9 +1,40 @@
|
|||||||
# Release Process
|
# Release Process
|
||||||
|
|
||||||
The Kargo Project is released on an as-needed basis. The process is as follows:
|
The Kubespray Project is released on an as-needed basis. The process is as follows:
|
||||||
|
|
||||||
1. An issue is opened proposing a new release, with a changelog since the last release
|
1. An issue is opened proposing a new release, with a changelog since the last release
|
||||||
2. At least on of the [OWNERS](OWNERS) must LGTM this release
|
2. At least one of the [OWNERS](OWNERS) must LGTM this release
|
||||||
3. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION`
|
3. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION`
|
||||||
4. The release issue is closed
|
4. The release issue is closed
|
||||||
5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kargo $VERSION is released`
|
5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
|
||||||
|
|
||||||
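Step 3 above could look roughly like this; the version is an example only, and the `origin` remote name is an assumption (the step abbreviates the push command):

```shell
VERSION=v2.1.1                # example version
git tag -s "$VERSION"         # opens an editor to paste the changelog into the tag message
git push origin "$VERSION"
```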
|
## Major/minor releases, merge freezes and milestones
|
||||||
|
|
||||||
|
* Kubespray does not maintain stable branches for releases. Releases are tags, not
|
||||||
|
branches, and there are no backports. Therefore, there is no need for merge
|
||||||
|
freezes either.
|
||||||
|
|
||||||
|
* Fixes for major releases (vX.x.0) and minor releases (vX.Y.x) are delivered
|
||||||
|
via maintenance releases (vX.Y.Z) and assigned to the corresponding open
|
||||||
|
milestone (vX.Y). That milestone remains open for the major/minor releases'
|
||||||
|
support lifetime, which ends once the milestone is closed. Then only a next major
|
||||||
|
or minor release can be done.
|
||||||
|
|
||||||
|
* Kubespray major and minor releases are bound to the given ``kube_version`` major/minor
|
||||||
|
version numbers and other components' arbitrary versions, like etcd or network plugins.
|
||||||
|
Older or newer versions are not supported and not tested for the given release.
|
||||||
|
|
||||||
|
* There are no unstable releases and no APIs, thus Kubespray doesn't follow
|
||||||
|
[semver](http://semver.org/). Every version describes only a stable release.
|
||||||
|
Breaking changes, if any are introduced by changed defaults or non-contrib ansible roles'
|
||||||
|
playbooks, shall be described in the release notes. Other breaking changes, if any in
|
||||||
|
the contributed addons or bound versions of Kubernetes and other components, are
|
||||||
|
considered out of Kubespray scope and are up to the components' teams to deal with and
|
||||||
|
document.
|
||||||
|
|
||||||
|
* Minor releases can change components' versions, but not the major ``kube_version``.
|
||||||
|
Greater ``kube_version`` requires a new major or minor release. For example, if Kubespray v2.0.0
|
||||||
|
is bound to ``kube_version: 1.4.x``, ``calico_version: 0.22.0``, ``etcd_version: v3.0.6``,
|
||||||
|
then Kubespray v2.1.0 may be bound to only minor changes to ``kube_version``, like v1.5.1
|
||||||
|
and *any* changes to other components, like etcd v4, or calico 1.2.3.
|
||||||
|
And Kubespray v3.x.x shall be bound to ``kube_version: 2.x.x`` respectively.
|
||||||
|
|||||||
54
Vagrantfile
vendored
54
Vagrantfile
vendored
@@ -7,6 +7,16 @@ Vagrant.require_version ">= 1.8.0"
|
|||||||
|
|
||||||
CONFIG = File.join(File.dirname(__FILE__), "vagrant/config.rb")
|
CONFIG = File.join(File.dirname(__FILE__), "vagrant/config.rb")
|
||||||
|
|
||||||
|
COREOS_URL_TEMPLATE = "https://storage.googleapis.com/%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant.json"
|
||||||
|
|
||||||
|
SUPPORTED_OS = {
|
||||||
|
"coreos-stable" => {box: "coreos-stable", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["stable"]},
|
||||||
|
"coreos-alpha" => {box: "coreos-alpha", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["alpha"]},
|
||||||
|
"coreos-beta" => {box: "coreos-beta", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
|
||||||
|
"ubuntu" => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "vagrant"},
|
||||||
|
"centos" => {box: "bento/centos-7.3", bootstrap_os: "centos", user: "vagrant"},
|
||||||
|
}
|
||||||
|
|
||||||
# Defaults for config options defined in CONFIG
|
# Defaults for config options defined in CONFIG
|
||||||
$num_instances = 3
|
$num_instances = 3
|
||||||
$instance_name_prefix = "k8s"
|
$instance_name_prefix = "k8s"
|
||||||
@@ -16,7 +26,12 @@ $vm_cpus = 1
|
|||||||
$shared_folders = {}
|
$shared_folders = {}
|
||||||
$forwarded_ports = {}
|
$forwarded_ports = {}
|
||||||
$subnet = "172.17.8"
|
$subnet = "172.17.8"
|
||||||
$box = "bento/ubuntu-16.04"
|
$os = "ubuntu"
|
||||||
|
# The first three nodes are etcd servers
|
||||||
|
$etcd_instances = $num_instances
|
||||||
|
# The first two nodes are masters
|
||||||
|
$kube_master_instances = $num_instances == 1 ? $num_instances : ($num_instances - 1)
|
||||||
|
$local_release_dir = "/vagrant/temp"
|
||||||
|
|
||||||
host_vars = {}
|
host_vars = {}
|
||||||
|
|
||||||
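A possible way to drive the new `$os`/`SUPPORTED_OS` switch is through the `vagrant/config.rb` file loaded above; the values below are examples only:

```shell
cat > vagrant/config.rb <<'EOF'
$os = "coreos-stable"   # any SUPPORTED_OS key: coreos-stable, coreos-alpha, coreos-beta, ubuntu, centos
$num_instances = 3
$vm_memory = 2048
EOF
vagrant up --provider virtualbox
```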
@@ -24,6 +39,10 @@ if File.exist?(CONFIG)
|
|||||||
require CONFIG
|
require CONFIG
|
||||||
end
|
end
|
||||||
|
|
||||||
|
# All nodes are kube nodes
|
||||||
|
$kube_node_instances = $num_instances
|
||||||
|
|
||||||
|
$box = SUPPORTED_OS[$os][:box]
|
||||||
# if $inventory is not set, try to use example
|
# if $inventory is not set, try to use example
|
||||||
$inventory = File.join(File.dirname(__FILE__), "inventory") if ! $inventory
|
$inventory = File.join(File.dirname(__FILE__), "inventory") if ! $inventory
|
||||||
|
|
||||||
@@ -49,7 +68,10 @@ Vagrant.configure("2") do |config|
|
|||||||
# always use Vagrants insecure key
|
# always use Vagrants insecure key
|
||||||
config.ssh.insert_key = false
|
config.ssh.insert_key = false
|
||||||
config.vm.box = $box
|
config.vm.box = $box
|
||||||
|
if SUPPORTED_OS[$os].has_key? :box_url
|
||||||
|
config.vm.box_url = SUPPORTED_OS[$os][:box_url]
|
||||||
|
end
|
||||||
|
config.ssh.username = SUPPORTED_OS[$os][:user]
|
||||||
# plugin conflict
|
# plugin conflict
|
||||||
if Vagrant.has_plugin?("vagrant-vbguest") then
|
if Vagrant.has_plugin?("vagrant-vbguest") then
|
||||||
config.vbguest.auto_update = false
|
config.vbguest.auto_update = false
|
||||||
@@ -80,6 +102,10 @@ Vagrant.configure("2") do |config|
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
$shared_folders.each do |src, dst|
|
||||||
|
config.vm.synced_folder src, dst
|
||||||
|
end
|
||||||
|
|
||||||
config.vm.provider :virtualbox do |vb|
|
config.vm.provider :virtualbox do |vb|
|
||||||
vb.gui = $vm_gui
|
vb.gui = $vm_gui
|
||||||
vb.memory = $vm_memory
|
vb.memory = $vm_memory
|
||||||
@@ -88,12 +114,15 @@ Vagrant.configure("2") do |config|
|
|||||||
|
|
||||||
ip = "#{$subnet}.#{i+100}"
|
ip = "#{$subnet}.#{i+100}"
|
||||||
host_vars[vm_name] = {
|
host_vars[vm_name] = {
|
||||||
"ip" => ip,
|
"ip": ip,
|
||||||
#"access_ip" => ip,
|
"flannel_interface": ip,
|
||||||
"flannel_interface" => ip,
|
"flannel_backend_type": "host-gw",
|
||||||
"flannel_backend_type" => "host-gw",
|
"local_release_dir" => $local_release_dir,
|
||||||
"local_release_dir" => "/vagrant/temp",
|
"download_run_once": "False",
|
||||||
"download_run_once" => "False"
|
# Override the default 'calico' with flannel.
|
||||||
|
# inventory/group_vars/k8s-cluster.yml
|
||||||
|
"kube_network_plugin": "flannel",
|
||||||
|
"bootstrap_os": SUPPORTED_OS[$os][:bootstrap_os]
|
||||||
}
|
}
|
||||||
config.vm.network :private_network, ip: ip
|
config.vm.network :private_network, ip: ip
|
||||||
|
|
||||||
@@ -112,12 +141,9 @@ Vagrant.configure("2") do |config|
|
|||||||
ansible.host_vars = host_vars
|
ansible.host_vars = host_vars
|
||||||
#ansible.tags = ['download']
|
#ansible.tags = ['download']
|
||||||
ansible.groups = {
|
ansible.groups = {
|
||||||
# The first three nodes should be etcd servers
|
"etcd" => ["#{$instance_name_prefix}-0[1:#{$etcd_instances}]"],
|
||||||
"etcd" => ["#{$instance_name_prefix}-0[1:3]"],
|
"kube-master" => ["#{$instance_name_prefix}-0[1:#{$kube_master_instances}]"],
|
||||||
# The first two nodes should be masters
|
"kube-node" => ["#{$instance_name_prefix}-0[1:#{$kube_node_instances}]"],
|
||||||
"kube-master" => ["#{$instance_name_prefix}-0[1:2]"],
|
|
||||||
# all nodes should be kube nodes
|
|
||||||
"kube-node" => ["#{$instance_name_prefix}-0[1:#{$num_instances}]"],
|
|
||||||
"k8s-cluster:children" => ["kube-master", "kube-node"],
|
"k8s-cluster:children" => ["kube-master", "kube-node"],
|
||||||
}
|
}
|
||||||
end
|
end
|
||||||
|
|||||||
@@ -7,3 +7,6 @@ host_key_checking=False
|
|||||||
gathering = smart
|
gathering = smart
|
||||||
fact_caching = jsonfile
|
fact_caching = jsonfile
|
||||||
fact_caching_connection = /tmp
|
fact_caching_connection = /tmp
|
||||||
|
stdout_callback = skippy
|
||||||
|
library = ./library
|
||||||
|
callback_whitelist = profile_tasks
|
||||||
|
|||||||
68
cluster.yml
68
cluster.yml
@@ -2,65 +2,91 @@
|
|||||||
- hosts: localhost
|
- hosts: localhost
|
||||||
gather_facts: False
|
gather_facts: False
|
||||||
roles:
|
roles:
|
||||||
- bastion-ssh-config
|
- { role: kubespray-defaults}
|
||||||
tags: [localhost, bastion]
|
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
|
||||||
|
|
||||||
- hosts: k8s-cluster:etcd:calico-rr
|
- hosts: k8s-cluster:etcd:calico-rr
|
||||||
any_errors_fatal: true
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
gather_facts: false
|
gather_facts: false
|
||||||
vars:
|
vars:
|
||||||
# Need to disable pipelining for bootstrap-os as some systems have requiretty in sudoers set, which makes pipelining
|
# Need to disable pipelining for bootstrap-os as some systems have requiretty in sudoers set, which makes pipelining
|
||||||
# fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
|
# fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
|
||||||
ansible_ssh_pipelining: false
|
ansible_ssh_pipelining: false
|
||||||
roles:
|
roles:
|
||||||
- bootstrap-os
|
- { role: kubespray-defaults}
|
||||||
tags:
|
- { role: bootstrap-os, tags: bootstrap-os}
|
||||||
- bootstrap-os
|
|
||||||
|
|
||||||
- hosts: k8s-cluster:etcd:calico-rr
|
- hosts: k8s-cluster:etcd:calico-rr
|
||||||
any_errors_fatal: true
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
vars:
|
vars:
|
||||||
ansible_ssh_pipelining: true
|
ansible_ssh_pipelining: true
|
||||||
gather_facts: true
|
gather_facts: true
|
||||||
|
|
||||||
- hosts: k8s-cluster:etcd:calico-rr
|
- hosts: k8s-cluster:etcd:calico-rr
|
||||||
any_errors_fatal: true
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
|
- { role: kubespray-defaults}
|
||||||
|
- { role: kernel-upgrade, tags: kernel-upgrade, when: kernel_upgrade is defined and kernel_upgrade }
|
||||||
- { role: kubernetes/preinstall, tags: preinstall }
|
- { role: kubernetes/preinstall, tags: preinstall }
|
||||||
- { role: docker, tags: docker }
|
- { role: docker, tags: docker }
|
||||||
|
- role: rkt
|
||||||
|
tags: rkt
|
||||||
|
when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"
|
||||||
|
|
||||||
- hosts: etcd:!k8s-cluster
|
- hosts: etcd:k8s-cluster:vault
|
||||||
any_errors_fatal: true
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: etcd, tags: etcd }
|
- { role: kubespray-defaults, when: "cert_management == 'vault'" }
|
||||||
|
- { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" }
|
||||||
|
|
||||||
|
- hosts: etcd
|
||||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
|
roles:
|
||||||
|
- { role: kubespray-defaults}
|
||||||
|
- { role: etcd, tags: etcd, etcd_cluster_setup: true }
|
||||||
|
|
||||||
- hosts: k8s-cluster
|
- hosts: k8s-cluster
|
||||||
any_errors_fatal: true
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: etcd, tags: etcd }
|
- { role: kubespray-defaults}
|
||||||
|
- { role: etcd, tags: etcd, etcd_cluster_setup: false }
|
||||||
|
|
||||||
|
- hosts: etcd:k8s-cluster:vault
|
||||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
|
roles:
|
||||||
|
- { role: kubespray-defaults}
|
||||||
|
- { role: vault, tags: vault, when: "cert_management == 'vault'"}
|
||||||
|
|
||||||
|
- hosts: k8s-cluster
|
||||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
|
roles:
|
||||||
|
- { role: kubespray-defaults}
|
||||||
- { role: kubernetes/node, tags: node }
|
- { role: kubernetes/node, tags: node }
|
||||||
- { role: network_plugin, tags: network }
|
- { role: network_plugin, tags: network }
|
||||||
|
|
||||||
- hosts: kube-master
|
- hosts: kube-master
|
||||||
any_errors_fatal: true
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
|
- { role: kubespray-defaults}
|
||||||
- { role: kubernetes/master, tags: master }
|
- { role: kubernetes/master, tags: master }
|
||||||
- { role: kubernetes-apps/lib, tags: apps }
|
|
||||||
- { role: kubernetes-apps/network_plugin, tags: network }
|
- { role: kubernetes-apps/network_plugin, tags: network }
|
||||||
|
- { role: kubernetes-apps/policy_controller, tags: policy-controller }
|
||||||
|
|
||||||
- hosts: calico-rr
|
- hosts: calico-rr
|
||||||
any_errors_fatal: true
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
|
- { role: kubespray-defaults}
|
||||||
- { role: network_plugin/calico/rr, tags: network }
|
- { role: network_plugin/calico/rr, tags: network }
|
||||||
|
|
||||||
- hosts: k8s-cluster
|
- hosts: k8s-cluster
|
||||||
any_errors_fatal: true
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: dnsmasq, tags: dnsmasq }
|
- { role: kubespray-defaults}
|
||||||
- { role: kubernetes/preinstall, tags: resolvconf }
|
- { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq }
|
||||||
|
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf }
|
||||||
|
|
||||||
- hosts: kube-master[0]
|
- hosts: kube-master[0]
|
||||||
any_errors_fatal: true
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kubernetes-apps/lib, tags: apps }
|
- { role: kubespray-defaults}
|
||||||
- { role: kubernetes-apps, tags: apps }
|
- { role: kubernetes-apps, tags: apps }
|
||||||
|
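Because the reworked plays read `any_errors_fatal` through a default and most roles now carry tags, a run can be narrowed or relaxed from the command line; a hedged sketch (the inventory path is an example):

```shell
ansible-playbook -i inventory/inventory.cfg -b cluster.yml \
  -e any_errors_fatal=false --tags etcd,docker
```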
|||||||
@@ -32,8 +32,7 @@ Conduct may be permanently removed from the project team.
|
|||||||
This code of conduct applies both within project spaces and in public spaces
|
This code of conduct applies both within project spaces and in public spaces
|
||||||
when an individual is representing the project or its community.
|
when an individual is representing the project or its community.
|
||||||
|
|
||||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
|
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting a Kubernetes maintainer, Sarah Novotny <sarahnovotny@google.com>, and/or Dan Kohn <dan@linuxfoundation.org>.
|
||||||
opening an issue or contacting one or more of the project maintainers.
|
|
||||||
|
|
||||||
This Code of Conduct is adapted from the Contributor Covenant
|
This Code of Conduct is adapted from the Contributor Covenant
|
||||||
(http://contributor-covenant.org), version 1.2.0, available at
|
(http://contributor-covenant.org), version 1.2.0, available at
|
||||||
@@ -53,7 +52,7 @@ The Kubernetes team does not condone any statements by speakers contrary to thes
|
|||||||
team reserves the right to deny entrance and/or eject from an event (without refund) any individual found to
|
team reserves the right to deny entrance and/or eject from an event (without refund) any individual found to
|
||||||
be engaging in discriminatory or offensive speech or actions.
|
be engaging in discriminatory or offensive speech or actions.
|
||||||
|
|
||||||
Please bring any concerns to to the immediate attention of Kubernetes event staff
|
Please bring any concerns to the immediate attention of Kubernetes event staff.
|
||||||
|
|
||||||
|
|
||||||
[]()
|
[]()
|
||||||
|
|||||||
27
contrib/aws_iam/kubernetes-master-policy.json
Normal file
27
contrib/aws_iam/kubernetes-master-policy.json
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
{
|
||||||
|
"Version": "2012-10-17",
|
||||||
|
"Statement": [
|
||||||
|
{
|
||||||
|
"Effect": "Allow",
|
||||||
|
"Action": ["ec2:*"],
|
||||||
|
"Resource": ["*"]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Effect": "Allow",
|
||||||
|
"Action": ["elasticloadbalancing:*"],
|
||||||
|
"Resource": ["*"]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Effect": "Allow",
|
||||||
|
"Action": ["route53:*"],
|
||||||
|
"Resource": ["*"]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Effect": "Allow",
|
||||||
|
"Action": "s3:*",
|
||||||
|
"Resource": [
|
||||||
|
"arn:aws:s3:::kubernetes-*"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
10
contrib/aws_iam/kubernetes-master-role.json
Normal file
10
contrib/aws_iam/kubernetes-master-role.json
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
{
|
||||||
|
"Version": "2012-10-17",
|
||||||
|
"Statement": [
|
||||||
|
{
|
||||||
|
"Effect": "Allow",
|
||||||
|
"Principal": { "Service": "ec2.amazonaws.com"},
|
||||||
|
"Action": "sts:AssumeRole"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
45
contrib/aws_iam/kubernetes-minion-policy.json
Normal file
45
contrib/aws_iam/kubernetes-minion-policy.json
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
{
|
||||||
|
"Version": "2012-10-17",
|
||||||
|
"Statement": [
|
||||||
|
{
|
||||||
|
"Effect": "Allow",
|
||||||
|
"Action": "s3:*",
|
||||||
|
"Resource": [
|
||||||
|
"arn:aws:s3:::kubernetes-*"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Effect": "Allow",
|
||||||
|
"Action": "ec2:Describe*",
|
||||||
|
"Resource": "*"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Effect": "Allow",
|
||||||
|
"Action": "ec2:AttachVolume",
|
||||||
|
"Resource": "*"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Effect": "Allow",
|
||||||
|
"Action": "ec2:DetachVolume",
|
||||||
|
"Resource": "*"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Effect": "Allow",
|
||||||
|
"Action": ["route53:*"],
|
||||||
|
"Resource": ["*"]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Effect": "Allow",
|
||||||
|
"Action": [
|
||||||
|
"ecr:GetAuthorizationToken",
|
||||||
|
"ecr:BatchCheckLayerAvailability",
|
||||||
|
"ecr:GetDownloadUrlForLayer",
|
||||||
|
"ecr:GetRepositoryPolicy",
|
||||||
|
"ecr:DescribeRepositories",
|
||||||
|
"ecr:ListImages",
|
||||||
|
"ecr:BatchGetImage"
|
||||||
|
],
|
||||||
|
"Resource": "*"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
10
contrib/aws_iam/kubernetes-minion-role.json
Normal file
10
contrib/aws_iam/kubernetes-minion-role.json
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
{
|
||||||
|
"Version": "2012-10-17",
|
||||||
|
"Statement": [
|
||||||
|
{
|
||||||
|
"Effect": "Allow",
|
||||||
|
"Principal": { "Service": "ec2.amazonaws.com"},
|
||||||
|
"Action": "sts:AssumeRole"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
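One plausible way to load these new IAM documents with the AWS CLI; the role and instance-profile names are assumptions, and the minion files would be loaded the same way:

```shell
aws iam create-role --role-name kubernetes-master \
  --assume-role-policy-document file://contrib/aws_iam/kubernetes-master-role.json
aws iam put-role-policy --role-name kubernetes-master --policy-name kubernetes-master \
  --policy-document file://contrib/aws_iam/kubernetes-master-policy.json
aws iam create-instance-profile --instance-profile-name kubernetes-master
aws iam add-role-to-instance-profile --instance-profile-name kubernetes-master \
  --role-name kubernetes-master
```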
61
contrib/aws_inventory/kubespray-aws-inventory.py
Executable file
61
contrib/aws_inventory/kubespray-aws-inventory.py
Executable file
@@ -0,0 +1,61 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
|
||||||
|
import boto3
|
||||||
|
import os
|
||||||
|
import argparse
|
||||||
|
import json
|
||||||
|
|
||||||
|
class SearchEC2Tags(object):
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.parse_args()
|
||||||
|
if self.args.list:
|
||||||
|
self.search_tags()
|
||||||
|
if self.args.host:
|
||||||
|
data = {}
|
||||||
|
print json.dumps(data, indent=2)
|
||||||
|
|
||||||
|
def parse_args(self):
|
||||||
|
|
||||||
|
##Check if VPC_VISIBILITY is set, if not default to private
|
||||||
|
if "VPC_VISIBILITY" in os.environ:
|
||||||
|
self.vpc_visibility = os.environ['VPC_VISIBILITY']
|
||||||
|
else:
|
||||||
|
self.vpc_visibility = "private"
|
||||||
|
|
||||||
|
##Support --list and --host flags. We largely ignore the host one.
|
||||||
|
parser = argparse.ArgumentParser()
|
||||||
|
parser.add_argument('--list', action='store_true', default=False, help='List instances')
|
||||||
|
parser.add_argument('--host', action='store_true', help='Get all the variables about a specific instance')
|
||||||
|
self.args = parser.parse_args()
|
||||||
|
|
||||||
|
def search_tags(self):
|
||||||
|
hosts = {}
|
||||||
|
hosts['_meta'] = { 'hostvars': {} }
|
||||||
|
|
||||||
|
##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
|
||||||
|
for group in ["kube-master", "kube-node", "etcd"]:
|
||||||
|
hosts[group] = []
|
||||||
|
tag_key = "kubespray-role"
|
||||||
|
tag_value = ["*"+group+"*"]
|
||||||
|
region = os.environ['REGION']
|
||||||
|
|
||||||
|
ec2 = boto3.resource('ec2', region)
|
||||||
|
|
||||||
|
instances = ec2.instances.filter(Filters=[{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}])
|
||||||
|
for instance in instances:
|
||||||
|
if self.vpc_visibility == "public":
|
||||||
|
hosts[group].append(instance.public_dns_name)
|
||||||
|
hosts['_meta']['hostvars'][instance.public_dns_name] = {
|
||||||
|
'ansible_ssh_host': instance.public_ip_address
|
||||||
|
}
|
||||||
|
else:
|
||||||
|
hosts[group].append(instance.private_dns_name)
|
||||||
|
hosts['_meta']['hostvars'][instance.private_dns_name] = {
|
||||||
|
'ansible_ssh_host': instance.private_ip_address
|
||||||
|
}
|
||||||
|
|
||||||
|
hosts['k8s-cluster'] = {'children':['kube-master', 'kube-node']}
|
||||||
|
print json.dumps(hosts, sort_keys=True, indent=2)
|
||||||
|
|
||||||
|
SearchEC2Tags()
|
||||||
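As the code above shows, the script keys off a `kubespray-role` tag and the `REGION`/`VPC_VISIBILITY` environment variables; a hedged usage sketch (the region is an example):

```shell
export REGION=eu-west-1          # example region
export VPC_VISIBILITY=public     # defaults to private when unset
./contrib/aws_inventory/kubespray-aws-inventory.py --list

# The executable script can then serve as a dynamic inventory.
ansible-playbook -i contrib/aws_inventory/kubespray-aws-inventory.py -b cluster.yml
```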
@@ -5,7 +5,7 @@ Provision the base infrastructure for a Kubernetes cluster by using [Azure Resou
|
|||||||
## Status
|
## Status
|
||||||
|
|
||||||
This will provision the base infrastructure (vnet, vms, nics, ips, ...) needed for Kubernetes in Azure into the specified
|
This will provision the base infrastructure (vnet, vms, nics, ips, ...) needed for Kubernetes in Azure into the specified
|
||||||
Resource Group. It will not install Kubernetes itself; this has to be done in a later step by yourself (using kargo, of course).
|
Resource Group. It will not install Kubernetes itself; this has to be done in a later step by yourself (using kubespray, of course).
|
||||||
|
|
||||||
## Requirements
|
## Requirements
|
||||||
|
|
||||||
@@ -47,7 +47,7 @@ $ ./clear-rg.sh <resource_group_name>
|
|||||||
**WARNING** this really deletes everything from your resource group, including everything that was later created by you!
|
**WARNING** this really deletes everything from your resource group, including everything that was later created by you!
|
||||||
|
|
||||||
|
|
||||||
## Generating an inventory for kargo
|
## Generating an inventory for kubespray
|
||||||
|
|
||||||
After you have applied the templates, you can generate an inventory with this call:
|
After you have applied the templates, you can generate an inventory with this call:
|
||||||
|
|
||||||
@@ -55,10 +55,10 @@ After you have applied the templates, you can generate an inventory with this ca
|
|||||||
$ ./generate-inventory.sh <resource_group_name>
|
$ ./generate-inventory.sh <resource_group_name>
|
||||||
```
|
```
|
||||||
|
|
||||||
It will create the file ./inventory which can then be used with kargo, e.g.:
|
It will create the file ./inventory which can then be used with kubespray, e.g.:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
$ cd kargo-root-dir
|
$ cd kubespray-root-dir
|
||||||
$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/group_vars/all.yml" cluster.yml
|
$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/group_vars/all.yml" cluster.yml
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|||||||
@@ -9,11 +9,18 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
ansible-playbook generate-templates.yml
|
if [ $(az &>/dev/null) ] ; then
|
||||||
|
echo "azure cli 2.0 found, using it instead of 1.0"
|
||||||
azure group deployment create -f ./.generated/network.json -g $AZURE_RESOURCE_GROUP
|
./apply-rg_2.sh "$AZURE_RESOURCE_GROUP"
|
||||||
azure group deployment create -f ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
|
elif [ $(azure &>/dev/null) ] ; then
|
||||||
azure group deployment create -f ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
|
ansible-playbook generate-templates.yml
|
||||||
azure group deployment create -f ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
|
|
||||||
azure group deployment create -f ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
|
azure group deployment create -f ./.generated/network.json -g $AZURE_RESOURCE_GROUP
|
||||||
azure group deployment create -f ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
|
azure group deployment create -f ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
azure group deployment create -f ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
azure group deployment create -f ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
azure group deployment create -f ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
azure group deployment create -f ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
else
|
||||||
|
echo "Azure cli not found"
|
||||||
|
fi
|
||||||
|
|||||||
19
contrib/azurerm/apply-rg_2.sh
Executable file
19
contrib/azurerm/apply-rg_2.sh
Executable file
@@ -0,0 +1,19 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
AZURE_RESOURCE_GROUP="$1"
|
||||||
|
|
||||||
|
if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
|
||||||
|
echo "AZURE_RESOURCE_GROUP is missing"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
ansible-playbook generate-templates.yml
|
||||||
|
|
||||||
|
az group deployment create --template-file ./.generated/network.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
az group deployment create --template-file ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
az group deployment create --template-file ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
az group deployment create --template-file ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
az group deployment create --template-file ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
az group deployment create --template-file ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
|
||||||
@@ -9,6 +9,10 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
ansible-playbook generate-templates.yml
|
if [ $(az &>/dev/null) ] ; then
|
||||||
|
echo "azure cli 2.0 found, using it instead of 1.0"
|
||||||
azure group deployment create -g "$AZURE_RESOURCE_GROUP" -f ./.generated/clear-rg.json -m Complete
|
./clear-rg_2.sh "$AZURE_RESOURCE_GROUP"
|
||||||
|
else
|
||||||
|
ansible-playbook generate-templates.yml
|
||||||
|
azure group deployment create -g "$AZURE_RESOURCE_GROUP" -f ./.generated/clear-rg.json -m Complete
|
||||||
|
fi
|
||||||
|
|||||||
14
contrib/azurerm/clear-rg_2.sh
Executable file
14
contrib/azurerm/clear-rg_2.sh
Executable file
@@ -0,0 +1,14 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
AZURE_RESOURCE_GROUP="$1"
|
||||||
|
|
||||||
|
if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
|
||||||
|
echo "AZURE_RESOURCE_GROUP is missing"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
ansible-playbook generate-templates.yml
|
||||||
|
|
||||||
|
az group deployment create -g "$AZURE_RESOURCE_GROUP" --template-file ./.generated/clear-rg.json --mode Complete
|
||||||
@@ -8,5 +8,11 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
|
|||||||
echo "AZURE_RESOURCE_GROUP is missing"
|
echo "AZURE_RESOURCE_GROUP is missing"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
# check if azure cli 2.0 exists else use azure cli 1.0
|
||||||
ansible-playbook generate-inventory.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP"
|
if [ $(az &>/dev/null) ] ; then
|
||||||
|
ansible-playbook generate-inventory_2.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP"
|
||||||
|
elif [ $(azure &>/dev/null) ]; then
|
||||||
|
ansible-playbook generate-inventory.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP"
|
||||||
|
else
|
||||||
|
echo "Azure cli not found"
|
||||||
|
fi
|
||||||
|
|||||||
5
contrib/azurerm/generate-inventory_2.yml
Normal file
5
contrib/azurerm/generate-inventory_2.yml
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
---
|
||||||
|
- hosts: localhost
|
||||||
|
gather_facts: False
|
||||||
|
roles:
|
||||||
|
- generate-inventory_2
|
||||||
@@ -1,5 +1,6 @@
|
|||||||
|
|
||||||
# Due to some Azure limitations, this name must be globally unique
|
# Due to some Azure limitations (e.g. a Storage Account's name must be unique),
|
||||||
|
# this name must be globally unique - it will be used as a prefix for azure components
|
||||||
cluster_name: example
|
cluster_name: example
|
||||||
|
|
||||||
# Set this to true if you do not want to have public IPs for your masters and minions. This will provision a bastion
|
# Set this to true if you do not want to have public IPs for your masters and minions. This will provision a bastion
|
||||||
@@ -17,10 +18,29 @@ minions_os_disk_size: 1000
|
|||||||
|
|
||||||
admin_username: devops
|
admin_username: devops
|
||||||
admin_password: changeme
|
admin_password: changeme
|
||||||
|
|
||||||
|
# MAKE SURE TO CHANGE THIS TO YOUR PUBLIC KEY to access your azure machines
|
||||||
ssh_public_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLRzcxbsFDdEibiyXCSdIFh7bKbXso1NqlKjEyPTptf3aBXHEhVil0lJRjGpTlpfTy7PHvXFbXIOCdv9tOmeH1uxWDDeZawgPFV6VSZ1QneCL+8bxzhjiCn8133wBSPZkN8rbFKd9eEUUBfx8ipCblYblF9FcidylwtMt5TeEmXk8yRVkPiCuEYuDplhc2H0f4PsK3pFb5aDVdaDT3VeIypnOQZZoUxHWqm6ThyHrzLJd3SrZf+RROFWW1uInIDf/SZlXojczUYoffxgT1lERfOJCHJXsqbZWugbxQBwqsVsX59+KPxFFo6nV88h3UQr63wbFx52/MXkX4WrCkAHzN ablock-vwfs@dell-lappy"
|
ssh_public_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLRzcxbsFDdEibiyXCSdIFh7bKbXso1NqlKjEyPTptf3aBXHEhVil0lJRjGpTlpfTy7PHvXFbXIOCdv9tOmeH1uxWDDeZawgPFV6VSZ1QneCL+8bxzhjiCn8133wBSPZkN8rbFKd9eEUUBfx8ipCblYblF9FcidylwtMt5TeEmXk8yRVkPiCuEYuDplhc2H0f4PsK3pFb5aDVdaDT3VeIypnOQZZoUxHWqm6ThyHrzLJd3SrZf+RROFWW1uInIDf/SZlXojczUYoffxgT1lERfOJCHJXsqbZWugbxQBwqsVsX59+KPxFFo6nV88h3UQr63wbFx52/MXkX4WrCkAHzN ablock-vwfs@dell-lappy"
|
||||||
|
|
||||||
|
# Disable ssh password authentication. Change it to false to allow connecting over ssh with a password
|
||||||
|
disablePasswordAuthentication: true
|
||||||
|
|
||||||
# Azure CIDRs
|
# Azure CIDRs
|
||||||
azure_vnet_cidr: 10.0.0.0/8
|
azure_vnet_cidr: 10.0.0.0/8
|
||||||
azure_admin_cidr: 10.241.2.0/24
|
azure_admin_cidr: 10.241.2.0/24
|
||||||
azure_masters_cidr: 10.0.4.0/24
|
azure_masters_cidr: 10.0.4.0/24
|
||||||
azure_minions_cidr: 10.240.0.0/16
|
azure_minions_cidr: 10.240.0.0/16
|
||||||
|
|
||||||
|
# Azure loadbalancer port to use to access your cluster
|
||||||
|
kube_apiserver_port: 6443
|
||||||
|
|
||||||
|
# Azure Networking and storage naming to use with inventory/all.yml
|
||||||
|
#azure_virtual_network_name: KubeVNET
|
||||||
|
#azure_subnet_admin_name: ad-subnet
|
||||||
|
#azure_subnet_masters_name: master-subnet
|
||||||
|
#azure_subnet_minions_name: minion-subnet
|
||||||
|
#azure_route_table_name: routetable
|
||||||
|
#azure_security_group_name: secgroup
|
||||||
|
|
||||||
|
# Storage types available are: "Standard_LRS","Premium_LRS"
|
||||||
|
#azure_storage_account_type: Standard_LRS
|
||||||
|
|||||||
@@ -8,4 +8,4 @@
|
|||||||
vm_list: "{{ vm_list_cmd.stdout }}"
|
vm_list: "{{ vm_list_cmd.stdout }}"
|
||||||
|
|
||||||
- name: Generate inventory
|
- name: Generate inventory
|
||||||
template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
|
template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
|
||||||
|
|||||||
16
contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
Normal file
16
contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
---
|
||||||
|
|
||||||
|
- name: Query Azure VMs IPs
|
||||||
|
command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
|
||||||
|
register: vm_ip_list_cmd
|
||||||
|
|
||||||
|
- name: Query Azure VMs Roles
|
||||||
|
command: az vm list -o json --resource-group {{ azure_resource_group }}
|
||||||
|
register: vm_list_cmd
|
||||||
|
|
||||||
|
- set_fact:
|
||||||
|
vm_ip_list: "{{ vm_ip_list_cmd.stdout }}"
|
||||||
|
vm_roles_list: "{{ vm_list_cmd.stdout }}"
|
||||||
|
|
||||||
|
- name: Generate inventory
|
||||||
|
template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
|
||||||
@@ -0,0 +1,34 @@
|
|||||||
|
|
||||||
|
{% for vm in vm_ip_list %}
|
||||||
|
{% if not use_bastion or vm.virtualMachine.name == 'bastion' %}
|
||||||
|
{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.publicIpAddresses[0].ipAddress }} ip={{ vm.virtualMachine.network.privateIpAddresses[0] }}
|
||||||
|
{% else %}
|
||||||
|
{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.privateIpAddresses[0] }}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
[kube-master]
|
||||||
|
{% for vm in vm_roles_list %}
|
||||||
|
{% if 'kube-master' in vm.tags.roles %}
|
||||||
|
{{ vm.name }}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
[etcd]
|
||||||
|
{% for vm in vm_roles_list %}
|
||||||
|
{% if 'etcd' in vm.tags.roles %}
|
||||||
|
{{ vm.name }}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
[kube-node]
|
||||||
|
{% for vm in vm_roles_list %}
|
||||||
|
{% if 'kube-node' in vm.tags.roles %}
|
||||||
|
{{ vm.name }}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
[k8s-cluster:children]
|
||||||
|
kube-node
|
||||||
|
kube-master
|
||||||
|
|
||||||
@@ -1,15 +1,15 @@
|
|||||||
apiVersion: "2015-06-15"
|
apiVersion: "2015-06-15"
|
||||||
|
|
||||||
virtualNetworkName: "KubVNET"
|
virtualNetworkName: "{{ azure_virtual_network_name | default('KubeVNET') }}"
|
||||||
|
|
||||||
subnetAdminName: "ad-subnet"
|
subnetAdminName: "{{ azure_subnet_admin_name | default('ad-subnet') }}"
|
||||||
subnetMastersName: "master-subnet"
|
subnetMastersName: "{{ azure_subnet_masters_name | default('master-subnet') }}"
|
||||||
subnetMinionsName: "minion-subnet"
|
subnetMinionsName: "{{ azure_subnet_minions_name | default('minion-subnet') }}"
|
||||||
|
|
||||||
routeTableName: "routetable"
|
routeTableName: "{{ azure_route_table_name | default('routetable') }}"
|
||||||
securityGroupName: "secgroup"
|
securityGroupName: "{{ azure_security_group_name | default('secgroup') }}"
|
||||||
|
|
||||||
nameSuffix: "{{cluster_name}}"
|
nameSuffix: "{{ cluster_name }}"
|
||||||
|
|
||||||
availabilitySetMasters: "master-avs"
|
availabilitySetMasters: "master-avs"
|
||||||
availabilitySetMinions: "minion-avs"
|
availabilitySetMinions: "minion-avs"
|
||||||
@@ -33,5 +33,5 @@ imageReference:
|
|||||||
imageReferenceJson: "{{imageReference|to_json}}"
|
imageReferenceJson: "{{imageReference|to_json}}"
|
||||||
|
|
||||||
storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
|
storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
|
||||||
storageAccountType: "Standard_LRS"
|
storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}"
|
||||||
|
|
||||||
|
|||||||
@@ -62,8 +62,8 @@
|
|||||||
"id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]"
|
"id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]"
|
||||||
},
|
},
|
||||||
"protocol": "tcp",
|
"protocol": "tcp",
|
||||||
"frontendPort": 443,
|
"frontendPort": "{{kube_apiserver_port}}",
|
||||||
"backendPort": 443,
|
"backendPort": "{{kube_apiserver_port}}",
|
||||||
"enableFloatingIP": false,
|
"enableFloatingIP": false,
|
||||||
"idleTimeoutInMinutes": 5,
|
"idleTimeoutInMinutes": 5,
|
||||||
"probe": {
|
"probe": {
|
||||||
@@ -77,7 +77,7 @@
|
|||||||
"name": "kube-api",
|
"name": "kube-api",
|
||||||
"properties": {
|
"properties": {
|
||||||
"protocol": "tcp",
|
"protocol": "tcp",
|
||||||
"port": 443,
|
"port": "{{kube_apiserver_port}}",
|
||||||
"intervalInSeconds": 5,
|
"intervalInSeconds": 5,
|
||||||
"numberOfProbes": 2
|
"numberOfProbes": 2
|
||||||
}
|
}
|
||||||
@@ -193,4 +193,4 @@
|
|||||||
} {% if not loop.last %},{% endif %}
|
} {% if not loop.last %},{% endif %}
|
||||||
{% endfor %}
|
{% endfor %}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -92,7 +92,7 @@
|
|||||||
"description": "Allow secure kube-api",
|
"description": "Allow secure kube-api",
|
||||||
"protocol": "Tcp",
|
"protocol": "Tcp",
|
||||||
"sourcePortRange": "*",
|
"sourcePortRange": "*",
|
||||||
"destinationPortRange": "443",
|
"destinationPortRange": "{{kube_apiserver_port}}",
|
||||||
"sourceAddressPrefix": "Internet",
|
"sourceAddressPrefix": "Internet",
|
||||||
"destinationAddressPrefix": "*",
|
"destinationAddressPrefix": "*",
|
||||||
"access": "Allow",
|
"access": "Allow",
|
||||||
@@ -106,4 +106,4 @@
|
|||||||
"dependsOn": []
|
"dependsOn": []
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -40,7 +40,8 @@ import os
|
|||||||
import re
|
import re
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
ROLES = ['kube-master', 'all', 'k8s-cluster:children', 'kube-node', 'etcd']
|
ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster:children',
|
||||||
|
'calico-rr', 'vault']
|
||||||
PROTECTED_NAMES = ROLES
|
PROTECTED_NAMES = ROLES
|
||||||
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'load']
|
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'load']
|
||||||
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
|
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
|
||||||
@@ -51,12 +52,20 @@ def get_var_as_bool(name, default):
|
|||||||
value = os.environ.get(name, '')
|
value = os.environ.get(name, '')
|
||||||
return _boolean_states.get(value.lower(), default)
|
return _boolean_states.get(value.lower(), default)
|
||||||
|
|
||||||
|
# Configurable as shell vars start
|
||||||
|
|
||||||
CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory.cfg")
|
CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory.cfg")
|
||||||
|
# Reconfigures cluster distribution at scale
|
||||||
|
SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
|
||||||
|
MASSIVE_SCALE_THRESHOLD = int(os.environ.get("MASSIVE_SCALE_THRESHOLD", 200))
|
||||||
|
|
||||||
DEBUG = get_var_as_bool("DEBUG", True)
|
DEBUG = get_var_as_bool("DEBUG", True)
|
||||||
HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")
|
HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")
|
||||||
|
|
||||||
|
# Configurable as shell vars end
|
||||||
|
|
||||||
class KargoInventory(object):
|
|
||||||
|
class KubesprayInventory(object):
|
||||||
|
|
||||||
def __init__(self, changed_hosts=None, config_file=None):
|
def __init__(self, changed_hosts=None, config_file=None):
|
||||||
self.config = configparser.ConfigParser(allow_no_value=True,
|
self.config = configparser.ConfigParser(allow_no_value=True,
|
||||||
@@ -74,11 +83,16 @@ class KargoInventory(object):
|
|||||||
if changed_hosts:
|
if changed_hosts:
|
||||||
self.hosts = self.build_hostnames(changed_hosts)
|
self.hosts = self.build_hostnames(changed_hosts)
|
||||||
self.purge_invalid_hosts(self.hosts.keys(), PROTECTED_NAMES)
|
self.purge_invalid_hosts(self.hosts.keys(), PROTECTED_NAMES)
|
||||||
self.set_kube_master(list(self.hosts.keys())[:2])
|
|
||||||
self.set_all(self.hosts)
|
self.set_all(self.hosts)
|
||||||
self.set_k8s_cluster()
|
self.set_k8s_cluster()
|
||||||
self.set_kube_node(self.hosts.keys())
|
|
||||||
self.set_etcd(list(self.hosts.keys())[:3])
|
self.set_etcd(list(self.hosts.keys())[:3])
|
||||||
|
if len(self.hosts) >= SCALE_THRESHOLD:
|
||||||
|
self.set_kube_master(list(self.hosts.keys())[3:5])
|
||||||
|
else:
|
||||||
|
self.set_kube_master(list(self.hosts.keys())[:2])
|
||||||
|
self.set_kube_node(self.hosts.keys())
|
||||||
|
if len(self.hosts) >= SCALE_THRESHOLD:
|
||||||
|
self.set_calico_rr(list(self.hosts.keys())[:3])
|
||||||
else: # Show help if no options
|
else: # Show help if no options
|
||||||
self.show_help()
|
self.show_help()
|
||||||
sys.exit(0)
|
sys.exit(0)
|
||||||
@@ -205,13 +219,38 @@ class KargoInventory(object):
|
|||||||
self.add_host_to_group('k8s-cluster:children', 'kube-node')
|
self.add_host_to_group('k8s-cluster:children', 'kube-node')
|
||||||
self.add_host_to_group('k8s-cluster:children', 'kube-master')
|
self.add_host_to_group('k8s-cluster:children', 'kube-master')
|
||||||
|
|
||||||
|
def set_calico_rr(self, hosts):
|
||||||
|
for host in hosts:
|
||||||
|
if host in self.config.items('kube-master'):
|
||||||
|
self.debug("Not adding {0} to calico-rr group because it "
|
||||||
|
"conflicts with kube-master group".format(host))
|
||||||
|
continue
|
||||||
|
if host in self.config.items('kube-node'):
|
||||||
|
self.debug("Not adding {0} to calico-rr group because it "
|
||||||
|
"conflicts with kube-node group".format(host))
|
||||||
|
continue
|
||||||
|
self.add_host_to_group('calico-rr', host)
|
||||||
|
|
||||||
def set_kube_node(self, hosts):
|
def set_kube_node(self, hosts):
|
||||||
for host in hosts:
|
for host in hosts:
|
||||||
|
if len(self.config['all']) >= SCALE_THRESHOLD:
|
||||||
|
if self.config.has_option('etcd', host):
|
||||||
|
self.debug("Not adding {0} to kube-node group because of "
|
||||||
|
"scale deployment and host is in etcd "
|
||||||
|
"group.".format(host))
|
||||||
|
continue
|
||||||
|
if len(self.config['all']) >= MASSIVE_SCALE_THRESHOLD:
|
||||||
|
if self.config.has_option('kube-master', host):
|
||||||
|
self.debug("Not adding {0} to kube-node group because of "
|
||||||
|
"scale deployment and host is in kube-master "
|
||||||
|
"group.".format(host))
|
||||||
|
continue
|
||||||
self.add_host_to_group('kube-node', host)
|
self.add_host_to_group('kube-node', host)
|
||||||
|
|
||||||
def set_etcd(self, hosts):
|
def set_etcd(self, hosts):
|
||||||
for host in hosts:
|
for host in hosts:
|
||||||
self.add_host_to_group('etcd', host)
|
self.add_host_to_group('etcd', host)
|
||||||
|
self.add_host_to_group('vault', host)
|
||||||
|
|
||||||
def load_file(self, files=None):
|
def load_file(self, files=None):
|
||||||
'''Directly loads JSON, or YAML file to inventory.'''
|
'''Directly loads JSON, or YAML file to inventory.'''
|
||||||
@@ -275,7 +314,15 @@ print_ips - Write a space-delimited list of IPs from "all" group
|
|||||||
Advanced usage:
|
Advanced usage:
|
||||||
Add another host after initial creation: inventory.py 10.10.1.5
|
Add another host after initial creation: inventory.py 10.10.1.5
|
||||||
Delete a host: inventory.py -10.10.1.3
|
Delete a host: inventory.py -10.10.1.3
|
||||||
Delete a host by id: inventory.py -node1'''
|
Delete a host by id: inventory.py -node1
|
||||||
|
|
||||||
|
Configurable env vars:
|
||||||
|
DEBUG Enable debug printing. Default: True
|
||||||
|
CONFIG_FILE File to write config to. Default: ./inventory.cfg
|
||||||
|
HOST_PREFIX Host prefix for generated hosts. Default: node
|
||||||
|
SCALE_THRESHOLD Separate ETCD role if # of nodes >= 50
|
||||||
|
MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
|
||||||
|
'''
|
||||||
print(help_text)
|
print(help_text)
|
||||||
|
|
||||||
def print_config(self):
|
def print_config(self):
|
||||||
@@ -291,7 +338,7 @@ Delete a host by id: inventory.py -node1'''
|
|||||||
def main(argv=None):
|
def main(argv=None):
|
||||||
if not argv:
|
if not argv:
|
||||||
argv = sys.argv[1:]
|
argv = sys.argv[1:]
|
||||||
KargoInventory(argv, CONFIG_FILE)
|
KubesprayInventory(argv, CONFIG_FILE)
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
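The new environment knobs from the help text can be combined like this; the builder path is an assumption (the compare view only shows the file name), and the IPs are placeholders:

```shell
CONFIG_FILE=./inventory/inventory.cfg HOST_PREFIX=node SCALE_THRESHOLD=50 \
  python contrib/inventory_builder/inventory.py 10.10.1.3 10.10.1.4 10.10.1.5

# Review what was generated before running any playbooks.
CONFIG_FILE=./inventory/inventory.cfg python contrib/inventory_builder/inventory.py print_cfg
```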
|||||||
@@ -1,48 +0,0 @@
|
|||||||
---
|
|
||||||
- src: https://gitlab.com/kubespray-ansibl8s/k8s-common.git
|
|
||||||
path: roles/apps
|
|
||||||
scm: git
|
|
||||||
|
|
||||||
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-dashboard.git
|
|
||||||
# path: roles/apps
|
|
||||||
# scm: git
|
|
||||||
#
|
|
||||||
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-kubedns.git
|
|
||||||
# path: roles/apps
|
|
||||||
# scm: git
|
|
||||||
#
|
|
||||||
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-elasticsearch.git
|
|
||||||
# path: roles/apps
|
|
||||||
# scm: git
|
|
||||||
#
|
|
||||||
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-redis.git
|
|
||||||
# path: roles/apps
|
|
||||||
# scm: git
|
|
||||||
#
|
|
||||||
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-memcached.git
|
|
||||||
# path: roles/apps
|
|
||||||
# scm: git
|
|
||||||
#
|
|
||||||
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-postgres.git
|
|
||||||
# path: roles/apps
|
|
||||||
# scm: git
|
|
||||||
#
|
|
||||||
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-pgbouncer.git
|
|
||||||
# path: roles/apps
|
|
||||||
# scm: git
|
|
||||||
#
|
|
||||||
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-heapster.git
|
|
||||||
# path: roles/apps
|
|
||||||
# scm: git
|
|
||||||
#
|
|
||||||
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-influxdb.git
|
|
||||||
# path: roles/apps
|
|
||||||
# scm: git
|
|
||||||
#
|
|
||||||
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-kubedash.git
|
|
||||||
# path: roles/apps
|
|
||||||
# scm: git
|
|
||||||
#
|
|
||||||
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-kube-logstash.git
|
|
||||||
# path: roles/apps
|
|
||||||
# scm: git
|
|
||||||
@@ -1,3 +1,3 @@
|
|||||||
[metadata]
|
[metadata]
|
||||||
name = kargo-inventory-builder
|
name = kubespray-inventory-builder
|
||||||
version = 0.1
|
version = 0.1
|
||||||
|
|||||||
@@ -31,7 +31,7 @@ class TestInventory(unittest.TestCase):
|
|||||||
sys_mock.exit = mock.Mock()
|
sys_mock.exit = mock.Mock()
|
||||||
super(TestInventory, self).setUp()
|
super(TestInventory, self).setUp()
|
||||||
self.data = ['10.90.3.2', '10.90.3.3', '10.90.3.4']
|
self.data = ['10.90.3.2', '10.90.3.3', '10.90.3.4']
|
||||||
self.inv = inventory.KargoInventory()
|
self.inv = inventory.KubesprayInventory()
|
||||||
|
|
||||||
def test_get_ip_from_opts(self):
|
def test_get_ip_from_opts(self):
|
||||||
optstring = "ansible_host=10.90.3.2 ip=10.90.3.2"
|
optstring = "ansible_host=10.90.3.2 ip=10.90.3.2"
|
||||||
@@ -210,3 +210,31 @@ class TestInventory(unittest.TestCase):
|
|||||||
|
|
||||||
self.inv.set_etcd([host])
|
self.inv.set_etcd([host])
|
||||||
self.assertTrue(host in self.inv.config[group])
|
self.assertTrue(host in self.inv.config[group])
|
||||||
|
|
||||||
|
def test_scale_scenario_one(self):
|
||||||
|
num_nodes = 50
|
||||||
|
hosts = OrderedDict()
|
||||||
|
|
||||||
|
for hostid in range(1, num_nodes+1):
|
||||||
|
hosts["node" + str(hostid)] = ""
|
||||||
|
|
||||||
|
self.inv.set_all(hosts)
|
||||||
|
self.inv.set_etcd(hosts.keys()[0:3])
|
||||||
|
self.inv.set_kube_master(hosts.keys()[0:2])
|
||||||
|
self.inv.set_kube_node(hosts.keys())
|
||||||
|
for h in range(3):
|
||||||
|
self.assertFalse(hosts.keys()[h] in self.inv.config['kube-node'])
|
||||||
|
|
||||||
|
def test_scale_scenario_two(self):
|
||||||
|
num_nodes = 500
|
||||||
|
hosts = OrderedDict()
|
||||||
|
|
||||||
|
for hostid in range(1, num_nodes+1):
|
||||||
|
hosts["node" + str(hostid)] = ""
|
||||||
|
|
||||||
|
self.inv.set_all(hosts)
|
||||||
|
self.inv.set_etcd(hosts.keys()[0:3])
|
||||||
|
self.inv.set_kube_master(hosts.keys()[3:5])
|
||||||
|
self.inv.set_kube_node(hosts.keys())
|
||||||
|
for h in range(5):
|
||||||
|
self.assertFalse(hosts.keys()[h] in self.inv.config['kube-node'])
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ deps =
|
|||||||
-r{toxinidir}/test-requirements.txt
|
-r{toxinidir}/test-requirements.txt
|
||||||
setenv = VIRTUAL_ENV={envdir}
|
setenv = VIRTUAL_ENV={envdir}
|
||||||
passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
|
passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
|
||||||
commands = py.test -vv #{posargs:./tests}
|
commands = pytest -vv #{posargs:./tests}
|
||||||
|
|
||||||
[testenv:pep8]
|
[testenv:pep8]
|
||||||
usedevelop = False
|
usedevelop = False
|
||||||
|
|||||||
11
contrib/kvm-setup/README.md
Normal file
11
contrib/kvm-setup/README.md
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
# Kubespray on KVM Virtual Machines hypervisor preparation
|
||||||
|
|
||||||
|
A simple playbook to ensure your system has the right settings to enable Kubespray
|
||||||
|
deployment on VMs.
|
||||||
|
|
||||||
|
This playbook does not create Virtual Machines, nor does it run Kubespray itself.
|
||||||
|
|
||||||
|
### User creation
|
||||||
|
|
||||||
|
If you want to create a user for running Kubespray deployment, you should specify
|
||||||
|
both `k8s_deployment_user` and `k8s_deployment_user_pkey_path`.
|
||||||
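A hedged example of running the new playbook on the hypervisor with the deployment-user variables mentioned above; the user name and key path are example values:

```shell
ansible-playbook -i "localhost," -c local -b \
  -e k8s_deployment_user=kubespray \
  -e k8s_deployment_user_pkey_path=/tmp/ssh_rsa \
  contrib/kvm-setup/kvm-setup.yml
```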
3
contrib/kvm-setup/group_vars/all
Normal file
3
contrib/kvm-setup/group_vars/all
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
#k8s_deployment_user: kubespray
|
||||||
|
#k8s_deployment_user_pkey_path: /tmp/ssh_rsa
|
||||||
|
|
||||||
8
contrib/kvm-setup/kvm-setup.yml
Normal file
8
contrib/kvm-setup/kvm-setup.yml
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
---
|
||||||
|
- hosts: localhost
|
||||||
|
gather_facts: False
|
||||||
|
become: yes
|
||||||
|
vars:
|
||||||
|
- bootstrap_os: none
|
||||||
|
roles:
|
||||||
|
- kvm-setup
|
||||||
46
contrib/kvm-setup/roles/kvm-setup/tasks/main.yml
Normal file
46
contrib/kvm-setup/roles/kvm-setup/tasks/main.yml
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
---
|
||||||
|
|
||||||
|
- name: Upgrade all packages to the latest version (yum)
|
||||||
|
yum:
|
||||||
|
name: '*'
|
||||||
|
state: latest
|
||||||
|
when: ansible_os_family == "RedHat"
|
||||||
|
|
||||||
|
- name: Install required packages
|
||||||
|
yum:
|
||||||
|
name: "{{ item }}"
|
||||||
|
state: latest
|
||||||
|
with_items:
|
||||||
|
- bind-utils
|
||||||
|
- ntp
|
||||||
|
when: ansible_os_family == "RedHat"
|
||||||
|
|
||||||
|
- name: Install required packages
|
||||||
|
apt:
|
||||||
|
upgrade: yes
|
||||||
|
update_cache: yes
|
||||||
|
cache_valid_time: 3600
|
||||||
|
name: "{{ item }}"
|
||||||
|
state: latest
|
||||||
|
install_recommends: no
|
||||||
|
with_items:
|
||||||
|
- dnsutils
|
||||||
|
- ntp
|
||||||
|
when: ansible_os_family == "Debian"
|
||||||
|
|
||||||
|
- name: Upgrade all packages to the latest version (apt)
|
||||||
|
shell: apt-get -o \
|
||||||
|
Dpkg::Options::=--force-confdef -o \
|
||||||
|
Dpkg::Options::=--force-confold -q -y \
|
||||||
|
dist-upgrade
|
||||||
|
environment:
|
||||||
|
DEBIAN_FRONTEND: noninteractive
|
||||||
|
when: ansible_os_family == "Debian"
|
||||||
|
|
||||||
|
|
||||||
|
# Create deployment user if required
|
||||||
|
- include: user.yml
|
||||||
|
when: k8s_deployment_user is defined
|
||||||
|
|
||||||
|
# Set proper sysctl values
|
||||||
|
- include: sysctl.yml
|
||||||
46
contrib/kvm-setup/roles/kvm-setup/tasks/sysctl.yml
Normal file
46
contrib/kvm-setup/roles/kvm-setup/tasks/sysctl.yml
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
---
|
||||||
|
- name: Load br_netfilter module
|
||||||
|
modprobe:
|
||||||
|
name: br_netfilter
|
||||||
|
state: present
|
||||||
|
register: br_netfilter
|
||||||
|
|
||||||
|
- name: Add br_netfilter into /etc/modules
|
||||||
|
lineinfile:
|
||||||
|
dest: /etc/modules
|
||||||
|
state: present
|
||||||
|
line: 'br_netfilter'
|
||||||
|
when: br_netfilter is defined and ansible_os_family == 'Debian'
|
||||||
|
|
||||||
|
- name: Add br_netfilter into /etc/modules-load.d/kubespray.conf
|
||||||
|
copy:
|
||||||
|
dest: /etc/modules-load.d/kubespray.conf
|
||||||
|
content: |-
|
||||||
|
### This file is managed by Ansible
|
||||||
|
br-netfilter
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: 0644
|
||||||
|
when: br_netfilter is defined
|
||||||
|
|
||||||
|
|
||||||
|
- name: Enable net.ipv4.ip_forward in sysctl
|
||||||
|
sysctl:
|
||||||
|
name: net.ipv4.ip_forward
|
||||||
|
value: 1
|
||||||
|
sysctl_file: /etc/sysctl.d/ipv4-ip_forward.conf
|
||||||
|
state: present
|
||||||
|
reload: yes
|
||||||
|
|
||||||
|
- name: Set bridge-nf-call-{arptables,iptables} to 0
|
||||||
|
sysctl:
|
||||||
|
name: "{{ item }}"
|
||||||
|
state: present
|
||||||
|
value: 0
|
||||||
|
sysctl_file: /etc/sysctl.d/bridge-nf-call.conf
|
||||||
|
reload: yes
|
||||||
|
with_items:
|
||||||
|
- net.bridge.bridge-nf-call-arptables
|
||||||
|
- net.bridge.bridge-nf-call-ip6tables
|
||||||
|
- net.bridge.bridge-nf-call-iptables
|
||||||
|
when: br_netfilter is defined
|
||||||
46 contrib/kvm-setup/roles/kvm-setup/tasks/user.yml Normal file
@@ -0,0 +1,46 @@
---
- name: Create user {{ k8s_deployment_user }}
  user:
    name: "{{ k8s_deployment_user }}"
    groups: adm
    shell: /bin/bash

- name: Ensure that .ssh exists
  file:
    path: "/home/{{ k8s_deployment_user }}/.ssh"
    state: directory
    owner: "{{ k8s_deployment_user }}"
    group: "{{ k8s_deployment_user }}"

- name: Configure sudo for deployment user
  copy:
    content: |
      %{{ k8s_deployment_user }} ALL=(ALL) NOPASSWD: ALL
    dest: "/etc/sudoers.d/55-k8s-deployment"
    owner: root
    group: root
    mode: 0644

- name: Write private SSH key
  copy:
    src: "{{ k8s_deployment_user_pkey_path }}"
    dest: "/home/{{ k8s_deployment_user }}/.ssh/id_rsa"
    mode: 0400
    owner: "{{ k8s_deployment_user }}"
    group: "{{ k8s_deployment_user }}"
  when: k8s_deployment_user_pkey_path is defined

- name: Write public SSH key
  shell: "ssh-keygen -y -f /home/{{ k8s_deployment_user }}/.ssh/id_rsa \
          > /home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
  args:
    creates: "/home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
  when: k8s_deployment_user_pkey_path is defined

- name: Fix ssh-pub-key permissions
  file:
    path: "/home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
    mode: 0600
    owner: "{{ k8s_deployment_user }}"
    group: "{{ k8s_deployment_user }}"
  when: k8s_deployment_user_pkey_path is defined
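These user tasks only run when `k8s_deployment_user` is defined (see `main.yml` above). A minimal sketch of supplying the variables as extra vars; the playbook name here is hypothetical, substitute whichever play applies the `kvm-setup` role:

```
# k8s_deployment_user_pkey_path must point to an existing private key on the Ansible controller
ansible-playbook -b -i inventory kvm-setup.yml \
  -e k8s_deployment_user=kubespray \
  -e k8s_deployment_user_pkey_path=/path/to/id_rsa
```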
@@ -1,4 +1,4 @@
-# Deploying a Kargo Kubernetes Cluster with GlusterFS
+# Deploying a Kubespray Kubernetes Cluster with GlusterFS
 
 You can either deploy using Ansible on its own by supplying your own inventory file, or by using Terraform to create the VMs and then providing a dynamic inventory to Ansible. The following two sections are self-contained; you don't need to go through one to use the other. So, if you want to provision with Terraform, you can skip the **Using an Ansible inventory** section, and if you want to provision with a pre-built Ansible inventory, you can skip the **Using Terraform and Ansible** section.
 
@@ -6,7 +6,7 @@ You can either deploy using Ansible on its own by supplying your own inventory f
 
 In the same directory as this ReadMe file you should find a file named `inventory.example` which contains an example setup. Please note that, in addition to the Kubernetes nodes/masters, we define a set of machines for GlusterFS and add them to the group `[gfs-cluster]`, which in turn is added to the larger `[network-storage]` group as a child group.
 
-Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/k8s_gfs_inventory`. Make sure that the settings on `inventory/group_vars/all.yml` make sense with your deployment. Then execute change to the kargo root folder, and execute (supposing that the machines are all using ubuntu):
+Change that file to reflect your local setup (adding more machines or removing them and setting the adequate IP numbers), and save it to `inventory/k8s_gfs_inventory`. Make sure that the settings in `inventory/group_vars/all.yml` make sense for your deployment. Then change to the kubespray root folder and execute (supposing that the machines are all using Ubuntu):
 
 ```
 ansible-playbook -b --become-user=root -i inventory/k8s_gfs_inventory --user=ubuntu ./cluster.yml
@@ -28,7 +28,7 @@ k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_us
 
 ## Using Terraform and Ansible
 
-First step is to fill in a `my-kargo-gluster-cluster.tfvars` file with the specification desired for your cluster. An example with all required variables would look like:
+The first step is to fill in a `my-kubespray-gluster-cluster.tfvars` file with the specification desired for your cluster. An example with all required variables would look like:
 
 ```
 cluster_name = "cluster1"
@@ -65,15 +65,15 @@ $ echo Setting up Terraform creds && \
 export TF_VAR_auth_url=${OS_AUTH_URL}
 ```
 
-Then, standing on the kargo directory (root base of the Git checkout), issue the following terraform command to create the VMs for the cluster:
+Then, from the kubespray directory (root of the Git checkout), issue the following terraform command to create the VMs for the cluster:
 
 ```
-terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kargo-gluster-cluster.tfvars contrib/terraform/openstack
+terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
 ```
 
 This will create both your Kubernetes and Gluster VMs. Make sure that the ansible file `contrib/terraform/openstack/group_vars/all.yml` includes any ansible variable that you want to set up (like, for instance, the type of machine for bootstrapping).
 
-Then, provision your Kubernetes (Kargo) cluster with the following ansible call:
+Then, provision your Kubernetes (kubespray) cluster with the following ansible call:
 
 ```
 ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml
@@ -88,5 +88,5 @@ ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./co
 If you need to destroy the cluster, you can run:
 
 ```
-terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kargo-gluster-cluster.tfvars contrib/terraform/openstack
+terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
 ```
@@ -1 +0,0 @@
-../../../../../roles/kubernetes-apps/lib
2 contrib/terraform/aws/.gitignore vendored
@@ -1,2 +1,2 @@
 *.tfstate*
-inventory
+.terraform
@@ -1,261 +0,0 @@
variable "deploymentName" {
  type = "string"
  description = "The desired name of your deployment."
}

variable "numControllers"{
  type = "string"
  description = "Desired # of controllers."
}

variable "numEtcd" {
  type = "string"
  description = "Desired # of etcd nodes. Should be an odd number."
}

variable "numNodes" {
  type = "string"
  description = "Desired # of nodes."
}

variable "volSizeController" {
  type = "string"
  description = "Volume size for the controllers (GB)."
}

variable "volSizeEtcd" {
  type = "string"
  description = "Volume size for etcd (GB)."
}

variable "volSizeNodes" {
  type = "string"
  description = "Volume size for nodes (GB)."
}

variable "subnet" {
  type = "string"
  description = "The subnet in which to put your cluster."
}

variable "securityGroups" {
  type = "string"
  description = "The sec. groups in which to put your cluster."
}

variable "ami"{
  type = "string"
  description = "AMI to use for all VMs in cluster."
}

variable "SSHKey" {
  type = "string"
  description = "SSH key to use for VMs."
}

variable "master_instance_type" {
  type = "string"
  description = "Size of VM to use for masters."
}

variable "etcd_instance_type" {
  type = "string"
  description = "Size of VM to use for etcd."
}

variable "node_instance_type" {
  type = "string"
  description = "Size of VM to use for nodes."
}

variable "terminate_protect" {
  type = "string"
  default = "false"
}

variable "awsRegion" {
  type = "string"
}

provider "aws" {
  region = "${var.awsRegion}"
}

variable "iam_prefix" {
  type = "string"
  description = "Prefix name for IAM profiles"
}

resource "aws_iam_instance_profile" "kubernetes_master_profile" {
  name = "${var.iam_prefix}_kubernetes_master_profile"
  roles = ["${aws_iam_role.kubernetes_master_role.name}"]
}

resource "aws_iam_role" "kubernetes_master_role" {
  name = "${var.iam_prefix}_kubernetes_master_role"
  assume_role_policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": { "Service": "ec2.amazonaws.com"},
      "Action": "sts:AssumeRole"
    }
  ]
}
EOF
}

resource "aws_iam_role_policy" "kubernetes_master_policy" {
  name = "${var.iam_prefix}_kubernetes_master_policy"
  role = "${aws_iam_role.kubernetes_master_role.id}"
  policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": ["ec2:*"],
      "Resource": ["*"]
    },
    {
      "Effect": "Allow",
      "Action": ["elasticloadbalancing:*"],
      "Resource": ["*"]
    },
    {
      "Effect": "Allow",
      "Action": "s3:*",
      "Resource": "*"
    }
  ]
}
EOF
}

resource "aws_iam_instance_profile" "kubernetes_node_profile" {
  name = "${var.iam_prefix}_kubernetes_node_profile"
  roles = ["${aws_iam_role.kubernetes_node_role.name}"]
}

resource "aws_iam_role" "kubernetes_node_role" {
  name = "${var.iam_prefix}_kubernetes_node_role"
  assume_role_policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": { "Service": "ec2.amazonaws.com"},
      "Action": "sts:AssumeRole"
    }
  ]
}
EOF
}

resource "aws_iam_role_policy" "kubernetes_node_policy" {
  name = "${var.iam_prefix}_kubernetes_node_policy"
  role = "${aws_iam_role.kubernetes_node_role.id}"
  policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": "s3:*",
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": "ec2:Describe*",
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": "ec2:AttachVolume",
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": "ec2:DetachVolume",
      "Resource": "*"
    }
  ]
}
EOF
}

resource "aws_instance" "master" {
  count = "${var.numControllers}"
  ami = "${var.ami}"
  instance_type = "${var.master_instance_type}"
  subnet_id = "${var.subnet}"
  vpc_security_group_ids = ["${var.securityGroups}"]
  key_name = "${var.SSHKey}"
  disable_api_termination = "${var.terminate_protect}"
  iam_instance_profile = "${aws_iam_instance_profile.kubernetes_master_profile.id}"
  root_block_device {
    volume_size = "${var.volSizeController}"
  }
  tags {
    Name = "${var.deploymentName}-master-${count.index + 1}"
  }
}

resource "aws_instance" "etcd" {
  count = "${var.numEtcd}"
  ami = "${var.ami}"
  instance_type = "${var.etcd_instance_type}"
  subnet_id = "${var.subnet}"
  vpc_security_group_ids = ["${var.securityGroups}"]
  key_name = "${var.SSHKey}"
  disable_api_termination = "${var.terminate_protect}"
  root_block_device {
    volume_size = "${var.volSizeEtcd}"
  }
  tags {
    Name = "${var.deploymentName}-etcd-${count.index + 1}"
  }
}

resource "aws_instance" "minion" {
  count = "${var.numNodes}"
  ami = "${var.ami}"
  instance_type = "${var.node_instance_type}"
  subnet_id = "${var.subnet}"
  vpc_security_group_ids = ["${var.securityGroups}"]
  key_name = "${var.SSHKey}"
  disable_api_termination = "${var.terminate_protect}"
  iam_instance_profile = "${aws_iam_instance_profile.kubernetes_node_profile.id}"
  root_block_device {
    volume_size = "${var.volSizeNodes}"
  }
  tags {
    Name = "${var.deploymentName}-minion-${count.index + 1}"
  }
}

output "kubernetes_master_profile" {
  value = "${aws_iam_instance_profile.kubernetes_master_profile.id}"
}

output "kubernetes_node_profile" {
  value = "${aws_iam_instance_profile.kubernetes_node_profile.id}"
}

output "master-ip" {
  value = "${join(", ", aws_instance.master.*.private_ip)}"
}

output "etcd-ip" {
  value = "${join(", ", aws_instance.etcd.*.private_ip)}"
}

output "minion-ip" {
  value = "${join(", ", aws_instance.minion.*.private_ip)}"
}
@@ -1,37 +0,0 @@
variable "SSHUser" {
  type = "string"
  description = "SSH User for VMs."
}

resource "null_resource" "ansible-provision" {

  depends_on = ["aws_instance.master","aws_instance.etcd","aws_instance.minion"]

  ##Create Master Inventory
  provisioner "local-exec" {
    command = "echo \"[kube-master]\" > inventory"
  }
  provisioner "local-exec" {
    command = "echo \"${join("\n",formatlist("%s ansible_ssh_user=%s", aws_instance.master.*.private_ip, var.SSHUser))}\" >> inventory"
  }

  ##Create ETCD Inventory
  provisioner "local-exec" {
    command = "echo \"\n[etcd]\" >> inventory"
  }
  provisioner "local-exec" {
    command = "echo \"${join("\n",formatlist("%s ansible_ssh_user=%s", aws_instance.etcd.*.private_ip, var.SSHUser))}\" >> inventory"
  }

  ##Create Nodes Inventory
  provisioner "local-exec" {
    command = "echo \"\n[kube-node]\" >> inventory"
  }
  provisioner "local-exec" {
    command = "echo \"${join("\n",formatlist("%s ansible_ssh_user=%s", aws_instance.minion.*.private_ip, var.SSHUser))}\" >> inventory"
  }

  provisioner "local-exec" {
    command = "echo \"\n[k8s-cluster:children]\nkube-node\nkube-master\" >> inventory"
  }
}
@@ -2,27 +2,56 @@
 
 **Overview:**
 
-- This will create nodes in a VPC inside of AWS
-
-- A dynamic number of masters, etcd, and nodes can be created
-- These scripts currently expect Private IP connectivity with the nodes that are created. This means that you may need a tunnel to your VPC or to run these scripts from a VM inside the VPC. Will be looking into how to work around this later.
+This project will create:
+* VPC with Public and Private Subnets in # Availability Zones
+* Bastion Hosts and NAT Gateways in the Public Subnet
+* A dynamic number of masters, etcd, and worker nodes in the Private Subnet
+  * evenly distributed over the # of Availability Zones
+* AWS ELB in the Public Subnet for accessing the Kubernetes API from the internet
+
+**Requirements**
+- Terraform 0.8.7 or newer
 
 **How to Use:**
 
-- Export the variables for your Amazon credentials:
+- Export the variables for your AWS credentials or edit `credentials.tfvars`:
 
 ```
-export AWS_ACCESS_KEY_ID="xxx"
-export AWS_SECRET_ACCESS_KEY="yyy"
+export AWS_ACCESS_KEY_ID="www"
+export AWS_SECRET_ACCESS_KEY="xxx"
+export AWS_SSH_KEY_NAME="yyy"
+export AWS_DEFAULT_REGION="zzz"
+```
+
+- Rename `contrib/terraform/aws/terraform.tfvars.example` to `terraform.tfvars`
+- Update `contrib/terraform/aws/terraform.tfvars` with your data
+- Allocate new AWS Elastic IPs, depending on the # of Availability Zones used (2 for each AZ)
+- Create an AWS EC2 SSH Key
+
+- Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply`, depending on whether you exported your AWS credentials
+
+- Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`
+
+- Once the infrastructure is created, you can run the kubespray playbooks and supply `inventory/hosts` with the `-i` flag.
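A minimal sketch of that last step, assuming the repository root as working directory and the `inventory/hosts` file written by Terraform (the SSH user depends on the AMI you chose):

```
# run from the kubespray repository root, against the Terraform-generated inventory
ansible-playbook -b --become-user=root -i inventory/hosts ./cluster.yml
```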
+
+**Troubleshooting**
+
+***Remaining AWS IAM Instance Profile***:
+
+If the cluster was destroyed without using Terraform it is possible that
+the AWS IAM Instance Profiles still remain. To delete them you can use
+the `AWS CLI` with the following command:
+```
+aws iam delete-instance-profile --region <region_name> --instance-profile-name <profile_name>
 ```
 
-- Update contrib/terraform/aws/terraform.tfvars with your data
-- Run with `terraform apply`
+***Ansible Inventory doesn't get created:***
+
+It can happen that Terraform doesn't create an Ansible Inventory file automatically. If this is the case, copy the output after `inventory=`, create a file named `hosts` in the directory `inventory`, and paste the inventory into that file.
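One way to recover in that situation: the rendered inventory is also exposed as the Terraform output `inventory` (defined in `contrib/terraform/aws/output.tf` later in this diff), so the file can be re-written by hand; a sketch, assuming you run it from `contrib/terraform/aws`:

```
# re-render the inventory from Terraform state into the file kubespray expects
terraform output inventory > ../../../inventory/hosts
```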
-- Once the infrastructure is created, you can run the kubespray playbooks and supply contrib/terraform/aws/inventory with the `-i` flag.
-
-**Future Work:**
-
-- Update the inventory creation file to be something a little more reasonable. It's just a local-exec from Terraform now, using terraform.py or something may make sense in the future.
+**Architecture**
+
+Pictured is an AWS Infrastructure created with this Terraform project distributed over two Availability Zones.
+
+![AWS Infrastructure with Terraform](docs/aws_kubespray.png)
186 contrib/terraform/aws/create-infrastructure.tf Normal file
@@ -0,0 +1,186 @@
terraform {
  required_version = ">= 0.8.7"
}

provider "aws" {
  access_key = "${var.AWS_ACCESS_KEY_ID}"
  secret_key = "${var.AWS_SECRET_ACCESS_KEY}"
  region = "${var.AWS_DEFAULT_REGION}"
}

/*
* Calling modules who create the initial AWS VPC / AWS ELB
* and AWS IAM Roles for Kubernetes Deployment
*/

module "aws-vpc" {
  source = "modules/vpc"

  aws_cluster_name = "${var.aws_cluster_name}"
  aws_vpc_cidr_block = "${var.aws_vpc_cidr_block}"
  aws_avail_zones = "${var.aws_avail_zones}"

  aws_cidr_subnets_private = "${var.aws_cidr_subnets_private}"
  aws_cidr_subnets_public = "${var.aws_cidr_subnets_public}"
}

module "aws-elb" {
  source = "modules/elb"

  aws_cluster_name = "${var.aws_cluster_name}"
  aws_vpc_id = "${module.aws-vpc.aws_vpc_id}"
  aws_avail_zones = "${var.aws_avail_zones}"
  aws_subnet_ids_public = "${module.aws-vpc.aws_subnet_ids_public}"
  aws_elb_api_port = "${var.aws_elb_api_port}"
  k8s_secure_api_port = "${var.k8s_secure_api_port}"
}

module "aws-iam" {
  source = "modules/iam"

  aws_cluster_name = "${var.aws_cluster_name}"
}

/*
* Create Bastion Instances in AWS
*/
resource "aws_instance" "bastion-server" {
  ami = "${var.aws_bastion_ami}"
  instance_type = "${var.aws_bastion_size}"
  count = "${length(var.aws_cidr_subnets_public)}"
  associate_public_ip_address = true
  availability_zone = "${element(var.aws_avail_zones,count.index)}"
  subnet_id = "${element(module.aws-vpc.aws_subnet_ids_public,count.index)}"

  vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]

  key_name = "${var.AWS_SSH_KEY_NAME}"

  tags {
    Name = "kubernetes-${var.aws_cluster_name}-bastion-${count.index}"
    Cluster = "${var.aws_cluster_name}"
    Role = "bastion-${var.aws_cluster_name}-${count.index}"
  }
}

/*
* Create K8s Master and worker nodes and etcd instances
*/

resource "aws_instance" "k8s-master" {
  ami = "${var.aws_cluster_ami}"
  instance_type = "${var.aws_kube_master_size}"

  count = "${var.aws_kube_master_num}"

  availability_zone = "${element(var.aws_avail_zones,count.index)}"
  subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"

  vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]

  iam_instance_profile = "${module.aws-iam.kube-master-profile}"
  key_name = "${var.AWS_SSH_KEY_NAME}"

  tags {
    Name = "kubernetes-${var.aws_cluster_name}-master${count.index}"
    Cluster = "${var.aws_cluster_name}"
    Role = "master"
  }
}

resource "aws_elb_attachment" "attach_master_nodes" {
  count = "${var.aws_kube_master_num}"
  elb = "${module.aws-elb.aws_elb_api_id}"
  instance = "${element(aws_instance.k8s-master.*.id,count.index)}"
}

resource "aws_instance" "k8s-etcd" {
  ami = "${var.aws_cluster_ami}"
  instance_type = "${var.aws_etcd_size}"

  count = "${var.aws_etcd_num}"

  availability_zone = "${element(var.aws_avail_zones,count.index)}"
  subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"

  vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]

  key_name = "${var.AWS_SSH_KEY_NAME}"

  tags {
    Name = "kubernetes-${var.aws_cluster_name}-etcd${count.index}"
    Cluster = "${var.aws_cluster_name}"
    Role = "etcd"
  }
}

resource "aws_instance" "k8s-worker" {
  ami = "${var.aws_cluster_ami}"
  instance_type = "${var.aws_kube_worker_size}"

  count = "${var.aws_kube_worker_num}"

  availability_zone = "${element(var.aws_avail_zones,count.index)}"
  subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"

  vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]

  iam_instance_profile = "${module.aws-iam.kube-worker-profile}"
  key_name = "${var.AWS_SSH_KEY_NAME}"

  tags {
    Name = "kubernetes-${var.aws_cluster_name}-worker${count.index}"
    Cluster = "${var.aws_cluster_name}"
    Role = "worker"
  }
}

/*
* Create Kubespray Inventory File
*/
data "template_file" "inventory" {
  template = "${file("${path.module}/templates/inventory.tpl")}"

  vars {
    public_ip_address_bastion = "${join("\n",formatlist("bastion ansible_ssh_host=%s" , aws_instance.bastion-server.*.public_ip))}"
    connection_strings_master = "${join("\n",formatlist("%s ansible_ssh_host=%s",aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}"
    connection_strings_node = "${join("\n", formatlist("%s ansible_ssh_host=%s", aws_instance.k8s-worker.*.tags.Name, aws_instance.k8s-worker.*.private_ip))}"
    connection_strings_etcd = "${join("\n",formatlist("%s ansible_ssh_host=%s", aws_instance.k8s-etcd.*.tags.Name, aws_instance.k8s-etcd.*.private_ip))}"
    list_master = "${join("\n",aws_instance.k8s-master.*.tags.Name)}"
    list_node = "${join("\n",aws_instance.k8s-worker.*.tags.Name)}"
    list_etcd = "${join("\n",aws_instance.k8s-etcd.*.tags.Name)}"
    elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
    elb_api_port = "loadbalancer_apiserver.port=${var.aws_elb_api_port}"
    kube_insecure_apiserver_address = "kube_apiserver_insecure_bind_address: ${var.kube_insecure_apiserver_address}"
  }
}

resource "null_resource" "inventories" {
  provisioner "local-exec" {
    command = "echo '${data.template_file.inventory.rendered}' > ../../../inventory/hosts"
  }
}
8 contrib/terraform/aws/credentials.tfvars.example Normal file
@@ -0,0 +1,8 @@
#AWS Access Key
AWS_ACCESS_KEY_ID = ""
#AWS Secret Key
AWS_SECRET_ACCESS_KEY = ""
#EC2 SSH Key Name
AWS_SSH_KEY_NAME = ""
#AWS Region
AWS_DEFAULT_REGION = "eu-central-1"
BIN contrib/terraform/aws/docs/aws_kubespray.png Normal file (binary file not shown; 114 KiB)
58 contrib/terraform/aws/modules/elb/main.tf Normal file
@@ -0,0 +1,58 @@
resource "aws_security_group" "aws-elb" {
  name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
  vpc_id = "${var.aws_vpc_id}"

  tags {
    Name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
  }
}

resource "aws_security_group_rule" "aws-allow-api-access" {
  type = "ingress"
  from_port = "${var.aws_elb_api_port}"
  to_port = "${var.k8s_secure_api_port}"
  protocol = "TCP"
  cidr_blocks = ["0.0.0.0/0"]
  security_group_id = "${aws_security_group.aws-elb.id}"
}

resource "aws_security_group_rule" "aws-allow-api-egress" {
  type = "egress"
  from_port = 0
  to_port = 65535
  protocol = "TCP"
  cidr_blocks = ["0.0.0.0/0"]
  security_group_id = "${aws_security_group.aws-elb.id}"
}

# Create a new AWS ELB for K8S API
resource "aws_elb" "aws-elb-api" {
  name = "kubernetes-elb-${var.aws_cluster_name}"
  subnets = ["${var.aws_subnet_ids_public}"]
  security_groups = ["${aws_security_group.aws-elb.id}"]

  listener {
    instance_port = "${var.k8s_secure_api_port}"
    instance_protocol = "tcp"
    lb_port = "${var.aws_elb_api_port}"
    lb_protocol = "tcp"
  }

  health_check {
    healthy_threshold = 2
    unhealthy_threshold = 2
    timeout = 3
    target = "HTTP:8080/"
    interval = 30
  }

  cross_zone_load_balancing = true
  idle_timeout = 400
  connection_draining = true
  connection_draining_timeout = 400

  tags {
    Name = "kubernetes-${var.aws_cluster_name}-elb-api"
  }
}
7 contrib/terraform/aws/modules/elb/outputs.tf Normal file
@@ -0,0 +1,7 @@
output "aws_elb_api_id" {
  value = "${aws_elb.aws-elb-api.id}"
}

output "aws_elb_api_fqdn" {
  value = "${aws_elb.aws-elb-api.dns_name}"
}
28 contrib/terraform/aws/modules/elb/variables.tf Normal file
@@ -0,0 +1,28 @@
variable "aws_cluster_name" {
  description = "Name of Cluster"
}

variable "aws_vpc_id" {
  description = "AWS VPC ID"
}

variable "aws_elb_api_port" {
  description = "Port for AWS ELB"
}

variable "k8s_secure_api_port" {
  description = "Secure Port of K8S API Server"
}

variable "aws_avail_zones" {
  description = "Availability Zones Used"
  type = "list"
}

variable "aws_subnet_ids_public" {
  description = "IDs of Public Subnets"
  type = "list"
}
138 contrib/terraform/aws/modules/iam/main.tf Normal file
@@ -0,0 +1,138 @@
#Add AWS Roles for Kubernetes

resource "aws_iam_role" "kube-master" {
  name = "kubernetes-${var.aws_cluster_name}-master"
  assume_role_policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": "sts:AssumeRole",
      "Principal": {
        "Service": "ec2.amazonaws.com"
      }
    }
  ]
}
EOF
}

resource "aws_iam_role" "kube-worker" {
  name = "kubernetes-${var.aws_cluster_name}-node"
  assume_role_policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": "sts:AssumeRole",
      "Principal": {
        "Service": "ec2.amazonaws.com"
      }
    }
  ]
}
EOF
}

#Add AWS Policies for Kubernetes

resource "aws_iam_role_policy" "kube-master" {
  name = "kubernetes-${var.aws_cluster_name}-master"
  role = "${aws_iam_role.kube-master.id}"
  policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": ["ec2:*"],
      "Resource": ["*"]
    },
    {
      "Effect": "Allow",
      "Action": ["elasticloadbalancing:*"],
      "Resource": ["*"]
    },
    {
      "Effect": "Allow",
      "Action": ["route53:*"],
      "Resource": ["*"]
    },
    {
      "Effect": "Allow",
      "Action": "s3:*",
      "Resource": [
        "arn:aws:s3:::kubernetes-*"
      ]
    }
  ]
}
EOF
}

resource "aws_iam_role_policy" "kube-worker" {
  name = "kubernetes-${var.aws_cluster_name}-node"
  role = "${aws_iam_role.kube-worker.id}"
  policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": "s3:*",
      "Resource": [
        "arn:aws:s3:::kubernetes-*"
      ]
    },
    {
      "Effect": "Allow",
      "Action": "ec2:Describe*",
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": "ec2:AttachVolume",
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": "ec2:DetachVolume",
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": ["route53:*"],
      "Resource": ["*"]
    },
    {
      "Effect": "Allow",
      "Action": [
        "ecr:GetAuthorizationToken",
        "ecr:BatchCheckLayerAvailability",
        "ecr:GetDownloadUrlForLayer",
        "ecr:GetRepositoryPolicy",
        "ecr:DescribeRepositories",
        "ecr:ListImages",
        "ecr:BatchGetImage"
      ],
      "Resource": "*"
    }
  ]
}
EOF
}

#Create AWS Instance Profiles

resource "aws_iam_instance_profile" "kube-master" {
  name = "kube_${var.aws_cluster_name}_master_profile"
  roles = ["${aws_iam_role.kube-master.name}"]
}

resource "aws_iam_instance_profile" "kube-worker" {
  name = "kube_${var.aws_cluster_name}_node_profile"
  roles = ["${aws_iam_role.kube-worker.name}"]
}
7 contrib/terraform/aws/modules/iam/outputs.tf Normal file
@@ -0,0 +1,7 @@
output "kube-master-profile" {
  value = "${aws_iam_instance_profile.kube-master.name}"
}

output "kube-worker-profile" {
  value = "${aws_iam_instance_profile.kube-worker.name}"
}
3 contrib/terraform/aws/modules/iam/variables.tf Normal file
@@ -0,0 +1,3 @@
variable "aws_cluster_name" {
  description = "Name of Cluster"
}
138 contrib/terraform/aws/modules/vpc/main.tf Normal file
@@ -0,0 +1,138 @@
resource "aws_vpc" "cluster-vpc" {
  cidr_block = "${var.aws_vpc_cidr_block}"

  #DNS Related Entries
  enable_dns_support = true
  enable_dns_hostnames = true

  tags {
    Name = "kubernetes-${var.aws_cluster_name}-vpc"
  }
}

resource "aws_eip" "cluster-nat-eip" {
  count = "${length(var.aws_cidr_subnets_public)}"
  vpc = true
}

resource "aws_internet_gateway" "cluster-vpc-internetgw" {
  vpc_id = "${aws_vpc.cluster-vpc.id}"

  tags {
    Name = "kubernetes-${var.aws_cluster_name}-internetgw"
  }
}

resource "aws_subnet" "cluster-vpc-subnets-public" {
  vpc_id = "${aws_vpc.cluster-vpc.id}"
  count = "${length(var.aws_avail_zones)}"
  availability_zone = "${element(var.aws_avail_zones, count.index)}"
  cidr_block = "${element(var.aws_cidr_subnets_public, count.index)}"

  tags {
    Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public"
  }
}

resource "aws_nat_gateway" "cluster-nat-gateway" {
  count = "${length(var.aws_cidr_subnets_public)}"
  allocation_id = "${element(aws_eip.cluster-nat-eip.*.id, count.index)}"
  subnet_id = "${element(aws_subnet.cluster-vpc-subnets-public.*.id, count.index)}"
}

resource "aws_subnet" "cluster-vpc-subnets-private" {
  vpc_id = "${aws_vpc.cluster-vpc.id}"
  count = "${length(var.aws_avail_zones)}"
  availability_zone = "${element(var.aws_avail_zones, count.index)}"
  cidr_block = "${element(var.aws_cidr_subnets_private, count.index)}"

  tags {
    Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
  }
}

#Routing in VPC

#TODO: Do we need two routing tables for each subnet for redundancy or is one enough?

resource "aws_route_table" "kubernetes-public" {
  vpc_id = "${aws_vpc.cluster-vpc.id}"
  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = "${aws_internet_gateway.cluster-vpc-internetgw.id}"
  }
  tags {
    Name = "kubernetes-${var.aws_cluster_name}-routetable-public"
  }
}

resource "aws_route_table" "kubernetes-private" {
  count = "${length(var.aws_cidr_subnets_private)}"
  vpc_id = "${aws_vpc.cluster-vpc.id}"
  route {
    cidr_block = "0.0.0.0/0"
    nat_gateway_id = "${element(aws_nat_gateway.cluster-nat-gateway.*.id, count.index)}"
  }
  tags {
    Name = "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
  }
}

resource "aws_route_table_association" "kubernetes-public" {
  count = "${length(var.aws_cidr_subnets_public)}"
  subnet_id = "${element(aws_subnet.cluster-vpc-subnets-public.*.id,count.index)}"
  route_table_id = "${aws_route_table.kubernetes-public.id}"
}

resource "aws_route_table_association" "kubernetes-private" {
  count = "${length(var.aws_cidr_subnets_private)}"
  subnet_id = "${element(aws_subnet.cluster-vpc-subnets-private.*.id,count.index)}"
  route_table_id = "${element(aws_route_table.kubernetes-private.*.id,count.index)}"
}

#Kubernetes Security Groups

resource "aws_security_group" "kubernetes" {
  name = "kubernetes-${var.aws_cluster_name}-securitygroup"
  vpc_id = "${aws_vpc.cluster-vpc.id}"

  tags {
    Name = "kubernetes-${var.aws_cluster_name}-securitygroup"
  }
}

resource "aws_security_group_rule" "allow-all-ingress" {
  type = "ingress"
  from_port = 0
  to_port = 65535
  protocol = "-1"
  cidr_blocks = ["${var.aws_vpc_cidr_block}"]
  security_group_id = "${aws_security_group.kubernetes.id}"
}

resource "aws_security_group_rule" "allow-all-egress" {
  type = "egress"
  from_port = 0
  to_port = 65535
  protocol = "-1"
  cidr_blocks = ["0.0.0.0/0"]
  security_group_id = "${aws_security_group.kubernetes.id}"
}

resource "aws_security_group_rule" "allow-ssh-connections" {
  type = "ingress"
  from_port = 22
  to_port = 22
  protocol = "TCP"
  cidr_blocks = ["0.0.0.0/0"]
  security_group_id = "${aws_security_group.kubernetes.id}"
}
16 contrib/terraform/aws/modules/vpc/outputs.tf Normal file
@@ -0,0 +1,16 @@
output "aws_vpc_id" {
  value = "${aws_vpc.cluster-vpc.id}"
}

output "aws_subnet_ids_private" {
  value = ["${aws_subnet.cluster-vpc-subnets-private.*.id}"]
}

output "aws_subnet_ids_public" {
  value = ["${aws_subnet.cluster-vpc-subnets-public.*.id}"]
}

output "aws_security_group" {
  value = ["${aws_security_group.kubernetes.*.id}"]
}
24 contrib/terraform/aws/modules/vpc/variables.tf Normal file
@@ -0,0 +1,24 @@
variable "aws_vpc_cidr_block" {
  description = "CIDR Blocks for AWS VPC"
}

variable "aws_cluster_name" {
  description = "Name of Cluster"
}

variable "aws_avail_zones" {
  description = "AWS Availability Zones Used"
  type = "list"
}

variable "aws_cidr_subnets_private" {
  description = "CIDR Blocks for private subnets in Availability Zones"
  type = "list"
}

variable "aws_cidr_subnets_public" {
  description = "CIDR Blocks for public subnets in Availability Zones"
  type = "list"
}
24 contrib/terraform/aws/output.tf Normal file
@@ -0,0 +1,24 @@
output "bastion_ip" {
  value = "${join("\n", aws_instance.bastion-server.*.public_ip)}"
}

output "masters" {
  value = "${join("\n", aws_instance.k8s-master.*.private_ip)}"
}

output "workers" {
  value = "${join("\n", aws_instance.k8s-worker.*.private_ip)}"
}

output "etcd" {
  value = "${join("\n", aws_instance.k8s-etcd.*.private_ip)}"
}

output "aws_elb_api_fqdn" {
  value = "${module.aws-elb.aws_elb_api_fqdn}:${var.aws_elb_api_port}"
}

output "inventory" {
  value = "${data.template_file.inventory.rendered}"
}
28 contrib/terraform/aws/templates/inventory.tpl Normal file
@@ -0,0 +1,28 @@
${connection_strings_master}
${connection_strings_node}
${connection_strings_etcd}

${public_ip_address_bastion}

[kube-master]
${list_master}

[kube-node]
${list_node}

[etcd]
${list_etcd}

[k8s-cluster:children]
kube-node
kube-master

[k8s-cluster:vars]
${elb_api_fqdn}
${elb_api_port}
${kube_insecure_apiserver_address}
@@ -1,22 +1,31 @@
-deploymentName="test-kube-deploy"
-
-numControllers="2"
-numEtcd="3"
-numNodes="2"
-
-volSizeController="20"
-volSizeEtcd="20"
-volSizeNodes="20"
-
-awsRegion="us-west-2"
-subnet="subnet-xxxxx"
-ami="ami-32a85152"
-securityGroups="sg-xxxxx"
-SSHUser="core"
-SSHKey="my-key"
-
-master_instance_type="m3.xlarge"
-etcd_instance_type="m3.xlarge"
-node_instance_type="m3.xlarge"
-
-terminate_protect="false"
+#Global Vars
+aws_cluster_name = "devtest"
+
+#VPC Vars
+aws_vpc_cidr_block = "10.250.192.0/18"
+aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"]
+aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"]
+aws_avail_zones = ["eu-central-1a","eu-central-1b"]
+
+#Bastion Host
+aws_bastion_ami = "ami-5900cc36"
+aws_bastion_size = "t2.small"
+
+#Kubernetes Cluster
+
+aws_kube_master_num = 3
+aws_kube_master_size = "t2.medium"
+
+aws_etcd_num = 3
+aws_etcd_size = "t2.medium"
+
+aws_kube_worker_num = 4
+aws_kube_worker_size = "t2.medium"
+
+aws_cluster_ami = "ami-903df7ff"
+
+#Settings AWS ELB
+
+aws_elb_api_port = 443
+k8s_secure_api_port = 443
32 contrib/terraform/aws/terraform.tfvars.example Normal file
@@ -0,0 +1,32 @@
#Global Vars
aws_cluster_name = "devtest"

#VPC Vars
aws_vpc_cidr_block = "10.250.192.0/18"
aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"]
aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"]
aws_avail_zones = ["eu-central-1a","eu-central-1b"]

#Bastion Host
aws_bastion_ami = "ami-5900cc36"
aws_bastion_size = "t2.small"

#Kubernetes Cluster

aws_kube_master_num = 3
aws_kube_master_size = "t2.medium"

aws_etcd_num = 3
aws_etcd_size = "t2.medium"

aws_kube_worker_num = 4
aws_kube_worker_size = "t2.medium"

aws_cluster_ami = "ami-903df7ff"

#Settings AWS ELB

aws_elb_api_port = 6443
k8s_secure_api_port = 6443
kube_insecure_apiserver_address = 0.0.0.0
101 contrib/terraform/aws/variables.tf Normal file
@@ -0,0 +1,101 @@
variable "AWS_ACCESS_KEY_ID" {
  description = "AWS Access Key"
}

variable "AWS_SECRET_ACCESS_KEY" {
  description = "AWS Secret Key"
}

variable "AWS_SSH_KEY_NAME" {
  description = "Name of the SSH keypair to use in AWS."
}

variable "AWS_DEFAULT_REGION" {
  description = "AWS Region"
}

//General Cluster Settings

variable "aws_cluster_name" {
  description = "Name of AWS Cluster"
}

//AWS VPC Variables

variable "aws_vpc_cidr_block" {
  description = "CIDR Block for VPC"
}

variable "aws_avail_zones" {
  description = "Availability Zones Used"
  type = "list"
}

variable "aws_cidr_subnets_private" {
  description = "CIDR Blocks for private subnets in Availability Zones"
  type = "list"
}

variable "aws_cidr_subnets_public" {
  description = "CIDR Blocks for public subnets in Availability Zones"
  type = "list"
}

//AWS EC2 Settings

variable "aws_bastion_ami" {
  description = "AMI ID for Bastion Host in chosen AWS Region"
}

variable "aws_bastion_size" {
  description = "EC2 Instance Size of Bastion Host"
}

/*
* AWS EC2 Settings
* The number should be divisible by the number of used
* AWS Availability Zones without a remainder.
*/
variable "aws_kube_master_num" {
  description = "Number of Kubernetes Master Nodes"
}

variable "aws_kube_master_size" {
  description = "Instance size of Kube Master Nodes"
}

variable "aws_etcd_num" {
  description = "Number of etcd Nodes"
}

variable "aws_etcd_size" {
  description = "Instance size of etcd Nodes"
}

variable "aws_kube_worker_num" {
  description = "Number of Kubernetes Worker Nodes"
}

variable "aws_kube_worker_size" {
  description = "Instance size of Kubernetes Worker Nodes"
}

variable "aws_cluster_ami" {
  description = "AMI ID for Kubernetes Cluster"
}

/*
* AWS ELB Settings
*/
variable "aws_elb_api_port" {
  description = "Port for AWS ELB"
}

variable "k8s_secure_api_port" {
  description = "Secure Port of K8S API Server"
}

variable "kube_insecure_apiserver_address" {
  description = "Bind Address for insecure Port of K8s API Server"
}
1 contrib/terraform/group_vars Symbolic link
@@ -0,0 +1 @@
../../inventory/group_vars
@@ -30,13 +30,15 @@ requirements.
 
 #### OpenStack
 
-Ensure your OpenStack credentials are loaded in environment variables. This can be done by downloading a credentials .rc file from your OpenStack dashboard and sourcing it:
+Ensure your OpenStack **Identity v2** credentials are loaded in environment variables. This can be done by downloading a credentials .rc file from your OpenStack dashboard and sourcing it:
 
 ```
 $ source ~/.stackrc
 ```
 
+> You must also set the **OS_REGION_NAME** and **OS_TENANT_ID** environment variables, which are not required by the openstack CLI.
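The note above only names the variables; a minimal sketch with placeholder values (your region name and tenant/project ID will differ):

```
export OS_REGION_NAME="RegionOne"            # placeholder region
export OS_TENANT_ID="<tenant-or-project-id>" # placeholder tenant/project UUID
```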
 
 You will need two networks before installing, an internal network and
 an external (floating IP Pool) network. The internet network can be shared as
 we use security groups to provide network segregation. Due to the many
 differences between OpenStack installs the Terraform does not attempt to create
@@ -86,7 +88,7 @@ This will provision one VM as master using a floating ip, two additional masters
 Additionally, now the terraform based installation supports provisioning of a GlusterFS shared file system based on a separate set of VMs, running either a Debian or RedHat based set of VMs. To enable this, you need to add to your `my-terraform-vars.tfvars` the following variables:
 
 ```
-# Flavour depends on your openstack installation, you can get available flavours through `nova list-flavors`
+# Flavour depends on your openstack installation, you can get available flavours through `nova flavor-list`
 flavor_gfs_node = "af659280-5b8a-42b5-8865-a703775911da"
 # This is the name of an image already available in your openstack installation.
 image_gfs = "Ubuntu 15.10"
@@ -97,8 +99,48 @@ gfs_volume_size_in_gb = "50"
 ssh_user_gfs = "ubuntu"
 ```
 
-If these variables are provided, this will give rise to a new ansible group called `gfs-cluster`, for which we have added ansible roles to execute in the ansible provisioning step. If you are using CoreOS, these GlusterFS VM necessarily need to be either Debian or RedHat based VMs, CoreOS cannot serve GlusterFS, but can connect to it through binaries available on hyperkube v1.4.3_coreos.0 or higher.
+If these variables are provided, this will give rise to a new ansible group called `gfs-cluster`, for which we have added ansible roles to execute in the ansible provisioning step. If you are using Container Linux by CoreOS, these GlusterFS VMs necessarily need to be either Debian or RedHat based VMs; Container Linux by CoreOS cannot serve GlusterFS, but can connect to it through binaries available in hyperkube v1.4.3_coreos.0 or higher.
+
+# Configure Cluster variables
+
+Edit `inventory/group_vars/all.yml`:
+- Set variable **bootstrap_os** according to the selected image
+```
+# Valid bootstrap options (required): ubuntu, coreos, centos, none
+bootstrap_os: coreos
+```
+- **bin_dir**
+```
+# Directory where the binaries will be installed
+# Default:
+# bin_dir: /usr/local/bin
+# For Container Linux by CoreOS:
+bin_dir: /opt/bin
+```
+- and **cloud_provider**
+```
+cloud_provider: openstack
+```
+Edit `inventory/group_vars/k8s-cluster.yml`:
+- Set variable **kube_network_plugin** according to the selected networking
+```
+# Choose network plugin (calico, weave or flannel)
+# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
+kube_network_plugin: flannel
+```
+> flannel works out-of-the-box
+
+> calico requires allowing the service and pod subnets on the corresponding OpenStack Neutron ports
+- Set variable **resolvconf_mode**
+```
+# Can be docker_dns, host_resolvconf or none
+# Default:
+# resolvconf_mode: docker_dns
+# For Container Linux by CoreOS:
+resolvconf_mode: host_resolvconf
+```
+For calico, configure OpenStack Neutron ports: [OpenStack](/docs/openstack.md)
 
 # Provision a Kubernetes Cluster on OpenStack
 
@@ -133,20 +175,20 @@ Make sure you can connect to the hosts:
|
|||||||
```
|
```
|
||||||
$ ansible -i contrib/terraform/openstack/hosts -m ping all
|
$ ansible -i contrib/terraform/openstack/hosts -m ping all
|
||||||
example-k8s_node-1 | SUCCESS => {
|
example-k8s_node-1 | SUCCESS => {
|
||||||
"changed": false,
|
"changed": false,
|
||||||
"ping": "pong"
|
"ping": "pong"
|
||||||
}
|
}
|
||||||
example-etcd-1 | SUCCESS => {
|
example-etcd-1 | SUCCESS => {
|
||||||
"changed": false,
|
"changed": false,
|
||||||
"ping": "pong"
|
"ping": "pong"
|
||||||
}
|
}
|
||||||
example-k8s-master-1 | SUCCESS => {
|
example-k8s-master-1 | SUCCESS => {
|
||||||
"changed": false,
|
"changed": false,
|
||||||
"ping": "pong"
|
"ping": "pong"
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
if you are deploying a system that needs bootstrapping, like CoreOS, these might have a state `FAILED` due to CoreOS not having python. As long as the state is not `UNREACHABLE`, this is fine.
|
If you are deploying a system that needs bootstrapping, like Container Linux by CoreOS, these might have a state `FAILED` because Container Linux by CoreOS does not ship with python. As long as the state is not `UNREACHABLE`, this is fine.
|
||||||
|
|
||||||
If it fails, try to connect manually via SSH; it could be something as simple as a stale host key.
|
If it fails, try to connect manually via SSH; it could be something as simple as a stale host key.
|
||||||
|
|
||||||
@@ -156,6 +198,49 @@ Deploy kubernetes:
|
|||||||
$ ansible-playbook --become -i contrib/terraform/openstack/hosts cluster.yml
|
$ ansible-playbook --become -i contrib/terraform/openstack/hosts cluster.yml
|
||||||
```
|
```
|
||||||
|
|
||||||
|
# Set up local kubectl
|
||||||
|
1. Install kubectl on your workstation:
|
||||||
|
[Install and Set Up kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
|
||||||
|
2. Add route to internal IP of master node (if needed):
|
||||||
|
```
|
||||||
|
sudo route add [master-internal-ip] gw [router-ip]
|
||||||
|
```
|
||||||
|
or
|
||||||
|
```
|
||||||
|
sudo route add -net [internal-subnet]/24 gw [router-ip]
|
||||||
|
```
|
||||||
|
3. List Kubernetes certs&keys:
|
||||||
|
```
|
||||||
|
ssh [os-user]@[master-ip] sudo ls /etc/kubernetes/ssl/
|
||||||
|
```
|
||||||
|
4. Get admin's certs&key:
|
||||||
|
```
|
||||||
|
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1-key.pem > admin-key.pem
|
||||||
|
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1.pem > admin.pem
|
||||||
|
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/ca.pem > ca.pem
|
||||||
|
```
|
||||||
|
5. Edit OpenStack Neutron master's Security Group to allow TCP connections to port 6443
|
||||||
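One hedged way to do this from the command line (the security group name is a placeholder; use whatever group the Terraform scripts attached to your master):

```
# Sketch: [secgroup-name] is a placeholder for the master's security group
neutron security-group-rule-create --direction ingress --protocol tcp \
  --port-range-min 6443 --port-range-max 6443 [secgroup-name]
```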
|
6. Configure kubectl:
|
||||||
|
```
|
||||||
|
kubectl config set-cluster default-cluster --server=https://[master-internal-ip]:6443 \
|
||||||
|
--certificate-authority=ca.pem
|
||||||
|
|
||||||
|
kubectl config set-credentials default-admin \
|
||||||
|
--certificate-authority=ca.pem \
|
||||||
|
--client-key=admin-key.pem \
|
||||||
|
--client-certificate=admin.pem
|
||||||
|
|
||||||
|
kubectl config set-context default-system --cluster=default-cluster --user=default-admin
|
||||||
|
kubectl config use-context default-system
|
||||||
|
```
|
||||||
|
7. Check it:
|
||||||
|
```
|
||||||
|
kubectl version
|
||||||
|
```
|
||||||
|
|
||||||
|
# What's next
|
||||||
|
[Start Hello Kubernetes Service](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster/)
|
||||||
|
|
||||||
# clean up:
|
# clean up:
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|||||||
1
contrib/terraform/openstack/group_vars
Symbolic link
1
contrib/terraform/openstack/group_vars
Symbolic link
@@ -0,0 +1 @@
|
|||||||
|
../../../inventory/group_vars
|
||||||
@@ -1,165 +0,0 @@
|
|||||||
# Valid bootstrap options (required): ubuntu, coreos, none
|
|
||||||
bootstrap_os: none
|
|
||||||
|
|
||||||
# Directory where the binaries will be installed
|
|
||||||
bin_dir: /usr/local/bin
|
|
||||||
|
|
||||||
# Where the binaries will be downloaded.
|
|
||||||
# Note: ensure that you've enough disk space (about 1G)
|
|
||||||
local_release_dir: "/tmp/releases"
|
|
||||||
# Random shifts for retrying failed ops like pushing/downloading
|
|
||||||
retry_stagger: 5
|
|
||||||
|
|
||||||
# Uncomment this line for CoreOS only.
|
|
||||||
# Directory where python binary is installed
|
|
||||||
# ansible_python_interpreter: "/opt/bin/python"
|
|
||||||
|
|
||||||
# This is the group that the cert creation scripts chgrp the
|
|
||||||
# cert files to. Not really changable...
|
|
||||||
kube_cert_group: kube-cert
|
|
||||||
|
|
||||||
# Cluster Loglevel configuration
|
|
||||||
kube_log_level: 2
|
|
||||||
|
|
||||||
# Users to create for basic auth in Kubernetes API via HTTP
|
|
||||||
kube_api_pwd: "changeme"
|
|
||||||
kube_users:
|
|
||||||
kube:
|
|
||||||
pass: "{{kube_api_pwd}}"
|
|
||||||
role: admin
|
|
||||||
root:
|
|
||||||
pass: "changeme"
|
|
||||||
role: admin
|
|
||||||
|
|
||||||
# Kubernetes cluster name, also will be used as DNS domain
|
|
||||||
cluster_name: cluster.local
|
|
||||||
# Subdomains of DNS domain to be resolved via /etc/resolv.conf
|
|
||||||
ndots: 5
|
|
||||||
# Deploy netchecker app to verify DNS resolve as an HTTP service
|
|
||||||
deploy_netchecker: false
|
|
||||||
|
|
||||||
# For some environments, each node has a pubilcally accessible
|
|
||||||
# address and an address it should bind services to. These are
|
|
||||||
# really inventory level variables, but described here for consistency.
|
|
||||||
#
|
|
||||||
# When advertising access, the access_ip will be used, but will defer to
|
|
||||||
# ip and then the default ansible ip when unspecified.
|
|
||||||
#
|
|
||||||
# When binding to restrict access, the ip variable will be used, but will
|
|
||||||
# defer to the default ansible ip when unspecified.
|
|
||||||
#
|
|
||||||
# The ip variable is used for specific address binding, e.g. listen address
|
|
||||||
# for etcd. This is use to help with environments like Vagrant or multi-nic
|
|
||||||
# systems where one address should be preferred over another.
|
|
||||||
# ip: 10.2.2.2
|
|
||||||
#
|
|
||||||
# The access_ip variable is used to define how other nodes should access
|
|
||||||
# the node. This is used in flannel to allow other flannel nodes to see
|
|
||||||
# this node for example. The access_ip is really useful AWS and Google
|
|
||||||
# environments where the nodes are accessed remotely by the "public" ip,
|
|
||||||
# but don't know about that address themselves.
|
|
||||||
# access_ip: 1.1.1.1
|
|
||||||
|
|
||||||
# Etcd access modes:
|
|
||||||
# Enable multiaccess to configure clients to access all of the etcd members directly
|
|
||||||
# as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
|
|
||||||
# This may be the case if clients support and loadbalance multiple etcd servers natively.
|
|
||||||
etcd_multiaccess: true
|
|
||||||
|
|
||||||
# Assume there are no internal loadbalancers for apiservers exist and listen on
|
|
||||||
# kube_apiserver_port (default 443)
|
|
||||||
loadbalancer_apiserver_localhost: true
|
|
||||||
|
|
||||||
# Choose network plugin (calico, weave or flannel)
|
|
||||||
kube_network_plugin: flannel
|
|
||||||
|
|
||||||
# Kubernetes internal network for services, unused block of space.
|
|
||||||
kube_service_addresses: 10.233.0.0/18
|
|
||||||
|
|
||||||
# internal network. When used, it will assign IP
|
|
||||||
# addresses from this range to individual pods.
|
|
||||||
# This network must be unused in your network infrastructure!
|
|
||||||
kube_pods_subnet: 10.233.64.0/18
|
|
||||||
|
|
||||||
# internal network total size (optional). This is the prefix of the
|
|
||||||
# entire network. Must be unused in your environment.
|
|
||||||
# kube_network_prefix: 18
|
|
||||||
|
|
||||||
# internal network node size allocation (optional). This is the size allocated
|
|
||||||
# to each node on your network. With these defaults you should have
|
|
||||||
# room for 4096 nodes with 254 pods per node.
|
|
||||||
kube_network_node_prefix: 24
|
|
||||||
|
|
||||||
# With calico it is possible to distributed routes with border routers of the datacenter.
|
|
||||||
peer_with_router: false
|
|
||||||
# Warning : enabling router peering will disable calico's default behavior ('node mesh').
|
|
||||||
# The subnets of each nodes will be distributed by the datacenter router
|
|
||||||
|
|
||||||
# The port the API Server will be listening on.
|
|
||||||
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
|
|
||||||
kube_apiserver_port: 443 # (https)
|
|
||||||
kube_apiserver_insecure_port: 8080 # (http)
|
|
||||||
|
|
||||||
# Internal DNS configuration.
|
|
||||||
# Kubernetes can create and mainatain its own DNS server to resolve service names
|
|
||||||
# into appropriate IP addresses. It's highly advisable to run such DNS server,
|
|
||||||
# as it greatly simplifies configuration of your applications - you can use
|
|
||||||
# service names instead of magic environment variables.
|
|
||||||
# You still must manually configure all your containers to use this DNS server,
|
|
||||||
# Kubernetes won't do this for you (yet).
|
|
||||||
|
|
||||||
# Do not install additional dnsmasq
|
|
||||||
skip_dnsmasq: false
|
|
||||||
# Upstream dns servers used by dnsmasq
|
|
||||||
#upstream_dns_servers:
|
|
||||||
# - 8.8.8.8
|
|
||||||
# - 8.8.4.4
|
|
||||||
#
|
|
||||||
# # Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
|
|
||||||
dns_setup: true
|
|
||||||
dns_domain: "{{ cluster_name }}"
|
|
||||||
#
|
|
||||||
# # Ip address of the kubernetes skydns service
|
|
||||||
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
|
|
||||||
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
|
|
||||||
|
|
||||||
# There are some changes specific to the cloud providers
|
|
||||||
# for instance we need to encapsulate packets with some network plugins
|
|
||||||
# If set the possible values are either 'gce', 'aws', 'azure' or 'openstack'
|
|
||||||
# When openstack is used make sure to source in the openstack credentials
|
|
||||||
# like you would do when using nova-client before starting the playbook.
|
|
||||||
# When azure is used, you need to also set the following variables.
|
|
||||||
# cloud_provider:
|
|
||||||
|
|
||||||
# see docs/azure.md for details on how to get these values
|
|
||||||
#azure_tenant_id:
|
|
||||||
#azure_subscription_id:
|
|
||||||
#azure_aad_client_id:
|
|
||||||
#azure_aad_client_secret:
|
|
||||||
#azure_resource_group:
|
|
||||||
#azure_location:
|
|
||||||
#azure_subnet_name:
|
|
||||||
#azure_security_group_name:
|
|
||||||
#azure_vnet_name:
|
|
||||||
|
|
||||||
|
|
||||||
## Set these proxy values in order to update docker daemon to use proxies
|
|
||||||
# http_proxy: ""
|
|
||||||
# https_proxy: ""
|
|
||||||
# no_proxy: ""
|
|
||||||
|
|
||||||
# Path used to store Docker data
|
|
||||||
docker_daemon_graph: "/var/lib/docker"
|
|
||||||
|
|
||||||
## A string of extra options to pass to the docker daemon.
|
|
||||||
## This string should be exactly as you wish it to appear.
|
|
||||||
## An obvious use case is allowing insecure-registry access
|
|
||||||
## to self hosted registries like so:
|
|
||||||
docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }}"
|
|
||||||
|
|
||||||
# K8s image pull policy (imagePullPolicy)
|
|
||||||
k8s_image_pull_policy: IfNotPresent
|
|
||||||
|
|
||||||
# default packages to install within the cluster
|
|
||||||
kpm_packages: []
|
|
||||||
# - name: kube-system/grafana
|
|
||||||
@@ -68,7 +68,7 @@ resource "openstack_compute_instance_v2" "k8s_master" {
|
|||||||
floating_ip = "${element(openstack_networking_floatingip_v2.k8s_master.*.address, count.index)}"
|
floating_ip = "${element(openstack_networking_floatingip_v2.k8s_master.*.address, count.index)}"
|
||||||
metadata = {
|
metadata = {
|
||||||
ssh_user = "${var.ssh_user}"
|
ssh_user = "${var.ssh_user}"
|
||||||
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster"
|
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster,vault"
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
@@ -87,10 +87,10 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
|||||||
"${openstack_compute_secgroup_v2.k8s.name}" ]
|
"${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||||
metadata = {
|
metadata = {
|
||||||
ssh_user = "${var.ssh_user}"
|
ssh_user = "${var.ssh_user}"
|
||||||
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster"
|
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster,vault,no-floating"
|
||||||
}
|
}
|
||||||
provisioner "local-exec" {
|
provisioner "local-exec" {
|
||||||
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/k8s-cluster.yml"
|
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -107,7 +107,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {
|
|||||||
floating_ip = "${element(openstack_networking_floatingip_v2.k8s_node.*.address, count.index)}"
|
floating_ip = "${element(openstack_networking_floatingip_v2.k8s_node.*.address, count.index)}"
|
||||||
metadata = {
|
metadata = {
|
||||||
ssh_user = "${var.ssh_user}"
|
ssh_user = "${var.ssh_user}"
|
||||||
kubespray_groups = "kube-node,k8s-cluster"
|
kubespray_groups = "kube-node,k8s-cluster,vault"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -123,10 +123,10 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
|||||||
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}" ]
|
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||||
metadata = {
|
metadata = {
|
||||||
ssh_user = "${var.ssh_user}"
|
ssh_user = "${var.ssh_user}"
|
||||||
kubespray_groups = "kube-node,k8s-cluster"
|
kubespray_groups = "kube-node,k8s-cluster,vault,no-floating"
|
||||||
}
|
}
|
||||||
provisioner "local-exec" {
|
provisioner "local-exec" {
|
||||||
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/k8s-cluster.yml"
|
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -8,20 +8,39 @@ The inventory is composed of 3 groups:
|
|||||||
|
|
||||||
* **kube-node** : list of kubernetes nodes where the pods will run.
|
* **kube-node** : list of kubernetes nodes where the pods will run.
|
||||||
* **kube-master** : list of servers where kubernetes master components (apiserver, scheduler, controller) will run.
|
* **kube-master** : list of servers where kubernetes master components (apiserver, scheduler, controller) will run.
|
||||||
Note: if you want the server to act both as master and node the server must be defined on both groups _kube-master_ and _kube-node_
|
* **etcd**: list of servers to compose the etcd cluster. You should have at least 3 servers for failover purposes.
|
||||||
* **etcd**: list of server to compose the etcd server. you should have at least 3 servers for failover purposes.
|
|
||||||
|
Note: do not modify the children of _k8s-cluster_, like putting
|
||||||
|
the _etcd_ group into the _k8s-cluster_, unless you are certain
|
||||||
|
you want to do that and you have it fully contained in the latter:
|
||||||
|
|
||||||
|
```
|
||||||
|
k8s-cluster ⊂ etcd => kube-node ∩ etcd = etcd
|
||||||
|
```
|
||||||
|
|
||||||
|
When _kube-node_ contains _etcd_, you define your etcd cluster to also be schedulable for Kubernetes workloads.
|
||||||
|
If you want it to be standalone, make sure those groups do not intersect.
|
||||||
|
If you want the server to act both as master and node, the server must be defined
|
||||||
|
on both groups _kube-master_ and _kube-node_. If you want a standalone and
|
||||||
|
unschedulable master, the server must be defined only in the _kube-master_ and
|
||||||
|
not _kube-node_.
|
||||||
|
|
||||||
|
There are also two special groups:
|
||||||
|
|
||||||
|
* **calico-rr** : explained for [advanced Calico networking cases](calico.md)
|
||||||
|
* **bastion** : configure a bastion host if your nodes are not directly reachable
|
||||||
|
|
||||||
Below is a complete inventory example:
|
Below is a complete inventory example:
|
||||||
|
|
||||||
```
|
```
|
||||||
## Configure 'ip' variable to bind kubernetes services on a
|
## Configure 'ip' variable to bind kubernetes services on a
|
||||||
## different ip than the default iface
|
## different ip than the default iface
|
||||||
node1 ansible_ssh_host=95.54.0.12 # ip=10.3.0.1
|
node1 ansible_ssh_host=95.54.0.12 ip=10.3.0.1
|
||||||
node2 ansible_ssh_host=95.54.0.13 # ip=10.3.0.2
|
node2 ansible_ssh_host=95.54.0.13 ip=10.3.0.2
|
||||||
node3 ansible_ssh_host=95.54.0.14 # ip=10.3.0.3
|
node3 ansible_ssh_host=95.54.0.14 ip=10.3.0.3
|
||||||
node4 ansible_ssh_host=95.54.0.15 # ip=10.3.0.4
|
node4 ansible_ssh_host=95.54.0.15 ip=10.3.0.4
|
||||||
node5 ansible_ssh_host=95.54.0.16 # ip=10.3.0.5
|
node5 ansible_ssh_host=95.54.0.16 ip=10.3.0.5
|
||||||
node6 ansible_ssh_host=95.54.0.17 # ip=10.3.0.6
|
node6 ansible_ssh_host=95.54.0.17 ip=10.3.0.6
|
||||||
|
|
||||||
[kube-master]
|
[kube-master]
|
||||||
node1
|
node1
|
||||||
@@ -42,12 +61,41 @@ node6
|
|||||||
[k8s-cluster:children]
|
[k8s-cluster:children]
|
||||||
kube-node
|
kube-node
|
||||||
kube-master
|
kube-master
|
||||||
etcd
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Group vars
|
Group vars and overriding variables precedence
|
||||||
--------------
|
----------------------------------------------
|
||||||
The main variables to change are located in the directory ```inventory/group_vars/all.yml```.
|
|
||||||
|
The group variables to control main deployment options are located in the directory ``inventory/group_vars``.
|
||||||
|
Optional variables are located in the `inventory/group_vars/all.yml`.
|
||||||
|
Mandatory variables that are common for at least one role (or a node group) can be found in the
|
||||||
|
`inventory/group_vars/k8s-cluster.yml`.
|
||||||
|
There are also role vars for docker, rkt, kubernetes preinstall and master roles.
|
||||||
|
According to the [ansible docs](http://docs.ansible.com/ansible/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable),
|
||||||
|
those cannot be overridden from the group vars. In order to override, one should use
|
||||||
|
the `-e` runtime flags (the simplest way) or other layers described in the docs.
|
||||||
|
|
||||||
|
Kubespray uses only a few layers to override things (or expects them to
|
||||||
|
be overridden for roles):
|
||||||
|
|
||||||
|
Layer | Comment
|
||||||
|
------|--------
|
||||||
|
**role defaults** | provides best UX to override things for Kubespray deployments
|
||||||
|
inventory vars | Unused
|
||||||
|
**inventory group_vars** | Expects users to use ``all.yml``,``k8s-cluster.yml`` etc. to override things
|
||||||
|
inventory host_vars | Unused
|
||||||
|
playbook group_vars | Unused
|
||||||
|
playbook host_vars | Unused
|
||||||
|
**host facts** | Kubespray overrides for internal roles' logic, like state flags
|
||||||
|
play vars | Unused
|
||||||
|
play vars_prompt | Unused
|
||||||
|
play vars_files | Unused
|
||||||
|
registered vars | Unused
|
||||||
|
set_facts | Kubespray overrides those, for some places
|
||||||
|
**role and include vars** | Provides bad UX to override things! Use extra vars to enforce
|
||||||
|
block vars (only for tasks in block) | Kubespray overrides for internal roles' logic
|
||||||
|
task vars (only for the task) | Unused for roles, but only for helper scripts
|
||||||
|
**extra vars** (always win precedence) | override with ``ansible-playbook -e @foo.yml``
|
||||||
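For example, extra vars can be passed either as a file or as individual assignments; a sketch using the inventory path from the tag examples below (`my_overrides.yml` is a hypothetical file holding whatever variables you want to force):

```
ansible-playbook -i inventory/inventory.ini cluster.yml -e @my_overrides.yml
ansible-playbook -i inventory/inventory.ini cluster.yml -e kube_network_plugin=weave
```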
|
|
||||||
Ansible tags
|
Ansible tags
|
||||||
------------
|
------------
|
||||||
@@ -76,12 +124,12 @@ The following tags are defined in playbooks:
|
|||||||
| k8s-pre-upgrade | Upgrading K8s cluster
|
| k8s-pre-upgrade | Upgrading K8s cluster
|
||||||
| k8s-secrets | Configuring K8s certs/keys
|
| k8s-secrets | Configuring K8s certs/keys
|
||||||
| kpm | Installing K8s apps definitions with KPM
|
| kpm | Installing K8s apps definitions with KPM
|
||||||
| kube-apiserver | Configuring self-hosted kube-apiserver
|
| kube-apiserver | Configuring static pod kube-apiserver
|
||||||
| kube-controller-manager | Configuring self-hosted kube-controller-manager
|
| kube-controller-manager | Configuring static pod kube-controller-manager
|
||||||
| kubectl | Installing kubectl and bash completion
|
| kubectl | Installing kubectl and bash completion
|
||||||
| kubelet | Configuring kubelet service
|
| kubelet | Configuring kubelet service
|
||||||
| kube-proxy | Configuring self-hosted kube-proxy
|
| kube-proxy | Configuring static pod kube-proxy
|
||||||
| kube-scheduler | Configuring self-hosted kube-scheduler
|
| kube-scheduler | Configuring static pod kube-scheduler
|
||||||
| localhost | Special steps for the localhost (ansible runner)
|
| localhost | Special steps for the localhost (ansible runner)
|
||||||
| master | Configuring K8s master node role
|
| master | Configuring K8s master node role
|
||||||
| netchecker | Installing netchecker K8s app
|
| netchecker | Installing netchecker K8s app
|
||||||
@@ -114,7 +162,7 @@ ansible-playbook -i inventory/inventory.ini -e dns_server='' cluster.yml --tags
|
|||||||
And this prepares all container images locally (at the ansible runner node) without installing
|
And this prepares all container images locally (at the ansible runner node) without installing
|
||||||
or upgrading related stuff or trying to upload containers to K8s cluster nodes:
|
or upgrading related stuff or trying to upload containers to K8s cluster nodes:
|
||||||
```
|
```
|
||||||
ansible-playbook -i inventory/inventory.ini cluster.yaml \
|
ansible-playbook -i inventory/inventory.ini cluster.yml \
|
||||||
-e download_run_once=true -e download_localhost=true \
|
-e download_run_once=true -e download_localhost=true \
|
||||||
--tags download --skip-tags upload,upgrade
|
--tags download --skip-tags upload,upgrade
|
||||||
```
|
```
|
||||||
@@ -132,5 +180,5 @@ bastion host.
|
|||||||
bastion ansible_ssh_host=x.x.x.x
|
bastion ansible_ssh_host=x.x.x.x
|
||||||
```
|
```
|
||||||
|
|
||||||
For more information about Ansible and bastion hosts, read
|
For more information about Ansible and bastion hosts, read
|
||||||
[Running Ansible Through an SSH Bastion Host](http://blog.scottlowe.org/2015/12/24/running-ansible-through-ssh-bastion-host/)
|
[Running Ansible Through an SSH Bastion Host](http://blog.scottlowe.org/2015/12/24/running-ansible-through-ssh-bastion-host/)
|
||||||
|
|||||||
22
docs/atomic.md
Normal file
22
docs/atomic.md
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
Atomic host bootstrap
|
||||||
|
=====================
|
||||||
|
|
||||||
|
Atomic host testing has been done with the network plugin flannel. Change the inventory var `kube_network_plugin: flannel`.
|
||||||
|
|
||||||
|
Note: Flannel is the only plugin that has currently been tested with Atomic hosts.
|
||||||
|
|
||||||
|
### Vagrant
|
||||||
|
|
||||||
|
* For bootstrapping with Vagrant, use box centos/atomic-host
|
||||||
|
* Update VagrantFile variable `local_release_dir` to `/var/vagrant/temp`.
|
||||||
|
* Update `vm_memory = 2048` and `vm_cpus = 2`
|
||||||
|
* Networking on vagrant hosts has to be brought up manually once they are booted.
|
||||||
|
|
||||||
|
```
|
||||||
|
vagrant ssh
|
||||||
|
sudo /sbin/ifup enp0s8
|
||||||
|
```
|
||||||
|
|
||||||
|
* For users of vagrant-libvirt download qcow2 format from https://wiki.centos.org/SpecialInterestGroup/Atomic/Download/
|
||||||
|
|
||||||
|
Then you can proceed to [cluster deployment](#run-deployment)
|
||||||
52
docs/aws.md
52
docs/aws.md
@@ -3,8 +3,58 @@ AWS
|
|||||||
|
|
||||||
To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`.
|
To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`.
|
||||||
|
|
||||||
Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes/kubernetes/tree/master/cluster/aws/templates/iam). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.
|
Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.
|
||||||
|
|
||||||
|
You would also need to tag the resources in your VPC accordingly for the aws provider to utilize them. Tag the subnets and all instances that kubernetes will be run on with key `kubernetes.io/cluster/$cluster_name` (`$cluster_name` must be a unique identifier for the cluster). Tag the subnets that must be targeted by external ELBs with the key `kubernetes.io/role/elb` and internal ELBs with the key `kubernetes.io/role/internal-elb`.
|
||||||
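As a sketch of what that tagging could look like with the AWS CLI (all resource IDs and the cluster name here are placeholders):

```
# Tag a subnet and an instance for the cluster (placeholder resource IDs)
aws ec2 create-tags --resources subnet-xxxxxxxx i-xxxxxxxx \
  --tags Key=kubernetes.io/cluster/my-cluster,Value=owned
# Mark a subnet for external ELBs
aws ec2 create-tags --resources subnet-xxxxxxxx --tags Key=kubernetes.io/role/elb,Value=1
```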
|
|
||||||
|
Make sure your VPC has both DNS Hostnames support and Private DNS enabled.
|
||||||
|
|
||||||
The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`.
|
The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`.
|
||||||
|
|
||||||
You can now create your cluster!
|
You can now create your cluster!
|
||||||
|
|
||||||
|
### Dynamic Inventory ###
|
||||||
|
There is also a dynamic inventory script for AWS that can be used if desired. However, be aware that it makes certain assumptions about how you'll create your inventory. It also does not handle all use cases and groups that we may use as part of more advanced deployments. Additions welcome.
|
||||||
|
|
||||||
|
This will produce an inventory that is passed into Ansible and looks like the following:
|
||||||
|
```
|
||||||
|
{
|
||||||
|
"_meta": {
|
||||||
|
"hostvars": {
|
||||||
|
"ip-172-31-3-xxx.us-east-2.compute.internal": {
|
||||||
|
"ansible_ssh_host": "172.31.3.xxx"
|
||||||
|
},
|
||||||
|
"ip-172-31-8-xxx.us-east-2.compute.internal": {
|
||||||
|
"ansible_ssh_host": "172.31.8.xxx"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"etcd": [
|
||||||
|
"ip-172-31-3-xxx.us-east-2.compute.internal"
|
||||||
|
],
|
||||||
|
"k8s-cluster": {
|
||||||
|
"children": [
|
||||||
|
"kube-master",
|
||||||
|
"kube-node"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"kube-master": [
|
||||||
|
"ip-172-31-3-xxx.us-east-2.compute.internal"
|
||||||
|
],
|
||||||
|
"kube-node": [
|
||||||
|
"ip-172-31-8-xxx.us-east-2.compute.internal"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Guide:
|
||||||
|
- Create instances in AWS as needed.
|
||||||
|
- Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube-master`, `etcd`, or `kube-node`. You can also combine roles on one instance, e.g. `kube-master, etcd`
|
||||||
|
- Copy the `kubespray-aws-inventory.py` script from `kubespray/contrib/aws_inventory` to the `kubespray/inventory` directory.
|
||||||
|
- Set the following AWS credentials and info as environment variables in your terminal:
|
||||||
|
```
|
||||||
|
export AWS_ACCESS_KEY_ID="xxxxx"
|
||||||
|
export AWS_SECRET_ACCESS_KEY="yyyyy"
|
||||||
|
export REGION="us-east-2"
|
||||||
|
```
|
||||||
|
- We will now create our cluster. There will be either one or two small changes. The first is that we will specify `-i inventory/kubespray-aws-inventory.py` as our inventory script. The other is conditional. If your AWS instances are public facing, you can set the `VPC_VISIBILITY` variable to `public` and that will result in public IP and DNS names being passed into the inventory. This causes your cluster.yml command to look like `VPC_VISIBILITY="public" ansible-playbook ... cluster.yml`
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
Azure
|
Azure
|
||||||
===============
|
===============
|
||||||
|
|
||||||
To deploy kubespray on [Azure](https://azure.microsoft.com) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'azure'`.
|
To deploy Kubernetes on [Azure](https://azure.microsoft.com) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'azure'`.
|
||||||
|
|
||||||
All your instances are required to run in a resource group and a routing table has to be attached to the subnet your instances are in.
|
All your instances are required to run in a resource group and a routing table has to be attached to the subnet your instances are in.
|
||||||
|
|
||||||
@@ -49,8 +49,8 @@ This is the AppId from the last command
|
|||||||
- Create the role assignment with:
|
- Create the role assignment with:
|
||||||
`azure role assignment create --spn http://kubernetes -o "Owner" -c /subscriptions/SUBSCRIPTION_ID`
|
`azure role assignment create --spn http://kubernetes -o "Owner" -c /subscriptions/SUBSCRIPTION_ID`
|
||||||
|
|
||||||
azure\_aad\_client\_id musst be set to the AppId, azure\_aad\_client\_secret is your choosen secret.
|
azure\_aad\_client\_id must be set to the AppId, azure\_aad\_client\_secret is your chosen secret.
|
||||||
|
|
||||||
## Provisioning Azure with Resource Group Templates
|
## Provisioning Azure with Resource Group Templates
|
||||||
|
|
||||||
You'll find Resource Group Templates and scripts to provision the required infrastructore to Azure in [*contrib/azurerm*](../contrib/azurerm/README.md)
|
You'll find Resource Group Templates and scripts to provision the required infrastructure to Azure in [*contrib/azurerm*](../contrib/azurerm/README.md)
|
||||||
|
|||||||
@@ -96,7 +96,7 @@ You need to edit your inventory and add:
|
|||||||
* `cluster_id` by route reflector node/group (see details
|
* `cluster_id` by route reflector node/group (see details
|
||||||
[here](https://hub.docker.com/r/calico/routereflector/))
|
[here](https://hub.docker.com/r/calico/routereflector/))
|
||||||
|
|
||||||
Here's an example of Kargo inventory with route reflectors:
|
Here's an example of Kubespray inventory with route reflectors:
|
||||||
|
|
||||||
```
|
```
|
||||||
[all]
|
[all]
|
||||||
@@ -145,7 +145,17 @@ cluster_id="1.0.0.1"
|
|||||||
The inventory above will deploy the following topology assuming that calico's
|
The inventory above will deploy the following topology assuming that calico's
|
||||||
`global_as_num` is set to `65400`:
|
`global_as_num` is set to `65400`:
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
|
##### Optional : Define default endpoint to host action
|
||||||
|
|
||||||
|
By default Calico blocks traffic from endpoints to the host itself by using an iptables DROP action. When using it in Kubernetes, the action has to be changed to RETURN (default in kubespray) or ACCEPT (see https://github.com/projectcalico/felix/issues/660 and https://github.com/projectcalico/calicoctl/issues/1389). Otherwise all network packets from pods (with hostNetwork=False) to service endpoints (with hostNetwork=True) within the same node are dropped.
|
||||||
|
|
||||||
|
|
||||||
|
To redefine the default action, please set the following variable in your inventory:
|
||||||
|
```
|
||||||
|
calico_endpoint_to_host_action: "ACCEPT"
|
||||||
|
```
|
||||||
|
|
||||||
Cloud providers configuration
|
Cloud providers configuration
|
||||||
=============================
|
=============================
|
||||||
|
|||||||
@@ -3,17 +3,17 @@ Cloud providers
|
|||||||
|
|
||||||
#### Provisioning
|
#### Provisioning
|
||||||
|
|
||||||
You can use kargo-cli to start new instances on cloud providers
|
You can use kubespray-cli to start new instances on cloud providers
|
||||||
here's an example
|
here's an example
|
||||||
```
|
```
|
||||||
kargo [aws|gce] --nodes 2 --etcd 3 --cluster-name test-smana
|
kubespray [aws|gce] --nodes 2 --etcd 3 --cluster-name test-smana
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Deploy kubernetes
|
#### Deploy kubernetes
|
||||||
|
|
||||||
With kargo-cli
|
With kubespray-cli
|
||||||
```
|
```
|
||||||
kargo deploy [--aws|--gce] -u admin
|
kubespray deploy [--aws|--gce] -u admin
|
||||||
```
|
```
|
||||||
|
|
||||||
Or ansible-playbook command
|
Or ansible-playbook command
|
||||||
|
|||||||
25
docs/comparisons.md
Normal file
25
docs/comparisons.md
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
Kubespray vs [Kops](https://github.com/kubernetes/kops)
|
||||||
|
---------------
|
||||||
|
|
||||||
|
Kubespray runs on bare metal and most clouds, using Ansible as its substrate for
|
||||||
|
provisioning and orchestration. Kops performs the provisioning and orchestration
|
||||||
|
itself, and as such is less flexible in deployment platforms. For people with
|
||||||
|
familiarity with Ansible, existing Ansible deployments or the desire to run a
|
||||||
|
Kubernetes cluster across multiple platforms, Kubespray is a good choice. Kops,
|
||||||
|
however, is more tightly integrated with the unique features of the clouds it
|
||||||
|
supports so it could be a better choice if you know that you will only be using
|
||||||
|
one platform for the foreseeable future.
|
||||||
|
|
||||||
|
Kubespray vs [Kubeadm](https://github.com/kubernetes/kubeadm)
|
||||||
|
------------------
|
||||||
|
|
||||||
|
Kubeadm provides domain knowledge of Kubernetes clusters' life cycle
|
||||||
|
management, including self-hosted layouts, dynamic discovery services and so
|
||||||
|
on. Had it belonged to the new [operators world](https://coreos.com/blog/introducing-operators.html),
|
||||||
|
it may have been named a "Kubernetes cluster operator". Kubespray however,
|
||||||
|
does generic configuration management tasks from the "OS operators" ansible
|
||||||
|
world, plus some initial K8s clustering (with networking plugins included) and
|
||||||
|
control plane bootstrapping. Kubespray [strives](https://github.com/kubernetes-incubator/kubespray/issues/553)
|
||||||
|
to adopt kubeadm as a tool in order to consume life cycle management domain
|
||||||
|
knowledge from it and offload generic OS configuration things from it, which
|
||||||
|
hopefully benefits both sides.
|
||||||
@@ -1,24 +1,20 @@
|
|||||||
CoreOS bootstrap
|
CoreOS bootstrap
|
||||||
===============
|
===============
|
||||||
|
|
||||||
Example with **kargo-cli**:
|
Example with **kubespray-cli**:
|
||||||
|
|
||||||
```
|
```
|
||||||
kargo deploy --gce --coreos
|
kubespray deploy --gce --coreos
|
||||||
```
|
```
|
||||||
|
|
||||||
Or with Ansible:
|
Or with Ansible:
|
||||||
|
|
||||||
Before running the cluster playbook you must satisfy the following requirements:
|
Before running the cluster playbook you must satisfy the following requirements:
|
||||||
|
|
||||||
* On each CoreOS nodes a writable directory **/opt/bin** (~400M disk space)
|
General CoreOS Pre-Installation Notes:
|
||||||
|
- You should set the bootstrap_os variable to `coreos`
|
||||||
* Uncomment the variable **ansible\_python\_interpreter** in the file `inventory/group_vars/all.yml`
|
- Ensure that the bin_dir is set to `/opt/bin`
|
||||||
|
- ansible_python_interpreter should be `/opt/bin/python`. This will be laid down by the bootstrap task.
|
||||||
* run the Python bootstrap playbook
|
- The default resolvconf_mode setting of `docker_dns` **does not** work for CoreOS. This is because we do not edit the systemd service file for docker on CoreOS nodes. Instead, just use the `host_resolvconf` mode. It should work out of the box.
|
||||||
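Put together, the notes above amount to a few inventory variables; a minimal sketch:

```
bootstrap_os: coreos
bin_dir: /opt/bin
ansible_python_interpreter: /opt/bin/python
resolvconf_mode: host_resolvconf
```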
|
|
||||||
```
|
|
||||||
ansible-playbook -u smana -e ansible_ssh_user=smana -b --become-user=root -i inventory/inventory.cfg coreos-bootstrap.yml
|
|
||||||
```
|
|
||||||
|
|
||||||
Then you can proceed to [cluster deployment](#run-deployment)
|
Then you can proceed to [cluster deployment](#run-deployment)
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
K8s DNS stack by Kargo
|
K8s DNS stack by Kubespray
|
||||||
======================
|
======================
|
||||||
|
|
||||||
For K8s cluster nodes, kargo configures a [Kubernetes DNS](http://kubernetes.io/docs/admin/dns/)
|
For K8s cluster nodes, Kubespray configures a [Kubernetes DNS](http://kubernetes.io/docs/admin/dns/)
|
||||||
[cluster add-on](http://releases.k8s.io/master/cluster/addons/README.md)
|
[cluster add-on](http://releases.k8s.io/master/cluster/addons/README.md)
|
||||||
to serve as an authoritative DNS server for a given ``dns_domain`` and its
|
to serve as an authoritative DNS server for a given ``dns_domain`` and its
|
||||||
``svc, default.svc`` default subdomains (a total of ``ndots: 5`` max levels).
|
``svc, default.svc`` default subdomains (a total of ``ndots: 5`` max levels).
|
||||||
@@ -9,47 +9,14 @@ to serve as an authoritative DNS server for a given ``dns_domain`` and its
|
|||||||
Other nodes in the inventory, like external storage nodes or a separate etcd cluster
|
Other nodes in the inventory, like external storage nodes or a separate etcd cluster
|
||||||
node group, are considered non-cluster and left up to the user to configure DNS resolution.
|
node group, are considered non-cluster and left up to the user to configure DNS resolution.
|
||||||
|
|
||||||
Note, custom ``ndots`` values affect only the dnsmasq daemon set (explained below).
|
|
||||||
While the kubedns has the ``ndots=5`` hardcoded, which is not recommended due to
|
|
||||||
[DNS performance reasons](https://github.com/kubernetes/kubernetes/issues/14051).
|
|
||||||
You can use config maps for the kubedns app to workaround the issue, which is
|
|
||||||
yet in the Kargo scope.
|
|
||||||
|
|
||||||
Additional search (sub)domains may be defined in the ``searchdomains``
|
DNS variables
|
||||||
and ``ndots`` vars. And additional recursive DNS resolvers in the `` upstream_dns_servers``,
|
=============
|
||||||
``nameservers`` vars. Intranet/cloud provider DNS resolvers should be specified
|
|
||||||
in the first place, followed by external resolvers, for example:
|
|
||||||
|
|
||||||
```
|
There are several global variables which can be used to modify DNS settings:
|
||||||
skip_dnsmasq: true
|
|
||||||
nameservers: [8.8.8.8]
|
|
||||||
upstream_dns_servers: [172.18.32.6]
|
|
||||||
```
|
|
||||||
or
|
|
||||||
```
|
|
||||||
skip_dnsmasq: false
|
|
||||||
upstream_dns_servers: [172.18.32.6, 172.18.32.7, 8.8.8.8, 8.8.8.4]
|
|
||||||
```
|
|
||||||
The vars are explained below. For the early cluster deployment stage, when there
|
|
||||||
is yet K8s cluster and apps exist, a user may expect local repos to be
|
|
||||||
accessible via authoritative intranet resolvers. For that case, if none custom vars
|
|
||||||
was specified, the default resolver is set to either the cloud provider default
|
|
||||||
or `8.8.8.8`. And domain is set to the default ``dns_domain`` value as well.
|
|
||||||
Later, the nameservers will be reconfigured to the DNS service IP that Kargo
|
|
||||||
configures for K8s cluster.
|
|
||||||
|
|
||||||
Also note, existing records will be purged from the `/etc/resolv.conf`,
|
#### ndots
|
||||||
including resolvconf's base/head/cloud-init config files and those that come from dhclient.
|
ndots value to be used in ``/etc/resolv.conf``
|
||||||
This is required for hostnet pods networking and for [kubelet to not exceed search domains
|
|
||||||
limits](https://github.com/kubernetes/kubernetes/issues/9229).
|
|
||||||
|
|
||||||
Instead, new domain, search, nameserver records and options will be defined from the
|
|
||||||
aforementioned vars:
|
|
||||||
* Superseded via dhclient's DNS update hook.
|
|
||||||
* Generated via cloud-init (CoreOS only).
|
|
||||||
* Statically defined in the `/etc/resolv.conf`, if none of above is applicable.
|
|
||||||
* Resolvconf's head/base files are disabled from populating anything into the
|
|
||||||
`/etc/resolv.conf`.
|
|
||||||
|
|
||||||
It is important to note that multiple search domains combined with high ``ndots``
|
It is important to note that multiple search domains combined with high ``ndots``
|
||||||
values lead to poor performance of DNS stack, so please choose it wisely.
|
values lead to poor performance of DNS stack, so please choose it wisely.
|
||||||
@@ -58,53 +25,102 @@ replies for [bogus internal FQDNS](https://github.com/kubernetes/kubernetes/issu
|
|||||||
before it even hits the kubedns app. This enables dnsmasq to serve as a
|
before it even hits the kubedns app. This enables dnsmasq to serve as a
|
||||||
protective, but still recursive resolver in front of kubedns.
|
protective, but still recursive resolver in front of kubedns.
|
||||||
|
|
||||||
DNS configuration details
|
#### searchdomains
|
||||||
-------------------------
|
Custom search domains to be added in addition to the cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``).
|
||||||
|
|
||||||
Here is an approximate picture of how DNS things working and
|
Most Linux systems limit the total number of search domains to 6 and the total length of all search domains
|
||||||
being configured by Kargo ansible playbooks:
|
to 256 characters. Depending on the length of ``dns_domain``, you're limited to less than the total limit.
|
||||||
|
|
||||||

|
Please note that ``resolvconf_mode: docker_dns`` will automatically add your system's search domains as
|
||||||
|
additional search domains. Please take this into the accounts for the limits.
|
||||||
|
|
||||||
Note that an additional dnsmasq daemon set is installed by Kargo
|
#### nameservers
|
||||||
by default. Kubelet will configure DNS base of all pods to use the
|
This variable is only used by ``resolvconf_mode: host_resolvconf``. These nameservers are added to the host's
|
||||||
given dnsmasq cluster IP, which is defined via the ``dns_server`` var.
|
``/etc/resolv.conf`` *after* ``upstream_dns_servers`` and thus serve as backup nameservers. If this variable
|
||||||
The dnsmasq forwards requests for a given cluster ``dns_domain`` to
|
is not set, a default resolver is chosen (depending on cloud provider or 8.8.8.8 when no cloud provider is specified).
|
||||||
Kubedns's SkyDns service. The SkyDns server is configured to be an
|
|
||||||
authoritative DNS server for the given cluser domain (and its subdomains
|
|
||||||
up to ``ndots:5`` depth). Note: you should scale its replication controller
|
|
||||||
up, if SkyDns chokes. These two layered DNS forwarders provide HA for the
|
|
||||||
DNS cluster IP endpoint, which is a critical moving part for Kubernetes apps.
|
|
||||||
|
|
||||||
Nameservers are as well configured in the hosts' ``/etc/resolv.conf`` files,
|
#### upstream_dns_servers
|
||||||
as the given DNS cluster IP merged with ``nameservers`` values. While the
|
DNS servers to be added *after* the cluster DNS. Used by all ``resolvconf_mode`` modes. These serve as backup
|
||||||
DNS cluster IP merged with the ``upstream_dns_servers`` defines additional
|
DNS servers in early cluster deployment when no cluster DNS is available yet. These are also added as upstream
|
||||||
nameservers for the aforementioned nsmasq daemon set running on all hosts.
|
DNS servers used by ``dnsmasq`` (when deployed with ``dns_mode: dnsmasq_kubedns``).
|
||||||
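A hedged example of how the variables above might be combined in ``inventory/group_vars/all.yml`` (the domain and addresses are illustrative placeholders, not recommendations):

```
searchdomains:
  - office.example.com
nameservers:
  - 8.8.8.8
upstream_dns_servers:
  - 172.18.32.6
  - 172.18.32.7
```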
This mitigates existing Linux limitation of max 3 nameservers in the
|
|
||||||
``/etc/resolv.conf`` and also brings an additional caching layer for the
|
|
||||||
clustered DNS services.
|
|
||||||
|
|
||||||
You can skip the dnsmasq daemon set install steps by setting the
|
DNS modes supported by Kubespray
|
||||||
``skip_dnsmasq: true``. This may be the case, if you're fine with
|
============================
|
||||||
the nameservers limitation. Sadly, there is no way to work around the
|
|
||||||
search domain limitations of a 256 chars and 6 domains. Thus, you can
|
You can modify how Kubespray sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``.
|
||||||
use the ``searchdomains`` var to define no more than a three custom domains.
|
|
||||||
Remaining three slots are reserved for K8s cluster default subdomains.
|
## dns_mode
|
||||||
|
``dns_mode`` configures how Kubespray will set up cluster DNS. There are three modes available:
|
||||||
|
|
||||||
|
#### dnsmasq_kubedns (default)
|
||||||
|
This installs an additional dnsmasq DaemonSet which gives more flexibility and lifts some
|
||||||
|
limitations (e.g. number of nameservers). Kubelet is instructed to use dnsmasq instead of kubedns/skydns.
|
||||||
|
It is configured to forward all DNS queries belonging to cluster services to kubedns/skydns. All
|
||||||
|
other queries are forwarded to the nameservers found in ``upstream_dns_servers`` or ``default_resolver``.
|
||||||
|
|
||||||
|
#### kubedns
|
||||||
|
This does not install the dnsmasq DaemonSet and instructs kubelet to directly use kubedns/skydns for
|
||||||
|
all queries.
|
||||||
|
|
||||||
|
#### none
|
||||||
|
This installs neither dnsmasq nor kubedns/skydns. This basically disables cluster DNS completely and
|
||||||
|
leaves you with a non-functional cluster.
|
||||||
|
|
||||||
|
## resolvconf_mode
|
||||||
|
``resolvconf_mode`` configures how Kubespray will set up DNS for ``hostNetwork: true`` PODs and non-k8s containers.
|
||||||
|
There are three modes available:
|
||||||
|
|
||||||
|
#### docker_dns (default)
|
||||||
|
This sets up the docker daemon with additional --dns/--dns-search/--dns-opt flags.
|
||||||
|
|
||||||
|
The following nameservers are added to the docker daemon (in the same order as listed here):
|
||||||
|
* cluster nameserver (depends on dns_mode)
|
||||||
|
* content of optional upstream_dns_servers variable
|
||||||
|
* host system nameservers (read from hosts /etc/resolv.conf)
|
||||||
|
|
||||||
|
The following search domains are added to the docker daemon (in the same order as listed here):
|
||||||
|
* cluster domains (``default.svc.{{ dns_domain }}``, ``svc.{{ dns_domain }}``)
|
||||||
|
* content of optional searchdomains variable
|
||||||
|
* host system search domains (read from hosts /etc/resolv.conf)
|
||||||
|
|
||||||
|
The following dns options are added to the docker daemon
|
||||||
|
* ndots:{{ ndots }}
|
||||||
|
* timeout:2
|
||||||
|
* attempts:2
|
||||||
|
|
||||||
|
For normal PODs, k8s will ignore these options and setup its own DNS settings for the PODs, taking
|
||||||
|
the --cluster_dns (either dnsmasq or kubedns, depending on dns_mode) kubelet option into account.
|
||||||
|
For ``hostNetwork: true`` PODs however, k8s will let docker setup DNS settings. Docker containers which
|
||||||
|
are not started/managed by k8s will also use these docker options.
|
||||||
|
|
||||||
|
The host system name servers are added to ensure name resolution is also working while cluster DNS is not
|
||||||
|
running yet. This is especially important in early stages of cluster deployment. In this early stage,
|
||||||
|
DNS queries to the cluster DNS will timeout after a few seconds, resulting in the system nameserver being
|
||||||
|
used as a backup nameserver. After cluster DNS is running, all queries will be answered by the cluster DNS
|
||||||
|
servers, which in turn will forward queries to the system nameserver if required.
|
||||||
|
|
||||||
|
#### host_resolvconf
|
||||||
|
This activates the classic Kubespray behaviour that modifies the host's ``/etc/resolv.conf`` file and dhclient
|
||||||
|
configuration to point to the cluster dns server (either dnsmasq or kubedns, depending on dns_mode).
|
||||||
|
|
||||||
|
As cluster DNS is not available on early deployment stage, this mode is split into 2 stages. In the first
|
||||||
|
stage (``dns_early: true``), ``/etc/resolv.conf`` is configured to use the DNS servers found in ``upstream_dns_servers``
|
||||||
|
and ``nameservers``. Later, ``/etc/resolv.conf`` is reconfigured to use the cluster DNS server first, leaving
|
||||||
|
the other nameservers as backups.
|
||||||
|
|
||||||
|
Also note, existing records will be purged from the `/etc/resolv.conf`,
|
||||||
|
including resolvconf's base/head/cloud-init config files and those that come from dhclient.
|
||||||
|
|
||||||
|
#### none
|
||||||
|
Does nothing regarding ``/etc/resolv.conf``. This leaves you with a cluster that works as expected in most cases.
|
||||||
|
The only exception is that ``hostNetwork: true`` PODs and non-k8s managed containers will not be able to resolve
|
||||||
|
cluster service names.
|
||||||
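As a sketch, the two knobs are usually set together in the inventory group vars; one possible pairing (for example on Container Linux by CoreOS hosts) is:

```
dns_mode: dnsmasq_kubedns
resolvconf_mode: host_resolvconf
```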
|
|
||||||
When dnsmasq skipped, Kargo redefines the DNS cluster IP to point directly
|
|
||||||
to SkyDns cluster IP ``skydns_server`` and configures Kubelet's
|
|
||||||
``--dns_cluster`` to use that IP as well. While this greatly simplifies
|
|
||||||
things, it comes by the price of limited nameservers though. As you know now,
|
|
||||||
the DNS cluster IP takes a slot in the ``/etc/resolv.conf``, thus you can
|
|
||||||
specify no more than a two nameservers for infra and/or external use.
|
|
||||||
Those may be specified either in ``nameservers`` or ``upstream_dns_servers``
|
|
||||||
and will be merged together with the ``skydns_server`` IP into the hots'
|
|
||||||
``/etc/resolv.conf``.
|
|
||||||
|
|
||||||
Limitations
|
Limitations
|
||||||
-----------
|
-----------
|
||||||
|
|
||||||
* Kargo has yet ways to configure Kubedns addon to forward requests SkyDns can
|
* Kubespray has no way yet to configure the Kubedns addon to forward requests SkyDns can
|
||||||
not answer with authority to arbitrary recursive resolvers. This task is left
|
not answer with authority to arbitrary recursive resolvers. This task is left
|
||||||
for future. See [official SkyDns docs](https://github.com/skynetservices/skydns)
|
for future. See [official SkyDns docs](https://github.com/skynetservices/skydns)
|
||||||
for details.
|
for details.
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
Downloading binaries and containers
|
Downloading binaries and containers
|
||||||
===================================
|
===================================
|
||||||
|
|
||||||
Kargo supports several download/upload modes. The default is:
|
Kubespray supports several download/upload modes. The default is:
|
||||||
|
|
||||||
* Each node downloads binaries and container images on its own, which is
|
* Each node downloads binaries and container images on its own, which is
|
||||||
``download_run_once: False``.
|
``download_run_once: False``.
|
||||||
|
|||||||
Binary file not shown.
|
Before Width: | Height: | Size: 654 KiB |
|
Before Width: | Height: | Size: 40 KiB After Width: | Height: | Size: 40 KiB |
@@ -1,32 +1,69 @@
|
|||||||
Getting started
|
Getting started
|
||||||
===============
|
===============
|
||||||
|
|
||||||
The easiest way to run the deployement is to use the **kargo-cli** tool.
|
The easiest way to run the deployment is to use the **kubespray-cli** tool.
|
||||||
A complete documentation can be found in its [github repository](https://github.com/kubespray/kargo-cli).
|
Complete documentation can be found in its [github repository](https://github.com/kubespray/kubespray-cli).
|
||||||
|
|
||||||
Here is a simple example on AWS:
|
Here is a simple example on AWS:
|
||||||
|
|
||||||
* Create instances and generate the inventory
|
* Create instances and generate the inventory
|
||||||
|
|
||||||
```
|
```
|
||||||
kargo aws --instances 3
|
kubespray aws --instances 3
|
||||||
```
|
```
|
||||||
|
|
||||||
* Run the deployment
|
* Run the deployment
|
||||||
|
|
||||||
```
|
```
|
||||||
kargo deploy --aws -u centos -n calico
|
kubespray deploy --aws -u centos -n calico
|
||||||
```
|
```
|
||||||
|
|
||||||
Building your own inventory
|
Building your own inventory
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
---------------------------
|
||||||
|
|
||||||
Ansible inventory can be stored in 3 formats: YAML, JSON, or inifile. There is
|
Ansible inventory can be stored in 3 formats: YAML, JSON, or INI-like. There is
|
||||||
an example inventory located
|
an example inventory located
|
||||||
[here](https://github.com/kubernetes-incubator/kargo/blob/master/inventory/inventory.example).
|
[here](https://github.com/kubernetes-incubator/kubespray/blob/master/inventory/inventory.example).
|
||||||
|
|
||||||
You can use an
|
You can use an
|
||||||
[inventory generator](https://github.com/kubernetes-incubator/kargo/blob/master/contrib/inventory_generator/inventory_generator.py)
|
[inventory generator](https://github.com/kubernetes-incubator/kubespray/blob/master/contrib/inventory_builder/inventory.py)
|
||||||
to create or modify an Ansible inventory. Currently, it is limited in
|
to create or modify an Ansible inventory. Currently, it is limited in
|
||||||
functionality and is only use for making a basic Kargo cluster, but it does
|
functionality and is only used for making a basic Kubespray cluster, but it does
|
||||||
support creating large clusters.
|
support creating large clusters. It now supports
|
||||||
|
separating etcd and Kubernetes master roles from the node role if the size exceeds a
|
||||||
|
certain threshold. Run inventory.py help for more information.
|
||||||
|
|
||||||
|
Example inventory generator usage:
|
||||||
|
|
||||||
|
```
|
||||||
|
cp -r inventory my_inventory
|
||||||
|
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
|
||||||
|
CONFIG_FILE=my_inventory/inventory.cfg python3 contrib/inventory_builder/inventory.py ${IPS[@]}
|
||||||
|
```
|
||||||
|
|
||||||
|
Starting custom deployment
|
||||||
|
--------------------------
|
||||||
|
|
||||||
|
Once you have an inventory, you may want to customize deployment data vars
|
||||||
|
and start the deployment:
|
||||||
|
|
||||||
|
**IMPORTANT: Edit my_inventory/group_vars/*.yml to override data vars**
|
||||||
|
|
||||||
|
```
|
||||||
|
ansible-playbook -i my_inventory/inventory.cfg cluster.yml -b -v \
|
||||||
|
--private-key=~/.ssh/private_key
|
||||||
|
```
|
||||||
|
|
||||||
|
See more details in the [ansible guide](ansible.md).
|
||||||
|
|
||||||
|
Adding nodes
|
||||||
|
--------------------------
|
||||||
|
|
||||||
|
You may want to add worker nodes to your existing cluster. This can be done by re-running the `cluster.yml` playbook, or you can target the bare minimum needed to get kubelet installed on the worker and talking to your masters. This is especially helpful when doing something like autoscaling your clusters.
|
||||||
|
|
||||||
|
- Add the new worker node to your inventory under kube-node (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
|
||||||
|
- Run the ansible-playbook command, substituting `scale.yml` for `cluster.yml`:
|
||||||
|
```
|
||||||
|
ansible-playbook -i my_inventory/inventory.cfg scale.yml -b -v \
|
||||||
|
--private-key=~/.ssh/private_key
|
||||||
|
```
|
||||||
@@ -11,37 +11,35 @@ achieve the same goal.
|
|||||||
Etcd
|
Etcd
|
||||||
----
|
----
|
||||||
|
|
||||||
Etcd proxies are deployed on each node in the `k8s-cluster` group. A proxy is
|
|
||||||
a separate etcd process. It has a `localhost:2379` frontend and all of the etcd
|
|
||||||
cluster members as backends. Note that the `access_ip` is used as the backend
|
|
||||||
IP, if specified. Frontend endpoints cannot be accessed externally as they are
|
|
||||||
bound to a localhost only.
|
|
||||||
|
|
||||||
The `etcd_access_endpoint` fact provides an access pattern for clients. And the
|
The `etcd_access_endpoint` fact provides an access pattern for clients. And the
|
||||||
`etcd_multiaccess` (defaults to `false`) group var controlls that behavior.
|
`etcd_multiaccess` (defaults to `True`) group var controls that behavior.
|
||||||
When enabled, it makes deployed components to access the etcd cluster members
|
It makes deployed components access the etcd cluster members
|
||||||
directly: `http://ip1:2379, http://ip2:2379,...`. This mode assumes the clients
|
directly: `http://ip1:2379, http://ip2:2379,...`. This mode assumes the clients
|
||||||
do a loadbalancing and handle HA for connections. Note, a pod definition of a
|
do load balancing and handle HA for connections.
|
||||||
flannel networking plugin always uses a single `--etcd-server` endpoint!
|
|
||||||
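A minimal sketch of pinning this behavior explicitly in the inventory group vars (assuming `group_vars/all.yml` is where cluster-wide overrides are kept):

```
# group_vars/all.yml (sketch)
# set to false to make clients use a single access endpoint
# instead of listing every etcd member directly
etcd_multiaccess: true
```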
|
|
||||||
|
|
||||||
Kube-apiserver
|
Kube-apiserver
|
||||||
--------------
|
--------------
|
||||||
|
|
||||||
K8s components require a loadbalancer to access the apiservers via a reverse
|
K8s components require a loadbalancer to access the apiservers via a reverse
|
||||||
proxy. Kargo includes support for an nginx-based proxy that resides on each
|
proxy. Kubespray includes support for an nginx-based proxy that resides on each
|
||||||
non-master Kubernetes node. This is referred to as localhost loadbalancing. It
|
non-master Kubernetes node. This is referred to as localhost loadbalancing. It
|
||||||
is less efficient than a dedicated load balancer because it creates extra
|
is less efficient than a dedicated load balancer because it creates extra
|
||||||
health checks on the Kubernetes apiserver, but is more practical for scenarios
|
health checks on the Kubernetes apiserver, but is more practical for scenarios
|
||||||
where an external LB or virtual IP management is inconvenient.
|
where an external LB or virtual IP management is inconvenient. This option is
|
||||||
|
configured by the variable `loadbalancer_apiserver_localhost` (defaults to `True`).
|
||||||
|
You may also define the port the local internal loadbalancer uses by changing
|
||||||
|
`nginx_kube_apiserver_port`. This defaults to the value of `kube_apiserver_port`.
|
||||||
|
It is also important to note that Kubespray will only configure kubelet and kube-proxy
|
||||||
|
on non-master nodes to use the local internal loadbalancer.
|
||||||
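A minimal sketch of these settings in the inventory group vars (the port value is only an example):

```
# group_vars/all.yml (sketch)
loadbalancer_apiserver_localhost: true
# optional: port the local nginx proxy listens on
nginx_kube_apiserver_port: 8383
```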
|
|
||||||
This option is configured by the variable `loadbalancer_apiserver_localhost`.
|
If you choose to NOT use the local internal loadbalancer, you will need to configure
|
||||||
you will need to configure your own loadbalancer to achieve HA. Note that
|
your own loadbalancer to achieve HA. Note that deploying a loadbalancer is up to
|
||||||
deploying a loadbalancer is up to a user and is not covered by ansible roles
|
a user and is not covered by ansible roles in Kubespray. By default, it only configures
|
||||||
in Kargo. By default, it only configures a non-HA endpoint, which points to
|
a non-HA endpoint, which points to the `access_ip` or IP address of the first server
|
||||||
the `access_ip` or IP address of the first server node in the `kube-master`
|
node in the `kube-master` group. It can also configure clients to use endpoints
|
||||||
group. It can also configure clients to use endpoints for a given loadbalancer
|
for a given loadbalancer type. The following diagram shows how traffic to the
|
||||||
type. The following diagram shows how traffic to the apiserver is directed.
|
apiserver is directed.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
@@ -63,8 +61,8 @@ listen kubernetes-apiserver-https
|
|||||||
mode tcp
|
mode tcp
|
||||||
timeout client 3h
|
timeout client 3h
|
||||||
timeout server 3h
|
timeout server 3h
|
||||||
server master1 <IP1>:443
|
server master1 <IP1>:6443
|
||||||
server master2 <IP2>:443
|
server master2 <IP2>:6443
|
||||||
balance roundrobin
|
balance roundrobin
|
||||||
```
|
```
|
||||||
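To point clients at an external loadbalancer such as the HAProxy above, the `apiserver_loadbalancer_domain_name` and `loadbalancer_apiserver` vars referenced in the table below are typically defined in the inventory group vars; a sketch with placeholder values:

```
# group_vars/all.yml (sketch, placeholder address and port)
apiserver_loadbalancer_domain_name: "lb.example.local"
loadbalancer_apiserver:
  address: 10.10.1.100
  port: 6443
```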
|
|
||||||
@@ -90,16 +88,18 @@ Access endpoints are evaluated automagically, as the following:
|
|||||||
|
|
||||||
| Endpoint type | kube-master | non-master |
|
| Endpoint type | kube-master | non-master |
|
||||||
|------------------------------|---------------|---------------------|
|
|------------------------------|---------------|---------------------|
|
||||||
| Local LB | http://lc:p | https://lc:sp |
|
| Local LB (default) | http://lc:p | https://lc:nsp |
|
||||||
| External LB, no internal | https://lb:lp | https://lb:lp |
|
| External LB, no internal | https://lb:lp | https://lb:lp |
|
||||||
| No ext/int LB (default) | http://lc:p | https://m[0].aip:sp |
|
| No ext/int LB | http://lc:p | https://m[0].aip:sp |
|
||||||
|
|
||||||
Where:
|
Where:
|
||||||
* `m[0]` - the first node in the `kube-master` group;
|
* `m[0]` - the first node in the `kube-master` group;
|
||||||
* `lb` - LB FQDN, `apiserver_loadbalancer_domain_name`;
|
* `lb` - LB FQDN, `apiserver_loadbalancer_domain_name`;
|
||||||
* `lc` - localhost;
|
* `lc` - localhost;
|
||||||
* `p` - insecure port, `kube_apiserver_insecure_port`
|
* `p` - insecure port, `kube_apiserver_insecure_port`;
|
||||||
|
* `nsp` - nginx secure port, `nginx_kube_apiserver_port`;
|
||||||
* `sp` - secure port, `kube_apiserver_port`;
|
* `sp` - secure port, `kube_apiserver_port`;
|
||||||
* `lp` - LB port, `loadbalancer_apiserver.port`, defers to the secure port;
|
* `lp` - LB port, `loadbalancer_apiserver.port`, defers to the secure port;
|
||||||
* `ip` - the node IP, defers to the ansible IP;
|
* `ip` - the node IP, defers to the ansible IP;
|
||||||
* `aip` - `access_ip`, defers to the ip.
|
* `aip` - `access_ip`, defers to the ip.
|
||||||
|
|
||||||
|
|||||||
121
docs/integration.md
Normal file
121
docs/integration.md
Normal file
@@ -0,0 +1,121 @@
|
|||||||
|
# Kubespray (kargo) in your own ansible playbooks repo
|
||||||
|
|
||||||
|
1. Fork [kubespray repo](https://github.com/kubernetes-incubator/kubespray) to your personal/organisation account on github.
|
||||||
|
Note:
|
||||||
|
* All forked public repos at github will also be public, so **never commit sensitive data to your public forks**.
|
||||||
|
* A list of all forked repos can be retrieved from the github page of the original project.
|
||||||
|
|
||||||
|
2. Add the **forked repo** as a submodule to the desired folder in your existing ansible repo (for example 3d/kubespray):
|
||||||
|
```git submodule add https://github.com/YOUR_GITHUB/kubespray.git kubespray```
|
||||||
|
Git will create a _.gitmodules_ file in your existing ansible repo:
|
||||||
|
```
|
||||||
|
[submodule "3d/kubespray"]
|
||||||
|
path = 3d/kubespray
|
||||||
|
url = https://github.com/YOUR_GITHUB/kubespray.git
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Configure git to show submodule status:
|
||||||
|
```git config --global status.submoduleSummary true```
|
||||||
|
|
||||||
|
4. Add *original* kubespray repo as upstream:
|
||||||
|
```git remote add upstream https://github.com/kubernetes-incubator/kubespray.git```
|
||||||
|
|
||||||
|
5. Sync your master branch with upstream:
|
||||||
|
```
|
||||||
|
git checkout master
|
||||||
|
git fetch upstream
|
||||||
|
git merge upstream/master
|
||||||
|
git push origin master
|
||||||
|
```
|
||||||
|
|
||||||
|
6. Create a new branch which you will use in your working environment:
|
||||||
|
```git checkout -b work```
|
||||||
|
***Never*** use master branch of your repository for your commits.
|
||||||
|
|
||||||
|
7. Modify the path to the library and roles in your ansible.cfg file (role naming should be unique; you may have to rename your existing roles if they have the same names as roles in the kubespray project):
|
||||||
|
```
|
||||||
|
...
|
||||||
|
library = 3d/kubespray/library/
|
||||||
|
roles_path = 3d/kubespray/roles/
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
8. Copy and modify configs from the kubespray `group_vars` folder to the corresponding `group_vars` folder in your existing project.
|
||||||
|
You could rename the *all.yml* config to something else, e.g. *kubespray.yml*, and create a corresponding group in your inventory file, which will include all host groups related to the kubernetes setup.
|
||||||
|
|
||||||
|
9. Modify your ansible inventory file by adding a mapping of your existing groups (if any) to the kubespray naming.
|
||||||
|
For example:
|
||||||
|
```
|
||||||
|
...
|
||||||
|
#Kargo groups:
|
||||||
|
[kube-node:children]
|
||||||
|
kubenode
|
||||||
|
|
||||||
|
[k8s-cluster:children]
|
||||||
|
kubernetes
|
||||||
|
|
||||||
|
[etcd:children]
|
||||||
|
kubemaster
|
||||||
|
kubemaster-ha
|
||||||
|
|
||||||
|
[kube-master:children]
|
||||||
|
kubemaster
|
||||||
|
kubemaster-ha
|
||||||
|
|
||||||
|
[vault:children]
|
||||||
|
kube-master
|
||||||
|
|
||||||
|
[kubespray:children]
|
||||||
|
kubernetes
|
||||||
|
```
|
||||||
|
* The last entry here is needed to apply the kubespray.yml config file, renamed from all.yml of the kubespray project.
|
||||||
|
|
||||||
|
10. Now you can include kargo tasks in your existing playbooks by including the cluster.yml file:
|
||||||
|
```
|
||||||
|
- name: Include kargo tasks
|
||||||
|
include: 3d/kubespray/cluster.yml
|
||||||
|
```
|
||||||
|
Or you could copy separate tasks from cluster.yml into your ansible repository.
|
||||||
|
|
||||||
|
11. Commit the changes to your ansible repo. Keep in mind that the submodule folder is just a link to the git commit hash of your forked repo.
|
||||||
|
When you update your "work" branch you need to commit changes to ansible repo as well.
|
||||||
|
Other members of your team should use ```git submodule sync```, ```git submodule update --init``` to get the actual code from the submodule.
|
||||||
|
|
||||||
|
# Contributing
|
||||||
|
If you made useful changes or fixed a bug in the existing kubespray repo, use this flow for PRs to the original kubespray repo.
|
||||||
|
|
||||||
|
0. Sign the [CNCF CLA](https://github.com/kubernetes/kubernetes/wiki/CLA-FAQ).
|
||||||
|
|
||||||
|
1. Change working directory to git submodule directory (3d/kubespray).
|
||||||
|
|
||||||
|
2. Set up the desired user.name and user.email for the submodule.
|
||||||
|
If kubespray is the only submodule in your repo, you could use something like:
|
||||||
|
```git submodule foreach --recursive 'git config user.name "First Last" && git config user.email "your-email-addres@used.for.cncf"'```
|
||||||
|
|
||||||
|
3. Sync with upstream master:
|
||||||
|
```
|
||||||
|
git fetch upstream
|
||||||
|
git merge upstream/master
|
||||||
|
git push origin master
|
||||||
|
```
|
||||||
|
4. Create a new branch for the specific fixes that you want to contribute:
|
||||||
|
```git checkout -b fixes-name-date-index```
|
||||||
|
The branch name should be self-explanatory; adding a date and/or index will help you track/delete your old PRs.
|
||||||
|
|
||||||
|
5. Find the git hash of your commit in the "work" branch and apply it to the newly created "fix" branch:
|
||||||
|
```
|
||||||
|
git cherry-pick <COMMIT_HASH>
|
||||||
|
```
|
||||||
|
6. If you have several temporary-stage commits, squash them using [```git rebase -i```](http://eli.thegreenplace.net/2014/02/19/squashing-github-pull-requests-into-a-single-commit)
|
||||||
|
You could also use an interactive rebase (```git rebase -i HEAD~10```) to delete commits which you don't want to contribute to the original repo.
|
||||||
|
|
||||||
|
7. When your changes are in place, you need to check the upstream repo one more time because it could have changed during your work.
|
||||||
|
Check that you're on the correct branch:
|
||||||
|
```git status```
|
||||||
|
And pull changes from upstream (if any):
|
||||||
|
```git pull --rebase upstream master```
|
||||||
|
|
||||||
|
8. Now push your changes to your **fork** repo with ```git push```. If your branch doesn't exist on github, git will suggest something like ```git push --set-upstream origin fixes-name-date-index```.
|
||||||
|
|
||||||
|
9. Open your forked repo in a browser; on the main page you will see a suggestion to create a pull request for your newly created branch. Check the proposed diff of your PR. If something is wrong you can safely delete the "fix" branch on github using ```git push origin --delete fixes-name-date-index``` and ```git branch -D fixes-name-date-index```, and start the whole process from the beginning.
|
||||||
|
If everything is fine, add a description of your changes (what they do and why they're needed) and confirm the pull request creation.
|
||||||
103
docs/kubernetes-reliability.md
Normal file
103
docs/kubernetes-reliability.md
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
# Overview
|
||||||
|
|
||||||
|
Distributed systems such as Kubernetes are designed to be resilient to
|
||||||
|
failures. More details about Kubernetes High-Availability (HA) may be found at
|
||||||
|
[Building High-Availability Clusters](https://kubernetes.io/docs/admin/high-availability/)
|
||||||
|
|
||||||
|
To keep the view simple, most parts of HA will be skipped, describing
|
||||||
|
Kubelet<->Controller Manager communication only.
|
||||||
|
|
||||||
|
By default the normal behavior looks like:
|
||||||
|
|
||||||
|
1. Kubelet updates its status to the apiserver periodically, as specified by
|
||||||
|
`--node-status-update-frequency`. The default value is **10s**.
|
||||||
|
|
||||||
|
2. Kubernetes controller manager checks the statuses of Kubelets every
|
||||||
|
`--node-monitor-period`. The default value is **5s**.
|
||||||
|
|
||||||
|
3. In case the status is updated within `--node-monitor-grace-period` of time,
|
||||||
|
the Kubernetes controller manager considers the Kubelet healthy. The
|
||||||
|
default value is **40s**.
|
||||||
|
|
||||||
|
> Kubernetes controller manager and Kubelets work asynchronously. It means that
|
||||||
|
> the delay may include any network latency, API Server latency, etcd latency,
|
||||||
|
> latency caused by load on one's master nodes and so on. So if
|
||||||
|
> `--node-status-update-frequency` is set to 5s in reality it may appear in
|
||||||
|
> etcd in 6-7 seconds or even longer when etcd cannot commit data to quorum
|
||||||
|
> nodes.
|
||||||
|
|
||||||
|
# Failure
|
||||||
|
|
||||||
|
Kubelet will try to make `nodeStatusUpdateRetry` post attempts. Currently
|
||||||
|
`nodeStatusUpdateRetry` is a constant set to 5 in
|
||||||
|
[kubelet.go](https://github.com/kubernetes/kubernetes/blob/release-1.5/pkg/kubelet/kubelet.go#L102).
|
||||||
|
|
||||||
|
Kubelet will try to update the status in
|
||||||
|
[tryUpdateNodeStatus](https://github.com/kubernetes/kubernetes/blob/release-1.5/pkg/kubelet/kubelet_node_status.go#L345)
|
||||||
|
function. Kubelet uses Golang's `http.Client()`, but has no specified
|
||||||
|
timeout. Thus there may be some glitches when the API Server is overloaded while
|
||||||
|
the TCP connection is established.
|
||||||
|
|
||||||
|
So, there will be `nodeStatusUpdateRetry` * `--node-status-update-frequency`
|
||||||
|
attempts to set the status of a node.
|
||||||
|
|
||||||
|
At the same time Kubernetes controller manager will try to check
|
||||||
|
`nodeStatusUpdateRetry` times every `--node-monitor-period` of time. After
|
||||||
|
`--node-monitor-grace-period` it will consider node unhealthy. It will remove
|
||||||
|
its pods based on `--pod-eviction-timeout`.
|
||||||
|
|
||||||
|
Kube proxy has a watcher over the API. Once pods are evicted, Kube proxy will
|
||||||
|
notice and will update the iptables of the node. It will remove endpoints from
|
||||||
|
services so pods from the failed node won't be accessible anymore.
|
||||||
|
|
||||||
|
# Recommendations for different cases
|
||||||
|
|
||||||
|
## Fast Update and Fast Reaction
|
||||||
|
|
||||||
|
If `--node-status-update-frequency` is set to **4s** (10s is default),
|
||||||
|
`--node-monitor-period` to **2s** (5s is default),
|
||||||
|
`--node-monitor-grace-period` to **20s** (40s is default),
|
||||||
|
and `--pod-eviction-timeout` to **30s** (5m is default).
|
||||||
|
|
||||||
|
In such a scenario, pods will be evicted in **50s** because the node will be
|
||||||
|
considered as down after **20s**, and `--pod-eviction-timeout` occurs after
|
||||||
|
**30s** more. However, this scenario creates an overhead on etcd as every node
|
||||||
|
will try to update its status every 2 seconds.
|
||||||
|
|
||||||
|
If the environment has 1000 nodes, there will be 15000 node updates per
|
||||||
|
minute which may require large etcd containers or even dedicated nodes for etcd.
|
||||||
|
|
||||||
|
> If we calculate the number of tries, the division will give 5, but in reality
|
||||||
|
> it will be from 3 to 5 with `nodeStatusUpdateRetry` attempts of each try. The
|
||||||
|
> total number of attempts will vary from 15 to 25 due to latency of all
|
||||||
|
> components.
|
||||||
|
|
||||||
|
## Medium Update and Average Reaction
|
||||||
|
|
||||||
|
Let's set `--node-status-update-frequency` to **20s**,
|
||||||
|
`--node-monitor-grace-period` to **2m** and `--pod-eviction-timeout` to **1m**.
|
||||||
|
In that case, Kubelet will try to update status every 20s. So, it will be 6 * 5
|
||||||
|
= 30 attempts before the Kubernetes controller manager considers the node
|
||||||
|
status unhealthy. After 1m it will evict all pods. The total time will be 3m
|
||||||
|
before the eviction process starts.
|
||||||
|
|
||||||
|
Such a scenario is good for medium environments, as 1000 nodes will require 3000
|
||||||
|
etcd updates per minute.
|
||||||
|
|
||||||
|
> In reality, there will be from 4 to 6 node update tries. The total number of
|
||||||
|
> attempts will vary from 20 to 30.
|
||||||
|
|
||||||
|
## Low Update and Slow reaction
|
||||||
|
|
||||||
|
Let's set `--node-status-update-frequency` to **1m**.
|
||||||
|
`--node-monitor-grace-period` will be set to **5m** and `--pod-eviction-timeout`
|
||||||
|
to **1m**. In this scenario, every kubelet will try to update the status every
|
||||||
|
minute. There will be 5 * 5 = 25 attempts before unhealthy status. After 5m,
|
||||||
|
the Kubernetes controller manager will set the unhealthy status. This means that pods
|
||||||
|
will be evicted 1m after being marked unhealthy (6m in total).
|
||||||
|
|
||||||
|
> In reality, there will be from 3 to 5 tries. The total number of attempts will
|
||||||
|
> vary from 15 to 25.
|
||||||
|
|
||||||
|
There can be different combinations such as Fast Update with Slow reaction to
|
||||||
|
satisfy specific cases.
|
||||||
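In Kubespray, these flags map to the ``kubelet_status_update_frequency``, ``kube_controller_node_monitor_period``, ``kube_controller_node_monitor_grace_period`` and ``kube_controller_pod_eviction_timeout`` group vars. A sketch of the Medium Update / Average Reaction profile above (the file location is an assumption):

```
# inventory group_vars (sketch)
kubelet_status_update_frequency: 20s
kube_controller_node_monitor_period: 5s
kube_controller_node_monitor_grace_period: 2m
kube_controller_pod_eviction_timeout: 1m
```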
@@ -3,7 +3,8 @@ Large deployments of K8s
|
|||||||
|
|
||||||
For a large scaled deployments, consider the following configuration changes:
|
For a large scaled deployments, consider the following configuration changes:
|
||||||
|
|
||||||
* Tune [ansible settings](http://docs.ansible.com/ansible/intro_configuration.html)
|
* Tune [ansible settings]
|
||||||
|
(http://docs.ansible.com/ansible/intro_configuration.html)
|
||||||
for `forks` and `timeout` vars to fit large numbers of nodes being deployed.
|
for `forks` and `timeout` vars to fit large numbers of nodes being deployed.
|
||||||
|
|
||||||
* Override containers' `foo_image_repo` vars to point to intranet registry.
|
* Override containers' `foo_image_repo` vars to point to intranet registry.
|
||||||
@@ -23,9 +24,25 @@ For a large scaled deployments, consider the following configuration changes:
|
|||||||
* Tune CPU/memory limits and requests. Those are located in roles' defaults
|
* Tune CPU/memory limits and requests. Those are located in roles' defaults
|
||||||
and named like ``foo_memory_limit``, ``foo_memory_requests`` and
|
and named like ``foo_memory_limit``, ``foo_memory_requests`` and
|
||||||
``foo_cpu_limit``, ``foo_cpu_requests``. Note that 'Mi' memory units for K8s
|
``foo_cpu_limit``, ``foo_cpu_requests``. Note that 'Mi' memory units for K8s
|
||||||
will be submitted as 'M', if applied for ``docker run``, and cpu K8s units will
|
will be submitted as 'M', if applied for ``docker run``, and cpu K8s units
|
||||||
end up with the 'm' skipped for docker as well. This is required as docker does not
|
will end up with the 'm' skipped for docker as well. This is required as
|
||||||
understand k8s units well.
|
docker does not understand k8s units well.
|
||||||
|
|
||||||
|
* Tune ``kubelet_status_update_frequency`` to increase reliability of kubelet.
|
||||||
|
``kube_controller_node_monitor_grace_period``,
|
||||||
|
``kube_controller_node_monitor_period``,
|
||||||
|
``kube_controller_pod_eviction_timeout`` for better Kubernetes reliability.
|
||||||
|
Check out [Kubernetes Reliability](kubernetes-reliability.md)
|
||||||
|
|
||||||
|
* Add calico-rr nodes if you are deploying with Calico or Canal. Nodes recover
|
||||||
|
from host/network interruption much quicker with calico-rr. Note that
|
||||||
|
calico-rr role must be on a host without kube-master or kube-node role (but
|
||||||
|
etcd role is okay).
|
||||||
|
|
||||||
|
* Check out the
|
||||||
|
[Inventory](getting-started.md#building-your-own-inventory)
|
||||||
|
section of the Getting started guide for tips on creating a large scale
|
||||||
|
Ansible inventory.
|
||||||
|
|
||||||
For example, when deploying 200 nodes, you may want to run ansible with
|
For example, when deploying 200 nodes, you may want to run ansible with
|
||||||
``--forks=50``, ``--timeout=600`` and define the ``retry_stagger: 60``.
|
``--forks=50``, ``--timeout=600`` and define the ``retry_stagger: 60``.
|
||||||
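A sketch of such an invocation (``retry_stagger`` can alternatively be set in the inventory group vars rather than passed with ``-e``):

```
ansible-playbook -i my_inventory/inventory.cfg cluster.yml -b \
  --forks=50 --timeout=600 -e retry_stagger=60
```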
|
|||||||
@@ -1,8 +1,8 @@
|
|||||||
Network Checker Application
|
Network Checker Application
|
||||||
===========================
|
===========================
|
||||||
|
|
||||||
With the ``deploy_netchecker`` var enabled (defaults to false), Kargo deploys a
|
With the ``deploy_netchecker`` var enabled (defaults to false), Kubespray deploys a
|
||||||
Network Checker Application from the 3rd side `l23network/mcp-netchecker` docker
|
Network Checker Application from the 3rd side `l23network/k8s-netchecker` docker
|
||||||
images. It consists of the server and agents trying to reach the server by usual
|
images. It consists of a server and agents that try to reach the server via the
|
||||||
for Kubernetes applications network connectivity meanings. Therefore, this
|
usual network connectivity means for Kubernetes applications. Therefore, this
|
||||||
automagically verifies a pod to pod connectivity via the cluster IP and checks
|
automagically verifies pod to pod connectivity via the cluster IP and checks
|
||||||
@@ -17,7 +17,7 @@ any of the cluster nodes:
|
|||||||
```
|
```
|
||||||
curl http://localhost:31081/api/v1/connectivity_check
|
curl http://localhost:31081/api/v1/connectivity_check
|
||||||
```
|
```
|
||||||
Note that Kargo does not invoke the check but only deploys the application, if
|
Note that Kubespray does not invoke the check but only deploys the application, if
|
||||||
requested.
|
requested.
|
||||||
|
|
||||||
There are related application specifc variables:
|
There are related application-specific variables:
|
||||||
@@ -25,8 +25,8 @@ There are related application specifc variables:
|
|||||||
netchecker_port: 31081
|
netchecker_port: 31081
|
||||||
agent_report_interval: 15
|
agent_report_interval: 15
|
||||||
netcheck_namespace: default
|
netcheck_namespace: default
|
||||||
agent_img: "quay.io/l23network/mcp-netchecker-agent:v0.1"
|
agent_img: "quay.io/l23network/k8s-netchecker-agent:v1.0"
|
||||||
server_img: "quay.io/l23network/mcp-netchecker-server:v0.1"
|
server_img: "quay.io/l23network/k8s-netchecker-server:v1.0"
|
||||||
```
|
```
|
||||||
|
|
||||||
Note that the application verifies DNS resolve for FQDNs comprising only the
|
Note that the application verifies DNS resolve for FQDNs comprising only the
|
||||||
|
|||||||
@@ -35,14 +35,12 @@ Then you can use the instance ids to find the connected [neutron](https://wiki.o
|
|||||||
| 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | e1f48aad-df96-4bce-bf61-62ae12bf3f95 |
|
| 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | e1f48aad-df96-4bce-bf61-62ae12bf3f95 |
|
||||||
| e5ae2045-a1e1-4e99-9aac-4353889449a7 | 725cd548-6ea3-426b-baaa-e7306d3c8052 |
|
| e5ae2045-a1e1-4e99-9aac-4353889449a7 | 725cd548-6ea3-426b-baaa-e7306d3c8052 |
|
||||||
|
|
||||||
Given the port ids on the left, you can set the `allowed_address_pairs` in neutron:
|
Given the port ids on the left, you can set the `allowed_address_pairs` in neutron.
|
||||||
|
Note that you have to allow both `kube_service_addresses` (default `10.233.0.0/18`)
|
||||||
|
and `kube_pods_subnet` (default `10.233.64.0/18`).
|
||||||
|
|
||||||
# allow kube_service_addresses network
|
# allow kube_service_addresses and kube_pods_subnet network
|
||||||
neutron port-update 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18
|
neutron port-update 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18 ip_address=10.233.64.0/18
|
||||||
neutron port-update e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18
|
neutron port-update e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18 ip_address=10.233.64.0/18
|
||||||
|
|
||||||
# allow kube_pods_subnet network
|
|
||||||
neutron port-update 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address_pairs list=true type=dict ip_address=10.233.64.0/18
|
|
||||||
neutron port-update e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address_pairs list=true type=dict ip_address=10.233.64.0/18
|
|
||||||
|
|
||||||
Now you can finally run the playbook.
|
Now you can finally run the playbook.
|
||||||
|
|||||||
@@ -1,65 +1,66 @@
|
|||||||
Kargo's roadmap
|
Kubespray's roadmap
|
||||||
=================
|
=================
|
||||||
|
|
||||||
### Kubeadm
|
### Kubeadm
|
||||||
- Propose kubeadm as an option in order to setup the kubernetes cluster.
|
- Propose kubeadm as an option in order to setup the kubernetes cluster.
|
||||||
That would probably improve deployment speed and certs management [#553](https://github.com/kubespray/kargo/issues/553)
|
That would probably improve deployment speed and certs management [#553](https://github.com/kubespray/kubespray/issues/553)
|
||||||
|
|
||||||
### Self deployment (pull-mode) [#320](https://github.com/kubespray/kargo/issues/320)
|
### Self deployment (pull-mode) [#320](https://github.com/kubespray/kubespray/issues/320)
|
||||||
- the playbook would install and configure docker/rkt and the etcd cluster
|
- the playbook would install and configure docker/rkt and the etcd cluster
|
||||||
- the following data would be inserted into etcd: certs,tokens,users,inventory,group_vars.
|
- the following data would be inserted into etcd: certs,tokens,users,inventory,group_vars.
|
||||||
- a "kubespray" container would be deployed (kargo-cli, ansible-playbook, kpm)
|
- a "kubespray" container would be deployed (kubespray-cli, ansible-playbook, kpm)
|
||||||
- to be discussed, a way to provide the inventory
|
- to be discussed, a way to provide the inventory
|
||||||
- **self deployment** of the node from inside a container [#321](https://github.com/kubespray/kargo/issues/321)
|
- **self deployment** of the node from inside a container [#321](https://github.com/kubespray/kubespray/issues/321)
|
||||||
|
|
||||||
### Provisionning and cloud providers
|
### Provisioning and cloud providers
|
||||||
- Terraform to provision instances on **GCE, AWS, Openstack, Digital Ocean, Azure**
|
- [ ] Terraform to provision instances on **GCE, AWS, Openstack, Digital Ocean, Azure**
|
||||||
- On AWS autoscaling, multi AZ
|
- [ ] On AWS autoscaling, multi AZ
|
||||||
- On Azure autoscaling, create loadbalancer [#297](https://github.com/kubespray/kargo/issues/297)
|
- [ ] On Azure autoscaling, create loadbalancer [#297](https://github.com/kubespray/kubespray/issues/297)
|
||||||
- On GCE be able to create a loadbalancer automatically (IAM ?) [#280](https://github.com/kubespray/kargo/issues/280)
|
- [ ] On GCE be able to create a loadbalancer automatically (IAM ?) [#280](https://github.com/kubespray/kubespray/issues/280)
|
||||||
- **TLS boostrap** support for kubelet [#234](https://github.com/kubespray/kargo/issues/234)
|
- [x] **TLS bootstrap** support for kubelet [#234](https://github.com/kubespray/kubespray/issues/234)
|
||||||
(related issues: https://github.com/kubernetes/kubernetes/pull/20439 <br>
|
(related issues: https://github.com/kubernetes/kubernetes/pull/20439 <br>
|
||||||
https://github.com/kubernetes/kubernetes/issues/18112)
|
https://github.com/kubernetes/kubernetes/issues/18112)
|
||||||
|
|
||||||
### Tests
|
### Tests
|
||||||
- Run kubernetes e2e tests
|
- [x] Run kubernetes e2e tests
|
||||||
- migrate to jenkins
|
- [x] migrate to jenkins
|
||||||
(a test is currently a deployment on a 3 node cluste, testing k8s api, ping between 2 pods)
|
(a test is currently a deployment on a 3 node cluster, testing k8s api, ping between 2 pods)
|
||||||
- Full tests on GCE per day (All OS's, all network plugins)
|
- [x] Full tests on GCE per day (All OS's, all network plugins)
|
||||||
- trigger a single test per pull request
|
- [x] trigger a single test per pull request
|
||||||
- single test with the Ansible version n-1 per day
|
- [ ] ~~single test with the Ansible version n-1 per day~~
|
||||||
- Test idempotency on on single OS but for all network plugins/container engines
|
- [x] Test idempotency on on single OS but for all network plugins/container engines
|
||||||
- single test on AWS per day
|
- [ ] single test on AWS per day
|
||||||
- test different achitectures :
|
- [x] test different architectures:
|
||||||
- 3 instances, 3 are members of the etcd cluster, 2 of them acting as master and node, 1 as node
|
- 3 instances, 3 are members of the etcd cluster, 2 of them acting as master and node, 1 as node
|
||||||
- 5 instances, 3 are etcd and nodes, 2 are masters only
|
- 5 instances, 3 are etcd and nodes, 2 are masters only
|
||||||
- 7 instances, 3 etcd only, 2 masters, 2 nodes
|
- 7 instances, 3 etcd only, 2 masters, 2 nodes
|
||||||
- test scale up cluster: +1 etcd, +1 master, +1 node
|
- [ ] test scale up cluster: +1 etcd, +1 master, +1 node
|
||||||
|
|
||||||
### Lifecycle
|
### Lifecycle
|
||||||
- Adopt the kubeadm tool by delegating CM tasks it is capable to accomplish well [#553](https://github.com/kubespray/kargo/issues/553)
|
- [ ] Adopt the kubeadm tool by delegating CM tasks it is capable to accomplish well [#553](https://github.com/kubespray/kubespray/issues/553)
|
||||||
- Drain worker node when upgrading k8s components in a worker node. [#154](https://github.com/kubespray/kargo/issues/154)
|
- [x] Drain worker node when upgrading k8s components in a worker node. [#154](https://github.com/kubespray/kubespray/issues/154)
|
||||||
- Drain worker node when shutting down/deleting an instance
|
- [ ] Drain worker node when shutting down/deleting an instance
|
||||||
|
- [ ] Upgrade granularity: select components to upgrade and skip others
|
||||||
|
|
||||||
### Networking
|
### Networking
|
||||||
- romana.io support [#160](https://github.com/kubespray/kargo/issues/160)
|
- [ ] romana.io support [#160](https://github.com/kubespray/kubespray/issues/160)
|
||||||
- Configure network policy for Calico. [#159](https://github.com/kubespray/kargo/issues/159)
|
- [ ] Configure network policy for Calico. [#159](https://github.com/kubespray/kubespray/issues/159)
|
||||||
- Opencontrail
|
- [ ] Opencontrail
|
||||||
- Canal
|
- [x] Canal
|
||||||
- Cloud Provider native networking (instead of our network plugins)
|
- [x] Cloud Provider native networking (instead of our network plugins)
|
||||||
|
|
||||||
### High availability
|
### High availability
|
||||||
- (to be discussed) option to set a loadbalancer for the apiservers like ucarp/packemaker/keepalived
|
- (to be discussed) option to set a loadbalancer for the apiservers like ucarp/pacemaker/keepalived
|
||||||
While waiting for the issue [kubernetes/kubernetes#18174](https://github.com/kubernetes/kubernetes/issues/18174) to be fixed.
|
While waiting for the issue [kubernetes/kubernetes#18174](https://github.com/kubernetes/kubernetes/issues/18174) to be fixed.
|
||||||
|
|
||||||
### Kargo-cli
|
### Kubespray-cli
|
||||||
- Delete instances
|
- Delete instances
|
||||||
- `kargo vagrant` to setup a test cluster locally
|
- `kubespray vagrant` to setup a test cluster locally
|
||||||
- `kargo azure` for Microsoft Azure support
|
- `kubespray azure` for Microsoft Azure support
|
||||||
- switch to Terraform instead of Ansible for provisionning
|
- switch to Terraform instead of Ansible for provisioning
|
||||||
- update $HOME/.kube/config when a cluster is deployed. Optionally switch to this context
|
- update $HOME/.kube/config when a cluster is deployed. Optionally switch to this context
|
||||||
|
|
||||||
### Kargo API
|
### Kubespray API
|
||||||
- Perform all actions through an **API**
|
- Perform all actions through an **API**
|
||||||
- Store inventories / configurations of mulltiple clusters
|
- Store inventories / configurations of multiple clusters
|
||||||
- make sure that state of cluster is completely saved in no more than one config file beyond hosts inventory
|
- make sure that state of cluster is completely saved in no more than one config file beyond hosts inventory
|
||||||
@@ -72,7 +73,7 @@ Include optionals deployments to init the cluster:
|
|||||||
|
|
||||||
##### Others
|
##### Others
|
||||||
|
|
||||||
##### Dashboards:
|
##### Dashboards:
|
||||||
- kubernetes-dashboard
|
- kubernetes-dashboard
|
||||||
- Fabric8
|
- Fabric8
|
||||||
- Tectonic
|
- Tectonic
|
||||||
@@ -86,8 +87,8 @@ Include optionals deployments to init the cluster:
|
|||||||
### Others
|
### Others
|
||||||
- remove nodes (adding is already supported)
|
- remove nodes (adding is already supported)
|
||||||
- being able to choose any k8s version (almost done)
|
- being able to choose any k8s version (almost done)
|
||||||
- **rkt** support [#59](https://github.com/kubespray/kargo/issues/59)
|
- **rkt** support [#59](https://github.com/kubespray/kubespray/issues/59)
|
||||||
- Review documentation (split in categories)
|
- Review documentation (split in categories)
|
||||||
- **consul** -> if officialy supported by k8s
|
- **consul** -> if officially supported by k8s
|
||||||
- flex volumes options (e.g. **torrus** support) [#312](https://github.com/kubespray/kargo/issues/312)
|
- flex volumes options (e.g. **torrus** support) [#312](https://github.com/kubespray/kubespray/issues/312)
|
||||||
- Clusters federation option (aka **ubernetes**) [#329](https://github.com/kubespray/kargo/issues/329)
|
- Clusters federation option (aka **ubernetes**) [#329](https://github.com/kubespray/kubespray/issues/329)
|
||||||
|
|||||||
@@ -4,25 +4,40 @@ Travis CI test matrix
|
|||||||
GCE instances
|
GCE instances
|
||||||
-------------
|
-------------
|
||||||
|
|
||||||
Here is the test matrix for the Travis CI gates:
|
Here is the test matrix for the CI gates:
|
||||||
|
|
||||||
| Network plugin| OS type| GCE region| Nodes layout|
|
| Network plugin| OS type| GCE region| Nodes layout|
|
||||||
|-------------------------|-------------------------|-------------------------|-------------------------|
|
|-------------------------|-------------------------|-------------------------|-------------------------|
|
||||||
| canal| debian-8-kubespray| asia-east1-a| ha|
|
| canal| debian-8-kubespray| asia-east1-a| ha-scale|
|
||||||
| calico| debian-8-kubespray| europe-west1-c| default|
|
| calico| debian-8-kubespray| europe-west1-c| default|
|
||||||
| flannel| centos-7| asia-northeast1-c| default|
|
| flannel| centos-7| asia-northeast1-c| default|
|
||||||
| calico| centos-7| us-central1-b| ha|
|
| calico| centos-7| us-central1-b| ha|
|
||||||
| weave| rhel-7| us-east1-c| default|
|
| weave| rhel-7| us-east1-c| default|
|
||||||
| canal| coreos-stable| us-west1-b| default|
|
| canal| coreos-stable| us-west1-b| ha-scale|
|
||||||
| canal| rhel-7| asia-northeast1-b| separate|
|
| canal| rhel-7| asia-northeast1-b| separate|
|
||||||
| weave| ubuntu-1604-xenial| europe-west1-d| separate|
|
| weave| ubuntu-1604-xenial| europe-west1-d| separate|
|
||||||
| calico| coreos-stable| us-central1-f| separate|
|
| calico| coreos-stable| us-central1-f| separate|
|
||||||
|
|
||||||
Where the nodes layout `default` is a non-HA two nodes setup with the separate `kube-node`
|
|
||||||
and the `etcd` group merged with the `kube-master`. The `separate` layout is when
|
Node Layouts
|
||||||
there is only node of each type, which is a kube master, compute and etcd cluster member.
|
------------
|
||||||
And the `ha` layout stands for a two etcd nodes, two masters and a single worker node,
|
|
||||||
partially intersecting though.
|
There are four node layout types: `default`, `separate`, `ha`, and `scale`.
|
||||||
|
|
||||||
|
|
||||||
|
`default` is a non-HA two-node setup with one separate `kube-node`
|
||||||
|
and the `etcd` group merged with the `kube-master`.
|
||||||
|
|
||||||
|
`separate` layout is when there is only one node of each type, which includes
|
||||||
|
a kube-master, kube-node, and etcd cluster member.
|
||||||
|
|
||||||
|
`ha` layout consists of two etcd nodes, two masters and a single worker node,
|
||||||
|
with role intersection.
|
||||||
|
|
||||||
|
`scale` layout can be combined with the above layouts. It includes 200 fake hosts
|
||||||
|
in the Ansible inventory. This helps test TLS certificate generation at scale
|
||||||
|
to prevent regressions and profile certain long-running tasks. These nodes are
|
||||||
|
never actually deployed, but certificates are generated for them.
|
||||||
|
|
||||||
Note, the canal network plugin deploys flannel as well plus calico policy controller.
|
Note, the canal network plugin deploys flannel as well plus calico policy controller.
|
||||||
|
|
||||||
@@ -40,15 +55,15 @@ GCE instances
|
|||||||
|
|
||||||
| Stage| Network plugin| OS type| GCE region| Nodes layout
|
| Stage| Network plugin| OS type| GCE region| Nodes layout
|
||||||
|--------------------|--------------------|--------------------|--------------------|--------------------|
|
|--------------------|--------------------|--------------------|--------------------|--------------------|
|
||||||
| part1| calico| coreos-stable| us-west1-b| separated|
|
| part1| calico| coreos-stable| us-west1-b| separate|
|
||||||
| part1| canal| debian-8-kubespray| us-east1-b| ha|
|
| part1| canal| debian-8-kubespray| us-east1-b| ha|
|
||||||
| part1| weave| rhel-7| europe-west1-b| default|
|
| part1| weave| rhel-7| europe-west1-b| default|
|
||||||
| part2| flannel| centos-7| us-west1-a| default|
|
| part2| flannel| centos-7| us-west1-a| default|
|
||||||
| part2| calico| debian-8-kubespray| us-central1-b| default|
|
| part2| calico| debian-8-kubespray| us-central1-b| default|
|
||||||
| part2| canal| coreos-stable| us-east1-b| default|
|
| part2| canal| coreos-stable| us-east1-b| default|
|
||||||
| special| canal| rhel-7| us-east1-b| separated|
|
| special| canal| rhel-7| us-east1-b| separate|
|
||||||
| special| weave| ubuntu-1604-xenial| us-central1-b| separated|
|
| special| weave| ubuntu-1604-xenial| us-central1-b| default|
|
||||||
| special| calico| centos-7| europe-west1-b| ha|
|
| special| calico| centos-7| europe-west1-b| ha-scale|
|
||||||
| special| weave| coreos-alpha| us-west1-a| ha|
|
| special| weave| coreos-alpha| us-west1-a| ha-scale|
|
||||||
|
|
||||||
The "Stage" means a build step of the build pipeline. The steps are ordered as `part1->part2->special`.
|
The "Stage" means a build step of the build pipeline. The steps are ordered as `part1->part2->special`.
|
||||||
|
|||||||
@@ -1,11 +1,11 @@
|
|||||||
Upgrading Kubernetes in Kargo
|
Upgrading Kubernetes in Kubespray
|
||||||
=============================
|
=============================
|
||||||
|
|
||||||
#### Description
|
#### Description
|
||||||
|
|
||||||
Kargo handles upgrades the same way it handles initial deployment. That is to
|
Kubespray handles upgrades the same way it handles initial deployment. That is to
|
||||||
say that each component is laid down in a fixed order. You should be able to
|
say that each component is laid down in a fixed order. You should be able to
|
||||||
upgrade from Kargo tag 2.0 up to the current master without difficulty. You can
|
upgrade from Kubespray tag 2.0 up to the current master without difficulty. You can
|
||||||
also individually control versions of components by explicitly defining their
|
also individually control versions of components by explicitly defining their
|
||||||
versions. Here are all version vars for each component:
|
versions. Here are all version vars for each component:
|
||||||
|
|
||||||
@@ -18,7 +18,7 @@ versions. Here are all version vars for each component:
|
|||||||
* flannel_version
|
* flannel_version
|
||||||
* kubedns_version
|
* kubedns_version
|
||||||
|
|
||||||
#### Example
|
#### Unsafe upgrade example
|
||||||
|
|
||||||
If you wanted to upgrade just kube_version from v1.4.3 to v1.4.6, you could
|
If you wanted to upgrade just kube_version from v1.4.3 to v1.4.6, you could
|
||||||
deploy the following way:
|
deploy the following way:
|
||||||
@@ -33,15 +33,37 @@ And then repeat with v1.4.6 as kube_version:
|
|||||||
ansible-playbook cluster.yml -i inventory/inventory.cfg -e kube_version=v1.4.6
|
ansible-playbook cluster.yml -i inventory/inventory.cfg -e kube_version=v1.4.6
|
||||||
```
|
```
|
||||||
|
|
||||||
|
#### Graceful upgrade
|
||||||
|
|
||||||
|
Kubespray also supports cordoning, draining and uncordoning of nodes when performing
|
||||||
|
a cluster upgrade. There is a separate playbook used for this purpose. It is
|
||||||
|
important to note that upgrade-cluster.yml can only be used for upgrading an
|
||||||
|
existing cluster. That means there must be at least 1 kube-master already
|
||||||
|
deployed.
|
||||||
|
|
||||||
|
```
|
||||||
|
git fetch origin
|
||||||
|
git checkout origin/master
|
||||||
|
ansible-playbook upgrade-cluster.yml -b -i inventory/inventory.cfg -e kube_version=v1.6.0
|
||||||
|
```
|
||||||
|
|
||||||
|
After a successful upgrade, the Server Version should be updated:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ kubectl version
|
||||||
|
Client Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.0", GitCommit:"fff5156092b56e6bd60fff75aad4dc9de6b6ef37", GitTreeState:"clean", BuildDate:"2017-03-28T19:15:41Z", GoVersion:"go1.8", Compiler:"gc", Platform:"darwin/amd64"}
|
||||||
|
Server Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.0+coreos.0", GitCommit:"8031716957d697332f9234ddf85febb07ac6c3e3", GitTreeState:"clean", BuildDate:"2017-03-29T04:33:09Z", GoVersion:"go1.7.5", Compiler:"gc", Platform:"linux/amd64"}
|
||||||
|
```
|
||||||
|
|
||||||
#### Upgrade order
|
#### Upgrade order
|
||||||
|
|
||||||
As mentioned above, components are upgraded in the order in which they were
|
As mentioned above, components are upgraded in the order in which they were
|
||||||
installed in the Ansible playbook. The order of component installation is as
|
installed in the Ansible playbook. The order of component installation is as
|
||||||
follows:
|
follows:
|
||||||
|
|
||||||
# Docker
|
* Docker
|
||||||
# etcd
|
* etcd
|
||||||
# kubelet and kube-proxy
|
* kubelet and kube-proxy
|
||||||
# network_plugin (such as Calico or Weave)
|
* network_plugin (such as Calico or Weave)
|
||||||
# kube-apiserver, kube-scheduler, and kube-controller-manager
|
* kube-apiserver, kube-scheduler, and kube-controller-manager
|
||||||
# Add-ons (such as KubeDNS)
|
* Add-ons (such as KubeDNS)
|
||||||
|
|||||||
@@ -39,3 +39,31 @@ k8s-01 Ready 45s
|
|||||||
k8s-02 Ready 45s
|
k8s-02 Ready 45s
|
||||||
k8s-03 Ready 45s
|
k8s-03 Ready 45s
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Customize Vagrant
|
||||||
|
=================
|
||||||
|
|
||||||
|
You can override the default settings in the `Vagrantfile` either by directly modifying the `Vagrantfile`
|
||||||
|
or through an override file.
|
||||||
|
|
||||||
|
In the same directory as the `Vagrantfile`, create a folder called `vagrant` and create `config.rb` file in it.
|
||||||
|
|
||||||
|
You're able to override the variables defined in `Vagrantfile` by providing the value in the `vagrant/config.rb` file,
|
||||||
|
e.g.:
|
||||||
|
|
||||||
|
echo '$forwarded_ports = {8001 => 8001}' >> vagrant/config.rb
|
||||||
|
|
||||||
|
and after `vagrant up` or `vagrant reload`, your host will have port forwarding setup with the guest on port 8001.
|
||||||
|
|
||||||
|
Use alternative OS for Vagrant
|
||||||
|
==============================
|
||||||
|
|
||||||
|
By default, Vagrant uses the Ubuntu 16.04 box to provision a local cluster. You may use an alternative supported
|
||||||
|
operating system for your local cluster.
|
||||||
|
|
||||||
|
Customize `$os` variable in `Vagrantfile` or as override, e.g.,:
|
||||||
|
|
||||||
|
echo '$os = "coreos-stable"' >> vagrant/config.rb
|
||||||
|
|
||||||
|
|
||||||
|
The supported operating systems for vagrant are defined in the `SUPPORTED_OS` constant in the `Vagrantfile`.
|
||||||
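Putting it together, a sketch of a `vagrant/config.rb` overriding several variables at once (``$num_instances`` is assumed to be one of the overridable `Vagrantfile` variables):

```
# vagrant/config.rb (sketch)
$os = "coreos-stable"
$num_instances = 3
$forwarded_ports = {8001 => 8001}
```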
|
|||||||
47
docs/vars.md
47
docs/vars.md
@@ -1,4 +1,4 @@
|
|||||||
Configurable Parameters in Kargo
|
Configurable Parameters in Kubespray
|
||||||
================================
|
================================
|
||||||
|
|
||||||
#### Generic Ansible variables
|
#### Generic Ansible variables
|
||||||
@@ -12,7 +12,7 @@ Some variables of note include:
|
|||||||
* *ansible_default_ipv4.address*: IP address Ansible automatically chooses.
|
* *ansible_default_ipv4.address*: IP address Ansible automatically chooses.
|
||||||
Generated based on the output from the command ``ip -4 route get 8.8.8.8``
|
Generated based on the output from the command ``ip -4 route get 8.8.8.8``
|
||||||
|
|
||||||
#### Common vars that are used in Kargo
|
#### Common vars that are used in Kubespray
|
||||||
|
|
||||||
* *calico_version* - Specify version of Calico to use
|
* *calico_version* - Specify version of Calico to use
|
||||||
* *calico_cni_version* - Specify version of Calico CNI plugin to use
|
* *calico_cni_version* - Specify version of Calico CNI plugin to use
|
||||||
@@ -23,7 +23,7 @@ Some variables of note include:
|
|||||||
* *hyperkube_image_repo* - Specify the Docker repository where Hyperkube
|
* *hyperkube_image_repo* - Specify the Docker repository where Hyperkube
|
||||||
resides
|
resides
|
||||||
* *hyperkube_image_tag* - Specify the Docker tag where Hyperkube resides
|
* *hyperkube_image_tag* - Specify the Docker tag where Hyperkube resides
|
||||||
* *kube_network_plugin* - Changes k8s plugin to Calico
|
* *kube_network_plugin* - Sets k8s network plugin (default Calico)
|
||||||
* *kube_proxy_mode* - Changes k8s proxy mode to iptables mode
|
* *kube_proxy_mode* - Changes k8s proxy mode to iptables mode
|
||||||
* *kube_version* - Specify a given Kubernetes hyperkube version
|
* *kube_version* - Specify a given Kubernetes hyperkube version
|
||||||
* *searchdomains* - Array of DNS domains to search when looking up hostnames
|
* *searchdomains* - Array of DNS domains to search when looking up hostnames
|
||||||
@@ -35,15 +35,16 @@ Some variables of note include:
|
|||||||
* *access_ip* - IP for other hosts to use to connect to. Often required when
|
* *access_ip* - IP for other hosts to use to connect to. Often required when
|
||||||
deploying from a cloud, such as OpenStack or GCE and you have separate
|
deploying from a cloud, such as OpenStack or GCE and you have separate
|
||||||
public/floating and private IPs.
|
public/floating and private IPs.
|
||||||
* *ansible_default_ipv4.address* - Not Kargo-specific, but it is used if ip
|
* *ansible_default_ipv4.address* - Not Kubespray-specific, but it is used if ip
|
||||||
and access_ip are undefined
|
and access_ip are undefined
|
||||||
* *loadbalancer_apiserver* - If defined, all hosts will connect to this
|
* *loadbalancer_apiserver* - If defined, all hosts will connect to this
|
||||||
address instead of localhost for kube-masters and kube-master[0] for
|
address instead of localhost for kube-masters and kube-master[0] for
|
||||||
kube-nodes. See more details in the
|
kube-nodes. See more details in the
|
||||||
[HA guide](https://github.com/kubernetes-incubator/kargo/blob/master/docs/ha-mode.md).
|
[HA guide](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/ha-mode.md).
|
||||||
* *loadbalancer_apiserver_localhost* - If enabled, all hosts will connect to
|
* *loadbalancer_apiserver_localhost* - makes all hosts connect to
|
||||||
the apiserver internally load balanced endpoint. See more details in the
|
the internally load-balanced apiserver endpoint. Mutually exclusive with
|
||||||
[HA guide](https://github.com/kubernetes-incubator/kargo/blob/master/docs/ha-mode.md).
|
`loadbalancer_apiserver`. See more details in the
|
||||||
|
[HA guide](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/ha-mode.md).
|
||||||
|
|
||||||
#### Cluster variables
|
#### Cluster variables
|
||||||
|
|
||||||
@@ -66,6 +67,11 @@ following default cluster paramters:
|
|||||||
OpenStack (default is unset)
|
OpenStack (default is unset)
|
||||||
* *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in
|
* *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in
|
||||||
Kubernetes
|
Kubernetes
|
||||||
|
* *authorization_modes* - A list of [authorization mode](
|
||||||
|
https://kubernetes.io/docs/admin/authorization/#using-flags-for-your-authorization-module)
|
||||||
|
that the cluster should be configured for. Defaults to `[]` (i.e. no authorization).
|
||||||
|
Note: `RBAC` is currently in an experimental phase, and does not support either calico or
|
||||||
|
vault. Upgrading from non-RBAC to RBAC is not tested.
|
||||||
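A minimal sketch of enabling it in the inventory group vars:

```
# inventory group_vars (sketch)
authorization_modes: ["RBAC"]
```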
|
|
||||||
Note, if cloud providers have any use of the ``10.233.0.0/16``, like instances'
|
Note, if cloud providers have any use of the ``10.233.0.0/16``, like instances'
|
||||||
private addresses, make sure to pick another values for ``kube_service_addresses``
|
private addresses, make sure to pick other values for ``kube_service_addresses``
|
||||||
@@ -78,13 +84,13 @@ other settings from your existing /etc/resolv.conf are lost. Set the following
|
|||||||
variables to match your requirements.
|
variables to match your requirements.
|
||||||
|
|
||||||
* *upstream_dns_servers* - Array of upstream DNS servers configured on host in
|
* *upstream_dns_servers* - Array of upstream DNS servers configured on host in
|
||||||
addition to Kargo deployed DNS
|
addition to Kubespray deployed DNS
|
||||||
* *nameservers* - Array of DNS servers configured for use in dnsmasq
|
* *nameservers* - Array of DNS servers configured for use in dnsmasq
|
||||||
* *searchdomains* - Array of up to 4 search domains
|
* *searchdomains* - Array of up to 4 search domains
|
||||||
* *skip_dnsmasq* - Don't set up dnsmasq (use only KubeDNS)
|
* *skip_dnsmasq* - Don't set up dnsmasq (use only KubeDNS)
|
||||||
|
|
||||||
For more information, see [DNS
|
For more information, see [DNS
|
||||||
Stack](https://github.com/kubernetes-incubator/kargo/blob/master/docs/dns-stack.md).
|
Stack](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/dns-stack.md).
|
||||||
|
|
||||||
#### Other service variables
|
#### Other service variables
|
||||||
|
|
||||||
@@ -92,9 +98,26 @@ Stack](https://github.com/kubernetes-incubator/kargo/blob/master/docs/dns-stack.
|
|||||||
``--insecure-registry=myregistry.mydomain:5000``
|
``--insecure-registry=myregistry.mydomain:5000``
|
||||||
* *http_proxy/https_proxy/no_proxy* - Proxy variables for deploying behind a
|
* *http_proxy/https_proxy/no_proxy* - Proxy variables for deploying behind a
|
||||||
proxy
|
proxy
|
||||||
|
* *kubelet_load_modules* - For some setups, kubelet needs to load kernel modules. For example,
|
||||||
|
dynamic kernel services are needed for mounting persistent volumes into containers. These may not be
|
||||||
|
loaded by the kubernetes preinstall processes; ceph and rbd backed volumes are examples. Set this variable to
|
||||||
|
true to let kubelet load kernel modules.
|
||||||
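For example, a minimal sketch of enabling it in the inventory group vars:

```
kubelet_load_modules: true
```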
|
|
||||||
|
##### Custom flags for Kube Components
|
||||||
|
For all kube components, custom flags can be passed in. This allows for edge cases where users need changes to the default deployment that may not be applicable to all deployments. This can be done by providing a list of flags. Example:
|
||||||
|
```
|
||||||
|
kubelet_custom_flags:
|
||||||
|
- "--eviction-hard=memory.available<100Mi"
|
||||||
|
- "--eviction-soft-grace-period=memory.available=30s"
|
||||||
|
- "--eviction-soft=memory.available<300Mi"
|
||||||
|
```
|
||||||
|
The possible vars are:
|
||||||
|
* *apiserver_custom_flags*
|
||||||
|
* *controller_mgr_custom_flags*
|
||||||
|
* *scheduler_custom_flags*
|
||||||
|
* *kubelet_custom_flags*
|
||||||
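The same pattern applies to the other components; for instance, a sketch for the apiserver (the flag shown is only an illustrative kube-apiserver option):

```
apiserver_custom_flags:
  - "--runtime-config=batch/v2alpha1=true"
```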
|
|
||||||
#### User accounts
|
#### User accounts
|
||||||
|
|
||||||
Kargo sets up two Kubernetes accounts by default: ``root`` and ``kube``. Their
|
Kubespray sets up two Kubernetes accounts by default: ``root`` and ``kube``. Their
|
||||||
passwords default to changeme. You can set this by changing ``kube_api_pwd``.
|
passwords default to changeme. You can set this by changing ``kube_api_pwd``.
|
||||||
|
|
||||||
|
|||||||
92
docs/vault.md
Normal file
92
docs/vault.md
Normal file
@@ -0,0 +1,92 @@
|
|||||||
|
Hashicorp Vault Role
|
||||||
|
====================
|
||||||
|
|
||||||
|
Overview
|
||||||
|
--------
|
||||||
|
|
||||||
|
The Vault role is a two-step process:
|
||||||
|
|
||||||
|
1. Bootstrap
|
||||||
|
|
||||||
|
You cannot start your certificate management service securely with SSL (and
|
||||||
|
the datastore behind it) without having the certificates in-hand already. This
|
||||||
|
presents an unfortunate chicken and egg scenario, with one requiring the other.
|
||||||
|
To solve for this, the Bootstrap step was added.
|
||||||
|
|
||||||
|
This step spins up a temporary instance of Vault to issue certificates for
|
||||||
|
Vault itself. It then leaves the temporary instance running, so that the Etcd
|
||||||
|
role can generate certs for itself as well. Eventually, this may be improved
|
||||||
|
to allow alternate backends (such as Consul), but currently the tasks are
|
||||||
|
hardcoded to only create a Vault role for Etcd.
|
||||||
|
|
||||||
|
2. Cluster
|
||||||
|
|
||||||
|
This step is where the long-term Vault cluster is started and configured. Its
|
||||||
|
first task is to stop any temporary instances of Vault, to free the port for
|
||||||
|
the long-term cluster. At the end of this task, the entire Vault cluster should be up
|
||||||
|
and ready to go.
|
||||||
|
|
||||||
|
|
||||||
|
Keys to the Kingdom
|
||||||
|
-------------------
|
||||||
|
|
||||||
|
The two most important security pieces of Vault are the ``root_token``
|
||||||
|
and ``unseal_keys``. Both of these values are given exactly once, during
|
||||||
|
the initialization of the Vault cluster. For convenience, they are saved
|
||||||
|
to the ``vault_secret_dir`` (default: /etc/vault/secrets) of every host in the
|
||||||
|
vault group.
|
||||||
|
|
||||||
|
It is *highly* recommended that these secrets are removed from the servers after
|
||||||
|
your cluster has been deployed, and kept in a safe location of your choosing.
|
||||||
|
Naturally, the seriousness of the situation depends on what you're doing with
|
||||||
|
your Kubespray cluster, but with these secrets, an attacker will have the ability
|
||||||
|
to authenticate to almost everything in Kubernetes and decode all private
|
||||||
|
(HTTPS) traffic on your network signed by Vault certificates.
|
||||||
|
|
||||||
|
For even greater security, you may want to remove and store elsewhere any
|
||||||
|
CA keys generated as well (e.g. /etc/vault/ssl/ca-key.pem).
|
||||||
|
|
||||||
|
Vault by default encrypts all traffic to and from the datastore backend, all
|
||||||
|
resting data, and uses TLS for its TCP listener. It is recommended that you
|
||||||
|
do not change the Vault config to disable TLS, unless you absolutely have to.
|
||||||
|
|
||||||
|
|
||||||
|
Usage
|
||||||
|
-----
|
||||||
|
|
||||||
|
To get the Vault role running, you must do two things at a minimum:
|
||||||
|
|
||||||
|
1. Assign the ``vault`` group to at least 1 node in your inventory
|
||||||
|
2. Change ``cert_management`` to be ``vault`` instead of ``script``
|
||||||
|
|
||||||
|
Nothing else is required, but customization is possible. Check
|
||||||
|
``roles/vault/defaults/main.yml`` for the different variables that can be
|
||||||
|
overridden, most common being ``vault_config``, ``vault_port``, and
|
||||||
|
``vault_deployment_type``.
|
||||||
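A minimal sketch of those two steps, assuming an INI inventory and cluster-wide overrides in ``group_vars/all.yml``:

```
# inventory.cfg: assign at least one node to the vault group
[vault]
node1
```

and in ``group_vars/all.yml``:

```
cert_management: vault
```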
|
|
||||||
|
Also, if you intend to use a Root or Intermediate CA generated elsewhere,
|
||||||
|
you'll need to copy the certificate and key to the hosts in the vault group
|
||||||
|
prior to running the vault role. By default, they'll be located at
|
||||||
|
``/etc/vault/ssl/ca.pem`` and ``/etc/vault/ssl/ca-key.pem``, respectively.
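
For example (only a sketch; the local ``files/`` source paths are
assumptions), the CA material could be pre-staged with a small play run
against the vault group:

```yml
# Sketch only: stage an externally generated CA on every vault host before
# running the vault role. Local source paths under files/ are assumed.
- hosts: vault
  become: true
  tasks:
    - name: Copy external CA certificate and key into place
      copy:
        src: "{{ item.src }}"
        dest: "{{ item.dest }}"
        owner: root
        group: root
        mode: "0600"
      with_items:
        - { src: files/ca.pem, dest: /etc/vault/ssl/ca.pem }
        - { src: files/ca-key.pem, dest: /etc/vault/ssl/ca-key.pem }
```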

Additional Notes:

- ``groups.vault|first`` is considered the source of truth for Vault variables
- ``vault_leader_url`` is used as a pointer to the currently running Vault
- Each service should have its own role and credentials. Currently those
  credentials are saved to ``/etc/vault/roles/<role>/``. The service will
  need to read in those credentials if it wants to interact with Vault.


Potential Work
--------------

- Change the Vault role to not run certain tasks when ``root_token`` and
  ``unseal_keys`` are not present. Alternatively, allow user input for these
  values when missing.
- Add the ability to start the temporary Vault instance with Host, Rkt, or Docker
- Add a dynamic way to change out the backend role creation during Bootstrap,
  so other services can be used (such as Consul)
- Segregate Server Cert generation from Auth Cert generation (separate CAs).
  This work was partially started with the `auth_cert_backend` tasks, but would
  need to be further applied to all roles (particularly Etcd and Kubernetes).

docs/vsphere.md (new file, 61 lines)

# vSphere cloud provider

Kubespray can be deployed with vSphere as a cloud provider. This feature supports:

- Volumes
- Persistent Volumes
- Storage Classes and provisioning of volumes
- vSphere Storage Policy Based Management for Containers orchestrated by Kubernetes

## Prerequisites

First you need to configure your vSphere environment by following the [official documentation](https://kubernetes.io/docs/getting-started-guides/vsphere/#vsphere-cloud-provider).

After this step you should have:

- UUID activated for each VM where Kubernetes will be deployed
- A vSphere account with the required privileges

## Kubespray configuration

First you must define the cloud provider in `inventory/group_vars/all.yml` and set it to `vsphere`.

```yml
cloud_provider: vsphere
```

Then, in the same file, you need to declare your vCenter credentials following the description below.

| Variable                      | Required | Type    | Choices                    | Default | Comment                                                                                                                                                                                     |
|-------------------------------|----------|---------|----------------------------|---------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| vsphere_vcenter_ip            | TRUE     | string  |                            |         | IP/URL of the vCenter                                                                                                                                                                       |
| vsphere_vcenter_port          | TRUE     | integer |                            |         | Port of the vCenter API. Commonly 443                                                                                                                                                       |
| vsphere_insecure              | TRUE     | integer | 1, 0                       |         | Set to 1 if the host above uses a self-signed cert                                                                                                                                          |
| vsphere_user                  | TRUE     | string  |                            |         | User name for vCenter with the required privileges                                                                                                                                         |
| vsphere_password              | TRUE     | string  |                            |         | Password for vCenter                                                                                                                                                                        |
| vsphere_datacenter            | TRUE     | string  |                            |         | Datacenter name to use                                                                                                                                                                      |
| vsphere_datastore             | TRUE     | string  |                            |         | Datastore name to use                                                                                                                                                                       |
| vsphere_working_dir           | TRUE     | string  |                            |         | Working directory from the "VMs and Templates" view in vCenter where the VMs are placed                                                                                                    |
| vsphere_scsi_controller_type  | TRUE     | string  | buslogic, pvscsi, parallel | pvscsi  | SCSI controller name. Commonly "pvscsi"                                                                                                                                                     |
| vsphere_vm_uuid               | FALSE    | string  |                            |         | VM Instance UUID of the virtual machine that hosts the K8s master. Can be retrieved from the instanceUuid property in VmConfigInfo, as vc.uuid in the VMX file, or from `/sys/class/dmi/id/product_serial` |
| vsphere_public_network        | FALSE    | string  |                            | Blank   | Name of the network the VMs are joined to                                                                                                                                                   |

Example configuration:

```yml
vsphere_vcenter_ip: "myvcenter.domain.com"
vsphere_vcenter_port: 443
vsphere_insecure: 1
vsphere_user: "k8s@vsphere.local"
vsphere_password: "K8s_admin"
vsphere_datacenter: "DATACENTER_name"
vsphere_datastore: "DATASTORE_name"
vsphere_working_dir: "Docker_hosts"
vsphere_scsi_controller_type: "pvscsi"
```
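
If you need the optional variables as well, they can be appended to the same
block; the values below are placeholders for illustration only (the UUID and
network name are assumptions, not defaults):

```yml
# Optional settings; both values are illustrative placeholders.
vsphere_vm_uuid: "422f48f0-0000-0000-0000-000000000000"
vsphere_public_network: "VM Network"
```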

## Deployment

Once the configuration is set, you can execute the playbook again to apply the new configuration.

```
cd kubespray
ansible-playbook -i inventory/inventory.cfg -b -v cluster.yml
```

You'll find some useful examples [here](https://github.com/kubernetes/kubernetes/tree/master/examples/volumes/vsphere) to test your configuration.

docs/weave.md (new file, 98 lines)

Weave
=======

Weave 2.0.1 is supported by kubespray.

Weave uses [**consensus**](https://www.weave.works/docs/net/latest/ipam/#consensus) mode (the default mode) and [**seed**](https://www.weave.works/docs/net/latest/ipam/#seed) mode.

`Consensus` mode is best suited to clusters of static size, and `seed` mode is best suited to clusters of dynamic size.

Weave encryption is supported for all communication.

* To use Weave encryption, specify a strong password (if no password is set, there is no encryption)

```
# In file ./inventory/group_vars/k8s-cluster.yml
weave_password: EnterPasswordHere
```

This password is used to set an environment variable inside the weave container.

Weave is deployed by kubespray using a DaemonSet.

* Check the status of the Weave containers

```
# From client
kubectl -n kube-system get pods | grep weave
# output
weave-net-50wd2   2/2   Running   0   2m
weave-net-js9rb   2/2   Running   0   2m
```

There must be as many pods as nodes (here the cluster has 2 nodes, so there are 2 weave pods).

* Check the status of weave (connections, encryption, ...) on each node

```
# On nodes
curl http://127.0.0.1:6784/status
# output on node1
Version: 2.0.1 (up to date; next check at 2017/08/01 13:51:34)

Service: router
Protocol: weave 1..2
Name: fa:16:3e:b3:d6:b2(node1)
Encryption: enabled
PeerDiscovery: enabled
Targets: 2
Connections: 2 (1 established, 1 failed)
Peers: 2 (with 2 established connections)
TrustedSubnets: none

Service: ipam
Status: ready
Range: 10.233.64.0/18
DefaultSubnet: 10.233.64.0/18
```

* Check the parameters of weave on each node

```
# On nodes
ps -aux | grep weaver
# output on node1 (seed mode is in use here)
root      8559  0.2  3.0 365280 62700 ?   Sl   08:25   0:00 /home/weave/weaver --name=fa:16:3e:b3:d6:b2 --port=6783 --datapath=datapath --host-root=/host --http-addr=127.0.0.1:6784 --status-addr=0.0.0.0:6782 --docker-api= --no-dns --db-prefix=/weavedb/weave-net --ipalloc-range=10.233.64.0/18 --nickname=node1 --ipalloc-init seed=fa:16:3e:b3:d6:b2,fa:16:3e:f0:50:53 --conn-limit=30 --expect-npc 192.168.208.28 192.168.208.19
```

### Consensus mode (default mode)

This mode is best suited to clusters of static size.

### Seed mode

This mode is best suited to clusters of dynamic size.

Seed mode also allows multi-cloud and hybrid on-premise/cloud cluster deployments.

* Switch from consensus mode to seed mode

```
# In file ./inventory/group_vars/k8s-cluster.yml
weave_mode_seed: true
```

The following two variables are only used when `weave_mode_seed` is set to `true` (**/!\ do not manually change these values**)

```
# In file ./inventory/group_vars/k8s-cluster.yml
weave_seed: uninitialized
weave_peers: uninitialized
```

The first variable, `weave_seed`, contains the initial nodes of the weave network.

The second variable, `weave_peers`, saves the IPs of all nodes joined to the weave network.

These two variables are used to connect a new node to the weave network: the new node needs to know the first nodes (the seed) and the list of IPs of all nodes.
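
For illustration only, after the network has been initialized these variables
end up holding the peer names and node IPs that also appear in the `ps` output
above; the exact format is maintained by kubespray and, as noted, must not be
edited by hand:

```
# Illustrative values saved back into ./inventory/group_vars/k8s-cluster.yml
weave_seed: fa:16:3e:b3:d6:b2,fa:16:3e:f0:50:53
weave_peers: 192.168.208.28,192.168.208.19
```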

To reset these variables and reset the weave network, set them back to `uninitialized`.

extra_playbooks/inventory (new symbolic link -> ../inventory)

extra_playbooks/roles (new symbolic link -> ../roles)

extra_playbooks/upgrade-only-k8s.yml (new file, 60 lines)

### NOTE: This playbook cannot be used to deploy any new nodes to the cluster.
### Additional information:
### * Will not upgrade etcd
### * Will not upgrade network plugins
### * Will not upgrade Docker
### * Currently does not support Vault deployment.
###
### In most cases, you probably want to use the upgrade-cluster.yml playbook and
### not this one.

- hosts: localhost
  gather_facts: False
  roles:
    - { role: kubespray-defaults }
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }

- hosts: k8s-cluster:etcd:calico-rr
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  gather_facts: false
  vars:
    # Need to disable pipelining for bootstrap-os as some systems have requiretty in sudoers set, which makes pipelining
    # fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
    ansible_ssh_pipelining: false
  roles:
    - { role: kubespray-defaults }
    - { role: bootstrap-os, tags: bootstrap-os }

- hosts: k8s-cluster:etcd:calico-rr
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  vars:
    ansible_ssh_pipelining: true
  gather_facts: true

- hosts: k8s-cluster:etcd:calico-rr
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, tags: preinstall }

# Handle upgrades to master components first to maintain backwards compat.
- hosts: kube-master
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  serial: 1
  roles:
    - { role: kubespray-defaults }
    - { role: upgrade/pre-upgrade, tags: pre-upgrade }
    - { role: kubernetes/node, tags: node }
    - { role: kubernetes/master, tags: master }
    - { role: upgrade/post-upgrade, tags: post-upgrade }

# Finally handle worker upgrades, based on the given batch size
- hosts: kube-node:!kube-master
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  serial: "{{ serial | default('20%') }}"
  roles:
    - { role: kubespray-defaults }
    - { role: upgrade/pre-upgrade, tags: pre-upgrade }
    - { role: kubernetes/node, tags: node }
    - { role: upgrade/post-upgrade, tags: post-upgrade }
    - { role: kubespray-defaults }