mirror of
https://github.com/kubernetes-sigs/kubespray.git
synced 2025-12-14 22:04:43 +03:00
Compare commits
553 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ba0a03a8ba | ||
|
|
b0f04d925a | ||
|
|
7b78e68727 | ||
|
|
ec53b8b66a | ||
|
|
86fb669fd3 | ||
|
|
7123956ecd | ||
|
|
46cf6b77cf | ||
|
|
a52bc44f5a | ||
|
|
acb63a57fa | ||
|
|
5b08277ce4 | ||
|
|
5dc56df64e | ||
|
|
33c4d64b62 | ||
|
|
25de6825df | ||
|
|
0b60201a1e | ||
|
|
cfea99c4ee | ||
|
|
cea41a544e | ||
|
|
8371a060a0 | ||
|
|
7ed140cea7 | ||
|
|
cb97c2184e | ||
|
|
0b4fcc83bd | ||
|
|
514359e556 | ||
|
|
55b9d02a99 | ||
|
|
fc9a65be2b | ||
|
|
49dff97d9c | ||
|
|
4efb0b78fa | ||
|
|
c9fe8fde59 | ||
|
|
74d54946bf | ||
|
|
16462292e1 | ||
|
|
7ef1e1ef9d | ||
|
|
20d80311f0 | ||
|
|
f1a1f53f72 | ||
|
|
c766bd077b | ||
|
|
54320c5b09 | ||
|
|
291b71ea3b | ||
|
|
356515222a | ||
|
|
688e589e0c | ||
|
|
6c98201aa4 | ||
|
|
d4b10eb9f5 | ||
|
|
728d56e74d | ||
|
|
a9f4038fcd | ||
|
|
77f1d4b0f1 | ||
|
|
d78577c810 | ||
|
|
5fb6b2eaf7 | ||
|
|
404caa111a | ||
|
|
b838468500 | ||
|
|
f2235be1d3 | ||
|
|
6ec45b10f1 | ||
|
|
d9879d8026 | ||
|
|
d487b2f927 | ||
|
|
66e5e14bac | ||
|
|
7e4668859b | ||
|
|
92d038062e | ||
|
|
2972bceb90 | ||
|
|
cb0a60a0fe | ||
|
|
3ee91e15ff | ||
|
|
ef47a73382 | ||
|
|
dc515e5ac5 | ||
|
|
56763d4288 | ||
|
|
ad9fa73301 | ||
|
|
10dd049912 | ||
|
|
4209f1cbfd | ||
|
|
ee83e874a8 | ||
|
|
27ed73e3e3 | ||
|
|
e41c0532e3 | ||
|
|
eeb7274d65 | ||
|
|
eb0dcf6063 | ||
|
|
83be0735cd | ||
|
|
fe4ba51d1a | ||
|
|
adf575b75e | ||
|
|
e5426f74a8 | ||
|
|
f5212d3b79 | ||
|
|
3d09c4be75 | ||
|
|
f2db15873d | ||
|
|
7c663de6c9 | ||
|
|
c14bbcdbf2 | ||
|
|
1be4c1935a | ||
|
|
764b1aa5f8 | ||
|
|
d13b07ba59 | ||
|
|
028afab908 | ||
|
|
55dfae2a52 | ||
|
|
994324e19c | ||
|
|
b81c0d869c | ||
|
|
f14f04c5ea | ||
|
|
9c86da1403 | ||
|
|
cb611b5ed0 | ||
|
|
891269ef39 | ||
|
|
ab171a1d6d | ||
|
|
a56738324a | ||
|
|
da61b8e7c9 | ||
|
|
d6d58bc938 | ||
|
|
e42cb43ca5 | ||
|
|
ca541c7e4a | ||
|
|
96e14424f0 | ||
|
|
47830896e8 | ||
|
|
5fd4b4afae | ||
|
|
dae9f6d3c2 | ||
|
|
8e1210f96e | ||
|
|
56aa683f28 | ||
|
|
1b9a6d7ad8 | ||
|
|
f591c4db56 | ||
|
|
371fa51e82 | ||
|
|
a927ed2da4 | ||
|
|
a55675acf8 | ||
|
|
25dd3d476a | ||
|
|
3ff5f40bdb | ||
|
|
689ded0413 | ||
|
|
327ed157ef | ||
|
|
c819238da9 | ||
|
|
477afa8711 | ||
|
|
bd272e0b3c | ||
|
|
1067595b5c | ||
|
|
14c232e3c4 | ||
|
|
57f5fb1f4f | ||
|
|
bcddfb786d | ||
|
|
20db1738fa | ||
|
|
b23d81f825 | ||
|
|
bc15ceaba1 | ||
|
|
6f17d0817b | ||
|
|
a1cde03b20 | ||
|
|
cfce23950a | ||
|
|
64740249ab | ||
|
|
126f42de06 | ||
|
|
d94e3a81eb | ||
|
|
70d0235770 | ||
|
|
30b5493fd6 | ||
|
|
4f6362515f | ||
|
|
dbbe9419e5 | ||
|
|
188bae142b | ||
|
|
7c2b12ebd7 | ||
|
|
ef8e35e39b | ||
|
|
975accbe1d | ||
|
|
aaa27d0a34 | ||
|
|
9302ce0036 | ||
|
|
0aab3c97a0 | ||
|
|
8e731337ba | ||
|
|
b294db5aed | ||
|
|
8d766a2ca9 | ||
|
|
f2ae16e71d | ||
|
|
ac281476c8 | ||
|
|
1b1c8d31a9 | ||
|
|
4b587aaf99 | ||
|
|
016301508e | ||
|
|
6744726089 | ||
|
|
0a89f88b89 | ||
|
|
69fac8ea58 | ||
|
|
a51104e844 | ||
|
|
943aaf84e5 | ||
|
|
e8bde03a50 | ||
|
|
75b13caf0b | ||
|
|
0f231f0e76 | ||
|
|
5d99fa0940 | ||
|
|
649388188b | ||
|
|
9fa1873a65 | ||
|
|
f2057dd43d | ||
|
|
eeffbbb43c | ||
|
|
aaa0105f75 | ||
|
|
f29a42721f | ||
|
|
079d317ade | ||
|
|
6f1fd12265 | ||
|
|
e16b57aa05 | ||
|
|
fb30f65951 | ||
|
|
a47aaae078 | ||
|
|
7117614ee5 | ||
|
|
e26aec96b0 | ||
|
|
c60d104056 | ||
|
|
e6ff8c92a0 | ||
|
|
9bce364b3c | ||
|
|
cbaa2b5773 | ||
|
|
0453ed8235 | ||
|
|
a341adb7f3 | ||
|
|
4c88ac69f2 | ||
|
|
85c237bc1d | ||
|
|
35d48cc88c | ||
|
|
957b7115fe | ||
|
|
82eedbd622 | ||
|
|
b930b0ef5a | ||
|
|
ad313c9d49 | ||
|
|
06035c0f4e | ||
|
|
e1384f6618 | ||
|
|
3acb86805b | ||
|
|
bf0af1cd3d | ||
|
|
c77d11f1c7 | ||
|
|
d279d145d5 | ||
|
|
fc7905653e | ||
|
|
660282e82f | ||
|
|
77602dbb93 | ||
|
|
a3e6896a43 | ||
|
|
702ce446df | ||
|
|
8ae77e955e | ||
|
|
783924e671 | ||
|
|
93304e5f58 | ||
|
|
917373ee55 | ||
|
|
7a98ad50b4 | ||
|
|
982058cc19 | ||
|
|
576beaa6a6 | ||
|
|
6eb22c5db2 | ||
|
|
72a0d78b3c | ||
|
|
13d08af054 | ||
|
|
80a7ae9845 | ||
|
|
6c30a7b2eb | ||
|
|
76b72338da | ||
|
|
a39e78d42d | ||
|
|
4550dccb84 | ||
|
|
01ce09f343 | ||
|
|
71dca67ca2 | ||
|
|
327f9baccf | ||
|
|
a98b866a66 | ||
|
|
3aabba7535 | ||
|
|
c22cfa255b | ||
|
|
af211b3d71 | ||
|
|
6bb3463e7c | ||
|
|
8b151d12b9 | ||
|
|
ecb6dc3679 | ||
|
|
49a223a17d | ||
|
|
e5cfdc648c | ||
|
|
9f9f70aade | ||
|
|
e91c04f586 | ||
|
|
277fa6c12d | ||
|
|
ca3050ec3d | ||
|
|
1b3ced152b | ||
|
|
97031f9133 | ||
|
|
c92506e2e7 | ||
|
|
65a9772adf | ||
|
|
1e07ee6cc4 | ||
|
|
01a130273f | ||
|
|
3c710219a1 | ||
|
|
2ba285a544 | ||
|
|
72ae7638bc | ||
|
|
3bfad5ca73 | ||
|
|
668d02846d | ||
|
|
781f31d2b8 | ||
|
|
df28db0066 | ||
|
|
20183f3860 | ||
|
|
48edf1757b | ||
|
|
2645e88b0c | ||
|
|
db121049b3 | ||
|
|
8058cdbc0e | ||
|
|
31d357284a | ||
|
|
4ee77ce026 | ||
|
|
8373129588 | ||
|
|
9a3c6f236d | ||
|
|
55ba81fee5 | ||
|
|
bc5159a1f5 | ||
|
|
af007c7189 | ||
|
|
dc79d07303 | ||
|
|
79167c7577 | ||
|
|
08dd057864 | ||
|
|
fee3f288c0 | ||
|
|
b22bef5cfb | ||
|
|
7ad5523113 | ||
|
|
460b5824c3 | ||
|
|
b0a28b1e80 | ||
|
|
ca6535f210 | ||
|
|
1155008719 | ||
|
|
d07594ed59 | ||
|
|
5efda3eda9 | ||
|
|
4b137efdbd | ||
|
|
383d582b47 | ||
|
|
6eacedc443 | ||
|
|
b1a5bb593c | ||
|
|
9369c6549a | ||
|
|
c7731a3b93 | ||
|
|
24706c163a | ||
|
|
a276dc47e0 | ||
|
|
e55f8a61cd | ||
|
|
c8bcca0845 | ||
|
|
cb6892d2ed | ||
|
|
43eda8d878 | ||
|
|
a2534e03bd | ||
|
|
dc5b955930 | ||
|
|
5de7896ffb | ||
|
|
01af45d14a | ||
|
|
cc9f3ea938 | ||
|
|
ff43de695e | ||
|
|
8bc717a55c | ||
|
|
d09222c900 | ||
|
|
87cdb81fae | ||
|
|
38eb1d548a | ||
|
|
e0960f6288 | ||
|
|
74403f2003 | ||
|
|
b2c83714d1 | ||
|
|
2c21672de6 | ||
|
|
f7dc21773d | ||
|
|
3e457e4edf | ||
|
|
03572d175f | ||
|
|
c4894d6092 | ||
|
|
3fb0383df4 | ||
|
|
ee36763f9d | ||
|
|
955c5549ae | ||
|
|
4a34514b21 | ||
|
|
805d9f22ce | ||
|
|
20f29327e9 | ||
|
|
018b5039e7 | ||
|
|
d6aeb767a0 | ||
|
|
b5d3d4741f | ||
|
|
85c747d444 | ||
|
|
927e6d89d7 | ||
|
|
3d87f23bf5 | ||
|
|
45845d4a2a | ||
|
|
00ef129b2a | ||
|
|
06b219217b | ||
|
|
789910d8eb | ||
|
|
a8e6a0763d | ||
|
|
e1386ba604 | ||
|
|
83deecb9e9 | ||
|
|
d8dcb8f6e0 | ||
|
|
5fa31eaead | ||
|
|
d245201614 | ||
|
|
a5b84a47b0 | ||
|
|
552b2f0635 | ||
|
|
0b3badf3d8 | ||
|
|
cea3e224aa | ||
|
|
1eaf0e1c63 | ||
|
|
2cda982345 | ||
|
|
c9734b6d7b | ||
|
|
fd01377f12 | ||
|
|
8d2fc88336 | ||
|
|
092bf07cbf | ||
|
|
5145a8e8be | ||
|
|
b495d36fa5 | ||
|
|
3bdeaa4a6f | ||
|
|
d1f58fed4c | ||
|
|
12e918bd31 | ||
|
|
637f445c3f | ||
|
|
d0e4cf5895 | ||
|
|
e0bf8b2aab | ||
|
|
483c06b4ab | ||
|
|
f4a3b31415 | ||
|
|
5c7e309d13 | ||
|
|
7a72b2d558 | ||
|
|
c75b21a510 | ||
|
|
a9f318d523 | ||
|
|
1dca0bd8d7 | ||
|
|
f3165a716a | ||
|
|
9f45eba6f6 | ||
|
|
ecaa7dad49 | ||
|
|
ee84e34570 | ||
|
|
442be2ac02 | ||
|
|
22d600e8c0 | ||
|
|
e160018826 | ||
|
|
d1a02bd3e9 | ||
|
|
380fb986b6 | ||
|
|
e7f794531e | ||
|
|
992023288f | ||
|
|
ef5a36dd69 | ||
|
|
3ab90db6ee | ||
|
|
e26be9cb8a | ||
|
|
bba555bb08 | ||
|
|
4b0af73dd2 | ||
|
|
da72b8c385 | ||
|
|
44079b7176 | ||
|
|
19c36fe4c9 | ||
|
|
a742d10c54 | ||
|
|
6bd27038cc | ||
|
|
5df757a403 | ||
|
|
38f5d1b18e | ||
|
|
5f75d4c099 | ||
|
|
319a0d65af | ||
|
|
3d2680a102 | ||
|
|
c36fb5919a | ||
|
|
46d3f4369e | ||
|
|
c2b3920b50 | ||
|
|
6e7323e3e8 | ||
|
|
e98b0371e5 | ||
|
|
f085419055 | ||
|
|
1fedbded62 | ||
|
|
c8258171ca | ||
|
|
007ee0da8e | ||
|
|
5e1ac9ce87 | ||
|
|
a7cd08603e | ||
|
|
854cd1a517 | ||
|
|
cf8c74cb07 | ||
|
|
23565ebe62 | ||
|
|
8467bce2a6 | ||
|
|
e6225d70a1 | ||
|
|
a69de8be40 | ||
|
|
649654207f | ||
|
|
3123502f4c | ||
|
|
17d54cffbb | ||
|
|
bddee7c38e | ||
|
|
6f9c311285 | ||
|
|
0cfa6a8981 | ||
|
|
d5516a4ca9 | ||
|
|
d2b793057e | ||
|
|
b2a409fd4d | ||
|
|
4ba237c5d8 | ||
|
|
f5ef02d4cc | ||
|
|
ec2255764a | ||
|
|
1a8e92c922 | ||
|
|
5c1891ec9f | ||
|
|
83265b7f75 | ||
|
|
5364a10033 | ||
|
|
c2a46e4aa3 | ||
|
|
bae5ce0bfa | ||
|
|
cc5edb720c | ||
|
|
e17c2ef698 | ||
|
|
61b74f9a5b | ||
|
|
0cd83eadc0 | ||
|
|
1757c45490 | ||
|
|
d85f98d2a9 | ||
|
|
9e123011c2 | ||
|
|
774c4d0d6f | ||
|
|
7332679678 | ||
|
|
bb6f727f25 | ||
|
|
586d2a41ce | ||
|
|
91dff61008 | ||
|
|
8203383c03 | ||
|
|
a3c88a0de5 | ||
|
|
fff0aec720 | ||
|
|
b73786c6d5 | ||
|
|
67eeccb31f | ||
|
|
266ca9318d | ||
|
|
3e97299a46 | ||
|
|
eacc42fedd | ||
|
|
db3e8edacd | ||
|
|
6e41634295 | ||
|
|
ef3c2d86d3 | ||
|
|
780308c194 | ||
|
|
696fd690ae | ||
|
|
d323501c7f | ||
|
|
66d8b2c18a | ||
|
|
6d8a415b4d | ||
|
|
dad268a686 | ||
|
|
e7acc2fddf | ||
|
|
6fb17a813c | ||
|
|
11ede9f872 | ||
|
|
6ac1c1c886 | ||
|
|
01c0ab4f06 | ||
|
|
7713f35326 | ||
|
|
7220b09ff9 | ||
|
|
b7298ef51a | ||
|
|
16b10b026b | ||
|
|
9b18c073b6 | ||
|
|
dd89e705f2 | ||
|
|
56b86bbfca | ||
|
|
7e2aafcc76 | ||
|
|
11c774b04f | ||
|
|
6ba926381b | ||
|
|
af55e179c7 | ||
|
|
18a42e4b38 | ||
|
|
a10ccadb54 | ||
|
|
15fee582cc | ||
|
|
43408634bb | ||
|
|
d47fce6ce7 | ||
|
|
9e64267867 | ||
|
|
7ae5785447 | ||
|
|
ef8d3f684f | ||
|
|
cc6e3d14ce | ||
|
|
83f44b1ac1 | ||
|
|
1f470eadd1 | ||
|
|
005b01bd9a | ||
|
|
6f67367b57 | ||
|
|
9ee0600a7f | ||
|
|
30cc7c847e | ||
|
|
a5bb24b886 | ||
|
|
f02d810af8 | ||
|
|
55f6b6a6ab | ||
|
|
b999ee60aa | ||
|
|
85afd3ef14 | ||
|
|
1907030d89 | ||
|
|
361a5eac7e | ||
|
|
fecb41d2ef | ||
|
|
4cdb641e7b | ||
|
|
efa2dff681 | ||
|
|
31a7b7d24e | ||
|
|
af8cc4dc4a | ||
|
|
8eb60f5624 | ||
|
|
791ea89b88 | ||
|
|
c572760a66 | ||
|
|
69fc19f7e0 | ||
|
|
b939c24b3d | ||
|
|
3eb494dbe3 | ||
|
|
d6a66c83c2 | ||
|
|
582a9a5db8 | ||
|
|
0afbc19ffb | ||
|
|
ac9290f985 | ||
|
|
a133ba1998 | ||
|
|
5657738f7e | ||
|
|
d310acc1eb | ||
|
|
2b88f10b04 | ||
|
|
883ba7aa90 | ||
|
|
28f55deaae | ||
|
|
40407930d5 | ||
|
|
674b71b535 | ||
|
|
677d9c47ac | ||
|
|
2638ab98ad | ||
|
|
bc3068c2f9 | ||
|
|
2bde9bea1c | ||
|
|
502f2f040d | ||
|
|
041d4d666e | ||
|
|
c0c10a97e7 | ||
|
|
5a7c50027f | ||
|
|
88b5065e7d | ||
|
|
b690008192 | ||
|
|
2d6bc9536c | ||
|
|
01dc6b2f0e | ||
|
|
d8aa2d0a9e | ||
|
|
19bb97d24d | ||
|
|
9f4f168804 | ||
|
|
82e133b382 | ||
|
|
cf3083d68e | ||
|
|
e796cdbb27 | ||
|
|
2d44582f88 | ||
|
|
2a61344c03 | ||
|
|
77c6aad1b5 | ||
|
|
b60a897265 | ||
|
|
fdd41c706a | ||
|
|
d68cfeed6e | ||
|
|
14911e0d22 | ||
|
|
9503434d53 | ||
|
|
c3c9e955e5 | ||
|
|
72d5db92a8 | ||
|
|
3f302c8d47 | ||
|
|
04a769bb37 | ||
|
|
f9d4a1c1d8 | ||
|
|
3e7db46195 | ||
|
|
e52aca4837 | ||
|
|
5ec503bd6f | ||
|
|
49be805001 | ||
|
|
94596388f7 | ||
|
|
5c4980c6e0 | ||
|
|
6d157f0b3e | ||
|
|
c3d5fdff64 | ||
|
|
d6cbdbd6aa | ||
|
|
d7b8fb3113 | ||
|
|
45044c2d75 | ||
|
|
a9f260d135 | ||
|
|
072b3b9d8c | ||
|
|
ae7f59e249 | ||
|
|
450b4e16b2 | ||
|
|
c48ffa24be | ||
|
|
7f0c0a0922 | ||
|
|
bce1c62308 | ||
|
|
9b3aa3451e | ||
|
|
436c0b58db | ||
|
|
7ac62822cb | ||
|
|
af8ae83ea0 | ||
|
|
0bcecae2a3 | ||
|
|
bd130315b6 | ||
|
|
504711647e | ||
|
|
a9a016d7b1 | ||
|
|
ab12b23e6f | ||
|
|
797bdbd998 | ||
|
|
1c45d37348 | ||
|
|
b521255ec9 | ||
|
|
75ea001bfe | ||
|
|
ff2fb9196f | ||
|
|
ccc11e5680 | ||
|
|
fbded9cdac | ||
|
|
5d3414a40b | ||
|
|
28473e919f | ||
|
|
69636d2453 | ||
|
|
8a63b35f44 |
2
.github/ISSUE_TEMPLATE.md
vendored
2
.github/ISSUE_TEMPLATE.md
vendored
@@ -24,7 +24,7 @@ explain why.
|
|||||||
- **Version of Ansible** (`ansible --version`):
|
- **Version of Ansible** (`ansible --version`):
|
||||||
|
|
||||||
|
|
||||||
**Kargo version (commit) (`git rev-parse --short HEAD`):**
|
**Kubespray version (commit) (`git rev-parse --short HEAD`):**
|
||||||
|
|
||||||
|
|
||||||
**Network plugin used**:
|
**Network plugin used**:
|
||||||
|
|||||||
83
.gitignore
vendored
83
.gitignore
vendored
@@ -7,12 +7,89 @@ temp
|
|||||||
.idea
|
.idea
|
||||||
.tox
|
.tox
|
||||||
.cache
|
.cache
|
||||||
*.egg-info
|
*.bak
|
||||||
*.pyc
|
|
||||||
*.pyo
|
|
||||||
*.tfstate
|
*.tfstate
|
||||||
*.tfstate.backup
|
*.tfstate.backup
|
||||||
|
contrib/terraform/aws/credentials.tfvars
|
||||||
**/*.sw[pon]
|
**/*.sw[pon]
|
||||||
/ssh-bastion.conf
|
/ssh-bastion.conf
|
||||||
**/*.sw[pon]
|
**/*.sw[pon]
|
||||||
vagrant/
|
vagrant/
|
||||||
|
|
||||||
|
# Byte-compiled / optimized / DLL files
|
||||||
|
__pycache__/
|
||||||
|
*.py[cod]
|
||||||
|
*$py.class
|
||||||
|
|
||||||
|
# Distribution / packaging
|
||||||
|
.Python
|
||||||
|
artifacts/
|
||||||
|
env/
|
||||||
|
build/
|
||||||
|
credentials/
|
||||||
|
develop-eggs/
|
||||||
|
dist/
|
||||||
|
downloads/
|
||||||
|
eggs/
|
||||||
|
.eggs/
|
||||||
|
parts/
|
||||||
|
sdist/
|
||||||
|
var/
|
||||||
|
*.egg-info/
|
||||||
|
.installed.cfg
|
||||||
|
*.egg
|
||||||
|
|
||||||
|
# PyInstaller
|
||||||
|
# Usually these files are written by a python script from a template
|
||||||
|
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||||
|
*.manifest
|
||||||
|
*.spec
|
||||||
|
|
||||||
|
# Installer logs
|
||||||
|
pip-log.txt
|
||||||
|
pip-delete-this-directory.txt
|
||||||
|
|
||||||
|
# Unit test / coverage reports
|
||||||
|
htmlcov/
|
||||||
|
.tox/
|
||||||
|
.coverage
|
||||||
|
.coverage.*
|
||||||
|
.cache
|
||||||
|
nosetests.xml
|
||||||
|
coverage.xml
|
||||||
|
*,cover
|
||||||
|
.hypothesis/
|
||||||
|
|
||||||
|
# Translations
|
||||||
|
*.mo
|
||||||
|
*.pot
|
||||||
|
|
||||||
|
# Django stuff:
|
||||||
|
*.log
|
||||||
|
local_settings.py
|
||||||
|
|
||||||
|
# Flask stuff:
|
||||||
|
instance/
|
||||||
|
.webassets-cache
|
||||||
|
|
||||||
|
# Scrapy stuff:
|
||||||
|
.scrapy
|
||||||
|
|
||||||
|
# Sphinx documentation
|
||||||
|
docs/_build/
|
||||||
|
|
||||||
|
# PyBuilder
|
||||||
|
target/
|
||||||
|
|
||||||
|
# IPython Notebook
|
||||||
|
.ipynb_checkpoints
|
||||||
|
|
||||||
|
# pyenv
|
||||||
|
.python-version
|
||||||
|
|
||||||
|
# dotenv
|
||||||
|
.env
|
||||||
|
|
||||||
|
# virtualenv
|
||||||
|
venv/
|
||||||
|
ENV/
|
||||||
|
|||||||
361
.gitlab-ci.yml
361
.gitlab-ci.yml
@@ -18,12 +18,8 @@ variables:
|
|||||||
# us-west1-a
|
# us-west1-a
|
||||||
|
|
||||||
before_script:
|
before_script:
|
||||||
- pip install ansible==2.2.1.0
|
- pip install -r tests/requirements.txt
|
||||||
- pip install netaddr
|
|
||||||
- pip install apache-libcloud==0.20.1
|
|
||||||
- pip install boto==2.9.0
|
|
||||||
- mkdir -p /.ssh
|
- mkdir -p /.ssh
|
||||||
- cp tests/ansible.cfg .
|
|
||||||
|
|
||||||
.job: &job
|
.job: &job
|
||||||
tags:
|
tags:
|
||||||
@@ -43,25 +39,20 @@ before_script:
|
|||||||
GCE_USER: travis
|
GCE_USER: travis
|
||||||
SSH_USER: $GCE_USER
|
SSH_USER: $GCE_USER
|
||||||
TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
|
TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
|
||||||
|
CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
|
||||||
CONTAINER_ENGINE: docker
|
CONTAINER_ENGINE: docker
|
||||||
PRIVATE_KEY: $GCE_PRIVATE_KEY
|
PRIVATE_KEY: $GCE_PRIVATE_KEY
|
||||||
GS_ACCESS_KEY_ID: $GS_KEY
|
GS_ACCESS_KEY_ID: $GS_KEY
|
||||||
GS_SECRET_ACCESS_KEY: $GS_SECRET
|
GS_SECRET_ACCESS_KEY: $GS_SECRET
|
||||||
CLOUD_MACHINE_TYPE: "g1-small"
|
CLOUD_MACHINE_TYPE: "g1-small"
|
||||||
|
GCE_PREEMPTIBLE: "false"
|
||||||
ANSIBLE_KEEP_REMOTE_FILES: "1"
|
ANSIBLE_KEEP_REMOTE_FILES: "1"
|
||||||
ANSIBLE_CONFIG: ./tests/ansible.cfg
|
ANSIBLE_CONFIG: ./tests/ansible.cfg
|
||||||
BOOTSTRAP_OS: none
|
|
||||||
DOWNLOAD_LOCALHOST: "false"
|
|
||||||
DOWNLOAD_RUN_ONCE: "false"
|
|
||||||
IDEMPOT_CHECK: "false"
|
IDEMPOT_CHECK: "false"
|
||||||
RESET_CHECK: "false"
|
RESET_CHECK: "false"
|
||||||
UPGRADE_TEST: "false"
|
UPGRADE_TEST: "false"
|
||||||
RESOLVCONF_MODE: docker_dns
|
KUBEADM_ENABLED: "false"
|
||||||
LOG_LEVEL: "-vv"
|
LOG_LEVEL: "-vv"
|
||||||
ETCD_DEPLOYMENT: "docker"
|
|
||||||
KUBELET_DEPLOYMENT: "docker"
|
|
||||||
VAULT_DEPLOYMENT: "docker"
|
|
||||||
WEAVE_CPU_LIMIT: "100m"
|
|
||||||
MAGIC: "ci check this"
|
MAGIC: "ci check this"
|
||||||
|
|
||||||
.gce: &gce
|
.gce: &gce
|
||||||
@@ -74,10 +65,7 @@ before_script:
|
|||||||
- $HOME/.cache
|
- $HOME/.cache
|
||||||
before_script:
|
before_script:
|
||||||
- docker info
|
- docker info
|
||||||
- pip install ansible==2.2.1.0
|
- pip install -r tests/requirements.txt
|
||||||
- pip install netaddr
|
|
||||||
- pip install apache-libcloud==0.20.1
|
|
||||||
- pip install boto==2.9.0
|
|
||||||
- mkdir -p /.ssh
|
- mkdir -p /.ssh
|
||||||
- mkdir -p $HOME/.ssh
|
- mkdir -p $HOME/.ssh
|
||||||
- echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
|
- echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
|
||||||
@@ -85,84 +73,74 @@ before_script:
|
|||||||
- echo $GCE_CREDENTIALS > $HOME/.ssh/gce.json
|
- echo $GCE_CREDENTIALS > $HOME/.ssh/gce.json
|
||||||
- chmod 400 $HOME/.ssh/id_rsa
|
- chmod 400 $HOME/.ssh/id_rsa
|
||||||
- ansible-playbook --version
|
- ansible-playbook --version
|
||||||
- export PYPATH=$([ $BOOTSTRAP_OS = none ] && echo /usr/bin/python || echo /opt/bin/python)
|
- export PYPATH=$([[ ! "$CI_JOB_NAME" =~ "coreos" ]] && echo /usr/bin/python || echo /opt/bin/python)
|
||||||
|
- echo "CI_JOB_NAME is $CI_JOB_NAME"
|
||||||
|
- echo "PYPATH is $PYPATH"
|
||||||
script:
|
script:
|
||||||
- pwd
|
- pwd
|
||||||
- ls
|
- ls
|
||||||
- echo ${PWD}
|
- echo ${PWD}
|
||||||
|
- echo "${STARTUP_SCRIPT}"
|
||||||
- >
|
- >
|
||||||
ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local
|
ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local
|
||||||
${LOG_LEVEL}
|
${LOG_LEVEL}
|
||||||
-e cloud_image=${CLOUD_IMAGE}
|
|
||||||
-e cloud_region=${CLOUD_REGION}
|
|
||||||
-e gce_credentials_file=${HOME}/.ssh/gce.json
|
-e gce_credentials_file=${HOME}/.ssh/gce.json
|
||||||
-e gce_project_id=${GCE_PROJECT_ID}
|
-e gce_project_id=${GCE_PROJECT_ID}
|
||||||
-e gce_service_account_email=${GCE_ACCOUNT}
|
-e gce_service_account_email=${GCE_ACCOUNT}
|
||||||
-e cloud_machine_type=${CLOUD_MACHINE_TYPE}
|
|
||||||
-e inventory_path=${PWD}/inventory/inventory.ini
|
-e inventory_path=${PWD}/inventory/inventory.ini
|
||||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
|
||||||
-e mode=${CLUSTER_MODE}
|
|
||||||
-e test_id=${TEST_ID}
|
-e test_id=${TEST_ID}
|
||||||
|
-e preemptible=$GCE_PREEMPTIBLE
|
||||||
|
|
||||||
# Check out latest tag if testing upgrade
|
# Check out latest tag if testing upgrade
|
||||||
# Uncomment when gitlab kargo repo has tags
|
# Uncomment when gitlab kargo repo has tags
|
||||||
#- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
|
#- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
|
||||||
- test "${UPGRADE_TEST}" != "false" && git checkout 031cf565ec3ccd3ebbe80eeef3454c3780e5c598 && pip install ansible==2.2.0
|
- test "${UPGRADE_TEST}" != "false" && git checkout 72ae7638bcc94c66afa8620dfa4ad9a9249327ea
|
||||||
|
# Checkout the CI vars file so it is available
|
||||||
|
- test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml
|
||||||
|
|
||||||
|
|
||||||
# Create cluster
|
# Create cluster
|
||||||
- >
|
- >
|
||||||
ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER
|
ansible-playbook
|
||||||
|
-i inventory/inventory.ini
|
||||||
|
-b --become-user=root
|
||||||
|
--private-key=${HOME}/.ssh/id_rsa
|
||||||
|
-u $SSH_USER
|
||||||
${SSH_ARGS}
|
${SSH_ARGS}
|
||||||
${LOG_LEVEL}
|
${LOG_LEVEL}
|
||||||
|
-e @${CI_TEST_VARS}
|
||||||
-e ansible_python_interpreter=${PYPATH}
|
-e ansible_python_interpreter=${PYPATH}
|
||||||
-e ansible_ssh_user=${SSH_USER}
|
-e ansible_ssh_user=${SSH_USER}
|
||||||
-e bootstrap_os=${BOOTSTRAP_OS}
|
|
||||||
-e cert_management=${CERT_MGMT:-script}
|
|
||||||
-e cloud_provider=gce
|
|
||||||
-e deploy_netchecker=true
|
|
||||||
-e download_localhost=${DOWNLOAD_LOCALHOST}
|
|
||||||
-e download_run_once=${DOWNLOAD_RUN_ONCE}
|
|
||||||
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
|
|
||||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
|
||||||
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
|
|
||||||
-e local_release_dir=${PWD}/downloads
|
-e local_release_dir=${PWD}/downloads
|
||||||
-e resolvconf_mode=${RESOLVCONF_MODE}
|
|
||||||
-e vault_deployment_type=${VAULT_DEPLOYMENT}
|
|
||||||
--limit "all:!fake_hosts"
|
--limit "all:!fake_hosts"
|
||||||
cluster.yml
|
cluster.yml
|
||||||
|
|
||||||
# Repeat deployment if testing upgrade
|
# Repeat deployment if testing upgrade
|
||||||
- >
|
- >
|
||||||
if [ "${UPGRADE_TEST}" != "false" ]; then
|
if [ "${UPGRADE_TEST}" != "false" ]; then
|
||||||
test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml";
|
test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml";
|
||||||
test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml";
|
test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml";
|
||||||
pip install ansible==2.2.1.0;
|
git checkout "${CI_BUILD_REF}";
|
||||||
git checkout "${CI_BUILD_REF}";
|
ansible-playbook
|
||||||
ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER
|
-i inventory/inventory.ini
|
||||||
${SSH_ARGS}
|
-b --become-user=root
|
||||||
${LOG_LEVEL}
|
--private-key=${HOME}/.ssh/id_rsa
|
||||||
-e ansible_python_interpreter=${PYPATH}
|
-u $SSH_USER
|
||||||
-e ansible_ssh_user=${SSH_USER}
|
${SSH_ARGS}
|
||||||
-e bootstrap_os=${BOOTSTRAP_OS}
|
${LOG_LEVEL}
|
||||||
-e cloud_provider=gce
|
-e @${CI_TEST_VARS}
|
||||||
-e deploy_netchecker=true
|
-e ansible_python_interpreter=${PYPATH}
|
||||||
-e download_localhost=${DOWNLOAD_LOCALHOST}
|
-e ansible_ssh_user=${SSH_USER}
|
||||||
-e download_run_once=${DOWNLOAD_RUN_ONCE}
|
-e local_release_dir=${PWD}/downloads
|
||||||
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
|
--limit "all:!fake_hosts"
|
||||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
$PLAYBOOK;
|
||||||
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
|
|
||||||
-e local_release_dir=${PWD}/downloads
|
|
||||||
-e resolvconf_mode=${RESOLVCONF_MODE}
|
|
||||||
-e weave_cpu_requests=${WEAVE_CPU_LIMIT}
|
|
||||||
-e weave_cpu_limit=${WEAVE_CPU_LIMIT}
|
|
||||||
--limit "all:!fake_hosts"
|
|
||||||
$PLAYBOOK;
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Tests Cases
|
# Tests Cases
|
||||||
## Test Master API
|
## Test Master API
|
||||||
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
|
- >
|
||||||
|
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
|
||||||
|
-e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
|
||||||
|
|
||||||
## Ping the between 2 pod
|
## Ping the between 2 pod
|
||||||
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
|
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
|
||||||
@@ -173,195 +151,157 @@ before_script:
|
|||||||
## Idempotency checks 1/5 (repeat deployment)
|
## Idempotency checks 1/5 (repeat deployment)
|
||||||
- >
|
- >
|
||||||
if [ "${IDEMPOT_CHECK}" = "true" ]; then
|
if [ "${IDEMPOT_CHECK}" = "true" ]; then
|
||||||
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
|
ansible-playbook
|
||||||
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
-i inventory/inventory.ini
|
||||||
--private-key=${HOME}/.ssh/id_rsa
|
-b --become-user=root
|
||||||
-e bootstrap_os=${BOOTSTRAP_OS}
|
--private-key=${HOME}/.ssh/id_rsa
|
||||||
-e ansible_python_interpreter=${PYPATH}
|
-u $SSH_USER
|
||||||
-e download_localhost=${DOWNLOAD_LOCALHOST}
|
${SSH_ARGS}
|
||||||
-e download_run_once=${DOWNLOAD_RUN_ONCE}
|
${LOG_LEVEL}
|
||||||
-e deploy_netchecker=true
|
-e @${CI_TEST_VARS}
|
||||||
-e resolvconf_mode=${RESOLVCONF_MODE}
|
-e ansible_python_interpreter=${PYPATH}
|
||||||
-e local_release_dir=${PWD}/downloads
|
-e local_release_dir=${PWD}/downloads
|
||||||
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
|
--limit "all:!fake_hosts"
|
||||||
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
|
|
||||||
--limit "all:!fake_hosts"
|
|
||||||
cluster.yml;
|
cluster.yml;
|
||||||
fi
|
fi
|
||||||
|
|
||||||
## Idempotency checks 2/5 (Advanced DNS checks)
|
## Idempotency checks 2/5 (Advanced DNS checks)
|
||||||
- >
|
- >
|
||||||
if [ "${IDEMPOT_CHECK}" = "true" ]; then
|
if [ "${IDEMPOT_CHECK}" = "true" ]; then
|
||||||
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
|
ansible-playbook
|
||||||
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
|
-i inventory/inventory.ini
|
||||||
--limit "all:!fake_hosts"
|
-b --become-user=root
|
||||||
|
--private-key=${HOME}/.ssh/id_rsa
|
||||||
|
-u $SSH_USER
|
||||||
|
${SSH_ARGS}
|
||||||
|
${LOG_LEVEL}
|
||||||
|
-e @${CI_TEST_VARS}
|
||||||
|
--limit "all:!fake_hosts"
|
||||||
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
|
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
|
||||||
fi
|
fi
|
||||||
|
|
||||||
## Idempotency checks 3/5 (reset deployment)
|
## Idempotency checks 3/5 (reset deployment)
|
||||||
- >
|
- >
|
||||||
if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then
|
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
|
||||||
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
|
ansible-playbook
|
||||||
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
-i inventory/inventory.ini
|
||||||
--private-key=${HOME}/.ssh/id_rsa
|
-b --become-user=root
|
||||||
-e bootstrap_os=${BOOTSTRAP_OS}
|
--private-key=${HOME}/.ssh/id_rsa
|
||||||
-e ansible_python_interpreter=${PYPATH}
|
-u $SSH_USER
|
||||||
-e reset_confirmation=yes
|
${SSH_ARGS}
|
||||||
|
${LOG_LEVEL}
|
||||||
|
-e @${CI_TEST_VARS}
|
||||||
|
-e ansible_python_interpreter=${PYPATH}
|
||||||
|
-e reset_confirmation=yes
|
||||||
--limit "all:!fake_hosts"
|
--limit "all:!fake_hosts"
|
||||||
reset.yml;
|
reset.yml;
|
||||||
fi
|
fi
|
||||||
|
|
||||||
## Idempotency checks 4/5 (redeploy after reset)
|
## Idempotency checks 4/5 (redeploy after reset)
|
||||||
- >
|
- >
|
||||||
if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then
|
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
|
||||||
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
|
ansible-playbook
|
||||||
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
-i inventory/inventory.ini
|
||||||
--private-key=${HOME}/.ssh/id_rsa
|
-b --become-user=root
|
||||||
-e bootstrap_os=${BOOTSTRAP_OS}
|
--private-key=${HOME}/.ssh/id_rsa
|
||||||
-e ansible_python_interpreter=${PYPATH}
|
-u $SSH_USER
|
||||||
-e download_localhost=${DOWNLOAD_LOCALHOST}
|
${SSH_ARGS}
|
||||||
-e download_run_once=${DOWNLOAD_RUN_ONCE}
|
${LOG_LEVEL}
|
||||||
-e deploy_netchecker=true
|
-e @${CI_TEST_VARS}
|
||||||
-e resolvconf_mode=${RESOLVCONF_MODE}
|
-e ansible_python_interpreter=${PYPATH}
|
||||||
-e local_release_dir=${PWD}/downloads
|
-e local_release_dir=${PWD}/downloads
|
||||||
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
|
--limit "all:!fake_hosts"
|
||||||
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
|
|
||||||
--limit "all:!fake_hosts"
|
|
||||||
cluster.yml;
|
cluster.yml;
|
||||||
fi
|
fi
|
||||||
|
|
||||||
## Idempotency checks 5/5 (Advanced DNS checks)
|
## Idempotency checks 5/5 (Advanced DNS checks)
|
||||||
- >
|
- >
|
||||||
if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then
|
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
|
||||||
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
|
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
|
||||||
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
|
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
|
||||||
--limit "all:!fake_hosts"
|
--limit "all:!fake_hosts"
|
||||||
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
|
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
|
||||||
fi
|
fi
|
||||||
|
|
||||||
after_script:
|
after_script:
|
||||||
- >
|
- >
|
||||||
ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
|
ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
|
||||||
-e mode=${CLUSTER_MODE}
|
-e @${CI_TEST_VARS}
|
||||||
-e test_id=${TEST_ID}
|
-e test_id=${TEST_ID}
|
||||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
|
||||||
-e gce_project_id=${GCE_PROJECT_ID}
|
-e gce_project_id=${GCE_PROJECT_ID}
|
||||||
-e gce_service_account_email=${GCE_ACCOUNT}
|
-e gce_service_account_email=${GCE_ACCOUNT}
|
||||||
-e gce_credentials_file=${HOME}/.ssh/gce.json
|
-e gce_credentials_file=${HOME}/.ssh/gce.json
|
||||||
-e cloud_image=${CLOUD_IMAGE}
|
|
||||||
-e inventory_path=${PWD}/inventory/inventory.ini
|
-e inventory_path=${PWD}/inventory/inventory.ini
|
||||||
-e cloud_region=${CLOUD_REGION}
|
|
||||||
|
|
||||||
# Test matrix. Leave the comments for markup scripts.
|
# Test matrix. Leave the comments for markup scripts.
|
||||||
.coreos_calico_sep_variables: &coreos_calico_sep_variables
|
.coreos_calico_aio_variables: &coreos_calico_aio_variables
|
||||||
# stage: deploy-gce-part1
|
# stage: deploy-gce-part1
|
||||||
KUBE_NETWORK_PLUGIN: calico
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
CLOUD_IMAGE: coreos-stable-1298-6-0-v20170315
|
|
||||||
CLOUD_REGION: us-west1-b
|
|
||||||
CLUSTER_MODE: separate
|
|
||||||
BOOTSTRAP_OS: coreos
|
|
||||||
RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
|
|
||||||
|
|
||||||
.ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables
|
.ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables
|
||||||
# stage: deploy-gce-part1
|
# stage: deploy-gce-part1
|
||||||
KUBE_NETWORK_PLUGIN: canal
|
|
||||||
CLOUD_IMAGE: ubuntu-1604-xenial
|
|
||||||
CLOUD_REGION: europe-west1-b
|
|
||||||
CLOUD_MACHINE_TYPE: "n1-standard-2"
|
|
||||||
UPGRADE_TEST: "basic"
|
|
||||||
CLUSTER_MODE: ha
|
|
||||||
UPGRADE_TEST: "graceful"
|
UPGRADE_TEST: "graceful"
|
||||||
|
|
||||||
|
.centos_weave_kubeadm_variables: ¢os_weave_kubeadm_variables
|
||||||
|
# stage: deploy-gce-part1
|
||||||
|
UPGRADE_TEST: "graceful"
|
||||||
|
|
||||||
|
.ubuntu_canal_kubeadm_variables: &ubuntu_canal_kubeadm_variables
|
||||||
|
# stage: deploy-gce-part1
|
||||||
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
|
|
||||||
.rhel7_weave_variables: &rhel7_weave_variables
|
.rhel7_weave_variables: &rhel7_weave_variables
|
||||||
# stage: deploy-gce-part1
|
# stage: deploy-gce-part1
|
||||||
KUBE_NETWORK_PLUGIN: weave
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
CLOUD_IMAGE: rhel-7
|
|
||||||
CLOUD_REGION: europe-west1-b
|
|
||||||
CLUSTER_MODE: default
|
|
||||||
|
|
||||||
.centos7_flannel_variables: ¢os7_flannel_variables
|
.centos7_flannel_addons_variables: ¢os7_flannel_addons_variables
|
||||||
# stage: deploy-gce-part2
|
# stage: deploy-gce-part2
|
||||||
KUBE_NETWORK_PLUGIN: flannel
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
CLOUD_IMAGE: centos-7
|
|
||||||
CLOUD_REGION: us-west1-a
|
|
||||||
CLUSTER_MODE: default
|
|
||||||
|
|
||||||
.debian8_calico_variables: &debian8_calico_variables
|
.debian8_calico_variables: &debian8_calico_variables
|
||||||
# stage: deploy-gce-part2
|
# stage: deploy-gce-part2
|
||||||
KUBE_NETWORK_PLUGIN: calico
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
CLOUD_IMAGE: debian-8-kubespray
|
|
||||||
CLOUD_REGION: us-central1-b
|
|
||||||
CLUSTER_MODE: default
|
|
||||||
|
|
||||||
.coreos_canal_variables: &coreos_canal_variables
|
.coreos_canal_variables: &coreos_canal_variables
|
||||||
# stage: deploy-gce-part2
|
# stage: deploy-gce-part2
|
||||||
KUBE_NETWORK_PLUGIN: canal
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
CLOUD_IMAGE: coreos-stable-1298-6-0-v20170315
|
|
||||||
CLOUD_REGION: us-east1-b
|
|
||||||
CLUSTER_MODE: default
|
|
||||||
BOOTSTRAP_OS: coreos
|
|
||||||
IDEMPOT_CHECK: "true"
|
|
||||||
RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
|
|
||||||
|
|
||||||
.rhel7_canal_sep_variables: &rhel7_canal_sep_variables
|
.rhel7_canal_sep_variables: &rhel7_canal_sep_variables
|
||||||
# stage: deploy-gce-special
|
# stage: deploy-gce-special
|
||||||
KUBE_NETWORK_PLUGIN: canal
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
CLOUD_IMAGE: rhel-7
|
|
||||||
CLOUD_REGION: us-east1-b
|
|
||||||
CLUSTER_MODE: separate
|
|
||||||
|
|
||||||
.ubuntu_weave_sep_variables: &ubuntu_weave_sep_variables
|
.ubuntu_weave_sep_variables: &ubuntu_weave_sep_variables
|
||||||
# stage: deploy-gce-special
|
# stage: deploy-gce-special
|
||||||
KUBE_NETWORK_PLUGIN: weave
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
CLOUD_IMAGE: ubuntu-1604-xenial
|
|
||||||
CLOUD_REGION: us-central1-b
|
|
||||||
CLUSTER_MODE: separate
|
|
||||||
IDEMPOT_CHECK: "false"
|
|
||||||
|
|
||||||
.centos7_calico_ha_variables: ¢os7_calico_ha_variables
|
.centos7_calico_ha_variables: ¢os7_calico_ha_variables
|
||||||
# stage: deploy-gce-special
|
# stage: deploy-gce-special
|
||||||
KUBE_NETWORK_PLUGIN: calico
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
DOWNLOAD_LOCALHOST: "true"
|
|
||||||
DOWNLOAD_RUN_ONCE: "true"
|
|
||||||
CLOUD_IMAGE: centos-7
|
|
||||||
CLOUD_REGION: europe-west1-b
|
|
||||||
CLUSTER_MODE: ha-scale
|
|
||||||
IDEMPOT_CHECK: "true"
|
|
||||||
|
|
||||||
.coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables
|
.coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables
|
||||||
# stage: deploy-gce-special
|
# stage: deploy-gce-special
|
||||||
KUBE_NETWORK_PLUGIN: weave
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
CLOUD_IMAGE: coreos-alpha-1325-0-0-v20170216
|
|
||||||
CLOUD_REGION: us-west1-a
|
|
||||||
CLUSTER_MODE: ha-scale
|
|
||||||
BOOTSTRAP_OS: coreos
|
|
||||||
RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
|
|
||||||
|
|
||||||
.ubuntu_rkt_sep_variables: &ubuntu_rkt_sep_variables
|
.ubuntu_rkt_sep_variables: &ubuntu_rkt_sep_variables
|
||||||
# stage: deploy-gce-part1
|
# stage: deploy-gce-part1
|
||||||
KUBE_NETWORK_PLUGIN: flannel
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
CLOUD_IMAGE: ubuntu-1604-xenial
|
|
||||||
CLOUD_REGION: us-central1-b
|
|
||||||
CLUSTER_MODE: separate
|
|
||||||
ETCD_DEPLOYMENT: rkt
|
|
||||||
KUBELET_DEPLOYMENT: rkt
|
|
||||||
|
|
||||||
.ubuntu_vault_sep_variables: &ubuntu_vault_sep_variables
|
.ubuntu_vault_sep_variables: &ubuntu_vault_sep_variables
|
||||||
# stage: deploy-gce-part1
|
# stage: deploy-gce-part1
|
||||||
KUBE_NETWORK_PLUGIN: canal
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
CERT_MGMT: vault
|
|
||||||
CLOUD_IMAGE: ubuntu-1604-xenial
|
.ubuntu_flannel_variables: &ubuntu_flannel_variables
|
||||||
CLOUD_REGION: us-central1-b
|
# stage: deploy-gce-special
|
||||||
CLUSTER_MODE: separate
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
|
|
||||||
# Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
|
# Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
|
||||||
coreos-calico-sep:
|
coreos-calico-aio:
|
||||||
stage: deploy-gce-part1
|
stage: deploy-gce-part1
|
||||||
<<: *job
|
<<: *job
|
||||||
<<: *gce
|
<<: *gce
|
||||||
variables:
|
variables:
|
||||||
<<: *gce_variables
|
<<: *gce_variables
|
||||||
<<: *coreos_calico_sep_variables
|
<<: *coreos_calico_aio_variables
|
||||||
when: on_success
|
when: on_success
|
||||||
except: ['triggers']
|
except: ['triggers']
|
||||||
only: [/^pr-.*$/]
|
only: [/^pr-.*$/]
|
||||||
@@ -372,28 +312,28 @@ coreos-calico-sep-triggers:
|
|||||||
<<: *gce
|
<<: *gce
|
||||||
variables:
|
variables:
|
||||||
<<: *gce_variables
|
<<: *gce_variables
|
||||||
<<: *coreos_calico_sep_variables
|
<<: *coreos_calico_aio_variables
|
||||||
when: on_success
|
when: on_success
|
||||||
only: ['triggers']
|
only: ['triggers']
|
||||||
|
|
||||||
centos7-flannel:
|
centos7-flannel-addons:
|
||||||
stage: deploy-gce-part2
|
stage: deploy-gce-part2
|
||||||
<<: *job
|
<<: *job
|
||||||
<<: *gce
|
<<: *gce
|
||||||
variables:
|
variables:
|
||||||
<<: *gce_variables
|
<<: *gce_variables
|
||||||
<<: *centos7_flannel_variables
|
<<: *centos7_flannel_addons_variables
|
||||||
when: on_success
|
when: on_success
|
||||||
except: ['triggers']
|
except: ['triggers']
|
||||||
only: [/^pr-.*$/]
|
only: [/^pr-.*$/]
|
||||||
|
|
||||||
centos7-flannel-triggers:
|
centos7-flannel-addons-triggers:
|
||||||
stage: deploy-gce-part1
|
stage: deploy-gce-part1
|
||||||
<<: *job
|
<<: *job
|
||||||
<<: *gce
|
<<: *gce
|
||||||
variables:
|
variables:
|
||||||
<<: *gce_variables
|
<<: *gce_variables
|
||||||
<<: *centos7_flannel_variables
|
<<: *centos7_flannel_addons_variables
|
||||||
when: on_success
|
when: on_success
|
||||||
only: ['triggers']
|
only: ['triggers']
|
||||||
|
|
||||||
@@ -440,6 +380,48 @@ ubuntu-canal-ha-triggers:
|
|||||||
when: on_success
|
when: on_success
|
||||||
only: ['triggers']
|
only: ['triggers']
|
||||||
|
|
||||||
|
ubuntu-canal-kubeadm:
|
||||||
|
stage: deploy-gce-part1
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *ubuntu_canal_kubeadm_variables
|
||||||
|
when: manual
|
||||||
|
except: ['triggers']
|
||||||
|
only: ['master', /^pr-.*$/]
|
||||||
|
|
||||||
|
ubuntu-canal-kubeadm-triggers:
|
||||||
|
stage: deploy-gce-part1
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *ubuntu_canal_kubeadm_variables
|
||||||
|
when: on_success
|
||||||
|
only: ['triggers']
|
||||||
|
|
||||||
|
centos-weave-kubeadm:
|
||||||
|
stage: deploy-gce-part1
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *centos_weave_kubeadm_variables
|
||||||
|
when: manual
|
||||||
|
except: ['triggers']
|
||||||
|
only: ['master', /^pr-.*$/]
|
||||||
|
|
||||||
|
centos-weave-kubeadm-triggers:
|
||||||
|
stage: deploy-gce-part1
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *centos_weave_kubeadm_variables
|
||||||
|
when: on_success
|
||||||
|
only: ['triggers']
|
||||||
|
|
||||||
rhel7-weave:
|
rhel7-weave:
|
||||||
stage: deploy-gce-part1
|
stage: deploy-gce-part1
|
||||||
<<: *job
|
<<: *job
|
||||||
@@ -579,6 +561,17 @@ ubuntu-vault-sep:
|
|||||||
except: ['triggers']
|
except: ['triggers']
|
||||||
only: ['master', /^pr-.*$/]
|
only: ['master', /^pr-.*$/]
|
||||||
|
|
||||||
|
ubuntu-flannel-sep:
|
||||||
|
stage: deploy-gce-special
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *ubuntu_flannel_variables
|
||||||
|
when: manual
|
||||||
|
except: ['triggers']
|
||||||
|
only: ['master', /^pr-.*$/]
|
||||||
|
|
||||||
# Premoderated with manual actions
|
# Premoderated with manual actions
|
||||||
ci-authorized:
|
ci-authorized:
|
||||||
<<: *job
|
<<: *job
|
||||||
@@ -588,7 +581,7 @@ ci-authorized:
|
|||||||
script:
|
script:
|
||||||
- /bin/sh scripts/premoderator.sh
|
- /bin/sh scripts/premoderator.sh
|
||||||
except: ['triggers', 'master']
|
except: ['triggers', 'master']
|
||||||
|
|
||||||
syntax-check:
|
syntax-check:
|
||||||
<<: *job
|
<<: *job
|
||||||
stage: unit-tests
|
stage: unit-tests
|
||||||
@@ -596,6 +589,14 @@ syntax-check:
|
|||||||
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root cluster.yml -vvv --syntax-check
|
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root cluster.yml -vvv --syntax-check
|
||||||
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root upgrade-cluster.yml -vvv --syntax-check
|
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root upgrade-cluster.yml -vvv --syntax-check
|
||||||
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root reset.yml -vvv --syntax-check
|
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root reset.yml -vvv --syntax-check
|
||||||
|
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root extra_playbooks/upgrade-only-k8s.yml -vvv --syntax-check
|
||||||
|
except: ['triggers', 'master']
|
||||||
|
|
||||||
|
yamllint:
|
||||||
|
<<: *job
|
||||||
|
stage: unit-tests
|
||||||
|
script:
|
||||||
|
- yamllint roles
|
||||||
except: ['triggers', 'master']
|
except: ['triggers', 'master']
|
||||||
|
|
||||||
tox-inventory-builder:
|
tox-inventory-builder:
|
||||||
|
|||||||
161
.travis.yml.bak
161
.travis.yml.bak
@@ -1,161 +0,0 @@
|
|||||||
sudo: required
|
|
||||||
|
|
||||||
services:
|
|
||||||
- docker
|
|
||||||
|
|
||||||
git:
|
|
||||||
depth: 5
|
|
||||||
|
|
||||||
env:
|
|
||||||
global:
|
|
||||||
GCE_USER=travis
|
|
||||||
SSH_USER=$GCE_USER
|
|
||||||
TEST_ID=$TRAVIS_JOB_NUMBER
|
|
||||||
CONTAINER_ENGINE=docker
|
|
||||||
PRIVATE_KEY=$GCE_PRIVATE_KEY
|
|
||||||
GS_ACCESS_KEY_ID=$GS_KEY
|
|
||||||
GS_SECRET_ACCESS_KEY=$GS_SECRET
|
|
||||||
ANSIBLE_KEEP_REMOTE_FILES=1
|
|
||||||
CLUSTER_MODE=default
|
|
||||||
BOOTSTRAP_OS=none
|
|
||||||
matrix:
|
|
||||||
# Debian Jessie
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=canal
|
|
||||||
CLOUD_IMAGE=debian-8-kubespray
|
|
||||||
CLOUD_REGION=asia-east1-a
|
|
||||||
CLUSTER_MODE=ha
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=calico
|
|
||||||
CLOUD_IMAGE=debian-8-kubespray
|
|
||||||
CLOUD_REGION=europe-west1-c
|
|
||||||
CLUSTER_MODE=default
|
|
||||||
|
|
||||||
# Centos 7
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=flannel
|
|
||||||
CLOUD_IMAGE=centos-7
|
|
||||||
CLOUD_REGION=asia-northeast1-c
|
|
||||||
CLUSTER_MODE=default
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=calico
|
|
||||||
CLOUD_IMAGE=centos-7
|
|
||||||
CLOUD_REGION=us-central1-b
|
|
||||||
CLUSTER_MODE=ha
|
|
||||||
|
|
||||||
# Redhat 7
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=weave
|
|
||||||
CLOUD_IMAGE=rhel-7
|
|
||||||
CLOUD_REGION=us-east1-c
|
|
||||||
CLUSTER_MODE=default
|
|
||||||
|
|
||||||
# CoreOS stable
|
|
||||||
#- >-
|
|
||||||
# KUBE_NETWORK_PLUGIN=weave
|
|
||||||
# CLOUD_IMAGE=coreos-stable
|
|
||||||
# CLOUD_REGION=europe-west1-b
|
|
||||||
# CLUSTER_MODE=ha
|
|
||||||
# BOOTSTRAP_OS=coreos
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=canal
|
|
||||||
CLOUD_IMAGE=coreos-stable
|
|
||||||
CLOUD_REGION=us-west1-b
|
|
||||||
CLUSTER_MODE=default
|
|
||||||
BOOTSTRAP_OS=coreos
|
|
||||||
|
|
||||||
# Extra cases for separated roles
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=canal
|
|
||||||
CLOUD_IMAGE=rhel-7
|
|
||||||
CLOUD_REGION=asia-northeast1-b
|
|
||||||
CLUSTER_MODE=separate
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=weave
|
|
||||||
CLOUD_IMAGE=ubuntu-1604-xenial
|
|
||||||
CLOUD_REGION=europe-west1-d
|
|
||||||
CLUSTER_MODE=separate
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=calico
|
|
||||||
CLOUD_IMAGE=coreos-stable
|
|
||||||
CLOUD_REGION=us-central1-f
|
|
||||||
CLUSTER_MODE=separate
|
|
||||||
BOOTSTRAP_OS=coreos
|
|
||||||
|
|
||||||
matrix:
|
|
||||||
allow_failures:
|
|
||||||
- env: KUBE_NETWORK_PLUGIN=weave CLOUD_IMAGE=coreos-stable CLOUD_REGION=europe-west1-b CLUSTER_MODE=ha BOOTSTRAP_OS=coreos
|
|
||||||
|
|
||||||
before_install:
|
|
||||||
# Install Ansible.
|
|
||||||
- pip install --user ansible
|
|
||||||
- pip install --user netaddr
|
|
||||||
# W/A https://github.com/ansible/ansible-modules-core/issues/5196#issuecomment-253766186
|
|
||||||
- pip install --user apache-libcloud==0.20.1
|
|
||||||
- pip install --user boto==2.9.0 -U
|
|
||||||
# Load cached docker images
|
|
||||||
- if [ -d /var/tmp/releases ]; then find /var/tmp/releases -type f -name "*.tar" | xargs -I {} sh -c "zcat {} | docker load"; fi
|
|
||||||
|
|
||||||
cache:
|
|
||||||
- directories:
|
|
||||||
- $HOME/.cache/pip
|
|
||||||
- $HOME/.local
|
|
||||||
- /var/tmp/releases
|
|
||||||
|
|
||||||
before_script:
|
|
||||||
- echo "RUN $TRAVIS_JOB_NUMBER $KUBE_NETWORK_PLUGIN $CONTAINER_ENGINE "
|
|
||||||
- mkdir -p $HOME/.ssh
|
|
||||||
- echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
|
|
||||||
- echo $GCE_PEM_FILE | base64 -d > $HOME/.ssh/gce
|
|
||||||
- chmod 400 $HOME/.ssh/id_rsa
|
|
||||||
- chmod 755 $HOME/.local/bin/ansible-playbook
|
|
||||||
- $HOME/.local/bin/ansible-playbook --version
|
|
||||||
- cp tests/ansible.cfg .
|
|
||||||
- export PYPATH=$([ $BOOTSTRAP_OS = none ] && echo /usr/bin/python || echo /opt/bin/python)
|
|
||||||
# - "echo $HOME/.local/bin/ansible-playbook -i inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e '{\"cloud_provider\": true}' $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} setup-kubernetes/cluster.yml"
|
|
||||||
|
|
||||||
script:
|
|
||||||
- >
|
|
||||||
$HOME/.local/bin/ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local $LOG_LEVEL
|
|
||||||
-e mode=${CLUSTER_MODE}
|
|
||||||
-e test_id=${TEST_ID}
|
|
||||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
|
||||||
-e gce_project_id=${GCE_PROJECT_ID}
|
|
||||||
-e gce_service_account_email=${GCE_ACCOUNT}
|
|
||||||
-e gce_pem_file=${HOME}/.ssh/gce
|
|
||||||
-e cloud_image=${CLOUD_IMAGE}
|
|
||||||
-e inventory_path=${PWD}/inventory/inventory.ini
|
|
||||||
-e cloud_region=${CLOUD_REGION}
|
|
||||||
|
|
||||||
# Create cluster with netchecker app deployed
|
|
||||||
- >
|
|
||||||
$HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
|
|
||||||
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
|
||||||
-e bootstrap_os=${BOOTSTRAP_OS}
|
|
||||||
-e ansible_python_interpreter=${PYPATH}
|
|
||||||
-e download_run_once=true
|
|
||||||
-e download_localhost=true
|
|
||||||
-e local_release_dir=/var/tmp/releases
|
|
||||||
-e deploy_netchecker=true
|
|
||||||
cluster.yml
|
|
||||||
|
|
||||||
# Tests Cases
|
|
||||||
## Test Master API
|
|
||||||
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/010_check-apiserver.yml $LOG_LEVEL
|
|
||||||
## Ping the between 2 pod
|
|
||||||
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
|
|
||||||
## Advanced DNS checks
|
|
||||||
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/040_check-network-adv.yml $LOG_LEVEL
|
|
||||||
|
|
||||||
after_script:
|
|
||||||
- >
|
|
||||||
$HOME/.local/bin/ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
|
|
||||||
-e mode=${CLUSTER_MODE}
|
|
||||||
-e test_id=${TEST_ID}
|
|
||||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
|
||||||
-e gce_project_id=${GCE_PROJECT_ID}
|
|
||||||
-e gce_service_account_email=${GCE_ACCOUNT}
|
|
||||||
-e gce_pem_file=${HOME}/.ssh/gce
|
|
||||||
-e cloud_image=${CLOUD_IMAGE}
|
|
||||||
-e inventory_path=${PWD}/inventory/inventory.ini
|
|
||||||
-e cloud_region=${CLOUD_REGION}
|
|
||||||
16
.yamllint
Normal file
16
.yamllint
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
---
|
||||||
|
extends: default
|
||||||
|
|
||||||
|
rules:
|
||||||
|
braces:
|
||||||
|
min-spaces-inside: 0
|
||||||
|
max-spaces-inside: 1
|
||||||
|
brackets:
|
||||||
|
min-spaces-inside: 0
|
||||||
|
max-spaces-inside: 1
|
||||||
|
indentation:
|
||||||
|
spaces: 2
|
||||||
|
indent-sequences: consistent
|
||||||
|
line-length: disable
|
||||||
|
new-line-at-end-of-file: disable
|
||||||
|
truthy: disable
|
||||||
55
README.md
55
README.md
@@ -2,7 +2,7 @@
|
|||||||
|
|
||||||
## Deploy a production ready kubernetes cluster
|
## Deploy a production ready kubernetes cluster
|
||||||
|
|
||||||
If you have questions, join us on the [kubernetes slack](https://slack.k8s.io), channel **#kargo**.
|
If you have questions, join us on the [kubernetes slack](https://kubernetes.slack.com), channel **#kubespray**.
|
||||||
|
|
||||||
- Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal**
|
- Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal**
|
||||||
- **High available** cluster
|
- **High available** cluster
|
||||||
@@ -13,26 +13,29 @@ If you have questions, join us on the [kubernetes slack](https://slack.k8s.io),
|
|||||||
|
|
||||||
To deploy the cluster you can use :
|
To deploy the cluster you can use :
|
||||||
|
|
||||||
[**kargo-cli**](https://github.com/kubespray/kargo-cli) <br>
|
[**kubespray-cli**](https://github.com/kubespray/kubespray-cli) <br>
|
||||||
**Ansible** usual commands and [**inventory builder**](https://github.com/kubernetes-incubator/kargo/blob/master/contrib/inventory_builder/inventory.py) <br>
|
**Ansible** usual commands and [**inventory builder**](https://github.com/kubernetes-incubator/kubespray/blob/master/contrib/inventory_builder/inventory.py) <br>
|
||||||
**vagrant** by simply running `vagrant up` (for tests purposes) <br>
|
**vagrant** by simply running `vagrant up` (for tests purposes) <br>
|
||||||
|
|
||||||
|
|
||||||
* [Requirements](#requirements)
|
* [Requirements](#requirements)
|
||||||
* [Kargo vs ...](docs/comparisons.md)
|
* [Kubespray vs ...](docs/comparisons.md)
|
||||||
* [Getting started](docs/getting-started.md)
|
* [Getting started](docs/getting-started.md)
|
||||||
* [Ansible inventory and tags](docs/ansible.md)
|
* [Ansible inventory and tags](docs/ansible.md)
|
||||||
|
* [Integration with existing ansible repo](docs/integration.md)
|
||||||
* [Deployment data variables](docs/vars.md)
|
* [Deployment data variables](docs/vars.md)
|
||||||
* [DNS stack](docs/dns-stack.md)
|
* [DNS stack](docs/dns-stack.md)
|
||||||
* [HA mode](docs/ha-mode.md)
|
* [HA mode](docs/ha-mode.md)
|
||||||
* [Network plugins](#network-plugins)
|
* [Network plugins](#network-plugins)
|
||||||
* [Vagrant install](docs/vagrant.md)
|
* [Vagrant install](docs/vagrant.md)
|
||||||
* [CoreOS bootstrap](docs/coreos.md)
|
* [CoreOS bootstrap](docs/coreos.md)
|
||||||
|
* [Debian Jessie setup](docs/debian.md)
|
||||||
* [Downloaded artifacts](docs/downloads.md)
|
* [Downloaded artifacts](docs/downloads.md)
|
||||||
* [Cloud providers](docs/cloud.md)
|
* [Cloud providers](docs/cloud.md)
|
||||||
* [OpenStack](docs/openstack.md)
|
* [OpenStack](docs/openstack.md)
|
||||||
* [AWS](docs/aws.md)
|
* [AWS](docs/aws.md)
|
||||||
* [Azure](docs/azure.md)
|
* [Azure](docs/azure.md)
|
||||||
|
* [vSphere](docs/vsphere.md)
|
||||||
* [Large deployments](docs/large-deployments.md)
|
* [Large deployments](docs/large-deployments.md)
|
||||||
* [Upgrades basics](docs/upgrades.md)
|
* [Upgrades basics](docs/upgrades.md)
|
||||||
* [Roadmap](docs/roadmap.md)
|
* [Roadmap](docs/roadmap.md)
|
||||||
@@ -50,16 +53,19 @@ Note: Upstart/SysV init based OS types are not supported.
|
|||||||
Versions of supported components
|
Versions of supported components
|
||||||
--------------------------------
|
--------------------------------
|
||||||
|
|
||||||
[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.5.1 <br>
|
|
||||||
[etcd](https://github.com/coreos/etcd/releases) v3.0.6 <br>
|
|
||||||
[flanneld](https://github.com/coreos/flannel/releases) v0.6.2 <br>
|
|
||||||
[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.23.0 <br>
|
|
||||||
[canal](https://github.com/projectcalico/canal) (given calico/flannel versions) <br>
|
|
||||||
[weave](http://weave.works/) v1.8.2 <br>
|
|
||||||
[docker](https://www.docker.com/) v1.12.5 <br>
|
|
||||||
[rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 <br>
|
|
||||||
|
|
||||||
Note: rkt support as docker alternative is limited to control plane (etcd and
|
[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.8.1 <br>
|
||||||
|
[etcd](https://github.com/coreos/etcd/releases) v3.2.4 <br>
|
||||||
|
[flanneld](https://github.com/coreos/flannel/releases) v0.8.0 <br>
|
||||||
|
[calico](https://docs.projectcalico.org/v2.5/releases/) v2.5.0 <br>
|
||||||
|
[canal](https://github.com/projectcalico/canal) (given calico/flannel versions) <br>
|
||||||
|
[weave](http://weave.works/) v2.0.1 <br>
|
||||||
|
[docker](https://www.docker.com/) v1.13 (see note)<br>
|
||||||
|
[rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)<br>
|
||||||
|
|
||||||
|
Note: kubernetes doesn't support newer docker versions. Among other things kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
|
||||||
|
|
||||||
|
Note 2: rkt support as docker alternative is limited to control plane (etcd and
|
||||||
kubelet). Docker is still used for Kubernetes cluster workloads and network
|
kubelet). Docker is still used for Kubernetes cluster workloads and network
|
||||||
plugins' related OS services. Also note, only one of the supported network
|
plugins' related OS services. Also note, only one of the supported network
|
||||||
plugins can be deployed for a given single cluster.
|
plugins can be deployed for a given single cluster.
|
||||||
@@ -67,9 +73,9 @@ plugins can be deployed for a given single cluster.
|
|||||||
Requirements
|
Requirements
|
||||||
--------------
|
--------------
|
||||||
|
|
||||||
* **Ansible v2.2 (or newer) and python-netaddr is installed on the machine
|
* **Ansible v2.4 (or newer) and python-netaddr is installed on the machine
|
||||||
that will run Ansible commands**
|
that will run Ansible commands**
|
||||||
* **Jinja 2.8 (or newer) is required to run the Ansible Playbooks**
|
* **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
|
||||||
* The target servers must have **access to the Internet** in order to pull docker images.
|
* The target servers must have **access to the Internet** in order to pull docker images.
|
||||||
* The target servers are configured to allow **IPv4 forwarding**.
|
* The target servers are configured to allow **IPv4 forwarding**.
|
||||||
* **Your ssh key must be copied** to all the servers part of your inventory.
|
* **Your ssh key must be copied** to all the servers part of your inventory.
|
||||||
@@ -78,7 +84,8 @@ in order to avoid any issue during deployment you should disable your firewall.
|
|||||||
|
|
||||||
|
|
||||||
## Network plugins
|
## Network plugins
|
||||||
You can choose between 4 network plugins. (default: `calico`)
|
|
||||||
|
You can choose between 4 network plugins. (default: `calico`, except Vagrant uses `flannel`)
|
||||||
|
|
||||||
* [**flannel**](docs/flannel.md): gre/vxlan (layer 2) networking.
|
* [**flannel**](docs/flannel.md): gre/vxlan (layer 2) networking.
|
||||||
|
|
||||||
@@ -86,7 +93,7 @@ You can choose between 4 network plugins. (default: `calico`)
|
|||||||
|
|
||||||
* [**canal**](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.
|
* [**canal**](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.
|
||||||
|
|
||||||
* **weave**: Weave is a lightweight container overlay network that doesn't require an external K/V database cluster. <br>
|
* [**weave**](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster. <br>
|
||||||
(Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html)).
|
(Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html)).
|
||||||
|
|
||||||
The choice is defined with the variable `kube_network_plugin`. There is also an
|
The choice is defined with the variable `kube_network_plugin`. There is also an
|
||||||
@@ -94,22 +101,22 @@ option to leverage built-in cloud provider networking instead.
|
|||||||
See also [Network checker](docs/netcheck.md).
|
See also [Network checker](docs/netcheck.md).
|
||||||
|
|
||||||
## Community docs and resources
|
## Community docs and resources
|
||||||
- [kubernetes.io/docs/getting-started-guides/kargo/](https://kubernetes.io/docs/getting-started-guides/kargo/)
|
- [kubernetes.io/docs/getting-started-guides/kubespray/](https://kubernetes.io/docs/getting-started-guides/kubespray/)
|
||||||
- [kargo, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr
|
- [kubespray, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr
|
||||||
- [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty
|
- [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty
|
||||||
- [Deploy a Kubernets Cluster with Kargo (video)](https://www.youtube.com/watch?v=N9q51JgbWu8)
|
- [Deploy a Kubernetes Cluster with Kubespray (video)](https://www.youtube.com/watch?v=N9q51JgbWu8)
|
||||||
|
|
||||||
## Tools and projects on top of Kargo
|
## Tools and projects on top of Kubespray
|
||||||
- [Digital Rebar](https://github.com/digitalrebar/digitalrebar)
|
- [Digital Rebar](https://github.com/digitalrebar/digitalrebar)
|
||||||
- [Kargo-cli](https://github.com/kubespray/kargo-cli)
|
- [Kubespray-cli](https://github.com/kubespray/kubespray-cli)
|
||||||
- [Fuel-ccp-installer](https://github.com/openstack/fuel-ccp-installer)
|
- [Fuel-ccp-installer](https://github.com/openstack/fuel-ccp-installer)
|
||||||
- [Terraform Contrib](https://github.com/kubernetes-incubator/kargo/tree/master/contrib/terraform)
|
- [Terraform Contrib](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/terraform)
|
||||||
|
|
||||||
## CI Tests
|
## CI Tests
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
[](https://gitlab.com/kargo-ci/kubernetes-incubator__kargo/pipelines) </br>
|
[](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/pipelines) </br>
|
||||||
|
|
||||||
CI/end-to-end tests sponsored by Google (GCE), DigitalOcean, [teuto.net](https://teuto.net/) (openstack).
|
CI/end-to-end tests sponsored by Google (GCE), DigitalOcean, [teuto.net](https://teuto.net/) (openstack).
|
||||||
See the [test matrix](docs/test_cases.md) for details.
|
See the [test matrix](docs/test_cases.md) for details.
|
||||||
|
|||||||
20
RELEASE.md
20
RELEASE.md
@@ -1,16 +1,16 @@
|
|||||||
# Release Process
|
# Release Process
|
||||||
|
|
||||||
The Kargo Project is released on an as-needed basis. The process is as follows:
|
The Kubespray Project is released on an as-needed basis. The process is as follows:
|
||||||
|
|
||||||
1. An issue is proposing a new release with a changelog since the last release
|
1. An issue is proposing a new release with a changelog since the last release
|
||||||
2. At least on of the [OWNERS](OWNERS) must LGTM this release
|
2. At least one of the [OWNERS](OWNERS) must LGTM this release
|
||||||
3. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION`
|
3. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION`
|
||||||
4. The release issue is closed
|
4. The release issue is closed
|
||||||
5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kargo $VERSION is released`
|
5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
|
||||||
|
|
||||||
## Major/minor releases, merge freezes and milestones
|
## Major/minor releases, merge freezes and milestones
|
||||||
|
|
||||||
* Kargo does not maintain stable branches for releases. Releases are tags, not
|
* Kubespray does not maintain stable branches for releases. Releases are tags, not
|
||||||
branches, and there are no backports. Therefore, there is no need for merge
|
branches, and there are no backports. Therefore, there is no need for merge
|
||||||
freezes as well.
|
freezes as well.
|
||||||
|
|
||||||
@@ -20,21 +20,21 @@ The Kargo Project is released on an as-needed basis. The process is as follows:
|
|||||||
support lifetime, which ends once the milestone closed. Then only a next major
|
support lifetime, which ends once the milestone closed. Then only a next major
|
||||||
or minor release can be done.
|
or minor release can be done.
|
||||||
|
|
||||||
* Kargo major and minor releases are bound to the given ``kube_version`` major/minor
|
* Kubespray major and minor releases are bound to the given ``kube_version`` major/minor
|
||||||
version numbers and other components' arbitrary versions, like etcd or network plugins.
|
version numbers and other components' arbitrary versions, like etcd or network plugins.
|
||||||
Older or newer versions are not supported and not tested for the given release.
|
Older or newer versions are not supported and not tested for the given release.
|
||||||
|
|
||||||
* There is no unstable releases and no APIs, thus Kargo doesn't follow
|
* There is no unstable releases and no APIs, thus Kubespray doesn't follow
|
||||||
[semver](http://semver.org/). Every version describes only a stable release.
|
[semver](http://semver.org/). Every version describes only a stable release.
|
||||||
Breaking changes, if any introduced by changed defaults or non-contrib ansible roles'
|
Breaking changes, if any introduced by changed defaults or non-contrib ansible roles'
|
||||||
playbooks, shall be described in the release notes. Other breaking changes, if any in
|
playbooks, shall be described in the release notes. Other breaking changes, if any in
|
||||||
the contributed addons or bound versions of Kubernetes and other components, are
|
the contributed addons or bound versions of Kubernetes and other components, are
|
||||||
considered out of Kargo scope and are up to the components' teams to deal with and
|
considered out of Kubespray scope and are up to the components' teams to deal with and
|
||||||
document.
|
document.
|
||||||
|
|
||||||
* Minor releases can change components' versions, but not the major ``kube_version``.
|
* Minor releases can change components' versions, but not the major ``kube_version``.
|
||||||
Greater ``kube_version`` requires a new major or minor release. For example, if Kargo v2.0.0
|
Greater ``kube_version`` requires a new major or minor release. For example, if Kubespray v2.0.0
|
||||||
is bound to ``kube_version: 1.4.x``, ``calico_version: 0.22.0``, ``etcd_version: v3.0.6``,
|
is bound to ``kube_version: 1.4.x``, ``calico_version: 0.22.0``, ``etcd_version: v3.0.6``,
|
||||||
then Kargo v2.1.0 may be bound to only minor changes to ``kube_version``, like v1.5.1
|
then Kubespray v2.1.0 may be bound to only minor changes to ``kube_version``, like v1.5.1
|
||||||
and *any* changes to other components, like etcd v4, or calico 1.2.3.
|
and *any* changes to other components, like etcd v4, or calico 1.2.3.
|
||||||
And Kargo v3.x.x shall be bound to ``kube_version: 2.x.x`` respectively.
|
And Kubespray v3.x.x shall be bound to ``kube_version: 2.x.x`` respectively.
|
||||||
|
|||||||
45
Vagrantfile
vendored
45
Vagrantfile
vendored
@@ -3,23 +3,34 @@
|
|||||||
|
|
||||||
require 'fileutils'
|
require 'fileutils'
|
||||||
|
|
||||||
Vagrant.require_version ">= 1.8.0"
|
Vagrant.require_version ">= 1.9.0"
|
||||||
|
|
||||||
CONFIG = File.join(File.dirname(__FILE__), "vagrant/config.rb")
|
CONFIG = File.join(File.dirname(__FILE__), "vagrant/config.rb")
|
||||||
|
|
||||||
|
COREOS_URL_TEMPLATE = "https://storage.googleapis.com/%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant.json"
|
||||||
|
|
||||||
|
SUPPORTED_OS = {
|
||||||
|
"coreos-stable" => {box: "coreos-stable", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["stable"]},
|
||||||
|
"coreos-alpha" => {box: "coreos-alpha", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["alpha"]},
|
||||||
|
"coreos-beta" => {box: "coreos-beta", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
|
||||||
|
"ubuntu" => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "vagrant"},
|
||||||
|
"centos" => {box: "bento/centos-7.3", bootstrap_os: "centos", user: "vagrant"},
|
||||||
|
}
|
||||||
|
|
||||||
# Defaults for config options defined in CONFIG
|
# Defaults for config options defined in CONFIG
|
||||||
$num_instances = 3
|
$num_instances = 3
|
||||||
$instance_name_prefix = "k8s"
|
$instance_name_prefix = "k8s"
|
||||||
$vm_gui = false
|
$vm_gui = false
|
||||||
$vm_memory = 1536
|
$vm_memory = 2048
|
||||||
$vm_cpus = 1
|
$vm_cpus = 1
|
||||||
$shared_folders = {}
|
$shared_folders = {}
|
||||||
$forwarded_ports = {}
|
$forwarded_ports = {}
|
||||||
$subnet = "172.17.8"
|
$subnet = "172.17.8"
|
||||||
$box = "bento/ubuntu-16.04"
|
$os = "ubuntu"
|
||||||
|
$network_plugin = "flannel"
|
||||||
# The first three nodes are etcd servers
|
# The first three nodes are etcd servers
|
||||||
$etcd_instances = $num_instances
|
$etcd_instances = $num_instances
|
||||||
# The first two nodes are masters
|
# The first two nodes are kube masters
|
||||||
$kube_master_instances = $num_instances == 1 ? $num_instances : ($num_instances - 1)
|
$kube_master_instances = $num_instances == 1 ? $num_instances : ($num_instances - 1)
|
||||||
# All nodes are kube nodes
|
# All nodes are kube nodes
|
||||||
$kube_node_instances = $num_instances
|
$kube_node_instances = $num_instances
|
||||||
@@ -31,6 +42,7 @@ if File.exist?(CONFIG)
|
|||||||
require CONFIG
|
require CONFIG
|
||||||
end
|
end
|
||||||
|
|
||||||
|
$box = SUPPORTED_OS[$os][:box]
|
||||||
# if $inventory is not set, try to use example
|
# if $inventory is not set, try to use example
|
||||||
$inventory = File.join(File.dirname(__FILE__), "inventory") if ! $inventory
|
$inventory = File.join(File.dirname(__FILE__), "inventory") if ! $inventory
|
||||||
|
|
||||||
@@ -56,7 +68,10 @@ Vagrant.configure("2") do |config|
|
|||||||
# always use Vagrants insecure key
|
# always use Vagrants insecure key
|
||||||
config.ssh.insert_key = false
|
config.ssh.insert_key = false
|
||||||
config.vm.box = $box
|
config.vm.box = $box
|
||||||
|
if SUPPORTED_OS[$os].has_key? :box_url
|
||||||
|
config.vm.box_url = SUPPORTED_OS[$os][:box_url]
|
||||||
|
end
|
||||||
|
config.ssh.username = SUPPORTED_OS[$os][:user]
|
||||||
# plugin conflict
|
# plugin conflict
|
||||||
if Vagrant.has_plugin?("vagrant-vbguest") then
|
if Vagrant.has_plugin?("vagrant-vbguest") then
|
||||||
config.vbguest.auto_update = false
|
config.vbguest.auto_update = false
|
||||||
@@ -87,6 +102,10 @@ Vagrant.configure("2") do |config|
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
$shared_folders.each do |src, dst|
|
||||||
|
config.vm.synced_folder src, dst
|
||||||
|
end
|
||||||
|
|
||||||
config.vm.provider :virtualbox do |vb|
|
config.vm.provider :virtualbox do |vb|
|
||||||
vb.gui = $vm_gui
|
vb.gui = $vm_gui
|
||||||
vb.memory = $vm_memory
|
vb.memory = $vm_memory
|
||||||
@@ -96,15 +115,19 @@ Vagrant.configure("2") do |config|
|
|||||||
ip = "#{$subnet}.#{i+100}"
|
ip = "#{$subnet}.#{i+100}"
|
||||||
host_vars[vm_name] = {
|
host_vars[vm_name] = {
|
||||||
"ip": ip,
|
"ip": ip,
|
||||||
"flannel_interface": ip,
|
"bootstrap_os": SUPPORTED_OS[$os][:bootstrap_os],
|
||||||
"flannel_backend_type": "host-gw",
|
|
||||||
"local_release_dir" => $local_release_dir,
|
"local_release_dir" => $local_release_dir,
|
||||||
"download_run_once": "False",
|
"download_run_once": "False",
|
||||||
# Override the default 'calico' with flannel.
|
"kube_network_plugin": $network_plugin
|
||||||
# inventory/group_vars/k8s-cluster.yml
|
|
||||||
"kube_network_plugin": "flannel",
|
|
||||||
}
|
}
|
||||||
|
|
||||||
config.vm.network :private_network, ip: ip
|
config.vm.network :private_network, ip: ip
|
||||||
|
|
||||||
|
# workaround for Vagrant 1.9.1 and centos vm
|
||||||
|
# https://github.com/hashicorp/vagrant/issues/8096
|
||||||
|
if Vagrant::VERSION == "1.9.1" && $os == "centos"
|
||||||
|
config.vm.provision "shell", inline: "service network restart", run: "always"
|
||||||
|
end
|
||||||
|
|
||||||
# Only execute once the Ansible provisioner,
|
# Only execute once the Ansible provisioner,
|
||||||
# when all the machines are up and ready.
|
# when all the machines are up and ready.
|
||||||
@@ -117,7 +140,7 @@ Vagrant.configure("2") do |config|
|
|||||||
ansible.sudo = true
|
ansible.sudo = true
|
||||||
ansible.limit = "all"
|
ansible.limit = "all"
|
||||||
ansible.host_key_checking = false
|
ansible.host_key_checking = false
|
||||||
ansible.raw_arguments = ["--forks=#{$num_instances}"]
|
ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache"]
|
||||||
ansible.host_vars = host_vars
|
ansible.host_vars = host_vars
|
||||||
#ansible.tags = ['download']
|
#ansible.tags = ['download']
|
||||||
ansible.groups = {
|
ansible.groups = {
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
[ssh_connection]
|
[ssh_connection]
|
||||||
pipelining=True
|
pipelining=True
|
||||||
#ssh_args = -F ./ssh-bastion.conf -o ControlMaster=auto -o ControlPersist=30m
|
ansible_ssh_common_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100
|
||||||
|
#ansible_ssh_common_args = -F {{ inventory_dir|quote }}/ssh-bastion.conf -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100
|
||||||
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
|
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
|
||||||
[defaults]
|
[defaults]
|
||||||
host_key_checking=False
|
host_key_checking=False
|
||||||
@@ -10,3 +11,4 @@ fact_caching_connection = /tmp
|
|||||||
stdout_callback = skippy
|
stdout_callback = skippy
|
||||||
library = ./library
|
library = ./library
|
||||||
callback_whitelist = profile_tasks
|
callback_whitelist = profile_tasks
|
||||||
|
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles
|
||||||
|
|||||||
42
cluster.yml
42
cluster.yml
@@ -2,7 +2,7 @@
|
|||||||
- hosts: localhost
|
- hosts: localhost
|
||||||
gather_facts: False
|
gather_facts: False
|
||||||
roles:
|
roles:
|
||||||
- { role: kargo-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
|
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
|
||||||
|
|
||||||
- hosts: k8s-cluster:etcd:calico-rr
|
- hosts: k8s-cluster:etcd:calico-rr
|
||||||
@@ -13,7 +13,7 @@
|
|||||||
# fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
|
# fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
|
||||||
ansible_ssh_pipelining: false
|
ansible_ssh_pipelining: false
|
||||||
roles:
|
roles:
|
||||||
- { role: kargo-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: bootstrap-os, tags: bootstrap-os}
|
- { role: bootstrap-os, tags: bootstrap-os}
|
||||||
|
|
||||||
- hosts: k8s-cluster:etcd:calico-rr
|
- hosts: k8s-cluster:etcd:calico-rr
|
||||||
@@ -25,68 +25,82 @@
|
|||||||
- hosts: k8s-cluster:etcd:calico-rr
|
- hosts: k8s-cluster:etcd:calico-rr
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kargo-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: kernel-upgrade, tags: kernel-upgrade, when: kernel_upgrade is defined and kernel_upgrade }
|
|
||||||
- { role: kubernetes/preinstall, tags: preinstall }
|
- { role: kubernetes/preinstall, tags: preinstall }
|
||||||
- { role: docker, tags: docker }
|
- { role: docker, tags: docker }
|
||||||
- role: rkt
|
- role: rkt
|
||||||
tags: rkt
|
tags: rkt
|
||||||
when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"
|
when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"
|
||||||
|
- { role: download, tags: download, skip_downloads: false }
|
||||||
|
|
||||||
- hosts: etcd:k8s-cluster:vault
|
- hosts: etcd:k8s-cluster:vault
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kargo-defaults, when: "cert_management == 'vault'" }
|
- { role: kubespray-defaults, when: "cert_management == 'vault'" }
|
||||||
- { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" }
|
- { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" }
|
||||||
|
|
||||||
- hosts: etcd
|
- hosts: etcd
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kargo-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: etcd, tags: etcd, etcd_cluster_setup: true }
|
- { role: etcd, tags: etcd, etcd_cluster_setup: true }
|
||||||
|
|
||||||
- hosts: k8s-cluster
|
- hosts: k8s-cluster
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kargo-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: etcd, tags: etcd, etcd_cluster_setup: false }
|
- { role: etcd, tags: etcd, etcd_cluster_setup: false }
|
||||||
|
|
||||||
- hosts: etcd:k8s-cluster:vault
|
- hosts: etcd:k8s-cluster:vault
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kargo-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: vault, tags: vault, when: "cert_management == 'vault'"}
|
- { role: vault, tags: vault, when: "cert_management == 'vault'"}
|
||||||
|
|
||||||
- hosts: k8s-cluster
|
- hosts: k8s-cluster
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kargo-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: kubernetes/node, tags: node }
|
- { role: kubernetes/node, tags: node }
|
||||||
|
|
||||||
|
- hosts: kube-master
|
||||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
|
roles:
|
||||||
|
- { role: kubespray-defaults}
|
||||||
|
- { role: kubernetes/master, tags: master }
|
||||||
|
- { role: kubernetes/client, tags: client }
|
||||||
|
- { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
|
||||||
|
|
||||||
|
- hosts: k8s-cluster
|
||||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
|
roles:
|
||||||
|
- { role: kubespray-defaults}
|
||||||
|
- { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }
|
||||||
- { role: network_plugin, tags: network }
|
- { role: network_plugin, tags: network }
|
||||||
|
|
||||||
- hosts: kube-master
|
- hosts: kube-master
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kargo-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: kubernetes/master, tags: master }
|
- { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
|
||||||
- { role: kubernetes-apps/network_plugin, tags: network }
|
- { role: kubernetes-apps/network_plugin, tags: network }
|
||||||
- { role: kubernetes-apps/policy_controller, tags: policy-controller }
|
- { role: kubernetes-apps/policy_controller, tags: policy-controller }
|
||||||
|
|
||||||
- hosts: calico-rr
|
- hosts: calico-rr
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kargo-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: network_plugin/calico/rr, tags: network }
|
- { role: network_plugin/calico/rr, tags: network }
|
||||||
|
|
||||||
- hosts: k8s-cluster
|
- hosts: k8s-cluster
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kargo-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq }
|
- { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq }
|
||||||
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf }
|
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf }
|
||||||
|
|
||||||
- hosts: kube-master[0]
|
- hosts: kube-master[0]
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kargo-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: kubernetes-apps, tags: apps }
|
- { role: kubernetes-apps, tags: apps }
|
||||||
|
|||||||
@@ -32,8 +32,7 @@ Conduct may be permanently removed from the project team.
|
|||||||
This code of conduct applies both within project spaces and in public spaces
|
This code of conduct applies both within project spaces and in public spaces
|
||||||
when an individual is representing the project or its community.
|
when an individual is representing the project or its community.
|
||||||
|
|
||||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
|
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting a Kubernetes maintainer, Sarah Novotny <sarahnovotny@google.com>, and/or Dan Kohn <dan@linuxfoundation.org>.
|
||||||
opening an issue or contacting one or more of the project maintainers.
|
|
||||||
|
|
||||||
This Code of Conduct is adapted from the Contributor Covenant
|
This Code of Conduct is adapted from the Contributor Covenant
|
||||||
(http://contributor-covenant.org), version 1.2.0, available at
|
(http://contributor-covenant.org), version 1.2.0, available at
|
||||||
@@ -53,7 +52,7 @@ The Kubernetes team does not condone any statements by speakers contrary to thes
|
|||||||
team reserves the right to deny entrance and/or eject from an event (without refund) any individual found to
|
team reserves the right to deny entrance and/or eject from an event (without refund) any individual found to
|
||||||
be engaging in discriminatory or offensive speech or actions.
|
be engaging in discriminatory or offensive speech or actions.
|
||||||
|
|
||||||
Please bring any concerns to to the immediate attention of Kubernetes event staff
|
Please bring any concerns to the immediate attention of Kubernetes event staff.
|
||||||
|
|
||||||
|
|
||||||
[]()
|
[]()
|
||||||
|
|||||||
61
contrib/aws_inventory/kubespray-aws-inventory.py
Executable file
61
contrib/aws_inventory/kubespray-aws-inventory.py
Executable file
@@ -0,0 +1,61 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
|
||||||
|
import boto3
|
||||||
|
import os
|
||||||
|
import argparse
|
||||||
|
import json
|
||||||
|
|
||||||
|
class SearchEC2Tags(object):
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.parse_args()
|
||||||
|
if self.args.list:
|
||||||
|
self.search_tags()
|
||||||
|
if self.args.host:
|
||||||
|
data = {}
|
||||||
|
print json.dumps(data, indent=2)
|
||||||
|
|
||||||
|
def parse_args(self):
|
||||||
|
|
||||||
|
##Check if VPC_VISIBILITY is set, if not default to private
|
||||||
|
if "VPC_VISIBILITY" in os.environ:
|
||||||
|
self.vpc_visibility = os.environ['VPC_VISIBILITY']
|
||||||
|
else:
|
||||||
|
self.vpc_visibility = "private"
|
||||||
|
|
||||||
|
##Support --list and --host flags. We largely ignore the host one.
|
||||||
|
parser = argparse.ArgumentParser()
|
||||||
|
parser.add_argument('--list', action='store_true', default=False, help='List instances')
|
||||||
|
parser.add_argument('--host', action='store_true', help='Get all the variables about a specific instance')
|
||||||
|
self.args = parser.parse_args()
|
||||||
|
|
||||||
|
def search_tags(self):
|
||||||
|
hosts = {}
|
||||||
|
hosts['_meta'] = { 'hostvars': {} }
|
||||||
|
|
||||||
|
##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
|
||||||
|
for group in ["kube-master", "kube-node", "etcd"]:
|
||||||
|
hosts[group] = []
|
||||||
|
tag_key = "kubespray-role"
|
||||||
|
tag_value = ["*"+group+"*"]
|
||||||
|
region = os.environ['REGION']
|
||||||
|
|
||||||
|
ec2 = boto3.resource('ec2', region)
|
||||||
|
|
||||||
|
instances = ec2.instances.filter(Filters=[{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}])
|
||||||
|
for instance in instances:
|
||||||
|
if self.vpc_visibility == "public":
|
||||||
|
hosts[group].append(instance.public_dns_name)
|
||||||
|
hosts['_meta']['hostvars'][instance.public_dns_name] = {
|
||||||
|
'ansible_ssh_host': instance.public_ip_address
|
||||||
|
}
|
||||||
|
else:
|
||||||
|
hosts[group].append(instance.private_dns_name)
|
||||||
|
hosts['_meta']['hostvars'][instance.private_dns_name] = {
|
||||||
|
'ansible_ssh_host': instance.private_ip_address
|
||||||
|
}
|
||||||
|
|
||||||
|
hosts['k8s-cluster'] = {'children':['kube-master', 'kube-node']}
|
||||||
|
print json.dumps(hosts, sort_keys=True, indent=2)
|
||||||
|
|
||||||
|
SearchEC2Tags()
|
||||||
@@ -5,7 +5,7 @@ Provision the base infrastructure for a Kubernetes cluster by using [Azure Resou
|
|||||||
## Status
|
## Status
|
||||||
|
|
||||||
This will provision the base infrastructure (vnet, vms, nics, ips, ...) needed for Kubernetes in Azure into the specified
|
This will provision the base infrastructure (vnet, vms, nics, ips, ...) needed for Kubernetes in Azure into the specified
|
||||||
Resource Group. It will not install Kubernetes itself, this has to be done in a later step by yourself (using kargo of course).
|
Resource Group. It will not install Kubernetes itself, this has to be done in a later step by yourself (using kubespray of course).
|
||||||
|
|
||||||
## Requirements
|
## Requirements
|
||||||
|
|
||||||
@@ -47,7 +47,7 @@ $ ./clear-rg.sh <resource_group_name>
|
|||||||
**WARNING** this really deletes everything from your resource group, including everything that was later created by you!
|
**WARNING** this really deletes everything from your resource group, including everything that was later created by you!
|
||||||
|
|
||||||
|
|
||||||
## Generating an inventory for kargo
|
## Generating an inventory for kubespray
|
||||||
|
|
||||||
After you have applied the templates, you can generate an inventory with this call:
|
After you have applied the templates, you can generate an inventory with this call:
|
||||||
|
|
||||||
@@ -55,10 +55,10 @@ After you have applied the templates, you can generate an inventory with this ca
|
|||||||
$ ./generate-inventory.sh <resource_group_name>
|
$ ./generate-inventory.sh <resource_group_name>
|
||||||
```
|
```
|
||||||
|
|
||||||
It will create the file ./inventory which can then be used with kargo, e.g.:
|
It will create the file ./inventory which can then be used with kubespray, e.g.:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
$ cd kargo-root-dir
|
$ cd kubespray-root-dir
|
||||||
$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/group_vars/all.yml" cluster.yml
|
$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/group_vars/all.yml" cluster.yml
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|||||||
@@ -9,11 +9,18 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
ansible-playbook generate-templates.yml
|
if az &>/dev/null; then
|
||||||
|
echo "azure cli 2.0 found, using it instead of 1.0"
|
||||||
azure group deployment create -f ./.generated/network.json -g $AZURE_RESOURCE_GROUP
|
./apply-rg_2.sh "$AZURE_RESOURCE_GROUP"
|
||||||
azure group deployment create -f ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
|
elif azure &>/dev/null; then
|
||||||
azure group deployment create -f ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
|
ansible-playbook generate-templates.yml
|
||||||
azure group deployment create -f ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
|
|
||||||
azure group deployment create -f ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
|
azure group deployment create -f ./.generated/network.json -g $AZURE_RESOURCE_GROUP
|
||||||
azure group deployment create -f ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
|
azure group deployment create -f ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
azure group deployment create -f ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
azure group deployment create -f ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
azure group deployment create -f ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
azure group deployment create -f ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
else
|
||||||
|
echo "Azure cli not found"
|
||||||
|
fi
|
||||||
|
|||||||
19
contrib/azurerm/apply-rg_2.sh
Executable file
19
contrib/azurerm/apply-rg_2.sh
Executable file
@@ -0,0 +1,19 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
AZURE_RESOURCE_GROUP="$1"
|
||||||
|
|
||||||
|
if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
|
||||||
|
echo "AZURE_RESOURCE_GROUP is missing"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
ansible-playbook generate-templates.yml
|
||||||
|
|
||||||
|
az group deployment create --template-file ./.generated/network.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
az group deployment create --template-file ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
az group deployment create --template-file ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
az group deployment create --template-file ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
az group deployment create --template-file ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
az group deployment create --template-file ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
|
||||||
@@ -9,6 +9,10 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
ansible-playbook generate-templates.yml
|
if az &>/dev/null; then
|
||||||
|
echo "azure cli 2.0 found, using it instead of 1.0"
|
||||||
azure group deployment create -g "$AZURE_RESOURCE_GROUP" -f ./.generated/clear-rg.json -m Complete
|
./clear-rg_2.sh "$AZURE_RESOURCE_GROUP"
|
||||||
|
else
|
||||||
|
ansible-playbook generate-templates.yml
|
||||||
|
azure group deployment create -g "$AZURE_RESOURCE_GROUP" -f ./.generated/clear-rg.json -m Complete
|
||||||
|
fi
|
||||||
|
|||||||
14
contrib/azurerm/clear-rg_2.sh
Executable file
14
contrib/azurerm/clear-rg_2.sh
Executable file
@@ -0,0 +1,14 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
AZURE_RESOURCE_GROUP="$1"
|
||||||
|
|
||||||
|
if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
|
||||||
|
echo "AZURE_RESOURCE_GROUP is missing"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
ansible-playbook generate-templates.yml
|
||||||
|
|
||||||
|
az group deployment create -g "$AZURE_RESOURCE_GROUP" --template-file ./.generated/clear-rg.json --mode Complete
|
||||||
@@ -8,5 +8,11 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
|
|||||||
echo "AZURE_RESOURCE_GROUP is missing"
|
echo "AZURE_RESOURCE_GROUP is missing"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
# check if azure cli 2.0 exists else use azure cli 1.0
|
||||||
ansible-playbook generate-inventory.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP"
|
if az &>/dev/null; then
|
||||||
|
ansible-playbook generate-inventory_2.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP"
|
||||||
|
elif azure &>/dev/null; then
|
||||||
|
ansible-playbook generate-inventory.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP"
|
||||||
|
else
|
||||||
|
echo "Azure cli not found"
|
||||||
|
fi
|
||||||
|
|||||||
5
contrib/azurerm/generate-inventory_2.yml
Normal file
5
contrib/azurerm/generate-inventory_2.yml
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
---
|
||||||
|
- hosts: localhost
|
||||||
|
gather_facts: False
|
||||||
|
roles:
|
||||||
|
- generate-inventory_2
|
||||||
@@ -1,5 +1,6 @@
|
|||||||
|
|
||||||
# Due to some Azure limitations, this name must be globally unique
|
# Due to some Azure limitations (ex:- Storage Account's name must be unique),
|
||||||
|
# this name must be globally unique - it will be used as a prefix for azure components
|
||||||
cluster_name: example
|
cluster_name: example
|
||||||
|
|
||||||
# Set this to true if you do not want to have public IPs for your masters and minions. This will provision a bastion
|
# Set this to true if you do not want to have public IPs for your masters and minions. This will provision a bastion
|
||||||
@@ -17,10 +18,29 @@ minions_os_disk_size: 1000
|
|||||||
|
|
||||||
admin_username: devops
|
admin_username: devops
|
||||||
admin_password: changeme
|
admin_password: changeme
|
||||||
|
|
||||||
|
# MAKE SURE TO CHANGE THIS TO YOUR PUBLIC KEY to access your azure machines
|
||||||
ssh_public_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLRzcxbsFDdEibiyXCSdIFh7bKbXso1NqlKjEyPTptf3aBXHEhVil0lJRjGpTlpfTy7PHvXFbXIOCdv9tOmeH1uxWDDeZawgPFV6VSZ1QneCL+8bxzhjiCn8133wBSPZkN8rbFKd9eEUUBfx8ipCblYblF9FcidylwtMt5TeEmXk8yRVkPiCuEYuDplhc2H0f4PsK3pFb5aDVdaDT3VeIypnOQZZoUxHWqm6ThyHrzLJd3SrZf+RROFWW1uInIDf/SZlXojczUYoffxgT1lERfOJCHJXsqbZWugbxQBwqsVsX59+KPxFFo6nV88h3UQr63wbFx52/MXkX4WrCkAHzN ablock-vwfs@dell-lappy"
|
ssh_public_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLRzcxbsFDdEibiyXCSdIFh7bKbXso1NqlKjEyPTptf3aBXHEhVil0lJRjGpTlpfTy7PHvXFbXIOCdv9tOmeH1uxWDDeZawgPFV6VSZ1QneCL+8bxzhjiCn8133wBSPZkN8rbFKd9eEUUBfx8ipCblYblF9FcidylwtMt5TeEmXk8yRVkPiCuEYuDplhc2H0f4PsK3pFb5aDVdaDT3VeIypnOQZZoUxHWqm6ThyHrzLJd3SrZf+RROFWW1uInIDf/SZlXojczUYoffxgT1lERfOJCHJXsqbZWugbxQBwqsVsX59+KPxFFo6nV88h3UQr63wbFx52/MXkX4WrCkAHzN ablock-vwfs@dell-lappy"
|
||||||
|
|
||||||
|
# Disable using ssh using password. Change it to false to allow to connect to ssh by password
|
||||||
|
disablePasswordAuthentication: true
|
||||||
|
|
||||||
# Azure CIDRs
|
# Azure CIDRs
|
||||||
azure_vnet_cidr: 10.0.0.0/8
|
azure_vnet_cidr: 10.0.0.0/8
|
||||||
azure_admin_cidr: 10.241.2.0/24
|
azure_admin_cidr: 10.241.2.0/24
|
||||||
azure_masters_cidr: 10.0.4.0/24
|
azure_masters_cidr: 10.0.4.0/24
|
||||||
azure_minions_cidr: 10.240.0.0/16
|
azure_minions_cidr: 10.240.0.0/16
|
||||||
|
|
||||||
|
# Azure loadbalancer port to use to access your cluster
|
||||||
|
kube_apiserver_port: 6443
|
||||||
|
|
||||||
|
# Azure Netwoking and storage naming to use with inventory/all.yml
|
||||||
|
#azure_virtual_network_name: KubeVNET
|
||||||
|
#azure_subnet_admin_name: ad-subnet
|
||||||
|
#azure_subnet_masters_name: master-subnet
|
||||||
|
#azure_subnet_minions_name: minion-subnet
|
||||||
|
#azure_route_table_name: routetable
|
||||||
|
#azure_security_group_name: secgroup
|
||||||
|
|
||||||
|
# Storage types available are: "Standard_LRS","Premium_LRS"
|
||||||
|
#azure_storage_account_type: Standard_LRS
|
||||||
|
|||||||
@@ -8,4 +8,4 @@
|
|||||||
vm_list: "{{ vm_list_cmd.stdout }}"
|
vm_list: "{{ vm_list_cmd.stdout }}"
|
||||||
|
|
||||||
- name: Generate inventory
|
- name: Generate inventory
|
||||||
template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
|
template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
|
||||||
|
|||||||
16
contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
Normal file
16
contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
---
|
||||||
|
|
||||||
|
- name: Query Azure VMs IPs
|
||||||
|
command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
|
||||||
|
register: vm_ip_list_cmd
|
||||||
|
|
||||||
|
- name: Query Azure VMs Roles
|
||||||
|
command: az vm list -o json --resource-group {{ azure_resource_group }}
|
||||||
|
register: vm_list_cmd
|
||||||
|
|
||||||
|
- set_fact:
|
||||||
|
vm_ip_list: "{{ vm_ip_list_cmd.stdout }}"
|
||||||
|
vm_roles_list: "{{ vm_list_cmd.stdout }}"
|
||||||
|
|
||||||
|
- name: Generate inventory
|
||||||
|
template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
|
||||||
@@ -0,0 +1,34 @@
|
|||||||
|
|
||||||
|
{% for vm in vm_ip_list %}
|
||||||
|
{% if not use_bastion or vm.virtualMachinename == 'bastion' %}
|
||||||
|
{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.publicIpAddresses[0].ipAddress }} ip={{ vm.virtualMachine.network.privateIpAddresses[0] }}
|
||||||
|
{% else %}
|
||||||
|
{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.privateIpAddresses[0] }}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
[kube-master]
|
||||||
|
{% for vm in vm_roles_list %}
|
||||||
|
{% if 'kube-master' in vm.tags.roles %}
|
||||||
|
{{ vm.name }}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
[etcd]
|
||||||
|
{% for vm in vm_roles_list %}
|
||||||
|
{% if 'etcd' in vm.tags.roles %}
|
||||||
|
{{ vm.name }}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
[kube-node]
|
||||||
|
{% for vm in vm_roles_list %}
|
||||||
|
{% if 'kube-node' in vm.tags.roles %}
|
||||||
|
{{ vm.name }}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
[k8s-cluster:children]
|
||||||
|
kube-node
|
||||||
|
kube-master
|
||||||
|
|
||||||
@@ -1,15 +1,15 @@
|
|||||||
apiVersion: "2015-06-15"
|
apiVersion: "2015-06-15"
|
||||||
|
|
||||||
virtualNetworkName: "KubVNET"
|
virtualNetworkName: "{{ azure_virtual_network_name | default('KubeVNET') }}"
|
||||||
|
|
||||||
subnetAdminName: "ad-subnet"
|
subnetAdminName: "{{ azure_subnet_admin_name | default('ad-subnet') }}"
|
||||||
subnetMastersName: "master-subnet"
|
subnetMastersName: "{{ azure_subnet_masters_name | default('master-subnet') }}"
|
||||||
subnetMinionsName: "minion-subnet"
|
subnetMinionsName: "{{ azure_subnet_minions_name | default('minion-subnet') }}"
|
||||||
|
|
||||||
routeTableName: "routetable"
|
routeTableName: "{{ azure_route_table_name | default('routetable') }}"
|
||||||
securityGroupName: "secgroup"
|
securityGroupName: "{{ azure_security_group_name | default('secgroup') }}"
|
||||||
|
|
||||||
nameSuffix: "{{cluster_name}}"
|
nameSuffix: "{{ cluster_name }}"
|
||||||
|
|
||||||
availabilitySetMasters: "master-avs"
|
availabilitySetMasters: "master-avs"
|
||||||
availabilitySetMinions: "minion-avs"
|
availabilitySetMinions: "minion-avs"
|
||||||
@@ -33,5 +33,5 @@ imageReference:
|
|||||||
imageReferenceJson: "{{imageReference|to_json}}"
|
imageReferenceJson: "{{imageReference|to_json}}"
|
||||||
|
|
||||||
storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
|
storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
|
||||||
storageAccountType: "Standard_LRS"
|
storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}"
|
||||||
|
|
||||||
|
|||||||
@@ -62,8 +62,8 @@
|
|||||||
"id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]"
|
"id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]"
|
||||||
},
|
},
|
||||||
"protocol": "tcp",
|
"protocol": "tcp",
|
||||||
"frontendPort": 443,
|
"frontendPort": "{{kube_apiserver_port}}",
|
||||||
"backendPort": 443,
|
"backendPort": "{{kube_apiserver_port}}",
|
||||||
"enableFloatingIP": false,
|
"enableFloatingIP": false,
|
||||||
"idleTimeoutInMinutes": 5,
|
"idleTimeoutInMinutes": 5,
|
||||||
"probe": {
|
"probe": {
|
||||||
@@ -77,7 +77,7 @@
|
|||||||
"name": "kube-api",
|
"name": "kube-api",
|
||||||
"properties": {
|
"properties": {
|
||||||
"protocol": "tcp",
|
"protocol": "tcp",
|
||||||
"port": 443,
|
"port": "{{kube_apiserver_port}}",
|
||||||
"intervalInSeconds": 5,
|
"intervalInSeconds": 5,
|
||||||
"numberOfProbes": 2
|
"numberOfProbes": 2
|
||||||
}
|
}
|
||||||
@@ -193,4 +193,4 @@
|
|||||||
} {% if not loop.last %},{% endif %}
|
} {% if not loop.last %},{% endif %}
|
||||||
{% endfor %}
|
{% endfor %}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -92,7 +92,7 @@
|
|||||||
"description": "Allow secure kube-api",
|
"description": "Allow secure kube-api",
|
||||||
"protocol": "Tcp",
|
"protocol": "Tcp",
|
||||||
"sourcePortRange": "*",
|
"sourcePortRange": "*",
|
||||||
"destinationPortRange": "443",
|
"destinationPortRange": "{{kube_apiserver_port}}",
|
||||||
"sourceAddressPrefix": "Internet",
|
"sourceAddressPrefix": "Internet",
|
||||||
"destinationAddressPrefix": "*",
|
"destinationAddressPrefix": "*",
|
||||||
"access": "Allow",
|
"access": "Allow",
|
||||||
@@ -106,4 +106,4 @@
|
|||||||
"dependsOn": []
|
"dependsOn": []
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -41,7 +41,7 @@ import re
|
|||||||
import sys
|
import sys
|
||||||
|
|
||||||
ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster:children',
|
ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster:children',
|
||||||
'calico-rr']
|
'calico-rr', 'vault']
|
||||||
PROTECTED_NAMES = ROLES
|
PROTECTED_NAMES = ROLES
|
||||||
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'load']
|
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'load']
|
||||||
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
|
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
|
||||||
@@ -65,7 +65,7 @@ HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")
|
|||||||
# Configurable as shell vars end
|
# Configurable as shell vars end
|
||||||
|
|
||||||
|
|
||||||
class KargoInventory(object):
|
class KubesprayInventory(object):
|
||||||
|
|
||||||
def __init__(self, changed_hosts=None, config_file=None):
|
def __init__(self, changed_hosts=None, config_file=None):
|
||||||
self.config = configparser.ConfigParser(allow_no_value=True,
|
self.config = configparser.ConfigParser(allow_no_value=True,
|
||||||
@@ -250,6 +250,7 @@ class KargoInventory(object):
|
|||||||
def set_etcd(self, hosts):
|
def set_etcd(self, hosts):
|
||||||
for host in hosts:
|
for host in hosts:
|
||||||
self.add_host_to_group('etcd', host)
|
self.add_host_to_group('etcd', host)
|
||||||
|
self.add_host_to_group('vault', host)
|
||||||
|
|
||||||
def load_file(self, files=None):
|
def load_file(self, files=None):
|
||||||
'''Directly loads JSON, or YAML file to inventory.'''
|
'''Directly loads JSON, or YAML file to inventory.'''
|
||||||
@@ -337,7 +338,7 @@ MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
|
|||||||
def main(argv=None):
|
def main(argv=None):
|
||||||
if not argv:
|
if not argv:
|
||||||
argv = sys.argv[1:]
|
argv = sys.argv[1:]
|
||||||
KargoInventory(argv, CONFIG_FILE)
|
KubesprayInventory(argv, CONFIG_FILE)
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|||||||
@@ -1,3 +1,3 @@
|
|||||||
[metadata]
|
[metadata]
|
||||||
name = kargo-inventory-builder
|
name = kubespray-inventory-builder
|
||||||
version = 0.1
|
version = 0.1
|
||||||
|
|||||||
@@ -31,7 +31,7 @@ class TestInventory(unittest.TestCase):
|
|||||||
sys_mock.exit = mock.Mock()
|
sys_mock.exit = mock.Mock()
|
||||||
super(TestInventory, self).setUp()
|
super(TestInventory, self).setUp()
|
||||||
self.data = ['10.90.3.2', '10.90.3.3', '10.90.3.4']
|
self.data = ['10.90.3.2', '10.90.3.3', '10.90.3.4']
|
||||||
self.inv = inventory.KargoInventory()
|
self.inv = inventory.KubesprayInventory()
|
||||||
|
|
||||||
def test_get_ip_from_opts(self):
|
def test_get_ip_from_opts(self):
|
||||||
optstring = "ansible_host=10.90.3.2 ip=10.90.3.2"
|
optstring = "ansible_host=10.90.3.2 ip=10.90.3.2"
|
||||||
|
|||||||
@@ -1,11 +1,11 @@
|
|||||||
# Kargo on KVM Virtual Machines hypervisor preparation
|
# Kubespray on KVM Virtual Machines hypervisor preparation
|
||||||
|
|
||||||
A simple playbook to ensure your system has the right settings to enable Kargo
|
A simple playbook to ensure your system has the right settings to enable Kubespray
|
||||||
deployment on VMs.
|
deployment on VMs.
|
||||||
|
|
||||||
This playbook does not create Virtual Machines, nor does it run Kargo itself.
|
This playbook does not create Virtual Machines, nor does it run Kubespray itself.
|
||||||
|
|
||||||
### User creation
|
### User creation
|
||||||
|
|
||||||
If you want to create a user for running Kargo deployment, you should specify
|
If you want to create a user for running Kubespray deployment, you should specify
|
||||||
both `k8s_deployment_user` and `k8s_deployment_user_pkey_path`.
|
both `k8s_deployment_user` and `k8s_deployment_user_pkey_path`.
|
||||||
|
|||||||
@@ -1,3 +1,3 @@
|
|||||||
#k8s_deployment_user: kargo
|
#k8s_deployment_user: kubespray
|
||||||
#k8s_deployment_user_pkey_path: /tmp/ssh_rsa
|
#k8s_deployment_user_pkey_path: /tmp/ssh_rsa
|
||||||
|
|
||||||
|
|||||||
@@ -12,9 +12,9 @@
|
|||||||
line: 'br_netfilter'
|
line: 'br_netfilter'
|
||||||
when: br_netfilter is defined and ansible_os_family == 'Debian'
|
when: br_netfilter is defined and ansible_os_family == 'Debian'
|
||||||
|
|
||||||
- name: Add br_netfilter into /etc/modules-load.d/kargo.conf
|
- name: Add br_netfilter into /etc/modules-load.d/kubespray.conf
|
||||||
copy:
|
copy:
|
||||||
dest: /etc/modules-load.d/kargo.conf
|
dest: /etc/modules-load.d/kubespray.conf
|
||||||
content: |-
|
content: |-
|
||||||
### This file is managed by Ansible
|
### This file is managed by Ansible
|
||||||
br-netfilter
|
br-netfilter
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
# Deploying a Kargo Kubernetes Cluster with GlusterFS
|
# Deploying a Kubespray Kubernetes Cluster with GlusterFS
|
||||||
|
|
||||||
You can either deploy using Ansible on its own by supplying your own inventory file or by using Terraform to create the VMs and then providing a dynamic inventory to Ansible. The following two sections are self-contained, you don't need to go through one to use the other. So, if you want to provision with Terraform, you can skip the **Using an Ansible inventory** section, and if you want to provision with a pre-built ansible inventory, you can neglect the **Using Terraform and Ansible** section.
|
You can either deploy using Ansible on its own by supplying your own inventory file or by using Terraform to create the VMs and then providing a dynamic inventory to Ansible. The following two sections are self-contained, you don't need to go through one to use the other. So, if you want to provision with Terraform, you can skip the **Using an Ansible inventory** section, and if you want to provision with a pre-built ansible inventory, you can neglect the **Using Terraform and Ansible** section.
|
||||||
|
|
||||||
@@ -6,7 +6,7 @@ You can either deploy using Ansible on its own by supplying your own inventory f
|
|||||||
|
|
||||||
In the same directory of this ReadMe file you should find a file named `inventory.example` which contains an example setup. Please note that, additionally to the Kubernetes nodes/masters, we define a set of machines for GlusterFS and we add them to the group `[gfs-cluster]`, which in turn is added to the larger `[network-storage]` group as a child group.
|
In the same directory of this ReadMe file you should find a file named `inventory.example` which contains an example setup. Please note that, additionally to the Kubernetes nodes/masters, we define a set of machines for GlusterFS and we add them to the group `[gfs-cluster]`, which in turn is added to the larger `[network-storage]` group as a child group.
|
||||||
|
|
||||||
Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/k8s_gfs_inventory`. Make sure that the settings on `inventory/group_vars/all.yml` make sense with your deployment. Then execute change to the kargo root folder, and execute (supposing that the machines are all using ubuntu):
|
Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/k8s_gfs_inventory`. Make sure that the settings on `inventory/group_vars/all.yml` make sense with your deployment. Then execute change to the kubespray root folder, and execute (supposing that the machines are all using ubuntu):
|
||||||
|
|
||||||
```
|
```
|
||||||
ansible-playbook -b --become-user=root -i inventory/k8s_gfs_inventory --user=ubuntu ./cluster.yml
|
ansible-playbook -b --become-user=root -i inventory/k8s_gfs_inventory --user=ubuntu ./cluster.yml
|
||||||
@@ -28,7 +28,7 @@ k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_us
|
|||||||
|
|
||||||
## Using Terraform and Ansible
|
## Using Terraform and Ansible
|
||||||
|
|
||||||
First step is to fill in a `my-kargo-gluster-cluster.tfvars` file with the specification desired for your cluster. An example with all required variables would look like:
|
First step is to fill in a `my-kubespray-gluster-cluster.tfvars` file with the specification desired for your cluster. An example with all required variables would look like:
|
||||||
|
|
||||||
```
|
```
|
||||||
cluster_name = "cluster1"
|
cluster_name = "cluster1"
|
||||||
@@ -65,15 +65,15 @@ $ echo Setting up Terraform creds && \
|
|||||||
export TF_VAR_auth_url=${OS_AUTH_URL}
|
export TF_VAR_auth_url=${OS_AUTH_URL}
|
||||||
```
|
```
|
||||||
|
|
||||||
Then, standing on the kargo directory (root base of the Git checkout), issue the following terraform command to create the VMs for the cluster:
|
Then, standing on the kubespray directory (root base of the Git checkout), issue the following terraform command to create the VMs for the cluster:
|
||||||
|
|
||||||
```
|
```
|
||||||
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kargo-gluster-cluster.tfvars contrib/terraform/openstack
|
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
|
||||||
```
|
```
|
||||||
|
|
||||||
This will create both your Kubernetes and Gluster VMs. Make sure that the ansible file `contrib/terraform/openstack/group_vars/all.yml` includes any ansible variable that you want to setup (like, for instance, the type of machine for bootstrapping).
|
This will create both your Kubernetes and Gluster VMs. Make sure that the ansible file `contrib/terraform/openstack/group_vars/all.yml` includes any ansible variable that you want to setup (like, for instance, the type of machine for bootstrapping).
|
||||||
|
|
||||||
Then, provision your Kubernetes (Kargo) cluster with the following ansible call:
|
Then, provision your Kubernetes (kubespray) cluster with the following ansible call:
|
||||||
|
|
||||||
```
|
```
|
||||||
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml
|
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml
|
||||||
@@ -88,5 +88,5 @@ ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./co
|
|||||||
If you need to destroy the cluster, you can run:
|
If you need to destroy the cluster, you can run:
|
||||||
|
|
||||||
```
|
```
|
||||||
terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kargo-gluster-cluster.tfvars contrib/terraform/openstack
|
terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -1,8 +1,17 @@
|
|||||||
---
|
---
|
||||||
|
- hosts: gfs-cluster
|
||||||
|
gather_facts: false
|
||||||
|
vars:
|
||||||
|
ansible_ssh_pipelining: false
|
||||||
|
roles:
|
||||||
|
- { role: bootstrap-os, tags: bootstrap-os}
|
||||||
|
|
||||||
- hosts: all
|
- hosts: all
|
||||||
gather_facts: true
|
gather_facts: true
|
||||||
|
|
||||||
- hosts: gfs-cluster
|
- hosts: gfs-cluster
|
||||||
|
vars:
|
||||||
|
ansible_ssh_pipelining: true
|
||||||
roles:
|
roles:
|
||||||
- { role: glusterfs/server }
|
- { role: glusterfs/server }
|
||||||
|
|
||||||
@@ -12,6 +21,5 @@
|
|||||||
|
|
||||||
- hosts: kube-master[0]
|
- hosts: kube-master[0]
|
||||||
roles:
|
roles:
|
||||||
- { role: kubernetes-pv/lib }
|
|
||||||
- { role: kubernetes-pv }
|
- { role: kubernetes-pv }
|
||||||
|
|
||||||
|
|||||||
1
contrib/network-storage/glusterfs/group_vars
Symbolic link
1
contrib/network-storage/glusterfs/group_vars
Symbolic link
@@ -0,0 +1 @@
|
|||||||
|
../../../inventory/group_vars
|
||||||
1
contrib/network-storage/glusterfs/roles/bootstrap-os
Symbolic link
1
contrib/network-storage/glusterfs/roles/bootstrap-os
Symbolic link
@@ -0,0 +1 @@
|
|||||||
|
../../../../roles/bootstrap-os
|
||||||
@@ -4,6 +4,7 @@
|
|||||||
with_items:
|
with_items:
|
||||||
- { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
|
- { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
|
||||||
- { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
|
- { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
|
||||||
|
- { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json}
|
||||||
register: gluster_pv
|
register: gluster_pv
|
||||||
when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
|
when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
|
||||||
|
|
||||||
|
|||||||
@@ -0,0 +1,12 @@
|
|||||||
|
{
|
||||||
|
"kind": "Service",
|
||||||
|
"apiVersion": "v1",
|
||||||
|
"metadata": {
|
||||||
|
"name": "glusterfs"
|
||||||
|
},
|
||||||
|
"spec": {
|
||||||
|
"ports": [
|
||||||
|
{"port": 1}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
60
contrib/packaging/rpm/ansible-kubespray.spec
Normal file
60
contrib/packaging/rpm/ansible-kubespray.spec
Normal file
@@ -0,0 +1,60 @@
|
|||||||
|
%global srcname ansible_kubespray
|
||||||
|
|
||||||
|
%{!?upstream_version: %global upstream_version %{version}%{?milestone}}
|
||||||
|
|
||||||
|
Name: ansible-kubespray
|
||||||
|
Version: XXX
|
||||||
|
Release: XXX
|
||||||
|
Summary: Ansible modules for installing Kubernetes
|
||||||
|
|
||||||
|
Group: System Environment/Libraries
|
||||||
|
License: ASL 2.0
|
||||||
|
Vendor: Kubespray <smainklh@gmail.com>
|
||||||
|
Url: https://github.com/kubernetes-incubator/kubespray
|
||||||
|
Source0: https://github.com/kubernetes-incubator/kubespray/archive/%{upstream_version}.tar.gz
|
||||||
|
|
||||||
|
BuildArch: noarch
|
||||||
|
BuildRequires: git
|
||||||
|
BuildRequires: python2-devel
|
||||||
|
BuildRequires: python-setuptools
|
||||||
|
BuildRequires: python-d2to1
|
||||||
|
BuildRequires: python-pbr
|
||||||
|
|
||||||
|
Requires: ansible
|
||||||
|
Requires: python-jinja2
|
||||||
|
Requires: python-netaddr
|
||||||
|
|
||||||
|
%description
|
||||||
|
|
||||||
|
Ansible-kubespray is a set of Ansible modules and playbooks for
|
||||||
|
installing a Kubernetes cluster. If you have questions, join us
|
||||||
|
on the https://slack.k8s.io, channel '#kubespray'.
|
||||||
|
|
||||||
|
%prep
|
||||||
|
%autosetup -n %{name}-%{upstream_version} -S git
|
||||||
|
|
||||||
|
|
||||||
|
%build
|
||||||
|
%{__python2} setup.py build
|
||||||
|
|
||||||
|
|
||||||
|
%install
|
||||||
|
export PBR_VERSION=%{version}
|
||||||
|
export SKIP_PIP_INSTALL=1
|
||||||
|
%{__python2} setup.py install --skip-build --root %{buildroot}
|
||||||
|
|
||||||
|
|
||||||
|
%files
|
||||||
|
%doc README.md
|
||||||
|
%doc inventory/inventory.example
|
||||||
|
%config /etc/kubespray/ansible.cfg
|
||||||
|
%config /etc/kubespray/inventory/group_vars/all.yml
|
||||||
|
%config /etc/kubespray/inventory/group_vars/k8s-cluster.yml
|
||||||
|
%license LICENSE
|
||||||
|
%{python2_sitelib}/%{srcname}-%{version}-py%{python2_version}.egg-info
|
||||||
|
/usr/local/share/kubespray/roles/
|
||||||
|
/usr/local/share/kubespray/playbooks/
|
||||||
|
%defattr(-,root,root)
|
||||||
|
|
||||||
|
|
||||||
|
%changelog
|
||||||
@@ -14,22 +14,57 @@ This project will create:
|
|||||||
|
|
||||||
**How to Use:**
|
**How to Use:**
|
||||||
|
|
||||||
- Export the variables for your AWS credentials or edit credentials.tfvars:
|
- Export the variables for your AWS credentials or edit `credentials.tfvars`:
|
||||||
|
|
||||||
```
|
```
|
||||||
export aws_access_key="xxx"
|
export AWS_ACCESS_KEY_ID="www"
|
||||||
export aws_secret_key="yyy"
|
export AWS_SECRET_ACCESS_KEY ="xxx"
|
||||||
export aws_ssh_key_name="zzz"
|
export AWS_SSH_KEY_NAME="yyy"
|
||||||
|
export AWS_DEFAULT_REGION="zzz"
|
||||||
|
```
|
||||||
|
- Rename `contrib/terraform/aws/terraform.tfvars.example` to `terraform.tfvars`
|
||||||
|
|
||||||
|
- Update `contrib/terraform/aws/terraform.tfvars` with your data
|
||||||
|
- Allocate a new AWS Elastic IP. Use this for your `loadbalancer_apiserver_address` value (below)
|
||||||
|
- Create an AWS EC2 SSH Key
|
||||||
|
- Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply` depending if you exported your AWS credentials
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```commandline
|
||||||
|
terraform apply -var-file=credentials.tfvars -var 'loadbalancer_apiserver_address=34.212.228.77'
|
||||||
```
|
```
|
||||||
|
|
||||||
- Update contrib/terraform/aws/terraform.tfvars with your data
|
- Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`
|
||||||
|
|
||||||
- Run with `terraform apply -var-file="credentials.tfvars"` or `terraform apply` depending if you exported your AWS credentials
|
- Ansible will automatically generate an ssh config file for your bastion hosts. To make use of it, make sure you have a line in your `ansible.cfg` file that looks like the following:
|
||||||
|
```commandline
|
||||||
|
ssh_args = -F ./ssh-bastion.conf -o ControlMaster=auto -o ControlPersist=30m
|
||||||
|
```
|
||||||
|
|
||||||
- Once the infrastructure is created, you can run the kargo playbooks and supply inventory/hosts with the `-i` flag.
|
- Once the infrastructure is created, you can run the kubespray playbooks and supply inventory/hosts with the `-i` flag.
|
||||||
|
|
||||||
|
Example (this one assumes you are using CoreOS)
|
||||||
|
```commandline
|
||||||
|
ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_ssh_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache
|
||||||
|
```
|
||||||
|
|
||||||
|
**Troubleshooting**
|
||||||
|
|
||||||
|
***Remaining AWS IAM Instance Profile***:
|
||||||
|
|
||||||
|
If the cluster was destroyed without using Terraform it is possible that
|
||||||
|
the AWS IAM Instance Profiles still remain. To delete them you can use
|
||||||
|
the `AWS CLI` with the following command:
|
||||||
|
```
|
||||||
|
aws iam delete-instance-profile --region <region_name> --instance-profile-name <profile_name>
|
||||||
|
```
|
||||||
|
|
||||||
|
***Ansible Inventory doesnt get created:***
|
||||||
|
|
||||||
|
It could happen that Terraform doesnt create an Ansible Inventory file automatically. If this is the case copy the output after `inventory=` and create a file named `hosts`in the directory `inventory` and paste the inventory into the file.
|
||||||
|
|
||||||
**Architecture**
|
**Architecture**
|
||||||
|
|
||||||
Pictured is an AWS Infrastructure created with this Terraform project distributed over two Availability Zones.
|
Pictured is an AWS Infrastructure created with this Terraform project distributed over two Availability Zones.
|
||||||
|
|
||||||

|

|
||||||
|
|||||||
@@ -19,9 +19,9 @@ module "aws-vpc" {
|
|||||||
aws_cluster_name = "${var.aws_cluster_name}"
|
aws_cluster_name = "${var.aws_cluster_name}"
|
||||||
aws_vpc_cidr_block = "${var.aws_vpc_cidr_block}"
|
aws_vpc_cidr_block = "${var.aws_vpc_cidr_block}"
|
||||||
aws_avail_zones="${var.aws_avail_zones}"
|
aws_avail_zones="${var.aws_avail_zones}"
|
||||||
|
|
||||||
aws_cidr_subnets_private="${var.aws_cidr_subnets_private}"
|
aws_cidr_subnets_private="${var.aws_cidr_subnets_private}"
|
||||||
aws_cidr_subnets_public="${var.aws_cidr_subnets_public}"
|
aws_cidr_subnets_public="${var.aws_cidr_subnets_public}"
|
||||||
|
default_tags="${var.default_tags}"
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -35,6 +35,7 @@ module "aws-elb" {
|
|||||||
aws_subnet_ids_public="${module.aws-vpc.aws_subnet_ids_public}"
|
aws_subnet_ids_public="${module.aws-vpc.aws_subnet_ids_public}"
|
||||||
aws_elb_api_port = "${var.aws_elb_api_port}"
|
aws_elb_api_port = "${var.aws_elb_api_port}"
|
||||||
k8s_secure_api_port = "${var.k8s_secure_api_port}"
|
k8s_secure_api_port = "${var.k8s_secure_api_port}"
|
||||||
|
default_tags="${var.default_tags}"
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -61,11 +62,11 @@ resource "aws_instance" "bastion-server" {
|
|||||||
|
|
||||||
key_name = "${var.AWS_SSH_KEY_NAME}"
|
key_name = "${var.AWS_SSH_KEY_NAME}"
|
||||||
|
|
||||||
tags {
|
tags = "${merge(var.default_tags, map(
|
||||||
Name = "kubernetes-${var.aws_cluster_name}-bastion-${count.index}"
|
"Name", "kubernetes-${var.aws_cluster_name}-bastion-${count.index}",
|
||||||
Cluster = "${var.aws_cluster_name}"
|
"Cluster", "${var.aws_cluster_name}",
|
||||||
Role = "bastion-${var.aws_cluster_name}-${count.index}"
|
"Role", "bastion-${var.aws_cluster_name}-${count.index}"
|
||||||
}
|
))}"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@@ -92,11 +93,11 @@ resource "aws_instance" "k8s-master" {
|
|||||||
key_name = "${var.AWS_SSH_KEY_NAME}"
|
key_name = "${var.AWS_SSH_KEY_NAME}"
|
||||||
|
|
||||||
|
|
||||||
tags {
|
tags = "${merge(var.default_tags, map(
|
||||||
Name = "kubernetes-${var.aws_cluster_name}-master${count.index}"
|
"Name", "kubernetes-${var.aws_cluster_name}-master${count.index}",
|
||||||
Cluster = "${var.aws_cluster_name}"
|
"Cluster", "${var.aws_cluster_name}",
|
||||||
Role = "master"
|
"Role", "master"
|
||||||
}
|
))}"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_elb_attachment" "attach_master_nodes" {
|
resource "aws_elb_attachment" "attach_master_nodes" {
|
||||||
@@ -121,12 +122,11 @@ resource "aws_instance" "k8s-etcd" {
|
|||||||
|
|
||||||
key_name = "${var.AWS_SSH_KEY_NAME}"
|
key_name = "${var.AWS_SSH_KEY_NAME}"
|
||||||
|
|
||||||
|
tags = "${merge(var.default_tags, map(
|
||||||
tags {
|
"Name", "kubernetes-${var.aws_cluster_name}-etcd${count.index}",
|
||||||
Name = "kubernetes-${var.aws_cluster_name}-etcd${count.index}"
|
"Cluster", "${var.aws_cluster_name}",
|
||||||
Cluster = "${var.aws_cluster_name}"
|
"Role", "etcd"
|
||||||
Role = "etcd"
|
))}"
|
||||||
}
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -146,35 +146,36 @@ resource "aws_instance" "k8s-worker" {
|
|||||||
key_name = "${var.AWS_SSH_KEY_NAME}"
|
key_name = "${var.AWS_SSH_KEY_NAME}"
|
||||||
|
|
||||||
|
|
||||||
tags {
|
tags = "${merge(var.default_tags, map(
|
||||||
Name = "kubernetes-${var.aws_cluster_name}-worker${count.index}"
|
"Name", "kubernetes-${var.aws_cluster_name}-worker${count.index}",
|
||||||
Cluster = "${var.aws_cluster_name}"
|
"Cluster", "${var.aws_cluster_name}",
|
||||||
Role = "worker"
|
"Role", "worker"
|
||||||
}
|
))}"
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Create Kargo Inventory File
|
* Create Kubespray Inventory File
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
data "template_file" "inventory" {
|
data "template_file" "inventory" {
|
||||||
template = "${file("${path.module}/templates/inventory.tpl")}"
|
template = "${file("${path.module}/templates/inventory.tpl")}"
|
||||||
|
|
||||||
vars {
|
vars {
|
||||||
public_ip_address_bastion = "${join("\n",formatlist("bastion ansible_ssh_host=%s" , aws_instance.bastion-server.*.public_ip))}"
|
public_ip_address_bastion = "${join("\n",formatlist("bastion ansible_host=%s" , aws_instance.bastion-server.*.public_ip))}"
|
||||||
connection_strings_master = "${join("\n",formatlist("%s ansible_ssh_host=%s",aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}"
|
connection_strings_master = "${join("\n",formatlist("%s ansible_host=%s",aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}"
|
||||||
connection_strings_node = "${join("\n", formatlist("%s ansible_ssh_host=%s", aws_instance.k8s-worker.*.tags.Name, aws_instance.k8s-worker.*.private_ip))}"
|
connection_strings_node = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.tags.Name, aws_instance.k8s-worker.*.private_ip))}"
|
||||||
connection_strings_etcd = "${join("\n",formatlist("%s ansible_ssh_host=%s", aws_instance.k8s-etcd.*.tags.Name, aws_instance.k8s-etcd.*.private_ip))}"
|
connection_strings_etcd = "${join("\n",formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.tags.Name, aws_instance.k8s-etcd.*.private_ip))}"
|
||||||
list_master = "${join("\n",aws_instance.k8s-master.*.tags.Name)}"
|
list_master = "${join("\n",aws_instance.k8s-master.*.tags.Name)}"
|
||||||
list_node = "${join("\n",aws_instance.k8s-worker.*.tags.Name)}"
|
list_node = "${join("\n",aws_instance.k8s-worker.*.tags.Name)}"
|
||||||
list_etcd = "${join("\n",aws_instance.k8s-etcd.*.tags.Name)}"
|
list_etcd = "${join("\n",aws_instance.k8s-etcd.*.tags.Name)}"
|
||||||
elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
|
elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
|
||||||
elb_api_port = "loadbalancer_apiserver.port=${var.aws_elb_api_port}"
|
elb_api_port = "loadbalancer_apiserver.port=${var.aws_elb_api_port}"
|
||||||
|
loadbalancer_apiserver_address = "loadbalancer_apiserver.address=${var.loadbalancer_apiserver_address}"
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "null_resource" "inventories" {
|
resource "null_resource" "inventories" {
|
||||||
@@ -182,4 +183,8 @@ resource "null_resource" "inventories" {
|
|||||||
command = "echo '${data.template_file.inventory.rendered}' > ../../../inventory/hosts"
|
command = "echo '${data.template_file.inventory.rendered}' > ../../../inventory/hosts"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
triggers {
|
||||||
|
template = "${data.template_file.inventory.rendered}"
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
|
Before Width: | Height: | Size: 114 KiB After Width: | Height: | Size: 114 KiB |
@@ -2,9 +2,9 @@ resource "aws_security_group" "aws-elb" {
|
|||||||
name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
|
name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
|
||||||
vpc_id = "${var.aws_vpc_id}"
|
vpc_id = "${var.aws_vpc_id}"
|
||||||
|
|
||||||
tags {
|
tags = "${merge(var.default_tags, map(
|
||||||
Name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
|
"Name", "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
|
||||||
}
|
))}"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@@ -52,7 +52,7 @@ resource "aws_elb" "aws-elb-api" {
|
|||||||
connection_draining = true
|
connection_draining = true
|
||||||
connection_draining_timeout = 400
|
connection_draining_timeout = 400
|
||||||
|
|
||||||
tags {
|
tags = "${merge(var.default_tags, map(
|
||||||
Name = "kubernetes-${var.aws_cluster_name}-elb-api"
|
"Name", "kubernetes-${var.aws_cluster_name}-elb-api"
|
||||||
}
|
))}"
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -26,3 +26,8 @@ variable "aws_subnet_ids_public" {
|
|||||||
description = "IDs of Public Subnets"
|
description = "IDs of Public Subnets"
|
||||||
type = "list"
|
type = "list"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "default_tags" {
|
||||||
|
description = "Tags for all resources"
|
||||||
|
type = "map"
|
||||||
|
}
|
||||||
|
|||||||
@@ -129,10 +129,10 @@ EOF
|
|||||||
|
|
||||||
resource "aws_iam_instance_profile" "kube-master" {
|
resource "aws_iam_instance_profile" "kube-master" {
|
||||||
name = "kube_${var.aws_cluster_name}_master_profile"
|
name = "kube_${var.aws_cluster_name}_master_profile"
|
||||||
roles = ["${aws_iam_role.kube-master.name}"]
|
role = "${aws_iam_role.kube-master.name}"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_iam_instance_profile" "kube-worker" {
|
resource "aws_iam_instance_profile" "kube-worker" {
|
||||||
name = "kube_${var.aws_cluster_name}_node_profile"
|
name = "kube_${var.aws_cluster_name}_node_profile"
|
||||||
roles = ["${aws_iam_role.kube-worker.name}"]
|
role = "${aws_iam_role.kube-worker.name}"
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,9 +6,9 @@ resource "aws_vpc" "cluster-vpc" {
|
|||||||
enable_dns_support = true
|
enable_dns_support = true
|
||||||
enable_dns_hostnames = true
|
enable_dns_hostnames = true
|
||||||
|
|
||||||
tags {
|
tags = "${merge(var.default_tags, map(
|
||||||
Name = "kubernetes-${var.aws_cluster_name}-vpc"
|
"Name", "kubernetes-${var.aws_cluster_name}-vpc"
|
||||||
}
|
))}"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@@ -18,13 +18,13 @@ resource "aws_eip" "cluster-nat-eip" {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
resource "aws_internet_gateway" "cluster-vpc-internetgw" {
|
resource "aws_internet_gateway" "cluster-vpc-internetgw" {
|
||||||
vpc_id = "${aws_vpc.cluster-vpc.id}"
|
vpc_id = "${aws_vpc.cluster-vpc.id}"
|
||||||
|
|
||||||
tags {
|
|
||||||
Name = "kubernetes-${var.aws_cluster_name}-internetgw"
|
tags = "${merge(var.default_tags, map(
|
||||||
}
|
"Name", "kubernetes-${var.aws_cluster_name}-internetgw"
|
||||||
|
))}"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_subnet" "cluster-vpc-subnets-public" {
|
resource "aws_subnet" "cluster-vpc-subnets-public" {
|
||||||
@@ -33,9 +33,9 @@ resource "aws_subnet" "cluster-vpc-subnets-public" {
|
|||||||
availability_zone = "${element(var.aws_avail_zones, count.index)}"
|
availability_zone = "${element(var.aws_avail_zones, count.index)}"
|
||||||
cidr_block = "${element(var.aws_cidr_subnets_public, count.index)}"
|
cidr_block = "${element(var.aws_cidr_subnets_public, count.index)}"
|
||||||
|
|
||||||
tags {
|
tags = "${merge(var.default_tags, map(
|
||||||
Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public"
|
"Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public"
|
||||||
}
|
))}"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_nat_gateway" "cluster-nat-gateway" {
|
resource "aws_nat_gateway" "cluster-nat-gateway" {
|
||||||
@@ -51,9 +51,9 @@ resource "aws_subnet" "cluster-vpc-subnets-private" {
|
|||||||
availability_zone = "${element(var.aws_avail_zones, count.index)}"
|
availability_zone = "${element(var.aws_avail_zones, count.index)}"
|
||||||
cidr_block = "${element(var.aws_cidr_subnets_private, count.index)}"
|
cidr_block = "${element(var.aws_cidr_subnets_private, count.index)}"
|
||||||
|
|
||||||
tags {
|
tags = "${merge(var.default_tags, map(
|
||||||
Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
|
"Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
|
||||||
}
|
))}"
|
||||||
}
|
}
|
||||||
|
|
||||||
#Routing in VPC
|
#Routing in VPC
|
||||||
@@ -66,9 +66,10 @@ resource "aws_route_table" "kubernetes-public" {
|
|||||||
cidr_block = "0.0.0.0/0"
|
cidr_block = "0.0.0.0/0"
|
||||||
gateway_id = "${aws_internet_gateway.cluster-vpc-internetgw.id}"
|
gateway_id = "${aws_internet_gateway.cluster-vpc-internetgw.id}"
|
||||||
}
|
}
|
||||||
tags {
|
|
||||||
Name = "kubernetes-${var.aws_cluster_name}-routetable-public"
|
tags = "${merge(var.default_tags, map(
|
||||||
}
|
"Name", "kubernetes-${var.aws_cluster_name}-routetable-public"
|
||||||
|
))}"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_route_table" "kubernetes-private" {
|
resource "aws_route_table" "kubernetes-private" {
|
||||||
@@ -78,9 +79,11 @@ resource "aws_route_table" "kubernetes-private" {
|
|||||||
cidr_block = "0.0.0.0/0"
|
cidr_block = "0.0.0.0/0"
|
||||||
nat_gateway_id = "${element(aws_nat_gateway.cluster-nat-gateway.*.id, count.index)}"
|
nat_gateway_id = "${element(aws_nat_gateway.cluster-nat-gateway.*.id, count.index)}"
|
||||||
}
|
}
|
||||||
tags {
|
|
||||||
Name = "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
|
tags = "${merge(var.default_tags, map(
|
||||||
}
|
"Name", "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
|
||||||
|
))}"
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_route_table_association" "kubernetes-public" {
|
resource "aws_route_table_association" "kubernetes-public" {
|
||||||
@@ -104,9 +107,9 @@ resource "aws_security_group" "kubernetes" {
|
|||||||
name = "kubernetes-${var.aws_cluster_name}-securitygroup"
|
name = "kubernetes-${var.aws_cluster_name}-securitygroup"
|
||||||
vpc_id = "${aws_vpc.cluster-vpc.id}"
|
vpc_id = "${aws_vpc.cluster-vpc.id}"
|
||||||
|
|
||||||
tags {
|
tags = "${merge(var.default_tags, map(
|
||||||
Name = "kubernetes-${var.aws_cluster_name}-securitygroup"
|
"Name", "kubernetes-${var.aws_cluster_name}-securitygroup"
|
||||||
}
|
))}"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_security_group_rule" "allow-all-ingress" {
|
resource "aws_security_group_rule" "allow-all-ingress" {
|
||||||
|
|||||||
@@ -14,3 +14,8 @@ output "aws_security_group" {
|
|||||||
value = ["${aws_security_group.kubernetes.*.id}"]
|
value = ["${aws_security_group.kubernetes.*.id}"]
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
output "default_tags" {
|
||||||
|
value = "${default_tags}"
|
||||||
|
|
||||||
|
}
|
||||||
@@ -22,3 +22,8 @@ variable "aws_cidr_subnets_public" {
|
|||||||
description = "CIDR Blocks for public subnets in Availability zones"
|
description = "CIDR Blocks for public subnets in Availability zones"
|
||||||
type = "list"
|
type = "list"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "default_tags" {
|
||||||
|
description = "Default tags for all resources"
|
||||||
|
type = "map"
|
||||||
|
}
|
||||||
|
|||||||
@@ -18,3 +18,11 @@ output "etcd" {
|
|||||||
output "aws_elb_api_fqdn" {
|
output "aws_elb_api_fqdn" {
|
||||||
value = "${module.aws-elb.aws_elb_api_fqdn}:${var.aws_elb_api_port}"
|
value = "${module.aws-elb.aws_elb_api_fqdn}:${var.aws_elb_api_port}"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
output "inventory" {
|
||||||
|
value = "${data.template_file.inventory.rendered}"
|
||||||
|
}
|
||||||
|
|
||||||
|
output "default_tags" {
|
||||||
|
value = "${default_tags}"
|
||||||
|
}
|
||||||
@@ -1,3 +1,4 @@
|
|||||||
|
[all]
|
||||||
${connection_strings_master}
|
${connection_strings_master}
|
||||||
${connection_strings_node}
|
${connection_strings_node}
|
||||||
${connection_strings_etcd}
|
${connection_strings_etcd}
|
||||||
@@ -25,3 +26,4 @@ kube-master
|
|||||||
[k8s-cluster:vars]
|
[k8s-cluster:vars]
|
||||||
${elb_api_fqdn}
|
${elb_api_fqdn}
|
||||||
${elb_api_port}
|
${elb_api_port}
|
||||||
|
${loadbalancer_apiserver_address}
|
||||||
|
|||||||
@@ -5,11 +5,11 @@ aws_cluster_name = "devtest"
|
|||||||
aws_vpc_cidr_block = "10.250.192.0/18"
|
aws_vpc_cidr_block = "10.250.192.0/18"
|
||||||
aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"]
|
aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"]
|
||||||
aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"]
|
aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"]
|
||||||
aws_avail_zones = ["eu-central-1a","eu-central-1b"]
|
aws_avail_zones = ["us-west-2a","us-west-2b"]
|
||||||
|
|
||||||
#Bastion Host
|
#Bastion Host
|
||||||
aws_bastion_ami = "ami-5900cc36"
|
aws_bastion_ami = "ami-db56b9a3"
|
||||||
aws_bastion_size = "t2.small"
|
aws_bastion_size = "t2.medium"
|
||||||
|
|
||||||
|
|
||||||
#Kubernetes Cluster
|
#Kubernetes Cluster
|
||||||
@@ -23,9 +23,15 @@ aws_etcd_size = "t2.medium"
|
|||||||
aws_kube_worker_num = 4
|
aws_kube_worker_num = 4
|
||||||
aws_kube_worker_size = "t2.medium"
|
aws_kube_worker_size = "t2.medium"
|
||||||
|
|
||||||
aws_cluster_ami = "ami-903df7ff"
|
aws_cluster_ami = "ami-db56b9a3"
|
||||||
|
|
||||||
#Settings AWS ELB
|
#Settings AWS ELB
|
||||||
|
|
||||||
aws_elb_api_port = 443
|
aws_elb_api_port = 6443
|
||||||
k8s_secure_api_port = 443
|
k8s_secure_api_port = 6443
|
||||||
|
kube_insecure_apiserver_address = "0.0.0.0"
|
||||||
|
|
||||||
|
default_tags = {
|
||||||
|
# Env = "devtest"
|
||||||
|
# Product = "kubernetes"
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
#Global Vars
|
#Global Vars
|
||||||
aws_cluster_name = "devtest"
|
aws_cluster_name = "devtest"
|
||||||
aws_region = "eu-central-1"
|
|
||||||
|
|
||||||
#VPC Vars
|
#VPC Vars
|
||||||
aws_vpc_cidr_block = "10.250.192.0/18"
|
aws_vpc_cidr_block = "10.250.192.0/18"
|
||||||
@@ -28,5 +27,6 @@ aws_cluster_ami = "ami-903df7ff"
|
|||||||
|
|
||||||
#Settings AWS ELB
|
#Settings AWS ELB
|
||||||
|
|
||||||
aws_elb_api_port = 443
|
aws_elb_api_port = 6443
|
||||||
k8s_secure_api_port = 443
|
k8s_secure_api_port = 6443
|
||||||
|
kube_insecure_apiserver_address = 0.0.0.0
|
||||||
|
|||||||
@@ -95,3 +95,12 @@ variable "aws_elb_api_port" {
|
|||||||
variable "k8s_secure_api_port" {
|
variable "k8s_secure_api_port" {
|
||||||
description = "Secure Port of K8S API Server"
|
description = "Secure Port of K8S API Server"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "loadbalancer_apiserver_address" {
|
||||||
|
description= "Bind Address for ELB of K8s API Server"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "default_tags" {
|
||||||
|
description = "Default tags for all resources"
|
||||||
|
type = "map"
|
||||||
|
}
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ services.
|
|||||||
|
|
||||||
There are some assumptions made to try and ensure it will work on your openstack cluster.
|
There are some assumptions made to try and ensure it will work on your openstack cluster.
|
||||||
|
|
||||||
* floating-ips are used for access, but you can have masters and nodes that don't use floating-ips if needed. You need currently at least 1 floating ip, which we would suggest is used on a master.
|
* floating-ips are used for access, but you can have masters and nodes that don't use floating-ips if needed. You need currently at least 1 floating ip, which needs to be used on a master. If using more than one, at least one should be on a master for bastions to work fine.
|
||||||
* you already have a suitable OS image in glance
|
* you already have a suitable OS image in glance
|
||||||
* you already have both an internal network and a floating-ip pool created
|
* you already have both an internal network and a floating-ip pool created
|
||||||
* you have security-groups enabled
|
* you have security-groups enabled
|
||||||
@@ -36,6 +36,8 @@ Ensure your OpenStack **Identity v2** credentials are loaded in environment vari
|
|||||||
$ source ~/.stackrc
|
$ source ~/.stackrc
|
||||||
```
|
```
|
||||||
|
|
||||||
|
> You must set **OS_REGION_NAME** and **OS_TENANT_ID** environment variables not required by openstack CLI
|
||||||
|
|
||||||
You will need two networks before installing, an internal network and
|
You will need two networks before installing, an internal network and
|
||||||
an external (floating IP Pool) network. The internet network can be shared as
|
an external (floating IP Pool) network. The internet network can be shared as
|
||||||
we use security groups to provide network segregation. Due to the many
|
we use security groups to provide network segregation. Due to the many
|
||||||
@@ -73,7 +75,9 @@ $ echo Setting up Terraform creds && \
|
|||||||
export TF_VAR_auth_url=${OS_AUTH_URL}
|
export TF_VAR_auth_url=${OS_AUTH_URL}
|
||||||
```
|
```
|
||||||
|
|
||||||
If you want to provision master or node VMs that don't use floating ips, write on a `my-terraform-vars.tfvars` file, for example:
|
##### Alternative: etcd inside masters
|
||||||
|
|
||||||
|
If you want to provision master or node VMs that don't use floating ips and where etcd is inside masters, write on a `my-terraform-vars.tfvars` file, for example:
|
||||||
|
|
||||||
```
|
```
|
||||||
number_of_k8s_masters = "1"
|
number_of_k8s_masters = "1"
|
||||||
@@ -83,10 +87,32 @@ number_of_k8s_nodes = "0"
|
|||||||
```
|
```
|
||||||
This will provision one VM as master using a floating ip, two additional masters using no floating ips (these will only have private ips inside your tenancy) and one VM as node, again without a floating ip.
|
This will provision one VM as master using a floating ip, two additional masters using no floating ips (these will only have private ips inside your tenancy) and one VM as node, again without a floating ip.
|
||||||
|
|
||||||
|
##### Alternative: etcd on separate machines
|
||||||
|
|
||||||
|
If you want to provision master or node VMs that don't use floating ips and where **etcd is on separate nodes from Kubernetes masters**, write on a `my-terraform-vars.tfvars` file, for example:
|
||||||
|
|
||||||
|
```
|
||||||
|
number_of_etcd = "3"
|
||||||
|
number_of_k8s_masters = "0"
|
||||||
|
number_of_k8s_masters_no_etcd = "1"
|
||||||
|
number_of_k8s_masters_no_floating_ip = "0"
|
||||||
|
number_of_k8s_masters_no_floating_ip_no_etcd = "2"
|
||||||
|
number_of_k8s_nodes_no_floating_ip = "1"
|
||||||
|
number_of_k8s_nodes = "2"
|
||||||
|
|
||||||
|
flavor_k8s_node = "desired-flavor-id"
|
||||||
|
flavor_k8s_master = "desired-flavor-id"
|
||||||
|
flavor_etcd = "desired-flavor-id"
|
||||||
|
```
|
||||||
|
|
||||||
|
This will provision one VM as master using a floating ip, two additional masters using no floating ips (these will only have private ips inside your tenancy), two VMs as nodes with floating ips, one VM as node without floating ip and three VMs for etcd.
|
||||||
|
|
||||||
|
##### Alternative: add GlusterFS
|
||||||
|
|
||||||
Additionally, now the terraform based installation supports provisioning of a GlusterFS shared file system based on a separate set of VMs, running either a Debian or RedHat based set of VMs. To enable this, you need to add to your `my-terraform-vars.tfvars` the following variables:
|
Additionally, now the terraform based installation supports provisioning of a GlusterFS shared file system based on a separate set of VMs, running either a Debian or RedHat based set of VMs. To enable this, you need to add to your `my-terraform-vars.tfvars` the following variables:
|
||||||
|
|
||||||
```
|
```
|
||||||
# Flavour depends on your openstack installation, you can get available flavours through `nova list-flavors`
|
# Flavour depends on your openstack installation, you can get available flavours through `nova flavor-list`
|
||||||
flavor_gfs_node = "af659280-5b8a-42b5-8865-a703775911da"
|
flavor_gfs_node = "af659280-5b8a-42b5-8865-a703775911da"
|
||||||
# This is the name of an image already available in your openstack installation.
|
# This is the name of an image already available in your openstack installation.
|
||||||
image_gfs = "Ubuntu 15.10"
|
image_gfs = "Ubuntu 15.10"
|
||||||
@@ -99,6 +125,48 @@ ssh_user_gfs = "ubuntu"
|
|||||||
|
|
||||||
If these variables are provided, this will give rise to a new ansible group called `gfs-cluster`, for which we have added ansible roles to execute in the ansible provisioning step. If you are using Container Linux by CoreOS, these GlusterFS VM necessarily need to be either Debian or RedHat based VMs, Container Linux by CoreOS cannot serve GlusterFS, but can connect to it through binaries available on hyperkube v1.4.3_coreos.0 or higher.
|
If these variables are provided, this will give rise to a new ansible group called `gfs-cluster`, for which we have added ansible roles to execute in the ansible provisioning step. If you are using Container Linux by CoreOS, these GlusterFS VM necessarily need to be either Debian or RedHat based VMs, Container Linux by CoreOS cannot serve GlusterFS, but can connect to it through binaries available on hyperkube v1.4.3_coreos.0 or higher.
|
||||||
|
|
||||||
|
GlusterFS is not deployed by the standard `cluster.yml` playbook, see the [glusterfs playbook documentation](../../network-storage/glusterfs/README.md) for instructions.
|
||||||
|
|
||||||
|
# Configure Cluster variables
|
||||||
|
|
||||||
|
Edit `inventory/group_vars/all.yml`:
|
||||||
|
- Set variable **bootstrap_os** according selected image
|
||||||
|
```
|
||||||
|
# Valid bootstrap options (required): ubuntu, coreos, centos, none
|
||||||
|
bootstrap_os: coreos
|
||||||
|
```
|
||||||
|
- **bin_dir**
|
||||||
|
```
|
||||||
|
# Directory where the binaries will be installed
|
||||||
|
# Default:
|
||||||
|
# bin_dir: /usr/local/bin
|
||||||
|
# For Container Linux by CoreOS:
|
||||||
|
bin_dir: /opt/bin
|
||||||
|
```
|
||||||
|
- and **cloud_provider**
|
||||||
|
```
|
||||||
|
cloud_provider: openstack
|
||||||
|
```
|
||||||
|
Edit `inventory/group_vars/k8s-cluster.yml`:
|
||||||
|
- Set variable **kube_network_plugin** according selected networking
|
||||||
|
```
|
||||||
|
# Choose network plugin (calico, weave or flannel)
|
||||||
|
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
|
||||||
|
kube_network_plugin: flannel
|
||||||
|
```
|
||||||
|
> flannel works out-of-the-box
|
||||||
|
|
||||||
|
> calico requires allowing service's and pod's subnets on according OpenStack Neutron ports
|
||||||
|
- Set variable **resolvconf_mode**
|
||||||
|
```
|
||||||
|
# Can be docker_dns, host_resolvconf or none
|
||||||
|
# Default:
|
||||||
|
# resolvconf_mode: docker_dns
|
||||||
|
# For Container Linux by CoreOS:
|
||||||
|
resolvconf_mode: host_resolvconf
|
||||||
|
```
|
||||||
|
|
||||||
|
For calico configure OpenStack Neutron ports: [OpenStack](/docs/openstack.md)
|
||||||
|
|
||||||
# Provision a Kubernetes Cluster on OpenStack
|
# Provision a Kubernetes Cluster on OpenStack
|
||||||
|
|
||||||
@@ -156,6 +224,49 @@ Deploy kubernetes:
|
|||||||
$ ansible-playbook --become -i contrib/terraform/openstack/hosts cluster.yml
|
$ ansible-playbook --become -i contrib/terraform/openstack/hosts cluster.yml
|
||||||
```
|
```
|
||||||
|
|
||||||
|
# Set up local kubectl
|
||||||
|
1. Install kubectl on your workstation:
|
||||||
|
[Install and Set Up kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
|
||||||
|
2. Add route to internal IP of master node (if needed):
|
||||||
|
```
|
||||||
|
sudo route add [master-internal-ip] gw [router-ip]
|
||||||
|
```
|
||||||
|
or
|
||||||
|
```
|
||||||
|
sudo route add -net [internal-subnet]/24 gw [router-ip]
|
||||||
|
```
|
||||||
|
3. List Kubernetes certs&keys:
|
||||||
|
```
|
||||||
|
ssh [os-user]@[master-ip] sudo ls /etc/kubernetes/ssl/
|
||||||
|
```
|
||||||
|
4. Get admin's certs&key:
|
||||||
|
```
|
||||||
|
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1-key.pem > admin-key.pem
|
||||||
|
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1.pem > admin.pem
|
||||||
|
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/ca.pem > ca.pem
|
||||||
|
```
|
||||||
|
5. Edit OpenStack Neutron master's Security Group to allow TCP connections to port 6443
|
||||||
|
6. Configure kubectl:
|
||||||
|
```
|
||||||
|
kubectl config set-cluster default-cluster --server=https://[master-internal-ip]:6443 \
|
||||||
|
--certificate-authority=ca.pem
|
||||||
|
|
||||||
|
kubectl config set-credentials default-admin \
|
||||||
|
--certificate-authority=ca.pem \
|
||||||
|
--client-key=admin-key.pem \
|
||||||
|
--client-certificate=admin.pem
|
||||||
|
|
||||||
|
kubectl config set-context default-system --cluster=default-cluster --user=default-admin
|
||||||
|
kubectl config use-context default-system
|
||||||
|
```
|
||||||
|
7. Check it:
|
||||||
|
```
|
||||||
|
kubectl version
|
||||||
|
```
|
||||||
|
|
||||||
|
# What's next
|
||||||
|
[Start Hello Kubernetes Service](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster/)
|
||||||
|
|
||||||
# clean up:
|
# clean up:
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
resource "openstack_networking_floatingip_v2" "k8s_master" {
|
resource "openstack_networking_floatingip_v2" "k8s_master" {
|
||||||
count = "${var.number_of_k8s_masters}"
|
count = "${var.number_of_k8s_masters + var.number_of_k8s_masters_no_etcd}"
|
||||||
pool = "${var.floatingip_pool}"
|
pool = "${var.floatingip_pool}"
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -73,6 +73,44 @@ resource "openstack_compute_instance_v2" "k8s_master" {
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
|
||||||
|
name = "${var.cluster_name}-k8s-master-ne-${count.index+1}"
|
||||||
|
count = "${var.number_of_k8s_masters_no_etcd}"
|
||||||
|
image_name = "${var.image}"
|
||||||
|
flavor_id = "${var.flavor_k8s_master}"
|
||||||
|
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||||
|
network {
|
||||||
|
name = "${var.network_name}"
|
||||||
|
}
|
||||||
|
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||||
|
"${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||||
|
floating_ip = "${element(openstack_networking_floatingip_v2.k8s_master.*.address, count.index + var.number_of_k8s_masters)}"
|
||||||
|
metadata = {
|
||||||
|
ssh_user = "${var.ssh_user}"
|
||||||
|
kubespray_groups = "kube-master,kube-node,k8s-cluster,vault"
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_compute_instance_v2" "etcd" {
|
||||||
|
name = "${var.cluster_name}-etcd-${count.index+1}"
|
||||||
|
count = "${var.number_of_etcd}"
|
||||||
|
image_name = "${var.image}"
|
||||||
|
flavor_id = "${var.flavor_etcd}"
|
||||||
|
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||||
|
network {
|
||||||
|
name = "${var.network_name}"
|
||||||
|
}
|
||||||
|
security_groups = [ "${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||||
|
metadata = {
|
||||||
|
ssh_user = "${var.ssh_user}"
|
||||||
|
kubespray_groups = "etcd,vault,no-floating"
|
||||||
|
}
|
||||||
|
provisioner "local-exec" {
|
||||||
|
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
||||||
name = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
|
name = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
|
||||||
@@ -94,6 +132,27 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
|
||||||
|
name = "${var.cluster_name}-k8s-master-ne-nf-${count.index+1}"
|
||||||
|
count = "${var.number_of_k8s_masters_no_floating_ip_no_etcd}"
|
||||||
|
image_name = "${var.image}"
|
||||||
|
flavor_id = "${var.flavor_k8s_master}"
|
||||||
|
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||||
|
network {
|
||||||
|
name = "${var.network_name}"
|
||||||
|
}
|
||||||
|
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||||
|
"${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||||
|
metadata = {
|
||||||
|
ssh_user = "${var.ssh_user}"
|
||||||
|
kubespray_groups = "kube-master,kube-node,k8s-cluster,vault,no-floating"
|
||||||
|
}
|
||||||
|
provisioner "local-exec" {
|
||||||
|
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
resource "openstack_compute_instance_v2" "k8s_node" {
|
resource "openstack_compute_instance_v2" "k8s_node" {
|
||||||
name = "${var.cluster_name}-k8s-node-${count.index+1}"
|
name = "${var.cluster_name}-k8s-node-${count.index+1}"
|
||||||
count = "${var.number_of_k8s_nodes}"
|
count = "${var.number_of_k8s_nodes}"
|
||||||
|
|||||||
@@ -6,10 +6,22 @@ variable "number_of_k8s_masters" {
|
|||||||
default = 2
|
default = 2
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "number_of_k8s_masters_no_etcd" {
|
||||||
|
default = 2
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "number_of_etcd" {
|
||||||
|
default = 2
|
||||||
|
}
|
||||||
|
|
||||||
variable "number_of_k8s_masters_no_floating_ip" {
|
variable "number_of_k8s_masters_no_floating_ip" {
|
||||||
default = 2
|
default = 2
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "number_of_k8s_masters_no_floating_ip_no_etcd" {
|
||||||
|
default = 2
|
||||||
|
}
|
||||||
|
|
||||||
variable "number_of_k8s_nodes" {
|
variable "number_of_k8s_nodes" {
|
||||||
default = 1
|
default = 1
|
||||||
}
|
}
|
||||||
@@ -59,6 +71,10 @@ variable "flavor_k8s_node" {
|
|||||||
default = 3
|
default = 3
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "flavor_etcd" {
|
||||||
|
default = 3
|
||||||
|
}
|
||||||
|
|
||||||
variable "flavor_gfs_node" {
|
variable "flavor_gfs_node" {
|
||||||
default = 3
|
default = 3
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -27,7 +27,7 @@ not _kube-node_.
|
|||||||
|
|
||||||
There are also two special groups:
|
There are also two special groups:
|
||||||
|
|
||||||
* **calico-rr** : explained for [advanced Calico networking cases](docs/calico.md)
|
* **calico-rr** : explained for [advanced Calico networking cases](calico.md)
|
||||||
* **bastion** : configure a bastion host if your nodes are not directly reachable
|
* **bastion** : configure a bastion host if your nodes are not directly reachable
|
||||||
|
|
||||||
Below is a complete inventory example:
|
Below is a complete inventory example:
|
||||||
@@ -75,25 +75,25 @@ According to the [ansible docs](http://docs.ansible.com/ansible/playbooks_variab
|
|||||||
those cannot be overriden from the group vars. In order to override, one should use
|
those cannot be overriden from the group vars. In order to override, one should use
|
||||||
the `-e ` runtime flags (most simple way) or other layers described in the docs.
|
the `-e ` runtime flags (most simple way) or other layers described in the docs.
|
||||||
|
|
||||||
Kargo uses only a few layers to override things (or expect them to
|
Kubespray uses only a few layers to override things (or expect them to
|
||||||
be overriden for roles):
|
be overriden for roles):
|
||||||
|
|
||||||
Layer | Comment
|
Layer | Comment
|
||||||
------|--------
|
------|--------
|
||||||
**role defaults** | provides best UX to override things for Kargo deployments
|
**role defaults** | provides best UX to override things for Kubespray deployments
|
||||||
inventory vars | Unused
|
inventory vars | Unused
|
||||||
**inventory group_vars** | Expects users to use ``all.yml``,``k8s-cluster.yml`` etc. to override things
|
**inventory group_vars** | Expects users to use ``all.yml``,``k8s-cluster.yml`` etc. to override things
|
||||||
inventory host_vars | Unused
|
inventory host_vars | Unused
|
||||||
playbook group_vars | Unuses
|
playbook group_vars | Unused
|
||||||
playbook host_vars | Unused
|
playbook host_vars | Unused
|
||||||
**host facts** | Kargo overrides for internal roles' logic, like state flags
|
**host facts** | Kubespray overrides for internal roles' logic, like state flags
|
||||||
play vars | Unused
|
play vars | Unused
|
||||||
play vars_prompt | Unused
|
play vars_prompt | Unused
|
||||||
play vars_files | Unused
|
play vars_files | Unused
|
||||||
registered vars | Unused
|
registered vars | Unused
|
||||||
set_facts | Kargo overrides those, for some places
|
set_facts | Kubespray overrides those, for some places
|
||||||
**role and include vars** | Provides bad UX to override things! Use extra vars to enforce
|
**role and include vars** | Provides bad UX to override things! Use extra vars to enforce
|
||||||
block vars (only for tasks in block) | Kargo overrides for internal roles' logic
|
block vars (only for tasks in block) | Kubespray overrides for internal roles' logic
|
||||||
task vars (only for the task) | Unused for roles, but only for helper scripts
|
task vars (only for the task) | Unused for roles, but only for helper scripts
|
||||||
**extra vars** (always win precedence) | override with ``ansible-playbook -e @foo.yml``
|
**extra vars** (always win precedence) | override with ``ansible-playbook -e @foo.yml``
|
||||||
|
|
||||||
@@ -124,12 +124,12 @@ The following tags are defined in playbooks:
|
|||||||
| k8s-pre-upgrade | Upgrading K8s cluster
|
| k8s-pre-upgrade | Upgrading K8s cluster
|
||||||
| k8s-secrets | Configuring K8s certs/keys
|
| k8s-secrets | Configuring K8s certs/keys
|
||||||
| kpm | Installing K8s apps definitions with KPM
|
| kpm | Installing K8s apps definitions with KPM
|
||||||
| kube-apiserver | Configuring self-hosted kube-apiserver
|
| kube-apiserver | Configuring static pod kube-apiserver
|
||||||
| kube-controller-manager | Configuring self-hosted kube-controller-manager
|
| kube-controller-manager | Configuring static pod kube-controller-manager
|
||||||
| kubectl | Installing kubectl and bash completion
|
| kubectl | Installing kubectl and bash completion
|
||||||
| kubelet | Configuring kubelet service
|
| kubelet | Configuring kubelet service
|
||||||
| kube-proxy | Configuring self-hosted kube-proxy
|
| kube-proxy | Configuring static pod kube-proxy
|
||||||
| kube-scheduler | Configuring self-hosted kube-scheduler
|
| kube-scheduler | Configuring static pod kube-scheduler
|
||||||
| localhost | Special steps for the localhost (ansible runner)
|
| localhost | Special steps for the localhost (ansible runner)
|
||||||
| master | Configuring K8s master node role
|
| master | Configuring K8s master node role
|
||||||
| netchecker | Installing netchecker K8s app
|
| netchecker | Installing netchecker K8s app
|
||||||
@@ -157,7 +157,7 @@ ansible-playbook -i inventory/inventory.ini cluster.yml --tags preinstall,dnsma
|
|||||||
```
|
```
|
||||||
And this play only removes the K8s cluster DNS resolver IP from hosts' /etc/resolv.conf files:
|
And this play only removes the K8s cluster DNS resolver IP from hosts' /etc/resolv.conf files:
|
||||||
```
|
```
|
||||||
ansible-playbook -i inventory/inventory.ini -e dns_server='' cluster.yml --tags resolvconf
|
ansible-playbook -i inventory/inventory.ini -e dnsmasq_dns_server='' cluster.yml --tags resolvconf
|
||||||
```
|
```
|
||||||
And this prepares all container images localy (at the ansible runner node) without installing
|
And this prepares all container images localy (at the ansible runner node) without installing
|
||||||
or upgrading related stuff or trying to upload container to K8s cluster nodes:
|
or upgrading related stuff or trying to upload container to K8s cluster nodes:
|
||||||
|
|||||||
52
docs/aws.md
52
docs/aws.md
@@ -3,8 +3,58 @@ AWS
|
|||||||
|
|
||||||
To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`.
|
To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`.
|
||||||
|
|
||||||
Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-incubator/kargo/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.
|
Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.
|
||||||
|
|
||||||
|
You would also need to tag the resources in your VPC accordingly for the aws provider to utilize them. Tag the subnets and all instances that kubernetes will be run on with key `kubernetes.io/cluster/$cluster_name` (`$cluster_name` must be a unique identifier for the cluster). Tag the subnets that must be targeted by external ELBs with the key `kubernetes.io/role/elb` and internal ELBs with the key `kubernetes.io/role/internal-elb`.
|
||||||
|
|
||||||
|
Make sure your VPC has both DNS Hostnames support and Private DNS enabled.
|
||||||
|
|
||||||
The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`.
|
The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`.
|
||||||
|
|
||||||
You can now create your cluster!
|
You can now create your cluster!
|
||||||
|
|
||||||
|
### Dynamic Inventory ###
|
||||||
|
There is also a dynamic inventory script for AWS that can be used if desired. However, be aware that it makes some certain assumptions about how you'll create your inventory. It also does not handle all use cases and groups that we may use as part of more advanced deployments. Additions welcome.
|
||||||
|
|
||||||
|
This will produce an inventory that is passed into Ansible that looks like the following:
|
||||||
|
```
|
||||||
|
{
|
||||||
|
"_meta": {
|
||||||
|
"hostvars": {
|
||||||
|
"ip-172-31-3-xxx.us-east-2.compute.internal": {
|
||||||
|
"ansible_ssh_host": "172.31.3.xxx"
|
||||||
|
},
|
||||||
|
"ip-172-31-8-xxx.us-east-2.compute.internal": {
|
||||||
|
"ansible_ssh_host": "172.31.8.xxx"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"etcd": [
|
||||||
|
"ip-172-31-3-xxx.us-east-2.compute.internal"
|
||||||
|
],
|
||||||
|
"k8s-cluster": {
|
||||||
|
"children": [
|
||||||
|
"kube-master",
|
||||||
|
"kube-node"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"kube-master": [
|
||||||
|
"ip-172-31-3-xxx.us-east-2.compute.internal"
|
||||||
|
],
|
||||||
|
"kube-node": [
|
||||||
|
"ip-172-31-8-xxx.us-east-2.compute.internal"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Guide:
|
||||||
|
- Create instances in AWS as needed.
|
||||||
|
- Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube-master`, `etcd`, or `kube-node`. You can also share roles like `kube-master, etcd`
|
||||||
|
- Copy the `kubespray-aws-inventory.py` script from `kubespray/contrib/aws_inventory` to the `kubespray/inventory` directory.
|
||||||
|
- Set the following AWS credentials and info as environment variables in your terminal:
|
||||||
|
```
|
||||||
|
export AWS_ACCESS_KEY_ID="xxxxx"
|
||||||
|
export AWS_SECRET_ACCESS_KEY="yyyyy"
|
||||||
|
export REGION="us-east-2"
|
||||||
|
```
|
||||||
|
- We will now create our cluster. There will be either one or two small changes. The first is that we will specify `-i inventory/kubespray-aws-inventory.py` as our inventory script. The other is conditional. If your AWS instances are public facing, you can set the `VPC_VISIBILITY` variable to `public` and that will result in public IP and DNS names being passed into the inventory. This causes your cluster.yml command to look like `VPC_VISIBILITY="public" ansible-playbook ... cluster.yml`
|
||||||
|
|||||||
@@ -96,7 +96,7 @@ You need to edit your inventory and add:
|
|||||||
* `cluster_id` by route reflector node/group (see details
|
* `cluster_id` by route reflector node/group (see details
|
||||||
[here](https://hub.docker.com/r/calico/routereflector/))
|
[here](https://hub.docker.com/r/calico/routereflector/))
|
||||||
|
|
||||||
Here's an example of Kargo inventory with route reflectors:
|
Here's an example of Kubespray inventory with route reflectors:
|
||||||
|
|
||||||
```
|
```
|
||||||
[all]
|
[all]
|
||||||
@@ -145,11 +145,11 @@ cluster_id="1.0.0.1"
|
|||||||
The inventory above will deploy the following topology assuming that calico's
|
The inventory above will deploy the following topology assuming that calico's
|
||||||
`global_as_num` is set to `65400`:
|
`global_as_num` is set to `65400`:
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
##### Optional : Define default endpoint to host action
|
##### Optional : Define default endpoint to host action
|
||||||
|
|
||||||
By default Calico blocks traffic from endpoints to the host itself by using an iptables DROP action. When using it in kubernetes the action has to be changed to RETURN (default in kargo) or ACCEPT (see https://github.com/projectcalico/felix/issues/660 and https://github.com/projectcalico/calicoctl/issues/1389). Otherwise all network packets from pods (with hostNetwork=False) to services endpoints (with hostNetwork=True) withing the same node are dropped.
|
By default Calico blocks traffic from endpoints to the host itself by using an iptables DROP action. When using it in kubernetes the action has to be changed to RETURN (default in kubespray) or ACCEPT (see https://github.com/projectcalico/felix/issues/660 and https://github.com/projectcalico/calicoctl/issues/1389). Otherwise all network packets from pods (with hostNetwork=False) to services endpoints (with hostNetwork=True) within the same node are dropped.
|
||||||
|
|
||||||
|
|
||||||
To re-define default action please set the following variable in your inventory:
|
To re-define default action please set the following variable in your inventory:
|
||||||
@@ -161,3 +161,11 @@ Cloud providers configuration
|
|||||||
=============================
|
=============================
|
||||||
|
|
||||||
Please refer to the official documentation, for example [GCE configuration](http://docs.projectcalico.org/v1.5/getting-started/docker/installation/gce) requires a security rule for calico ip-ip tunnels. Note, calico is always configured with ``ipip: true`` if the cloud provider was defined.
|
Please refer to the official documentation, for example [GCE configuration](http://docs.projectcalico.org/v1.5/getting-started/docker/installation/gce) requires a security rule for calico ip-ip tunnels. Note, calico is always configured with ``ipip: true`` if the cloud provider was defined.
|
||||||
|
|
||||||
|
##### Optional : Ignore kernel's RPF check setting
|
||||||
|
|
||||||
|
By default the felix agent(calico-node) will abort if the Kernel RPF setting is not 'strict'. If you want Calico to ignore the Kernel setting:
|
||||||
|
|
||||||
|
```
|
||||||
|
calico_node_ignorelooserpf: true
|
||||||
|
```
|
||||||
|
|||||||
@@ -3,17 +3,17 @@ Cloud providers
|
|||||||
|
|
||||||
#### Provisioning
|
#### Provisioning
|
||||||
|
|
||||||
You can use kargo-cli to start new instances on cloud providers
|
You can use kubespray-cli to start new instances on cloud providers
|
||||||
here's an example
|
here's an example
|
||||||
```
|
```
|
||||||
kargo [aws|gce] --nodes 2 --etcd 3 --cluster-name test-smana
|
kubespray [aws|gce] --nodes 2 --etcd 3 --cluster-name test-smana
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Deploy kubernetes
|
#### Deploy kubernetes
|
||||||
|
|
||||||
With kargo-cli
|
With kubespray-cli
|
||||||
```
|
```
|
||||||
kargo deploy [--aws|--gce] -u admin
|
kubespray deploy [--aws|--gce] -u admin
|
||||||
```
|
```
|
||||||
|
|
||||||
Or ansible-playbook command
|
Or ansible-playbook command
|
||||||
|
|||||||
@@ -1,25 +1,25 @@
|
|||||||
Kargo vs [Kops](https://github.com/kubernetes/kops)
|
Kubespray vs [Kops](https://github.com/kubernetes/kops)
|
||||||
---------------
|
---------------
|
||||||
|
|
||||||
Kargo runs on bare metal and most clouds, using Ansible as its substrate for
|
Kubespray runs on bare metal and most clouds, using Ansible as its substrate for
|
||||||
provisioning and orchestration. Kops performs the provisioning and orchestration
|
provisioning and orchestration. Kops performs the provisioning and orchestration
|
||||||
itself, and as such is less flexible in deployment platforms. For people with
|
itself, and as such is less flexible in deployment platforms. For people with
|
||||||
familiarity with Ansible, existing Ansible deployments or the desire to run a
|
familiarity with Ansible, existing Ansible deployments or the desire to run a
|
||||||
Kubernetes cluster across multiple platforms, Kargo is a good choice. Kops,
|
Kubernetes cluster across multiple platforms, Kubespray is a good choice. Kops,
|
||||||
however, is more tightly integrated with the unique features of the clouds it
|
however, is more tightly integrated with the unique features of the clouds it
|
||||||
supports so it could be a better choice if you know that you will only be using
|
supports so it could be a better choice if you know that you will only be using
|
||||||
one platform for the foreseeable future.
|
one platform for the foreseeable future.
|
||||||
|
|
||||||
Kargo vs [Kubeadm](https://github.com/kubernetes/kubeadm)
|
Kubespray vs [Kubeadm](https://github.com/kubernetes/kubeadm)
|
||||||
------------------
|
------------------
|
||||||
|
|
||||||
Kubeadm provides domain Knowledge of Kubernetes clusters' life cycle
|
Kubeadm provides domain Knowledge of Kubernetes clusters' life cycle
|
||||||
management, including self-hosted layouts, dynamic discovery services and so
|
management, including self-hosted layouts, dynamic discovery services and so
|
||||||
on. Had it belong to the new [operators world](https://coreos.com/blog/introducing-operators.html),
|
on. Had it belonged to the new [operators world](https://coreos.com/blog/introducing-operators.html),
|
||||||
it would've likely been named a "Kubernetes cluster operator". Kargo however,
|
it may have been named a "Kubernetes cluster operator". Kubespray however,
|
||||||
does generic configuration management tasks from the "OS operators" ansible
|
does generic configuration management tasks from the "OS operators" ansible
|
||||||
world, plus some initial K8s clustering (with networking plugins included) and
|
world, plus some initial K8s clustering (with networking plugins included) and
|
||||||
control plane bootstrapping. Kargo [strives](https://github.com/kubernetes-incubator/kargo/issues/553)
|
control plane bootstrapping. Kubespray [strives](https://github.com/kubernetes-incubator/kubespray/issues/553)
|
||||||
to adopt kubeadm as a tool in order to consume life cycle management domain
|
to adopt kubeadm as a tool in order to consume life cycle management domain
|
||||||
knowledge from it and offload generic OS configuration things from it, which
|
knowledge from it and offload generic OS configuration things from it, which
|
||||||
hopefully benefits both sides.
|
hopefully benefits both sides.
|
||||||
|
|||||||
@@ -1,16 +1,20 @@
|
|||||||
CoreOS bootstrap
|
CoreOS bootstrap
|
||||||
===============
|
===============
|
||||||
|
|
||||||
Example with **kargo-cli**:
|
Example with **kubespray-cli**:
|
||||||
|
|
||||||
```
|
```
|
||||||
kargo deploy --gce --coreos
|
kubespray deploy --gce --coreos
|
||||||
```
|
```
|
||||||
|
|
||||||
Or with Ansible:
|
Or with Ansible:
|
||||||
|
|
||||||
Before running the cluster playbook you must satisfy the following requirements:
|
Before running the cluster playbook you must satisfy the following requirements:
|
||||||
|
|
||||||
* On each CoreOS nodes a writable directory **/opt/bin** (~400M disk space)
|
General CoreOS Pre-Installation Notes:
|
||||||
|
- You should set the bootstrap_os variable to `coreos`
|
||||||
|
- Ensure that the bin_dir is set to `/opt/bin`
|
||||||
|
- ansible_python_interpreter should be `/opt/bin/python`. This will be laid down by the bootstrap task.
|
||||||
|
- The default resolvconf_mode setting of `docker_dns` **does not** work for CoreOS. This is because we do not edit the systemd service file for docker on CoreOS nodes. Instead, just use the `host_resolvconf` mode. It should work out of the box.
|
||||||
|
|
||||||
Then you can proceed to [cluster deployment](#run-deployment)
|
Then you can proceed to [cluster deployment](#run-deployment)
|
||||||
|
|||||||
38
docs/debian.md
Normal file
38
docs/debian.md
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
Debian Jessie
|
||||||
|
===============
|
||||||
|
|
||||||
|
Debian Jessie installation Notes:
|
||||||
|
|
||||||
|
- Add
|
||||||
|
|
||||||
|
```GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"```
|
||||||
|
|
||||||
|
to /etc/default/grub. Then update with
|
||||||
|
|
||||||
|
```
|
||||||
|
sudo update-grub
|
||||||
|
sudo update-grub2
|
||||||
|
sudo reboot
|
||||||
|
```
|
||||||
|
|
||||||
|
- Add the [backports](https://backports.debian.org/Instructions/) which contain Systemd 2.30 and update Systemd.
|
||||||
|
|
||||||
|
```apt-get -t jessie-backports install systemd```
|
||||||
|
|
||||||
|
(Necessary because the default Systemd version (2.15) does not support the "Delegate" directive in service files)
|
||||||
|
|
||||||
|
- Add the Ansible repository and install Ansible to get a proper version
|
||||||
|
|
||||||
|
```
|
||||||
|
sudo add-apt-repository ppa:ansible/ansible
|
||||||
|
sudo apt-get update
|
||||||
|
sudo apt-get install ansible
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
- Install Jinja2 and Python-Netaddr
|
||||||
|
|
||||||
|
```sudo apt-get install python-jinja2=2.8-1~bpo8+1 python-netaddr```
|
||||||
|
|
||||||
|
|
||||||
|
Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment)
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
K8s DNS stack by Kargo
|
K8s DNS stack by Kubespray
|
||||||
======================
|
======================
|
||||||
|
|
||||||
For K8s cluster nodes, kargo configures a [Kubernetes DNS](http://kubernetes.io/docs/admin/dns/)
|
For K8s cluster nodes, Kubespray configures a [Kubernetes DNS](http://kubernetes.io/docs/admin/dns/)
|
||||||
[cluster add-on](http://releases.k8s.io/master/cluster/addons/README.md)
|
[cluster add-on](http://releases.k8s.io/master/cluster/addons/README.md)
|
||||||
to serve as an authoritative DNS server for a given ``dns_domain`` and its
|
to serve as an authoritative DNS server for a given ``dns_domain`` and its
|
||||||
``svc, default.svc`` default subdomains (a total of ``ndots: 5`` max levels).
|
``svc, default.svc`` default subdomains (a total of ``ndots: 5`` max levels).
|
||||||
@@ -44,13 +44,13 @@ DNS servers to be added *after* the cluster DNS. Used by all ``resolvconf_mode``
|
|||||||
DNS servers in early cluster deployment when no cluster DNS is available yet. These are also added as upstream
|
DNS servers in early cluster deployment when no cluster DNS is available yet. These are also added as upstream
|
||||||
DNS servers used by ``dnsmasq`` (when deployed with ``dns_mode: dnsmasq_kubedns``).
|
DNS servers used by ``dnsmasq`` (when deployed with ``dns_mode: dnsmasq_kubedns``).
|
||||||
|
|
||||||
DNS modes supported by kargo
|
DNS modes supported by Kubespray
|
||||||
============================
|
============================
|
||||||
|
|
||||||
You can modify how kargo sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``.
|
You can modify how Kubespray sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``.
|
||||||
|
|
||||||
## dns_mode
|
## dns_mode
|
||||||
``dns_mode`` configures how kargo will setup cluster DNS. There are three modes available:
|
``dns_mode`` configures how Kubespray will setup cluster DNS. There are three modes available:
|
||||||
|
|
||||||
#### dnsmasq_kubedns (default)
|
#### dnsmasq_kubedns (default)
|
||||||
This installs an additional dnsmasq DaemonSet which gives more flexibility and lifts some
|
This installs an additional dnsmasq DaemonSet which gives more flexibility and lifts some
|
||||||
@@ -67,7 +67,7 @@ This does not install any of dnsmasq and kubedns/skydns. This basically disables
|
|||||||
leaves you with a non functional cluster.
|
leaves you with a non functional cluster.
|
||||||
|
|
||||||
## resolvconf_mode
|
## resolvconf_mode
|
||||||
``resolvconf_mode`` configures how kargo will setup DNS for ``hostNetwork: true`` PODs and non-k8s containers.
|
``resolvconf_mode`` configures how Kubespray will setup DNS for ``hostNetwork: true`` PODs and non-k8s containers.
|
||||||
There are three modes available:
|
There are three modes available:
|
||||||
|
|
||||||
#### docker_dns (default)
|
#### docker_dns (default)
|
||||||
@@ -100,7 +100,7 @@ used as a backup nameserver. After cluster DNS is running, all queries will be a
|
|||||||
servers, which in turn will forward queries to the system nameserver if required.
|
servers, which in turn will forward queries to the system nameserver if required.
|
||||||
|
|
||||||
#### host_resolvconf
|
#### host_resolvconf
|
||||||
This activates the classic kargo behaviour that modifies the hosts ``/etc/resolv.conf`` file and dhclient
|
This activates the classic Kubespray behaviour that modifies the hosts ``/etc/resolv.conf`` file and dhclient
|
||||||
configuration to point to the cluster dns server (either dnsmasq or kubedns, depending on dns_mode).
|
configuration to point to the cluster dns server (either dnsmasq or kubedns, depending on dns_mode).
|
||||||
|
|
||||||
As cluster DNS is not available on early deployment stage, this mode is split into 2 stages. In the first
|
As cluster DNS is not available on early deployment stage, this mode is split into 2 stages. In the first
|
||||||
@@ -120,7 +120,7 @@ cluster service names.
|
|||||||
Limitations
|
Limitations
|
||||||
-----------
|
-----------
|
||||||
|
|
||||||
* Kargo has yet ways to configure Kubedns addon to forward requests SkyDns can
|
* Kubespray has yet ways to configure Kubedns addon to forward requests SkyDns can
|
||||||
not answer with authority to arbitrary recursive resolvers. This task is left
|
not answer with authority to arbitrary recursive resolvers. This task is left
|
||||||
for future. See [official SkyDns docs](https://github.com/skynetservices/skydns)
|
for future. See [official SkyDns docs](https://github.com/skynetservices/skydns)
|
||||||
for details.
|
for details.
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
Downloading binaries and containers
|
Downloading binaries and containers
|
||||||
===================================
|
===================================
|
||||||
|
|
||||||
Kargo supports several download/upload modes. The default is:
|
Kubespray supports several download/upload modes. The default is:
|
||||||
|
|
||||||
* Each node downloads binaries and container images on its own, which is
|
* Each node downloads binaries and container images on its own, which is
|
||||||
``download_run_once: False``.
|
``download_run_once: False``.
|
||||||
|
|||||||
|
Before Width: | Height: | Size: 40 KiB After Width: | Height: | Size: 40 KiB |
@@ -23,13 +23,6 @@ ip a show dev flannel.1
|
|||||||
valid_lft forever preferred_lft forever
|
valid_lft forever preferred_lft forever
|
||||||
```
|
```
|
||||||
|
|
||||||
* Docker must be configured with a bridge ip in the flannel subnet.
|
|
||||||
|
|
||||||
```
|
|
||||||
ps aux | grep docker
|
|
||||||
root 20196 1.7 2.7 1260616 56840 ? Ssl 10:18 0:07 /usr/bin/docker daemon --bip=10.233.16.1/24 --mtu=1450
|
|
||||||
```
|
|
||||||
|
|
||||||
* Try to run a container and check its ip address
|
* Try to run a container and check its ip address
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -1,21 +1,21 @@
|
|||||||
Getting started
|
Getting started
|
||||||
===============
|
===============
|
||||||
|
|
||||||
The easiest way to run the deployement is to use the **kargo-cli** tool.
|
The easiest way to run the deployment is to use the **kubespray-cli** tool.
|
||||||
A complete documentation can be found in its [github repository](https://github.com/kubespray/kargo-cli).
|
A complete documentation can be found in its [github repository](https://github.com/kubespray/kubespray-cli).
|
||||||
|
|
||||||
Here is a simple example on AWS:
|
Here is a simple example on AWS:
|
||||||
|
|
||||||
* Create instances and generate the inventory
|
* Create instances and generate the inventory
|
||||||
|
|
||||||
```
|
```
|
||||||
kargo aws --instances 3
|
kubespray aws --instances 3
|
||||||
```
|
```
|
||||||
|
|
||||||
* Run the deployment
|
* Run the deployment
|
||||||
|
|
||||||
```
|
```
|
||||||
kargo deploy --aws -u centos -n calico
|
kubespray deploy --aws -u centos -n calico
|
||||||
```
|
```
|
||||||
|
|
||||||
Building your own inventory
|
Building your own inventory
|
||||||
@@ -23,22 +23,22 @@ Building your own inventory
|
|||||||
|
|
||||||
Ansible inventory can be stored in 3 formats: YAML, JSON, or INI-like. There is
|
Ansible inventory can be stored in 3 formats: YAML, JSON, or INI-like. There is
|
||||||
an example inventory located
|
an example inventory located
|
||||||
[here](https://github.com/kubernetes-incubator/kargo/blob/master/inventory/inventory.example).
|
[here](https://github.com/kubernetes-incubator/kubespray/blob/master/inventory/inventory.example).
|
||||||
|
|
||||||
You can use an
|
You can use an
|
||||||
[inventory generator](https://github.com/kubernetes-incubator/kargo/blob/master/contrib/inventory_builder/inventory.py)
|
[inventory generator](https://github.com/kubernetes-incubator/kubespray/blob/master/contrib/inventory_builder/inventory.py)
|
||||||
to create or modify an Ansible inventory. Currently, it is limited in
|
to create or modify an Ansible inventory. Currently, it is limited in
|
||||||
functionality and is only use for making a basic Kargo cluster, but it does
|
functionality and is only used for configuring a basic Kubespray cluster inventory, but it does
|
||||||
support creating large clusters. It now supports
|
support creating inventory files for large clusters as well. It now supports
|
||||||
separated ETCD and Kubernetes master roles from node role if the size exceeds a
|
separated ETCD and Kubernetes master roles from node role if the size exceeds a
|
||||||
certain threshold. Run inventory.py help for more information.
|
certain threshold. Run `python3 contrib/inventory_builder/inventory.py help` for more information.
|
||||||
|
|
||||||
Example inventory generator usage:
|
Example inventory generator usage:
|
||||||
|
|
||||||
```
|
```
|
||||||
cp -r inventory my_inventory
|
cp -r inventory my_inventory
|
||||||
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
|
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
|
||||||
CONFIG_FILE=my_inventory/inventory.cfg python3 contrib/inventory_builder/inventory.py ${IPS}
|
CONFIG_FILE=my_inventory/inventory.cfg python3 contrib/inventory_builder/inventory.py ${IPS[@]}
|
||||||
```
|
```
|
||||||
|
|
||||||
Starting custom deployment
|
Starting custom deployment
|
||||||
@@ -55,3 +55,67 @@ ansible-playbook -i my_inventory/inventory.cfg cluster.yml -b -v \
|
|||||||
```
|
```
|
||||||
|
|
||||||
See more details in the [ansible guide](ansible.md).
|
See more details in the [ansible guide](ansible.md).
|
||||||
|
|
||||||
|
Adding nodes
|
||||||
|
------------
|
||||||
|
|
||||||
|
You may want to add **worker** nodes to your existing cluster. This can be done by re-running the `cluster.yml` playbook, or you can target the bare minimum needed to get kubelet installed on the worker and talking to your masters. This is especially helpful when doing something like autoscaling your clusters.
|
||||||
|
|
||||||
|
- Add the new worker node to your inventory under kube-node (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
|
||||||
|
- Run the ansible-playbook command, substituting `scale.yml` for `cluster.yml`:
|
||||||
|
```
|
||||||
|
ansible-playbook -i my_inventory/inventory.cfg scale.yml -b -v \
|
||||||
|
--private-key=~/.ssh/private_key
|
||||||
|
```
|
||||||
|
|
||||||
|
Connecting to Kubernetes
|
||||||
|
------------------------
|
||||||
|
By default, Kubespray configures kube-master hosts with insecure access to
|
||||||
|
kube-apiserver via port 8080. A kubeconfig file is not necessary in this case,
|
||||||
|
because kubectl will use http://localhost:8080 to connect. The kubeconfig files
|
||||||
|
generated will point to localhost (on kube-masters) and kube-node hosts will
|
||||||
|
connect either to a localhost nginx proxy or to a loadbalancer if configured.
|
||||||
|
More details on this process are in the [HA guide](ha.md).
|
||||||
|
|
||||||
|
Kubespray permits connecting to the cluster remotely on any IP of any
|
||||||
|
kube-master host on port 6443 by default. However, this requires
|
||||||
|
authentication. One could generate a kubeconfig based on one installed
|
||||||
|
kube-master hosts (needs improvement) or connect with a username and password.
|
||||||
|
By default, a user with admin rights is created, named `kube`.
|
||||||
|
The password can be viewed after deployment by looking at the file
|
||||||
|
`PATH_TO_KUBESPRAY/credentials/kube_user`. This contains a randomly generated
|
||||||
|
password. If you wish to set your own password, just precreate/modify this
|
||||||
|
file yourself.
|
||||||
|
|
||||||
|
For more information on kubeconfig and accessing a Kubernetes cluster, refer to
|
||||||
|
the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/).
|
||||||
|
|
||||||
|
Accessing Kubernetes Dashboard
|
||||||
|
------------------------------
|
||||||
|
|
||||||
|
If the variable `dashboard_enabled` is set (default is true) as well as
|
||||||
|
kube_basic_auth (default is false), then you can
|
||||||
|
access the Kubernetes Dashboard at the following URL:
|
||||||
|
|
||||||
|
https://kube:_kube-password_@_host_:6443/ui/
|
||||||
|
|
||||||
|
To see the password, refer to the section above, titled *Connecting to
|
||||||
|
Kubernetes*. The host can be any kube-master or kube-node or loadbalancer
|
||||||
|
(when enabled).
|
||||||
|
|
||||||
|
To access the Dashboard with basic auth disabled, follow the instructions here:
|
||||||
|
https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/#command-line-proxy
|
||||||
|
|
||||||
|
Accessing Kubernetes API
|
||||||
|
------------------------
|
||||||
|
|
||||||
|
The main client of Kubernetes is `kubectl`. It is installed on each kube-master
|
||||||
|
host and can optionally be configured on your ansible host by setting
|
||||||
|
`kubeconfig_localhost: true` in the configuration. If enabled, kubectl and
|
||||||
|
admin.conf will appear in the artifacts/ directory after deployment. You can
|
||||||
|
see a list of nodes by running the following commands:
|
||||||
|
|
||||||
|
cd artifacts/
|
||||||
|
./kubectl --kubeconfig admin.conf get nodes
|
||||||
|
|
||||||
|
If desired, copy kubectl to your bin dir and admin.conf to ~/.kube/config.
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ Etcd
|
|||||||
----
|
----
|
||||||
|
|
||||||
The `etcd_access_endpoint` fact provides an access pattern for clients. And the
|
The `etcd_access_endpoint` fact provides an access pattern for clients. And the
|
||||||
`etcd_multiaccess` (defaults to `True`) group var controlls that behavior.
|
`etcd_multiaccess` (defaults to `True`) group var controls that behavior.
|
||||||
It makes deployed components to access the etcd cluster members
|
It makes deployed components to access the etcd cluster members
|
||||||
directly: `http://ip1:2379, http://ip2:2379,...`. This mode assumes the clients
|
directly: `http://ip1:2379, http://ip2:2379,...`. This mode assumes the clients
|
||||||
do a loadbalancing and handle HA for connections.
|
do a loadbalancing and handle HA for connections.
|
||||||
@@ -22,20 +22,20 @@ Kube-apiserver
|
|||||||
--------------
|
--------------
|
||||||
|
|
||||||
K8s components require a loadbalancer to access the apiservers via a reverse
|
K8s components require a loadbalancer to access the apiservers via a reverse
|
||||||
proxy. Kargo includes support for an nginx-based proxy that resides on each
|
proxy. Kubespray includes support for an nginx-based proxy that resides on each
|
||||||
non-master Kubernetes node. This is referred to as localhost loadbalancing. It
|
non-master Kubernetes node. This is referred to as localhost loadbalancing. It
|
||||||
is less efficient than a dedicated load balancer because it creates extra
|
is less efficient than a dedicated load balancer because it creates extra
|
||||||
health checks on the Kubernetes apiserver, but is more practical for scenarios
|
health checks on the Kubernetes apiserver, but is more practical for scenarios
|
||||||
where an external LB or virtual IP management is inconvenient. This option is
|
where an external LB or virtual IP management is inconvenient. This option is
|
||||||
configured by the variable `loadbalancer_apiserver_localhost` (defaults to `True`).
|
configured by the variable `loadbalancer_apiserver_localhost` (defaults to `True`).
|
||||||
You may also define the port the local internal loadbalancer users by changing,
|
You may also define the port the local internal loadbalancer uses by changing,
|
||||||
`nginx_kube_apiserver_port`. This defaults to the value of `kube_apiserver_port`.
|
`nginx_kube_apiserver_port`. This defaults to the value of `kube_apiserver_port`.
|
||||||
It is also import to note that Kargo will only configure kubelet and kube-proxy
|
It is also important to note that Kubespray will only configure kubelet and kube-proxy
|
||||||
on non-master nodes to use the local internal loadbalancer.
|
on non-master nodes to use the local internal loadbalancer.
|
||||||
|
|
||||||
If you choose to NOT use the local internal loadbalancer, you will need to configure
|
If you choose to NOT use the local internal loadbalancer, you will need to configure
|
||||||
your own loadbalancer to achieve HA. Note that deploying a loadbalancer is up to
|
your own loadbalancer to achieve HA. Note that deploying a loadbalancer is up to
|
||||||
a user and is not covered by ansible roles in Kargo. By default, it only configures
|
a user and is not covered by ansible roles in Kubespray. By default, it only configures
|
||||||
a non-HA endpoint, which points to the `access_ip` or IP address of the first server
|
a non-HA endpoint, which points to the `access_ip` or IP address of the first server
|
||||||
node in the `kube-master` group. It can also configure clients to use endpoints
|
node in the `kube-master` group. It can also configure clients to use endpoints
|
||||||
for a given loadbalancer type. The following diagram shows how traffic to the
|
for a given loadbalancer type. The following diagram shows how traffic to the
|
||||||
|
|||||||
121
docs/integration.md
Normal file
121
docs/integration.md
Normal file
@@ -0,0 +1,121 @@
|
|||||||
|
# Kubespray (kargo) in own ansible playbooks repo
|
||||||
|
|
||||||
|
1. Fork [kubespray repo](https://github.com/kubernetes-incubator/kubespray) to your personal/organisation account on github.
|
||||||
|
Note:
|
||||||
|
* All forked public repos at github will be also public, so **never commit sensitive data to your public forks**.
|
||||||
|
* List of all forked repos could be retrieved from github page of original project.
|
||||||
|
|
||||||
|
2. Add **forked repo** as submodule to desired folder in your existing ansible repo (for example 3d/kubespray):
|
||||||
|
```git submodule add https://github.com/YOUR_GITHUB/kubespray.git kubespray```
|
||||||
|
Git will create _.gitmodules_ file in your existent ansible repo:
|
||||||
|
```
|
||||||
|
[submodule "3d/kubespray"]
|
||||||
|
path = 3d/kubespray
|
||||||
|
url = https://github.com/YOUR_GITHUB/kubespray.git
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Configure git to show submodule status:
|
||||||
|
```git config --global status.submoduleSummary true```
|
||||||
|
|
||||||
|
4. Add *original* kubespray repo as upstream:
|
||||||
|
```git remote add upstream https://github.com/kubernetes-incubator/kubespray.git```
|
||||||
|
|
||||||
|
5. Sync your master branch with upstream:
|
||||||
|
```
|
||||||
|
git checkout master
|
||||||
|
git fetch upstream
|
||||||
|
git merge upstream/master
|
||||||
|
git push origin master
|
||||||
|
```
|
||||||
|
|
||||||
|
6. Create a new branch which you will use in your working environment:
|
||||||
|
```git checkout -b work```
|
||||||
|
***Never*** use master branch of your repository for your commits.
|
||||||
|
|
||||||
|
7. Modify path to library and roles in your ansible.cfg file (role naming should be unique, you may have to rename your existing roles if they have the same names as the kubespray project):
|
||||||
|
```
|
||||||
|
...
|
||||||
|
library = 3d/kubespray/library/
|
||||||
|
roles_path = 3d/kubespray/roles/
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
8. Copy and modify configs from kubespray `group_vars` folder to corresponding `group_vars` folder in your existent project.
|
||||||
|
You could rename *all.yml* config to something else, i.e. *kubespray.yml* and create corresponding group in your inventory file, which will include all hosts groups related to kubernetes setup.
|
||||||
|
|
||||||
|
9. Modify your ansible inventory file by adding mapping of your existent groups (if any) to kubespray naming.
|
||||||
|
For example:
|
||||||
|
```
|
||||||
|
...
|
||||||
|
#Kargo groups:
|
||||||
|
[kube-node:children]
|
||||||
|
kubenode
|
||||||
|
|
||||||
|
[k8s-cluster:children]
|
||||||
|
kubernetes
|
||||||
|
|
||||||
|
[etcd:children]
|
||||||
|
kubemaster
|
||||||
|
kubemaster-ha
|
||||||
|
|
||||||
|
[kube-master:children]
|
||||||
|
kubemaster
|
||||||
|
kubemaster-ha
|
||||||
|
|
||||||
|
[vault:children]
|
||||||
|
kube-master
|
||||||
|
|
||||||
|
[kubespray:children]
|
||||||
|
kubernetes
|
||||||
|
```
|
||||||
|
* The last entry here is needed to apply the kubespray.yml config file, renamed from all.yml of the kubespray project.
|
||||||
|
|
||||||
|
10. Now you can include kargo tasks in you existent playbooks by including cluster.yml file:
|
||||||
|
```
|
||||||
|
- name: Include kargo tasks
|
||||||
|
include: 3d/kubespray/cluster.yml
|
||||||
|
```
|
||||||
|
Or you could copy separate tasks from cluster.yml into your ansible repository.
|
||||||
|
|
||||||
|
11. Commit changes to your ansible repo. Keep in mind, that submodule folder is just a link to the git commit hash of your forked repo.
|
||||||
|
When you update your "work" branch you need to commit changes to ansible repo as well.
|
||||||
|
Other members of your team should use ```git submodule sync```, ```git submodule update --init``` to get actual code from submodule.
|
||||||
|
|
||||||
|
# Contributing
|
||||||
|
If you made useful changes or fixed a bug in existent kubespray repo, use this flow for PRs to original kubespray repo.
|
||||||
|
|
||||||
|
0. Sign the [CNCF CLA](https://github.com/kubernetes/kubernetes/wiki/CLA-FAQ).
|
||||||
|
|
||||||
|
1. Change working directory to git submodule directory (3d/kubespray).
|
||||||
|
|
||||||
|
2. Setup desired user.name and user.email for submodule.
|
||||||
|
If kubespray is only one submodule in your repo you could use something like:
|
||||||
|
```git submodule foreach --recursive 'git config user.name "First Last" && git config user.email "your-email-address@used.for.cncf"'```
|
||||||
|
|
||||||
|
3. Sync with upstream master:
|
||||||
|
```
|
||||||
|
git fetch upstream
|
||||||
|
git merge upstream/master
|
||||||
|
git push origin master
|
||||||
|
```
|
||||||
|
4. Create new branch for the specific fixes that you want to contribute:
|
||||||
|
```git checkout -b fixes-name-date-index```
|
||||||
|
Branch name should be self explaining to you, adding date and/or index will help you to track/delete your old PRs.
|
||||||
|
|
||||||
|
5. Find the git hash of your commit in the "work" repo and apply it to the newly created "fix" branch:
|
||||||
|
```
|
||||||
|
git cherry-pick <COMMIT_HASH>
|
||||||
|
```
|
||||||
|
6. If you have several temporary-stage commits - squash them using [```git rebase -i```](http://eli.thegreenplace.net/2014/02/19/squashing-github-pull-requests-into-a-single-commit)
|
||||||
|
Also you could use interactive rebase (```git rebase -i HEAD~10```) to delete commits which you don't want to contribute into original repo.
|
||||||
|
|
||||||
|
7. When your changes are in place, you need to check the upstream repo one more time because it could have changed during your work.
|
||||||
|
Check that you're on correct branch:
|
||||||
|
```git status```
|
||||||
|
And pull changes from upstream (if any):
|
||||||
|
```git pull --rebase upstream master```
|
||||||
|
|
||||||
|
8. Now push your changes to your **fork** repo with ```git push```. If your branch doesn't exist on github, git will propose you to use something like ```git push --set-upstream origin fixes-name-date-index```.
|
||||||
|
|
||||||
|
9. Open your forked repo in browser, on the main page you will see a proposal to create a pull request for your newly created branch. Check the proposed diff of your PR. If something is wrong you could safely delete the "fix" branch on github using ```git push origin --delete fixes-name-date-index```, ```git branch -D fixes-name-date-index``` and start the whole process from the beginning.
|
||||||
|
If everything is fine - add description about your changes (what they do and why they're needed) and confirm pull request creation.
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
Network Checker Application
|
Network Checker Application
|
||||||
===========================
|
===========================
|
||||||
|
|
||||||
With the ``deploy_netchecker`` var enabled (defaults to false), Kargo deploys a
|
With the ``deploy_netchecker`` var enabled (defaults to false), Kubespray deploys a
|
||||||
Network Checker Application from the 3rd side `l23network/k8s-netchecker` docker
|
Network Checker Application from the 3rd-party `l23network/k8s-netchecker` docker
|
||||||
images. It consists of the server and agents trying to reach the server by usual
|
images. It consists of the server and agents trying to reach the server by usual
|
||||||
for Kubernetes applications network connectivity meanings. Therefore, this
|
for Kubernetes applications network connectivity meanings. Therefore, this
|
||||||
@@ -17,7 +17,7 @@ any of the cluster nodes:
|
|||||||
```
|
```
|
||||||
curl http://localhost:31081/api/v1/connectivity_check
|
curl http://localhost:31081/api/v1/connectivity_check
|
||||||
```
|
```
|
||||||
Note that Kargo does not invoke the check but only deploys the application, if
|
Note that Kubespray does not invoke the check but only deploys the application, if
|
||||||
requested.
|
requested.
|
||||||
|
|
||||||
There are related application specifc variables:
|
There are related application-specific variables:
|
||||||
|
|||||||
@@ -35,14 +35,12 @@ Then you can use the instance ids to find the connected [neutron](https://wiki.o
|
|||||||
| 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | e1f48aad-df96-4bce-bf61-62ae12bf3f95 |
|
| 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | e1f48aad-df96-4bce-bf61-62ae12bf3f95 |
|
||||||
| e5ae2045-a1e1-4e99-9aac-4353889449a7 | 725cd548-6ea3-426b-baaa-e7306d3c8052 |
|
| e5ae2045-a1e1-4e99-9aac-4353889449a7 | 725cd548-6ea3-426b-baaa-e7306d3c8052 |
|
||||||
|
|
||||||
Given the port ids on the left, you can set the `allowed_address_pairs` in neutron:
|
Given the port ids on the left, you can set the `allowed_address_pairs` in neutron.
|
||||||
|
Note that you have to allow both of `kube_service_addresses` (default `10.233.0.0/18`)
|
||||||
|
and `kube_pods_subnet` (default `10.233.64.0/18`).
|
||||||
|
|
||||||
# allow kube_service_addresses network
|
# allow kube_service_addresses and kube_pods_subnet network
|
||||||
neutron port-update 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18
|
neutron port-update 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18 ip_address=10.233.64.0/18
|
||||||
neutron port-update e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18
|
neutron port-update e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18 ip_address=10.233.64.0/18
|
||||||
|
|
||||||
# allow kube_pods_subnet network
|
|
||||||
neutron port-update 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address_pairs list=true type=dict ip_address=10.233.64.0/18
|
|
||||||
neutron port-update e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address_pairs list=true type=dict ip_address=10.233.64.0/18
|
|
||||||
|
|
||||||
Now you can finally run the playbook.
|
Now you can finally run the playbook.
|
||||||
|
|||||||
@@ -1,71 +1,47 @@
|
|||||||
Kargo's roadmap
|
Kubespray's roadmap
|
||||||
=================
|
=================
|
||||||
|
|
||||||
### Kubeadm
|
### Kubeadm
|
||||||
- Propose kubeadm as an option in order to setup the kubernetes cluster.
|
- Switch to kubeadm deployment as the default method after some bugs are fixed:
|
||||||
That would probably improve deployment speed and certs management [#553](https://github.com/kubespray/kargo/issues/553)
|
* Support for basic auth
|
||||||
|
* cloudprovider cloud-config mount [#484](https://github.com/kubernetes/kubeadm/issues/484)
|
||||||
|
|
||||||
### Self deployment (pull-mode) [#320](https://github.com/kubespray/kargo/issues/320)
|
### Self deployment (pull-mode) [#320](https://github.com/kubespray/kubespray/issues/320)
|
||||||
- the playbook would install and configure docker/rkt and the etcd cluster
|
- the playbook would install and configure docker/rkt and the etcd cluster
|
||||||
- the following data would be inserted into etcd: certs,tokens,users,inventory,group_vars.
|
- the following data would be inserted into etcd: certs,tokens,users,inventory,group_vars.
|
||||||
- a "kubespray" container would be deployed (kargo-cli, ansible-playbook, kpm)
|
- a "kubespray" container would be deployed (kubespray-cli, ansible-playbook, kpm)
|
||||||
- to be discussed, a way to provide the inventory
|
- to be discussed, a way to provide the inventory
|
||||||
- **self deployment** of the node from inside a container [#321](https://github.com/kubespray/kargo/issues/321)
|
- **self deployment** of the node from inside a container [#321](https://github.com/kubespray/kubespray/issues/321)
|
||||||
|
|
||||||
### Provisionning and cloud providers
|
### Provisioning and cloud providers
|
||||||
- [ ] Terraform to provision instances on **GCE, AWS, Openstack, Digital Ocean, Azure**
|
- [ ] Terraform to provision instances on **GCE, AWS, Openstack, Digital Ocean, Azure**
|
||||||
- [ ] On AWS autoscaling, multi AZ
|
- [ ] On AWS autoscaling, multi AZ
|
||||||
- [ ] On Azure autoscaling, create loadbalancer [#297](https://github.com/kubespray/kargo/issues/297)
|
- [ ] On Azure autoscaling, create loadbalancer [#297](https://github.com/kubespray/kubespray/issues/297)
|
||||||
- [ ] On GCE be able to create a loadbalancer automatically (IAM ?) [#280](https://github.com/kubespray/kargo/issues/280)
|
- [ ] On GCE be able to create a loadbalancer automatically (IAM ?) [#280](https://github.com/kubespray/kubespray/issues/280)
|
||||||
- [x] **TLS boostrap** support for kubelet [#234](https://github.com/kubespray/kargo/issues/234)
|
- [x] **TLS bootstrap** support for kubelet (covered by kubeadm, but not in standard deployment) [#234](https://github.com/kubespray/kubespray/issues/234)
|
||||||
(related issues: https://github.com/kubernetes/kubernetes/pull/20439 <br>
|
(related issues: https://github.com/kubernetes/kubernetes/pull/20439 <br>
|
||||||
https://github.com/kubernetes/kubernetes/issues/18112)
|
https://github.com/kubernetes/kubernetes/issues/18112)
|
||||||
|
|
||||||
### Tests
|
### Tests
|
||||||
- [x] Run kubernetes e2e tests
|
- [ ] Run kubernetes e2e tests
|
||||||
- [x] migrate to jenkins
|
- [ ] Test idempotency on a single OS but for all network plugins/container engines
|
||||||
(a test is currently a deployment on a 3 node cluste, testing k8s api, ping between 2 pods)
|
|
||||||
- [x] Full tests on GCE per day (All OS's, all network plugins)
|
|
||||||
- [x] trigger a single test per pull request
|
|
||||||
- [ ] ~~single test with the Ansible version n-1 per day~~
|
|
||||||
- [x] Test idempotency on on single OS but for all network plugins/container engines
|
|
||||||
- [ ] single test on AWS per day
|
- [ ] single test on AWS per day
|
||||||
- [x] test different achitectures :
|
|
||||||
- 3 instances, 3 are members of the etcd cluster, 2 of them acting as master and node, 1 as node
|
|
||||||
- 5 instances, 3 are etcd and nodes, 2 are masters only
|
|
||||||
- 7 instances, 3 etcd only, 2 masters, 2 nodes
|
|
||||||
- [ ] test scale up cluster: +1 etcd, +1 master, +1 node
|
- [ ] test scale up cluster: +1 etcd, +1 master, +1 node
|
||||||
|
- [ ] Reorganize CI test vars into group var files
|
||||||
|
|
||||||
### Lifecycle
|
### Lifecycle
|
||||||
- [ ] Adopt the kubeadm tool by delegating CM tasks it is capable to accomplish well [#553](https://github.com/kubespray/kargo/issues/553)
|
|
||||||
- [x] Drain worker node when upgrading k8s components in a worker node. [#154](https://github.com/kubespray/kargo/issues/154)
|
|
||||||
- [ ] Drain worker node when shutting down/deleting an instance
|
|
||||||
- [ ] Upgrade granularity: select components to upgrade and skip others
|
- [ ] Upgrade granularity: select components to upgrade and skip others
|
||||||
|
|
||||||
### Networking
|
### Networking
|
||||||
- [ ] romana.io support [#160](https://github.com/kubespray/kargo/issues/160)
|
|
||||||
- [ ] Configure network policy for Calico. [#159](https://github.com/kubespray/kargo/issues/159)
|
|
||||||
- [ ] Opencontrail
|
- [ ] Opencontrail
|
||||||
- [x] Canal
|
- [ ] Consolidate network_plugins and kubernetes-apps/network_plugins
|
||||||
- [x] Cloud Provider native networking (instead of our network plugins)
|
|
||||||
|
|
||||||
### High availability
|
### Kubespray API
|
||||||
- (to be discussed) option to set a loadbalancer for the apiservers like ucarp/packemaker/keepalived
|
|
||||||
While waiting for the issue [kubernetes/kubernetes#18174](https://github.com/kubernetes/kubernetes/issues/18174) to be fixed.
|
|
||||||
|
|
||||||
### Kargo-cli
|
|
||||||
- Delete instances
|
|
||||||
- `kargo vagrant` to setup a test cluster locally
|
|
||||||
- `kargo azure` for Microsoft Azure support
|
|
||||||
- switch to Terraform instead of Ansible for provisionning
|
|
||||||
- update $HOME/.kube/config when a cluster is deployed. Optionally switch to this context
|
|
||||||
|
|
||||||
### Kargo API
|
|
||||||
- Perform all actions through an **API**
|
- Perform all actions through an **API**
|
||||||
- Store inventories / configurations of mulltiple clusters
|
- Store inventories / configurations of multiple clusters
|
||||||
- make sure that state of cluster is completely saved in no more than one config file beyond hosts inventory
|
- make sure that state of cluster is completely saved in no more than one config file beyond hosts inventory
|
||||||
|
|
||||||
### Addons (with kpm)
|
### Addons (helm or native ansible)
|
||||||
Include optionals deployments to init the cluster:
|
Include optionals deployments to init the cluster:
|
||||||
##### Monitoring
|
##### Monitoring
|
||||||
- Heapster / Grafana ....
|
- Heapster / Grafana ....
|
||||||
@@ -85,10 +61,10 @@ Include optionals deployments to init the cluster:
|
|||||||
- Deis Workflow
|
- Deis Workflow
|
||||||
|
|
||||||
### Others
|
### Others
|
||||||
- remove nodes (adding is already supported)
|
- remove nodes (adding is already supported)
|
||||||
- being able to choose any k8s version (almost done)
|
- Organize and update documentation (split in categories)
|
||||||
- **rkt** support [#59](https://github.com/kubespray/kargo/issues/59)
|
- Refactor downloads so it all runs in the beginning of deployment
|
||||||
- Review documentation (split in categories)
|
- Make bootstrapping OS more consistent
|
||||||
- **consul** -> if officialy supported by k8s
|
- **consul** -> if officially supported by k8s
|
||||||
- flex volumes options (e.g. **torrus** support) [#312](https://github.com/kubespray/kargo/issues/312)
|
- flex volumes options (e.g. **torrus** support) [#312](https://github.com/kubespray/kubespray/issues/312)
|
||||||
- Clusters federation option (aka **ubernetes**) [#329](https://github.com/kubespray/kargo/issues/329)
|
- Clusters federation option (aka **ubernetes**) [#329](https://github.com/kubespray/kubespray/issues/329)
|
||||||
|
|||||||
@@ -1,11 +1,11 @@
|
|||||||
Upgrading Kubernetes in Kargo
|
Upgrading Kubernetes in Kubespray
|
||||||
=============================
|
=============================
|
||||||
|
|
||||||
#### Description
|
#### Description
|
||||||
|
|
||||||
Kargo handles upgrades the same way it handles initial deployment. That is to
|
Kubespray handles upgrades the same way it handles initial deployment. That is to
|
||||||
say that each component is laid down in a fixed order. You should be able to
|
say that each component is laid down in a fixed order. You should be able to
|
||||||
upgrade from Kargo tag 2.0 up to the current master without difficulty. You can
|
upgrade from Kubespray tag 2.0 up to the current master without difficulty. You can
|
||||||
also individually control versions of components by explicitly defining their
|
also individually control versions of components by explicitly defining their
|
||||||
versions. Here are all version vars for each component:
|
versions. Here are all version vars for each component:
|
||||||
|
|
||||||
@@ -35,7 +35,7 @@ ansible-playbook cluster.yml -i inventory/inventory.cfg -e kube_version=v1.4.6
|
|||||||
|
|
||||||
#### Graceful upgrade
|
#### Graceful upgrade
|
||||||
|
|
||||||
Kargo also supports cordon, drain and uncordoning of nodes when performing
|
Kubespray also supports cordon, drain and uncordoning of nodes when performing
|
||||||
a cluster upgrade. There is a separate playbook used for this purpose. It is
|
a cluster upgrade. There is a separate playbook used for this purpose. It is
|
||||||
important to note that upgrade-cluster.yml can only be used for upgrading an
|
important to note that upgrade-cluster.yml can only be used for upgrading an
|
||||||
existing cluster. That means there must be at least 1 kube-master already
|
existing cluster. That means there must be at least 1 kube-master already
|
||||||
@@ -44,7 +44,15 @@ deployed.
|
|||||||
```
|
```
|
||||||
git fetch origin
|
git fetch origin
|
||||||
git checkout origin/master
|
git checkout origin/master
|
||||||
ansible-playbook upgrade-cluster.yml -b -i inventory/inventory.cfg
|
ansible-playbook upgrade-cluster.yml -b -i inventory/inventory.cfg -e kube_version=v1.6.0
|
||||||
|
```
|
||||||
|
|
||||||
|
After a successful upgrade, the Server Version should be updated:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ kubectl version
|
||||||
|
Client Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.0", GitCommit:"fff5156092b56e6bd60fff75aad4dc9de6b6ef37", GitTreeState:"clean", BuildDate:"2017-03-28T19:15:41Z", GoVersion:"go1.8", Compiler:"gc", Platform:"darwin/amd64"}
|
||||||
|
Server Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.0+coreos.0", GitCommit:"8031716957d697332f9234ddf85febb07ac6c3e3", GitTreeState:"clean", BuildDate:"2017-03-29T04:33:09Z", GoVersion:"go1.7.5", Compiler:"gc", Platform:"linux/amd64"}
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Upgrade order
|
#### Upgrade order
|
||||||
@@ -59,3 +67,17 @@ follows:
|
|||||||
* network_plugin (such as Calico or Weave)
|
* network_plugin (such as Calico or Weave)
|
||||||
* kube-apiserver, kube-scheduler, and kube-controller-manager
|
* kube-apiserver, kube-scheduler, and kube-controller-manager
|
||||||
* Add-ons (such as KubeDNS)
|
* Add-ons (such as KubeDNS)
|
||||||
|
|
||||||
|
#### Upgrade considerations
|
||||||
|
|
||||||
|
Kubespray supports rotating certificates used for etcd and Kubernetes
|
||||||
|
components, but some manual steps may be required. If you have a pod that
|
||||||
|
requires use of a service token and is deployed in a namespace other than
|
||||||
|
`kube-system`, you will need to manually delete the affected pods after
|
||||||
|
rotating certificates. This is because all service account tokens are dependent
|
||||||
|
on the apiserver token that is used to generate them. When the certificate
|
||||||
|
rotates, all service account tokens must be rotated as well. During the
|
||||||
|
kubernetes-apps/rotate_tokens role, only pods in kube-system are destroyed and
|
||||||
|
recreated. All other invalidated service account tokens are cleaned up
|
||||||
|
automatically, but other pods are not deleted out of an abundance of caution
|
||||||
|
for impact to user deployed pods.
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
Vagrant Install
|
Vagrant Install
|
||||||
=================
|
=================
|
||||||
|
|
||||||
Assuming you have Vagrant (1.8+) installed with virtualbox (it may work
|
Assuming you have Vagrant (1.9+) installed with virtualbox (it may work
|
||||||
with vmware, but is untested) you should be able to launch a 3 node
|
with vmware, but is untested) you should be able to launch a 3 node
|
||||||
Kubernetes cluster by simply running `$ vagrant up`.<br />
|
Kubernetes cluster by simply running `$ vagrant up`.<br />
|
||||||
|
|
||||||
@@ -39,3 +39,31 @@ k8s-01 Ready 45s
|
|||||||
k8s-02 Ready 45s
|
k8s-02 Ready 45s
|
||||||
k8s-03 Ready 45s
|
k8s-03 Ready 45s
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Customize Vagrant
|
||||||
|
=================
|
||||||
|
|
||||||
|
You can override the default settings in the `Vagrantfile` either by directly modifying the `Vagrantfile`
|
||||||
|
or through an override file.
|
||||||
|
|
||||||
|
In the same directory as the `Vagrantfile`, create a folder called `vagrant` and create `config.rb` file in it.
|
||||||
|
|
||||||
|
You're able to override the variables defined in `Vagrantfile` by providing the value in the `vagrant/config.rb` file,
|
||||||
|
e.g.:
|
||||||
|
|
||||||
|
echo '$forwarded_ports = {8001 => 8001}' >> vagrant/config.rb
|
||||||
|
|
||||||
|
and after `vagrant up` or `vagrant reload`, your host will have port forwarding setup with the guest on port 8001.
|
||||||
|
|
||||||
|
Use alternative OS for Vagrant
|
||||||
|
==============================
|
||||||
|
|
||||||
|
By default, Vagrant uses Ubuntu 16.04 box to provision a local cluster. You may use an alternative supported
|
||||||
|
operating system for your local cluster.
|
||||||
|
|
||||||
|
Customize `$os` variable in `Vagrantfile` or as override, e.g.,:
|
||||||
|
|
||||||
|
echo '$os = "coreos-stable"' >> vagrant/config.rb
|
||||||
|
|
||||||
|
|
||||||
|
The supported operating systems for vagrant are defined in the `SUPPORTED_OS` constant in the `Vagrantfile`.
|
||||||
|
|||||||
59
docs/vars.md
59
docs/vars.md
@@ -1,4 +1,4 @@
|
|||||||
Configurable Parameters in Kargo
|
Configurable Parameters in Kubespray
|
||||||
================================
|
================================
|
||||||
|
|
||||||
#### Generic Ansible variables
|
#### Generic Ansible variables
|
||||||
@@ -12,7 +12,7 @@ Some variables of note include:
|
|||||||
* *ansible_default_ipv4.address*: IP address Ansible automatically chooses.
|
* *ansible_default_ipv4.address*: IP address Ansible automatically chooses.
|
||||||
Generated based on the output from the command ``ip -4 route get 8.8.8.8``
|
Generated based on the output from the command ``ip -4 route get 8.8.8.8``
|
||||||
|
|
||||||
#### Common vars that are used in Kargo
|
#### Common vars that are used in Kubespray
|
||||||
|
|
||||||
* *calico_version* - Specify version of Calico to use
|
* *calico_version* - Specify version of Calico to use
|
||||||
* *calico_cni_version* - Specify version of Calico CNI plugin to use
|
* *calico_cni_version* - Specify version of Calico CNI plugin to use
|
||||||
@@ -28,6 +28,7 @@ Some variables of note include:
|
|||||||
* *kube_version* - Specify a given Kubernetes hyperkube version
|
* *kube_version* - Specify a given Kubernetes hyperkube version
|
||||||
* *searchdomains* - Array of DNS domains to search when looking up hostnames
|
* *searchdomains* - Array of DNS domains to search when looking up hostnames
|
||||||
* *nameservers* - Array of nameservers to use for DNS lookup
|
* *nameservers* - Array of nameservers to use for DNS lookup
|
||||||
|
* *preinstall_selinux_state* - Set selinux state, permitted values are permissive and disabled.
|
||||||
|
|
||||||
#### Addressing variables
|
#### Addressing variables
|
||||||
|
|
||||||
@@ -35,16 +36,16 @@ Some variables of note include:
|
|||||||
* *access_ip* - IP for other hosts to use to connect to. Often required when
|
* *access_ip* - IP for other hosts to use to connect to. Often required when
|
||||||
deploying from a cloud, such as OpenStack or GCE and you have separate
|
deploying from a cloud, such as OpenStack or GCE and you have separate
|
||||||
public/floating and private IPs.
|
public/floating and private IPs.
|
||||||
* *ansible_default_ipv4.address* - Not Kargo-specific, but it is used if ip
|
* *ansible_default_ipv4.address* - Not Kubespray-specific, but it is used if ip
|
||||||
and access_ip are undefined
|
and access_ip are undefined
|
||||||
* *loadbalancer_apiserver* - If defined, all hosts will connect to this
|
* *loadbalancer_apiserver* - If defined, all hosts will connect to this
|
||||||
address instead of localhost for kube-masters and kube-master[0] for
|
address instead of localhost for kube-masters and kube-master[0] for
|
||||||
kube-nodes. See more details in the
|
kube-nodes. See more details in the
|
||||||
[HA guide](https://github.com/kubernetes-incubator/kargo/blob/master/docs/ha-mode.md).
|
[HA guide](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/ha-mode.md).
|
||||||
* *loadbalancer_apiserver_localhost* - makes all hosts to connect to
|
* *loadbalancer_apiserver_localhost* - makes all hosts to connect to
|
||||||
the apiserver internally load balanced endpoint. Mutual exclusive to the
|
the apiserver internally load balanced endpoint. Mutual exclusive to the
|
||||||
`loadbalancer_apiserver`. See more details in the
|
`loadbalancer_apiserver`. See more details in the
|
||||||
[HA guide](https://github.com/kubernetes-incubator/kargo/blob/master/docs/ha-mode.md).
|
[HA guide](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/ha-mode.md).
|
||||||
|
|
||||||
#### Cluster variables
|
#### Cluster variables
|
||||||
|
|
||||||
@@ -61,12 +62,22 @@ following default cluster paramters:
|
|||||||
* *kube_network_node_prefix* - Subnet allocated per-node for pod IPs. Remainin
|
* *kube_network_node_prefix* - Subnet allocated per-node for pod IPs. Remaining
|
||||||
bits in kube_pods_subnet dictates how many kube-nodes can be in cluster.
|
bits in kube_pods_subnet dictates how many kube-nodes can be in cluster.
|
||||||
* *dns_setup* - Enables dnsmasq
|
* *dns_setup* - Enables dnsmasq
|
||||||
* *dns_server* - Cluster IP for dnsmasq (default is 10.233.0.2)
|
* *dnsmasq_dns_server* - Cluster IP for dnsmasq (default is 10.233.0.2)
|
||||||
* *skydns_server* - Cluster IP for KubeDNS (default is 10.233.0.3)
|
* *skydns_server* - Cluster IP for KubeDNS (default is 10.233.0.3)
|
||||||
* *cloud_provider* - Enable extra Kubelet option if operating inside GCE or
|
* *cloud_provider* - Enable extra Kubelet option if operating inside GCE or
|
||||||
OpenStack (default is unset)
|
OpenStack (default is unset)
|
||||||
* *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in
|
* *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in
|
||||||
Kubernetes
|
Kubernetes
|
||||||
|
* *kube_feature_gates* - A list of key=value pairs that describe feature gates for
|
||||||
|
alpha/experimental Kubernetes features. (defaults is `[]`)
|
||||||
|
* *authorization_modes* - A list of [authorization mode](
|
||||||
|
https://kubernetes.io/docs/admin/authorization/#using-flags-for-your-authorization-module)
|
||||||
|
that the cluster should be configured for. Defaults to `['Node', 'RBAC']`
|
||||||
|
(Node and RBAC authorizers).
|
||||||
|
Note: `Node` and `RBAC` are enabled by default. Previously deployed clusters can be
|
||||||
|
converted to RBAC mode. However, your apps which rely on Kubernetes API will
|
||||||
|
require a service account and cluster role bindings. You can override this
|
||||||
|
setting by setting authorization_modes to `[]`.
|
||||||
|
|
||||||
Note, if cloud providers have any use of the ``10.233.0.0/16``, like instances'
|
Note, if cloud providers have any use of the ``10.233.0.0/16``, like instances'
|
||||||
private addresses, make sure to pick another values for ``kube_service_addresses``
|
private addresses, make sure to pick another values for ``kube_service_addresses``
|
||||||
@@ -79,26 +90,52 @@ other settings from your existing /etc/resolv.conf are lost. Set the following
|
|||||||
variables to match your requirements.
|
variables to match your requirements.
|
||||||
|
|
||||||
* *upstream_dns_servers* - Array of upstream DNS servers configured on host in
|
* *upstream_dns_servers* - Array of upstream DNS servers configured on host in
|
||||||
addition to Kargo deployed DNS
|
addition to Kubespray deployed DNS
|
||||||
* *nameservers* - Array of DNS servers configured for use in dnsmasq
|
* *nameservers* - Array of DNS servers configured for use in dnsmasq
|
||||||
* *searchdomains* - Array of up to 4 search domains
|
* *searchdomains* - Array of up to 4 search domains
|
||||||
* *skip_dnsmasq* - Don't set up dnsmasq (use only KubeDNS)
|
* *skip_dnsmasq* - Don't set up dnsmasq (use only KubeDNS)
|
||||||
|
|
||||||
For more information, see [DNS
|
For more information, see [DNS
|
||||||
Stack](https://github.com/kubernetes-incubator/kargo/blob/master/docs/dns-stack.md).
|
Stack](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/dns-stack.md).
|
||||||
|
|
||||||
#### Other service variables
|
#### Other service variables
|
||||||
|
|
||||||
* *docker_options* - Commonly used to set
|
* *docker_options* - Commonly used to set
|
||||||
``--insecure-registry=myregistry.mydomain:5000``
|
``--insecure-registry=myregistry.mydomain:5000``
|
||||||
* *http_proxy/https_proxy/no_proxy* - Proxy variables for deploying behind a
|
* *http_proxy/https_proxy/no_proxy* - Proxy variables for deploying behind a
|
||||||
proxy
|
proxy. Note that no_proxy defaults to all internal cluster IPs and hostnames
|
||||||
|
that correspond to each node.
|
||||||
|
* *kubelet_deployment_type* - Controls which platform to deploy kubelet on.
|
||||||
|
Available options are ``host``, ``rkt``, and ``docker``. ``docker`` mode
|
||||||
|
is unlikely to work on newer releases. Starting with Kubernetes v1.7
|
||||||
|
series, this now defaults to ``host``. Before v1.7, the default was Docker.
|
||||||
|
This is because of cgroup [issues](https://github.com/kubernetes/kubernetes/issues/43704).
|
||||||
* *kubelet_load_modules* - For some things, kubelet needs to load kernel modules. For example,
|
* *kubelet_load_modules* - For some things, kubelet needs to load kernel modules. For example,
|
||||||
dynamic kernel services are needed for mounting persistent volumes into containers. These may not be
|
dynamic kernel services are needed for mounting persistent volumes into containers. These may not be
|
||||||
loaded by preinstall kubernetes processes. For example, ceph and rbd backed volumes. Set this variable to
|
loaded by preinstall kubernetes processes. For example, ceph and rbd backed volumes. Set this variable to
|
||||||
true to let kubelet load kernel modules.
|
true to let kubelet load kernel modules.
|
||||||
|
* *kubelet_cgroup_driver* - Allows manual override of the
|
||||||
|
cgroup-driver option for Kubelet. By default autodetection is used
|
||||||
|
to match Docker configuration.
|
||||||
|
|
||||||
|
##### Custom flags for Kube Components
|
||||||
|
For all kube components, custom flags can be passed in. This allows for edge cases where users need changes to the default deployment that may not be applicable to all deployments. This can be done by providing a list of flags. Example:
|
||||||
|
```
|
||||||
|
kubelet_custom_flags:
|
||||||
|
- "--eviction-hard=memory.available<100Mi"
|
||||||
|
- "--eviction-soft-grace-period=memory.available=30s"
|
||||||
|
- "--eviction-soft=memory.available<300Mi"
|
||||||
|
```
|
||||||
|
The possible vars are:
|
||||||
|
* *apiserver_custom_flags*
|
||||||
|
* *controller_mgr_custom_flags*
|
||||||
|
* *scheduler_custom_flags*
|
||||||
|
* *kubelet_custom_flags*
|
||||||
|
|
||||||
#### User accounts
|
#### User accounts
|
||||||
|
|
||||||
Kargo sets up two Kubernetes accounts by default: ``root`` and ``kube``. Their
|
By default, a user with admin rights is created, named `kube`.
|
||||||
passwords default to changeme. You can set this by changing ``kube_api_pwd``.
|
The password can be viewed after deployment by looking at the file
|
||||||
|
`PATH_TO_KUBESPRAY/credentials/kube_user`. This contains a randomly generated
|
||||||
|
password. If you wish to set your own password, just precreate/modify this
|
||||||
|
file yourself or change `kube_api_pwd` var.
|
||||||
|
|||||||
@@ -26,7 +26,6 @@ first task, is to stop any temporary instances of Vault, to free the port for
|
|||||||
the long-term. At the end of this task, the entire Vault cluster should be up
|
the long-term. At the end of this task, the entire Vault cluster should be up
|
||||||
and read to go.
|
and read to go.
|
||||||
|
|
||||||
|
|
||||||
Keys to the Kingdom
|
Keys to the Kingdom
|
||||||
-------------------
|
-------------------
|
||||||
|
|
||||||
@@ -39,35 +38,43 @@ vault group.
|
|||||||
It is *highly* recommended that these secrets are removed from the servers after
|
It is *highly* recommended that these secrets are removed from the servers after
|
||||||
your cluster has been deployed, and kept in a safe location of your choosing.
|
your cluster has been deployed, and kept in a safe location of your choosing.
|
||||||
Naturally, the seriousness of the situation depends on what you're doing with
|
Naturally, the seriousness of the situation depends on what you're doing with
|
||||||
your Kargo cluster, but with these secrets, an attacker will have the ability
|
your Kubespray cluster, but with these secrets, an attacker will have the ability
|
||||||
to authenticate to almost everything in Kubernetes and decode all private
|
to authenticate to almost everything in Kubernetes and decode all private
|
||||||
(HTTPS) traffic on your network signed by Vault certificates.
|
(HTTPS) traffic on your network signed by Vault certificates.
|
||||||
|
|
||||||
For even greater security, you may want to remove and store elsewhere any
|
For even greater security, you may want to remove and store elsewhere any
|
||||||
CA keys generated as well (e.g. /etc/vault/ssl/ca-key.pem).
|
CA keys generated as well (e.g. /etc/vault/ssl/ca-key.pem).
|
||||||
|
|
||||||
Vault by default encrypts all traffic to and from the datastore backend, all
|
Vault by default encrypts all traffic to and from the datastore backend, all
|
||||||
resting data, and uses TLS for its TCP listener. It is recommended that you
|
resting data, and uses TLS for its TCP listener. It is recommended that you
|
||||||
do not change the Vault config to disable TLS, unless you absolutely have to.
|
do not change the Vault config to disable TLS, unless you absolutely have to.
|
||||||
|
|
||||||
|
|
||||||
Usage
|
Usage
|
||||||
-----
|
-----
|
||||||
|
|
||||||
To get the Vault role running, you must to do two things at a minimum:
|
To get the Vault role running, you must to do two things at a minimum:
|
||||||
|
|
||||||
1. Assign the ``vault`` group to at least 1 node in your inventory
|
1. Assign the ``vault`` group to at least 1 node in your inventory
|
||||||
2. Change ``cert_management`` to be ``vault`` instead of ``script``
|
1. Change ``cert_management`` to be ``vault`` instead of ``script``
|
||||||
|
|
||||||
Nothing else is required, but customization is possible. Check
|
Nothing else is required, but customization is possible. Check
|
||||||
``roles/vault/defaults/main.yml`` for the different variables that can be
|
``roles/vault/defaults/main.yml`` for the different variables that can be
|
||||||
overridden, most common being ``vault_config``, ``vault_port``, and
|
overridden, most common being ``vault_config``, ``vault_port``, and
|
||||||
``vault_deployment_type``.
|
``vault_deployment_type``.
|
||||||
|
|
||||||
Also, if you intend to use a Root or Intermediate CA generated elsewhere,
|
As a result of the Vault role will be create separated Root CA for `etcd`,
|
||||||
you'll need to copy the certificate and key to the hosts in the vault group
|
`kubernetes` and `vault`. Also, if you intend to use a Root or Intermediate CA
|
||||||
prior to running the vault role. By default, they'll be located at
|
generated elsewhere, you'll need to copy the certificate and key to the hosts in the vault group prior to running the vault role. By default, they'll be located at:
|
||||||
``/etc/vault/ssl/ca.pem`` and ``/etc/vault/ssl/ca-key.pem``, respectively.
|
|
||||||
|
* vault:
|
||||||
|
* ``/etc/vault/ssl/ca.pem``
|
||||||
|
* ``/etc/vault/ssl/ca-key.pem``
|
||||||
|
* etcd:
|
||||||
|
* ``/etc/ssl/etcd/ssl/ca.pem``
|
||||||
|
* ``/etc/ssl/etcd/ssl/ca-key.pem``
|
||||||
|
* kubernetes:
|
||||||
|
* ``/etc/kubernetes/ssl/ca.pem``
|
||||||
|
* ``/etc/kubernetes/ssl/ca-key.pem``
|
||||||
|
|
||||||
Additional Notes:
|
Additional Notes:
|
||||||
|
|
||||||
@@ -77,7 +84,6 @@ Additional Notes:
|
|||||||
credentials are saved to ``/etc/vault/roles/<role>/``. The service will
|
credentials are saved to ``/etc/vault/roles/<role>/``. The service will
|
||||||
need to read in those credentials, if they want to interact with Vault.
|
need to read in those credentials, if they want to interact with Vault.
|
||||||
|
|
||||||
|
|
||||||
Potential Work
|
Potential Work
|
||||||
--------------
|
--------------
|
||||||
|
|
||||||
@@ -87,6 +93,3 @@ Potential Work
|
|||||||
- Add the ability to start temp Vault with Host, Rkt, or Docker
|
- Add the ability to start temp Vault with Host, Rkt, or Docker
|
||||||
- Add a dynamic way to change out the backend role creation during Bootstrap,
|
- Add a dynamic way to change out the backend role creation during Bootstrap,
|
||||||
so other services can be used (such as Consul)
|
so other services can be used (such as Consul)
|
||||||
- Segregate Server Cert generation from Auth Cert generation (separate CAs).
|
|
||||||
This work was partially started with the `auth_cert_backend` tasks, but would
|
|
||||||
need to be further applied to all roles (particularly Etcd and Kubernetes).
|
|
||||||
|
|||||||
61
docs/vsphere.md
Normal file
61
docs/vsphere.md
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
# vSphere cloud provider
|
||||||
|
|
||||||
|
Kubespray can be deployed with vSphere as Cloud provider. This feature supports
|
||||||
|
- Volumes
|
||||||
|
- Persistent Volumes
|
||||||
|
- Storage Classes and provisioning of volumes.
|
||||||
|
- vSphere Storage Policy Based Management for Containers orchestrated by Kubernetes.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
You need at first to configure you vSphere environement by following the [official documentation](https://kubernetes.io/docs/getting-started-guides/vsphere/#vsphere-cloud-provider).
|
||||||
|
|
||||||
|
After this step you should have:
|
||||||
|
- UUID activated for each VM where Kubernetes will be deployed
|
||||||
|
- A vSphere account with required privileges
|
||||||
|
|
||||||
|
## Kubespray configuration
|
||||||
|
|
||||||
|
Fist you must define the cloud provider in `inventory/group_vars/all.yml` and set it to `vsphere`.
|
||||||
|
```yml
|
||||||
|
cloud_provider: vsphere
|
||||||
|
```
|
||||||
|
|
||||||
|
Then, in the same file, you need to declare your vCenter credential following the description bellow.
|
||||||
|
|
||||||
|
| Variable | Required | Type | Choices | Default | Comment |
|
||||||
|
|------------------------------|----------|---------|----------------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||||
|
| vsphere_vcenter_ip | TRUE | string | | | IP/URL of the vCenter |
|
||||||
|
| vsphere_vcenter_port | TRUE | integer | | | Port of the vCenter API. Commonly 443 |
|
||||||
|
| vsphere_insecure | TRUE | integer | 1, 0 | | set to 1 if the host above uses a self-signed cert |
|
||||||
|
| vsphere_user | TRUE | string | | | User name for vCenter with required privileges |
|
||||||
|
| vsphere_password | TRUE | string | | | Password for vCenter |
|
||||||
|
| vsphere_datacenter | TRUE | string | | | Datacenter name to use |
|
||||||
|
| vsphere_datastore | TRUE | string | | | Datastore name to use |
|
||||||
|
| vsphere_working_dir | TRUE | string | | | Working directory from the view "VMs and template" in the vCenter where VM are placed |
|
||||||
|
| vsphere_scsi_controller_type | TRUE | string | buslogic, pvscsi, parallel | pvscsi | SCSI controller name. Commonly "pvscsi". |
|
||||||
|
| vsphere_vm_uuid | FALSE | string | | | VM Instance UUID of virtual machine that host K8s master. Can be retrieved from instanceUuid property in VmConfigInfo, or as vc.uuid in VMX file or in `/sys/class/dmi/id/product_serial` |
|
||||||
|
| vsphere_public_network | FALSE | string | | Blank | Name of the network the VMs are joined to |
|
||||||
|
|
||||||
|
Example configuration
|
||||||
|
```yml
|
||||||
|
vsphere_vcenter_ip: "myvcenter.domain.com"
|
||||||
|
vsphere_vcenter_port: 443
|
||||||
|
vsphere_insecure: 1
|
||||||
|
vsphere_user: "k8s@vsphere.local"
|
||||||
|
vsphere_password: "K8s_admin"
|
||||||
|
vsphere_datacenter: "DATACENTER_name"
|
||||||
|
vsphere_datastore: "DATASTORE_name"
|
||||||
|
vsphere_working_dir: "Docker_hosts"
|
||||||
|
vsphere_scsi_controller_type: "pvscsi"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Deployment
|
||||||
|
|
||||||
|
Once the configuration is set, you can execute the playbook again to apply the new configuration
|
||||||
|
```
|
||||||
|
cd kubespray
|
||||||
|
ansible-playbook -i inventory/inventory.cfg -b -v cluster.yml
|
||||||
|
```
|
||||||
|
|
||||||
|
You'll find some usefull examples [here](https://github.com/kubernetes/kubernetes/tree/master/examples/volumes/vsphere) to test your configuration.
|
||||||
98
docs/weave.md
Normal file
98
docs/weave.md
Normal file
@@ -0,0 +1,98 @@
|
|||||||
|
Weave
|
||||||
|
=======
|
||||||
|
|
||||||
|
Weave 2.0.1 is supported by kubespray
|
||||||
|
|
||||||
|
Weave uses [**consensus**](https://www.weave.works/docs/net/latest/ipam/##consensus) mode (default mode) and [**seed**](https://www.weave.works/docs/net/latest/ipam/#seed) mode.
|
||||||
|
|
||||||
|
`Consensus` mode is best to use on static size cluster and `seed` mode is best to use on dynamic size cluster
|
||||||
|
|
||||||
|
Weave encryption is supported for all communication
|
||||||
|
|
||||||
|
* To use Weave encryption, specify a strong password (if no password, no encrytion)
|
||||||
|
|
||||||
|
```
|
||||||
|
# In file ./inventory/group_vars/k8s-cluster.yml
|
||||||
|
weave_password: EnterPasswordHere
|
||||||
|
```
|
||||||
|
|
||||||
|
This password is used to set an environment variable inside weave container.
|
||||||
|
|
||||||
|
Weave is deployed by kubespray using a daemonSet
|
||||||
|
|
||||||
|
* Check the status of Weave containers
|
||||||
|
|
||||||
|
```
|
||||||
|
# From client
|
||||||
|
kubectl -n kube-system get pods | grep weave
|
||||||
|
# output
|
||||||
|
weave-net-50wd2 2/2 Running 0 2m
|
||||||
|
weave-net-js9rb 2/2 Running 0 2m
|
||||||
|
```
|
||||||
|
There must be as many pods as nodes (here kubernetes have 2 nodes so there are 2 weave pods).
|
||||||
|
|
||||||
|
* Check status of weave (connection,encryption ...) for each node
|
||||||
|
|
||||||
|
```
|
||||||
|
# On nodes
|
||||||
|
curl http://127.0.0.1:6784/status
|
||||||
|
# output on node1
|
||||||
|
Version: 2.0.1 (up to date; next check at 2017/08/01 13:51:34)
|
||||||
|
|
||||||
|
Service: router
|
||||||
|
Protocol: weave 1..2
|
||||||
|
Name: fa:16:3e:b3:d6:b2(node1)
|
||||||
|
Encryption: enabled
|
||||||
|
PeerDiscovery: enabled
|
||||||
|
Targets: 2
|
||||||
|
Connections: 2 (1 established, 1 failed)
|
||||||
|
Peers: 2 (with 2 established connections)
|
||||||
|
TrustedSubnets: none
|
||||||
|
|
||||||
|
Service: ipam
|
||||||
|
Status: ready
|
||||||
|
Range: 10.233.64.0/18
|
||||||
|
DefaultSubnet: 10.233.64.0/18
|
||||||
|
```
|
||||||
|
|
||||||
|
* Check parameters of weave for each node
|
||||||
|
|
||||||
|
```
|
||||||
|
# On nodes
|
||||||
|
ps -aux | grep weaver
|
||||||
|
# output on node1 (here its use seed mode)
|
||||||
|
root 8559 0.2 3.0 365280 62700 ? Sl 08:25 0:00 /home/weave/weaver --name=fa:16:3e:b3:d6:b2 --port=6783 --datapath=datapath --host-root=/host --http-addr=127.0.0.1:6784 --status-addr=0.0.0.0:6782 --docker-api= --no-dns --db-prefix=/weavedb/weave-net --ipalloc-range=10.233.64.0/18 --nickname=node1 --ipalloc-init seed=fa:16:3e:b3:d6:b2,fa:16:3e:f0:50:53 --conn-limit=30 --expect-npc 192.168.208.28 192.168.208.19
|
||||||
|
```
|
||||||
|
|
||||||
|
### Consensus mode (default mode)
|
||||||
|
|
||||||
|
This mode is best to use on static size cluster
|
||||||
|
|
||||||
|
### Seed mode
|
||||||
|
|
||||||
|
This mode is best to use on dynamic size cluster
|
||||||
|
|
||||||
|
The seed mode also allows multi-clouds and hybrid on-premise/cloud clusters deployement.
|
||||||
|
|
||||||
|
* Switch from consensus mode to seed mode
|
||||||
|
|
||||||
|
```
|
||||||
|
# In file ./inventory/group_vars/k8s-cluster.yml
|
||||||
|
weave_mode_seed: true
|
||||||
|
```
|
||||||
|
|
||||||
|
These two variables are only used when `weave_mode_seed` is set to `true` (**/!\ do not manually change these values**)
|
||||||
|
|
||||||
|
```
|
||||||
|
# In file ./inventory/group_vars/k8s-cluster.yml
|
||||||
|
weave_seed: uninitialized
|
||||||
|
weave_peers: uninitialized
|
||||||
|
```
|
||||||
|
|
||||||
|
The first variable, `weave_seed`, contains the initial nodes of the weave network
|
||||||
|
|
||||||
|
The seconde variable, `weave_peers`, saves the IPs of all nodes joined to the weave network
|
||||||
|
|
||||||
|
These two variables are used to connect a new node to the weave network. The new node needs to know the firsts nodes (seed) and the list of IPs of all nodes.
|
||||||
|
|
||||||
|
To reset these variables and reset the weave network set them to `uninitialized`
|
||||||
1
extra_playbooks/inventory
Symbolic link
1
extra_playbooks/inventory
Symbolic link
@@ -0,0 +1 @@
|
|||||||
|
../inventory
|
||||||
1
extra_playbooks/roles
Symbolic link
1
extra_playbooks/roles
Symbolic link
@@ -0,0 +1 @@
|
|||||||
|
../roles
|
||||||
63
extra_playbooks/upgrade-only-k8s.yml
Normal file
63
extra_playbooks/upgrade-only-k8s.yml
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
### NOTE: This playbook cannot be used to deploy any new nodes to the cluster.
|
||||||
|
### Additional information:
|
||||||
|
### * Will not upgrade etcd
|
||||||
|
### * Will not upgrade network plugins
|
||||||
|
### * Will not upgrade Docker
|
||||||
|
### * Will not pre-download containers or kubeadm
|
||||||
|
### * Currently does not support Vault deployment.
|
||||||
|
###
|
||||||
|
### In most cases, you probably want to use upgrade-cluster.yml playbook and
|
||||||
|
### not this one.
|
||||||
|
|
||||||
|
- hosts: localhost
|
||||||
|
gather_facts: False
|
||||||
|
roles:
|
||||||
|
- { role: kubespray-defaults}
|
||||||
|
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
|
||||||
|
|
||||||
|
- hosts: k8s-cluster:etcd:calico-rr
|
||||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
|
gather_facts: false
|
||||||
|
vars:
|
||||||
|
# Need to disable pipelining for bootstrap-os as some systems have requiretty in sudoers set, which makes pipelining
|
||||||
|
# fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
|
||||||
|
ansible_ssh_pipelining: false
|
||||||
|
roles:
|
||||||
|
- { role: kubespray-defaults}
|
||||||
|
- { role: bootstrap-os, tags: bootstrap-os}
|
||||||
|
|
||||||
|
- hosts: k8s-cluster:etcd:calico-rr
|
||||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
|
vars:
|
||||||
|
ansible_ssh_pipelining: true
|
||||||
|
gather_facts: true
|
||||||
|
|
||||||
|
- hosts: k8s-cluster:etcd:calico-rr
|
||||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
|
roles:
|
||||||
|
- { role: kubespray-defaults}
|
||||||
|
- { role: kubernetes/preinstall, tags: preinstall }
|
||||||
|
|
||||||
|
#Handle upgrades to master components first to maintain backwards compat.
|
||||||
|
- hosts: kube-master
|
||||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
|
serial: 1
|
||||||
|
roles:
|
||||||
|
- { role: kubespray-defaults}
|
||||||
|
- { role: upgrade/pre-upgrade, tags: pre-upgrade }
|
||||||
|
- { role: kubernetes/node, tags: node }
|
||||||
|
- { role: kubernetes/master, tags: master }
|
||||||
|
- { role: kubernetes/client, tags: client }
|
||||||
|
- { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
|
||||||
|
- { role: upgrade/post-upgrade, tags: post-upgrade }
|
||||||
|
|
||||||
|
#Finally handle worker upgrades, based on given batch size
|
||||||
|
- hosts: kube-node:!kube-master
|
||||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
|
serial: "{{ serial | default('20%') }}"
|
||||||
|
roles:
|
||||||
|
- { role: kubespray-defaults}
|
||||||
|
- { role: upgrade/pre-upgrade, tags: pre-upgrade }
|
||||||
|
- { role: kubernetes/node, tags: node }
|
||||||
|
- { role: upgrade/post-upgrade, tags: post-upgrade }
|
||||||
|
- { role: kubespray-defaults}
|
||||||
@@ -1,3 +1,12 @@
|
|||||||
|
# Valid bootstrap options (required): ubuntu, coreos, centos, none
|
||||||
|
bootstrap_os: none
|
||||||
|
|
||||||
|
#Directory where etcd data stored
|
||||||
|
etcd_data_dir: /var/lib/etcd
|
||||||
|
|
||||||
|
# Directory where the binaries will be installed
|
||||||
|
bin_dir: /usr/local/bin
|
||||||
|
|
||||||
## The access_ip variable is used to define how other nodes should access
|
## The access_ip variable is used to define how other nodes should access
|
||||||
## the node. This is used in flannel to allow other flannel nodes to see
|
## the node. This is used in flannel to allow other flannel nodes to see
|
||||||
## this node for example. The access_ip is really useful AWS and Google
|
## this node for example. The access_ip is really useful AWS and Google
|
||||||
@@ -65,15 +74,36 @@
|
|||||||
#azure_vnet_name:
|
#azure_vnet_name:
|
||||||
#azure_route_table_name:
|
#azure_route_table_name:
|
||||||
|
|
||||||
## Set these proxy values in order to update docker daemon to use proxies
|
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
|
||||||
|
#openstack_blockstorage_version: "v1/v2/auto (default)"
|
||||||
|
## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
|
||||||
|
#openstack_lbaas_enabled: True
|
||||||
|
#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
|
||||||
|
#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
|
||||||
|
#openstack_lbaas_create_monitor: "yes"
|
||||||
|
#openstack_lbaas_monitor_delay: "1m"
|
||||||
|
#openstack_lbaas_monitor_timeout: "30s"
|
||||||
|
#openstack_lbaas_monitor_max_retries: "3"
|
||||||
|
|
||||||
|
## Uncomment to enable experimental kubeadm deployment mode
|
||||||
|
#kubeadm_enabled: false
|
||||||
|
#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
|
||||||
|
#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
|
||||||
|
#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
|
||||||
|
#
|
||||||
|
## Set these proxy values in order to update package manager and docker daemon to use proxies
|
||||||
#http_proxy: ""
|
#http_proxy: ""
|
||||||
#https_proxy: ""
|
#https_proxy: ""
|
||||||
|
## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
|
||||||
#no_proxy: ""
|
#no_proxy: ""
|
||||||
|
|
||||||
## Uncomment this if you want to force overlay/overlay2 as docker storage driver
|
## Uncomment this if you want to force overlay/overlay2 as docker storage driver
|
||||||
## Please note that overlay2 is only supported on newer kernels
|
## Please note that overlay2 is only supported on newer kernels
|
||||||
#docker_storage_options: -s overlay2
|
#docker_storage_options: -s overlay2
|
||||||
|
|
||||||
|
# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
|
||||||
|
#docker_dns_servers_strict: false
|
||||||
|
|
||||||
## Default packages to install within the cluster, f.e:
|
## Default packages to install within the cluster, f.e:
|
||||||
#kpm_packages:
|
#kpm_packages:
|
||||||
# - name: kube-system/grafana
|
# - name: kube-system/grafana
|
||||||
@@ -84,5 +114,11 @@
|
|||||||
## as a backend). Options are "script" or "vault"
|
## as a backend). Options are "script" or "vault"
|
||||||
#cert_management: script
|
#cert_management: script
|
||||||
|
|
||||||
## Please specify true if you want to perform a kernel upgrade
|
# Set to true to allow pre-checks to fail and continue deployment
|
||||||
kernel_upgrade: false
|
#ignore_assert_errors: false
|
||||||
|
|
||||||
|
## Etcd auto compaction retention for mvcc key value store in hour
|
||||||
|
#etcd_compaction_retention: 0
|
||||||
|
|
||||||
|
## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
|
||||||
|
#etcd_metrics: basic
|
||||||
|
|||||||
@@ -1,12 +1,3 @@
|
|||||||
# Valid bootstrap options (required): ubuntu, coreos, centos, none
|
|
||||||
bootstrap_os: none
|
|
||||||
|
|
||||||
#Directory where etcd data stored
|
|
||||||
etcd_data_dir: /var/lib/etcd
|
|
||||||
|
|
||||||
# Directory where the binaries will be installed
|
|
||||||
bin_dir: /usr/local/bin
|
|
||||||
|
|
||||||
# Kubernetes configuration dirs and system namespace.
|
# Kubernetes configuration dirs and system namespace.
|
||||||
# Those are where all the additional config stuff goes
|
# Those are where all the additional config stuff goes
|
||||||
# the kubernetes normally puts in /srv/kubernets.
|
# the kubernetes normally puts in /srv/kubernets.
|
||||||
@@ -32,7 +23,7 @@ kube_users_dir: "{{ kube_config_dir }}/users"
|
|||||||
kube_api_anonymous_auth: false
|
kube_api_anonymous_auth: false
|
||||||
|
|
||||||
## Change this to use another Kubernetes version, e.g. a current beta release
|
## Change this to use another Kubernetes version, e.g. a current beta release
|
||||||
kube_version: v1.5.3
|
kube_version: v1.8.1
|
||||||
|
|
||||||
# Where the binaries will be downloaded.
|
# Where the binaries will be downloaded.
|
||||||
# Note: ensure that you've enough disk space (about 1G)
|
# Note: ensure that you've enough disk space (about 1G)
|
||||||
@@ -48,16 +39,14 @@ kube_cert_group: kube-cert
|
|||||||
kube_log_level: 2
|
kube_log_level: 2
|
||||||
|
|
||||||
# Users to create for basic auth in Kubernetes API via HTTP
|
# Users to create for basic auth in Kubernetes API via HTTP
|
||||||
kube_api_pwd: "changeme"
|
# Optionally add groups for user
|
||||||
|
kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
|
||||||
kube_users:
|
kube_users:
|
||||||
kube:
|
kube:
|
||||||
pass: "{{kube_api_pwd}}"
|
pass: "{{kube_api_pwd}}"
|
||||||
role: admin
|
role: admin
|
||||||
root:
|
groups:
|
||||||
pass: "{{kube_api_pwd}}"
|
- system:masters
|
||||||
role: admin
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
|
## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
|
||||||
#kube_oidc_auth: false
|
#kube_oidc_auth: false
|
||||||
@@ -80,6 +69,23 @@ kube_users:
|
|||||||
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
|
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
|
||||||
kube_network_plugin: calico
|
kube_network_plugin: calico
|
||||||
|
|
||||||
|
# weave's network password for encryption
|
||||||
|
# if null then no network encryption
|
||||||
|
# you can use --extra-vars to pass the password in command line
|
||||||
|
weave_password: EnterPasswordHere
|
||||||
|
|
||||||
|
# Weave uses consensus mode by default
|
||||||
|
# Enabling seed mode allow to dynamically add or remove hosts
|
||||||
|
# https://www.weave.works/docs/net/latest/ipam/
|
||||||
|
weave_mode_seed: false
|
||||||
|
|
||||||
|
# This two variable are automatically changed by the weave's role, do not manually change these values
|
||||||
|
# To reset values :
|
||||||
|
# weave_seed: uninitialized
|
||||||
|
# weave_peers: uninitialized
|
||||||
|
weave_seed: uninitialized
|
||||||
|
weave_peers: uninitialized
|
||||||
|
|
||||||
# Enable kubernetes network policies
|
# Enable kubernetes network policies
|
||||||
enable_network_policy: false
|
enable_network_policy: false
|
||||||
|
|
||||||
@@ -107,14 +113,14 @@ cluster_name: cluster.local
|
|||||||
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
|
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
|
||||||
ndots: 2
|
ndots: 2
|
||||||
# Can be dnsmasq_kubedns, kubedns or none
|
# Can be dnsmasq_kubedns, kubedns or none
|
||||||
dns_mode: dnsmasq_kubedns
|
dns_mode: kubedns
|
||||||
# Can be docker_dns, host_resolvconf or none
|
# Can be docker_dns, host_resolvconf or none
|
||||||
resolvconf_mode: docker_dns
|
resolvconf_mode: docker_dns
|
||||||
# Deploy netchecker app to verify DNS resolve as an HTTP service
|
# Deploy netchecker app to verify DNS resolve as an HTTP service
|
||||||
deploy_netchecker: false
|
deploy_netchecker: false
|
||||||
# Ip address of the kubernetes skydns service
|
# Ip address of the kubernetes skydns service
|
||||||
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
|
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
|
||||||
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
|
dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
|
||||||
dns_domain: "{{ cluster_name }}"
|
dns_domain: "{{ cluster_name }}"
|
||||||
|
|
||||||
# Path used to store Docker data
|
# Path used to store Docker data
|
||||||
@@ -124,20 +130,47 @@ docker_daemon_graph: "/var/lib/docker"
|
|||||||
## This string should be exactly as you wish it to appear.
|
## This string should be exactly as you wish it to appear.
|
||||||
## An obvious use case is allowing insecure-registry access
|
## An obvious use case is allowing insecure-registry access
|
||||||
## to self hosted registries like so:
|
## to self hosted registries like so:
|
||||||
docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }}"
|
|
||||||
|
docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
|
||||||
docker_bin_dir: "/usr/bin"
|
docker_bin_dir: "/usr/bin"
|
||||||
|
|
||||||
# Settings for containerized control plane (etcd/kubelet/secrets)
|
# Settings for containerized control plane (etcd/kubelet/secrets)
|
||||||
etcd_deployment_type: docker
|
etcd_deployment_type: docker
|
||||||
kubelet_deployment_type: docker
|
kubelet_deployment_type: host
|
||||||
cert_management: script
|
|
||||||
vault_deployment_type: docker
|
vault_deployment_type: docker
|
||||||
|
|
||||||
# K8s image pull policy (imagePullPolicy)
|
# K8s image pull policy (imagePullPolicy)
|
||||||
k8s_image_pull_policy: IfNotPresent
|
k8s_image_pull_policy: IfNotPresent
|
||||||
|
|
||||||
|
# Kubernetes dashboard (available at http://first_master:6443/ui by default)
|
||||||
|
dashboard_enabled: true
|
||||||
|
|
||||||
# Monitoring apps for k8s
|
# Monitoring apps for k8s
|
||||||
efk_enabled: false
|
efk_enabled: false
|
||||||
|
|
||||||
# Helm deployment
|
# Helm deployment
|
||||||
helm_enabled: false
|
helm_enabled: false
|
||||||
|
|
||||||
|
# Istio depoyment
|
||||||
|
istio_enabled: false
|
||||||
|
|
||||||
|
# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
|
||||||
|
# kubeconfig_localhost: false
|
||||||
|
# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
|
||||||
|
# kubectl_localhost: false
|
||||||
|
|
||||||
|
# dnsmasq
|
||||||
|
# dnsmasq_upstream_dns_servers:
|
||||||
|
# - /resolvethiszone.with/10.0.4.250
|
||||||
|
# - 8.8.8.8
|
||||||
|
|
||||||
|
# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
|
||||||
|
# kubelet_cgroups_per_qos: true
|
||||||
|
|
||||||
|
# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
|
||||||
|
# Acceptible options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
|
||||||
|
# kubelet_enforce_node_allocatable: pods
|
||||||
|
|
||||||
|
## Supplementary addresses that can be added in kubernetes ssl keys.
|
||||||
|
## That can be usefull for example to setup a keepalived virtual IP
|
||||||
|
# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
|
||||||
|
|||||||
@@ -28,4 +28,4 @@
|
|||||||
|
|
||||||
# [k8s-cluster:children]
|
# [k8s-cluster:children]
|
||||||
# kube-node
|
# kube-node
|
||||||
# kube-master
|
# kube-master
|
||||||
@@ -66,7 +66,7 @@ options:
|
|||||||
description:
|
description:
|
||||||
- present handles checking existence or creating if definition file provided,
|
- present handles checking existence or creating if definition file provided,
|
||||||
absent handles deleting resource(s) based on other options,
|
absent handles deleting resource(s) based on other options,
|
||||||
latest handles creating ore updating based on existence,
|
latest handles creating or updating based on existence,
|
||||||
reloaded handles updating resource(s) definition using definition file,
|
reloaded handles updating resource(s) definition using definition file,
|
||||||
stopped handles stopping resource(s) based on other options.
|
stopped handles stopping resource(s) based on other options.
|
||||||
requirements:
|
requirements:
|
||||||
@@ -135,11 +135,14 @@ class KubeManager(object):
|
|||||||
return None
|
return None
|
||||||
return out.splitlines()
|
return out.splitlines()
|
||||||
|
|
||||||
def create(self, check=True):
|
def create(self, check=True, force=True):
|
||||||
if check and self.exists():
|
if check and self.exists():
|
||||||
return []
|
return []
|
||||||
|
|
||||||
cmd = ['create']
|
cmd = ['apply']
|
||||||
|
|
||||||
|
if force:
|
||||||
|
cmd.append('--force')
|
||||||
|
|
||||||
if not self.filename:
|
if not self.filename:
|
||||||
self.module.fail_json(msg='filename required to create')
|
self.module.fail_json(msg='filename required to create')
|
||||||
@@ -148,14 +151,11 @@ class KubeManager(object):
|
|||||||
|
|
||||||
return self._execute(cmd)
|
return self._execute(cmd)
|
||||||
|
|
||||||
def replace(self):
|
def replace(self, force=True):
|
||||||
|
|
||||||
if not self.force and not self.exists():
|
cmd = ['apply']
|
||||||
return []
|
|
||||||
|
|
||||||
cmd = ['replace']
|
if force:
|
||||||
|
|
||||||
if self.force:
|
|
||||||
cmd.append('--force')
|
cmd.append('--force')
|
||||||
|
|
||||||
if not self.filename:
|
if not self.filename:
|
||||||
@@ -270,9 +270,8 @@ def main():
|
|||||||
|
|
||||||
manager = KubeManager(module)
|
manager = KubeManager(module)
|
||||||
state = module.params.get('state')
|
state = module.params.get('state')
|
||||||
|
|
||||||
if state == 'present':
|
if state == 'present':
|
||||||
result = manager.create()
|
result = manager.create(check=False)
|
||||||
|
|
||||||
elif state == 'absent':
|
elif state == 'absent':
|
||||||
result = manager.delete()
|
result = manager.delete()
|
||||||
@@ -284,17 +283,11 @@ def main():
|
|||||||
result = manager.stop()
|
result = manager.stop()
|
||||||
|
|
||||||
elif state == 'latest':
|
elif state == 'latest':
|
||||||
if manager.exists():
|
result = manager.replace()
|
||||||
manager.force = True
|
|
||||||
result = manager.replace()
|
|
||||||
else:
|
|
||||||
result = manager.create(check=False)
|
|
||||||
|
|
||||||
else:
|
else:
|
||||||
module.fail_json(msg='Unrecognized state %s.' % state)
|
module.fail_json(msg='Unrecognized state %s.' % state)
|
||||||
|
|
||||||
if result:
|
|
||||||
changed = True
|
|
||||||
module.exit_json(changed=changed,
|
module.exit_json(changed=changed,
|
||||||
msg='success: %s' % (' '.join(result))
|
msg='success: %s' % (' '.join(result))
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -1,14 +1,4 @@
|
|||||||
ansible==2.2.1.0
|
pbr>=1.6
|
||||||
|
ansible>=2.4.0
|
||||||
netaddr
|
netaddr
|
||||||
# Ansible 2.2.1 requires jinja2<2.9, see <https://github.com/ansible/ansible/blob/v2.2.1.0-1/setup.py#L25>,
|
jinja2>=2.9.6
|
||||||
# but without explicit limiting upper jinja2 version here pip ignores
|
|
||||||
# Ansible requirements and installs latest available jinja2
|
|
||||||
# (pip is not very smart here), which is incompatible with with
|
|
||||||
# Ansible 2.2.1.
|
|
||||||
# With incompatible jinja2 version "ansible-vault create" (and probably other parts)
|
|
||||||
# fails with:
|
|
||||||
# ERROR! Unexpected Exception: The 'jinja2<2.9' distribution was not found
|
|
||||||
# and is required by ansible
|
|
||||||
# This upper limit should be removed in 2.2.2 release, see:
|
|
||||||
# <https://github.com/ansible/ansible/commit/978311bf3f91dae5806ab72b665b0937adce38ad>
|
|
||||||
jinja2>=2.8,<2.9
|
|
||||||
|
|||||||
@@ -14,5 +14,5 @@
|
|||||||
when: reset_confirmation != "yes"
|
when: reset_confirmation != "yes"
|
||||||
|
|
||||||
roles:
|
roles:
|
||||||
- { role: kargo-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: reset, tags: reset }
|
- { role: reset, tags: reset }
|
||||||
|
|||||||
@@ -3,13 +3,13 @@
|
|||||||
has_bastion: "{{ 'bastion' in groups['all'] }}"
|
has_bastion: "{{ 'bastion' in groups['all'] }}"
|
||||||
|
|
||||||
- set_fact:
|
- set_fact:
|
||||||
bastion_ip: "{{ hostvars['bastion']['ansible_ssh_host'] }}"
|
bastion_ip: "{{ hostvars['bastion']['ansible_host'] }}"
|
||||||
when: has_bastion
|
when: has_bastion
|
||||||
|
|
||||||
# As we are actually running on localhost, the ansible_ssh_user is your local user when you try to use it directly
|
# As we are actually running on localhost, the ansible_ssh_user is your local user when you try to use it directly
|
||||||
# To figure out the real ssh user, we delegate this task to the bastion and store the ansible_ssh_user in real_user
|
# To figure out the real ssh user, we delegate this task to the bastion and store the ansible_user in real_user
|
||||||
- set_fact:
|
- set_fact:
|
||||||
real_user: "{{ ansible_ssh_user }}"
|
real_user: "{{ ansible_user }}"
|
||||||
delegate_to: bastion
|
delegate_to: bastion
|
||||||
when: has_bastion
|
when: has_bastion
|
||||||
|
|
||||||
@@ -18,3 +18,4 @@
|
|||||||
template:
|
template:
|
||||||
src: ssh-bastion.conf
|
src: ssh-bastion.conf
|
||||||
dest: "{{ playbook_dir }}/ssh-bastion.conf"
|
dest: "{{ playbook_dir }}/ssh-bastion.conf"
|
||||||
|
when: has_bastion
|
||||||
|
|||||||
@@ -16,6 +16,7 @@ Host {{ bastion_ip }}
|
|||||||
ControlPersist 5m
|
ControlPersist 5m
|
||||||
|
|
||||||
Host {{ vars['hosts'] }}
|
Host {{ vars['hosts'] }}
|
||||||
ProxyCommand ssh -W %h:%p {{ real_user }}@{{ bastion_ip }}
|
ProxyCommand ssh -W %h:%p {{ real_user }}@{{ bastion_ip }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}
|
||||||
|
|
||||||
StrictHostKeyChecking no
|
StrictHostKeyChecking no
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|||||||
@@ -2,3 +2,4 @@
|
|||||||
pypy_version: 2.4.0
|
pypy_version: 2.4.0
|
||||||
pip_python_modules:
|
pip_python_modules:
|
||||||
- httplib2
|
- httplib2
|
||||||
|
- six
|
||||||
@@ -13,3 +13,8 @@
|
|||||||
line: "enabled=0"
|
line: "enabled=0"
|
||||||
state: present
|
state: present
|
||||||
when: fastestmirror.stat.exists
|
when: fastestmirror.stat.exists
|
||||||
|
|
||||||
|
- name: Install packages requirements for bootstrap
|
||||||
|
yum:
|
||||||
|
name: libselinux-python
|
||||||
|
state: present
|
||||||
|
|||||||
@@ -3,15 +3,17 @@
|
|||||||
raw: stat /opt/bin/.bootstrapped
|
raw: stat /opt/bin/.bootstrapped
|
||||||
register: need_bootstrap
|
register: need_bootstrap
|
||||||
failed_when: false
|
failed_when: false
|
||||||
tags: facts
|
tags:
|
||||||
|
- facts
|
||||||
|
|
||||||
- name: Bootstrap | Run bootstrap.sh
|
- name: Bootstrap | Run bootstrap.sh
|
||||||
script: bootstrap.sh
|
script: bootstrap.sh
|
||||||
when: (need_bootstrap | failed)
|
when: need_bootstrap.rc != 0
|
||||||
|
|
||||||
- set_fact:
|
- set_fact:
|
||||||
ansible_python_interpreter: "/opt/bin/python"
|
ansible_python_interpreter: "/opt/bin/python"
|
||||||
tags: facts
|
tags:
|
||||||
|
- facts
|
||||||
|
|
||||||
- name: Bootstrap | Check if we need to install pip
|
- name: Bootstrap | Check if we need to install pip
|
||||||
shell: "{{ansible_python_interpreter}} -m pip --version"
|
shell: "{{ansible_python_interpreter}} -m pip --version"
|
||||||
@@ -19,34 +21,34 @@
|
|||||||
failed_when: false
|
failed_when: false
|
||||||
changed_when: false
|
changed_when: false
|
||||||
check_mode: no
|
check_mode: no
|
||||||
when: (need_bootstrap | failed)
|
when: need_bootstrap.rc != 0
|
||||||
tags: facts
|
tags:
|
||||||
|
- facts
|
||||||
|
|
||||||
- name: Bootstrap | Copy get-pip.py
|
- name: Bootstrap | Copy get-pip.py
|
||||||
copy:
|
copy:
|
||||||
src: get-pip.py
|
src: get-pip.py
|
||||||
dest: ~/get-pip.py
|
dest: ~/get-pip.py
|
||||||
when: (need_pip | failed)
|
when: need_pip != 0
|
||||||
|
|
||||||
- name: Bootstrap | Install pip
|
- name: Bootstrap | Install pip
|
||||||
shell: "{{ansible_python_interpreter}} ~/get-pip.py"
|
shell: "{{ansible_python_interpreter}} ~/get-pip.py"
|
||||||
when: (need_pip | failed)
|
when: need_pip != 0
|
||||||
|
|
||||||
- name: Bootstrap | Remove get-pip.py
|
- name: Bootstrap | Remove get-pip.py
|
||||||
file:
|
file:
|
||||||
path: ~/get-pip.py
|
path: ~/get-pip.py
|
||||||
state: absent
|
state: absent
|
||||||
when: (need_pip | failed)
|
when: need_pip != 0
|
||||||
|
|
||||||
- name: Bootstrap | Install pip launcher
|
- name: Bootstrap | Install pip launcher
|
||||||
copy:
|
copy:
|
||||||
src: runner
|
src: runner
|
||||||
dest: /opt/bin/pip
|
dest: /opt/bin/pip
|
||||||
mode: 0755
|
mode: 0755
|
||||||
when: (need_pip | failed)
|
when: need_pip != 0
|
||||||
|
|
||||||
- name: Install required python modules
|
- name: Install required python modules
|
||||||
pip:
|
pip:
|
||||||
name: "{{ item }}"
|
name: "{{ item }}"
|
||||||
with_items: "{{pip_python_modules}}"
|
with_items: "{{pip_python_modules}}"
|
||||||
|
|
||||||
|
|||||||
23
roles/bootstrap-os/tasks/bootstrap-debian.yml
Normal file
23
roles/bootstrap-os/tasks/bootstrap-debian.yml
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
---
|
||||||
|
# raw: cat /etc/issue.net | grep '{{ bootstrap_versions }}'
|
||||||
|
|
||||||
|
- name: Bootstrap | Check if bootstrap is needed
|
||||||
|
raw: which "{{ item }}"
|
||||||
|
register: need_bootstrap
|
||||||
|
failed_when: false
|
||||||
|
with_items:
|
||||||
|
- python
|
||||||
|
- pip
|
||||||
|
- dbus-daemon
|
||||||
|
tags: facts
|
||||||
|
|
||||||
|
- name: Bootstrap | Install python 2.x, pip, and dbus
|
||||||
|
raw:
|
||||||
|
apt-get update && \
|
||||||
|
DEBIAN_FRONTEND=noninteractive apt-get install -y python-minimal python-pip dbus
|
||||||
|
when:
|
||||||
|
"{{ need_bootstrap.results | map(attribute='rc') | sort | last | bool }}"
|
||||||
|
|
||||||
|
- set_fact:
|
||||||
|
ansible_python_interpreter: "/usr/bin/python"
|
||||||
|
tags: facts
|
||||||
@@ -8,15 +8,18 @@
|
|||||||
with_items:
|
with_items:
|
||||||
- python
|
- python
|
||||||
- pip
|
- pip
|
||||||
tags: facts
|
- dbus-daemon
|
||||||
|
tags:
|
||||||
|
- facts
|
||||||
|
|
||||||
- name: Bootstrap | Install python 2.x and pip
|
- name: Bootstrap | Install python 2.x and pip
|
||||||
raw:
|
raw:
|
||||||
apt-get update && \
|
apt-get update && \
|
||||||
DEBIAN_FRONTEND=noninteractive apt-get install -y python-minimal python-pip
|
DEBIAN_FRONTEND=noninteractive apt-get install -y python-minimal python-pip dbus
|
||||||
when:
|
when:
|
||||||
"{{ need_bootstrap.results | map(attribute='rc') | sort | last | bool }}"
|
"{{ need_bootstrap.results | map(attribute='rc') | sort | last | bool }}"
|
||||||
|
|
||||||
- set_fact:
|
- set_fact:
|
||||||
ansible_python_interpreter: "/usr/bin/python"
|
ansible_python_interpreter: "/usr/bin/python"
|
||||||
tags: facts
|
tags:
|
||||||
|
- facts
|
||||||
|
|||||||
@@ -2,6 +2,9 @@
|
|||||||
- include: bootstrap-ubuntu.yml
|
- include: bootstrap-ubuntu.yml
|
||||||
when: bootstrap_os == "ubuntu"
|
when: bootstrap_os == "ubuntu"
|
||||||
|
|
||||||
|
- include: bootstrap-debian.yml
|
||||||
|
when: bootstrap_os == "debian"
|
||||||
|
|
||||||
- include: bootstrap-coreos.yml
|
- include: bootstrap-coreos.yml
|
||||||
when: bootstrap_os == "coreos"
|
when: bootstrap_os == "coreos"
|
||||||
|
|
||||||
@@ -21,10 +24,20 @@
|
|||||||
- name: Gather nodes hostnames
|
- name: Gather nodes hostnames
|
||||||
setup:
|
setup:
|
||||||
gather_subset: '!all'
|
gather_subset: '!all'
|
||||||
filter: ansible_hostname
|
filter: ansible_*
|
||||||
|
|
||||||
- name: Assign inventory name to unconfigured hostnames
|
- name: Assign inventory name to unconfigured hostnames (non-CoreOS)
|
||||||
hostname:
|
hostname:
|
||||||
name: "{{inventory_hostname}}"
|
name: "{{inventory_hostname}}"
|
||||||
when: ansible_hostname == 'localhost'
|
when: ansible_os_family not in ['CoreOS', 'Container Linux by CoreOS']
|
||||||
|
|
||||||
|
- name: Assign inventory name to unconfigured hostnames (CoreOS only)
|
||||||
|
command: "hostnamectl set-hostname {{inventory_hostname}}"
|
||||||
|
register: hostname_changed
|
||||||
|
when: ansible_hostname == 'localhost' and ansible_os_family in ['CoreOS', 'Container Linux by CoreOS']
|
||||||
|
|
||||||
|
- name: Update hostname fact (CoreOS only)
|
||||||
|
setup:
|
||||||
|
gather_subset: '!all'
|
||||||
|
filter: ansible_hostname
|
||||||
|
when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] and hostname_changed.changed
|
||||||
|
|||||||
@@ -6,4 +6,3 @@
|
|||||||
regexp: '^\w+\s+requiretty'
|
regexp: '^\w+\s+requiretty'
|
||||||
dest: /etc/sudoers
|
dest: /etc/sudoers
|
||||||
state: absent
|
state: absent
|
||||||
|
|
||||||
|
|||||||
@@ -4,12 +4,12 @@
|
|||||||
|
|
||||||
# Max of 4 names is allowed and no more than 256 - 17 chars total
|
# Max of 4 names is allowed and no more than 256 - 17 chars total
|
||||||
# (a 2 is reserved for the 'default.svc.' and'svc.')
|
# (a 2 is reserved for the 'default.svc.' and'svc.')
|
||||||
#searchdomains:
|
# searchdomains:
|
||||||
# - foo.bar.lc
|
# - foo.bar.lc
|
||||||
|
|
||||||
# Max of 2 is allowed here (a 1 is reserved for the dns_server)
|
# Max of 2 is allowed here (a 1 is reserved for the dns_server)
|
||||||
#nameservers:
|
# nameservers:
|
||||||
# - 127.0.0.1
|
# - 127.0.0.1
|
||||||
|
|
||||||
dns_forward_max: 150
|
dns_forward_max: 150
|
||||||
cache_size: 1000
|
cache_size: 1000
|
||||||
@@ -30,3 +30,6 @@ dns_memory_requests: 50Mi
|
|||||||
# Autoscaler parameters
|
# Autoscaler parameters
|
||||||
dnsmasq_nodes_per_replica: 10
|
dnsmasq_nodes_per_replica: 10
|
||||||
dnsmasq_min_replicas: 1
|
dnsmasq_min_replicas: 1
|
||||||
|
|
||||||
|
# Custom name servers
|
||||||
|
dnsmasq_upstream_dns_servers: []
|
||||||
|
|||||||
@@ -1,6 +0,0 @@
|
|||||||
---
|
|
||||||
dependencies:
|
|
||||||
- role: download
|
|
||||||
file: "{{ downloads.dnsmasq }}"
|
|
||||||
when: dns_mode == 'dnsmasq_kubedns' and download_localhost|default(false)
|
|
||||||
tags: [download, dnsmasq]
|
|
||||||
@@ -1,17 +1,17 @@
|
|||||||
---
|
---
|
||||||
- include: pre_upgrade.yml
|
|
||||||
|
|
||||||
- name: ensure dnsmasq.d directory exists
|
- name: ensure dnsmasq.d directory exists
|
||||||
file:
|
file:
|
||||||
path: /etc/dnsmasq.d
|
path: /etc/dnsmasq.d
|
||||||
state: directory
|
state: directory
|
||||||
tags: bootstrap-os
|
tags:
|
||||||
|
- bootstrap-os
|
||||||
|
|
||||||
- name: ensure dnsmasq.d-available directory exists
|
- name: ensure dnsmasq.d-available directory exists
|
||||||
file:
|
file:
|
||||||
path: /etc/dnsmasq.d-available
|
path: /etc/dnsmasq.d-available
|
||||||
state: directory
|
state: directory
|
||||||
tags: bootstrap-os
|
tags:
|
||||||
|
- bootstrap-os
|
||||||
|
|
||||||
- name: check system nameservers
|
- name: check system nameservers
|
||||||
shell: awk '/^nameserver/ {print $NF}' /etc/resolv.conf
|
shell: awk '/^nameserver/ {print $NF}' /etc/resolv.conf
|
||||||
@@ -56,6 +56,26 @@
|
|||||||
dest: /etc/dnsmasq.d/01-kube-dns.conf
|
dest: /etc/dnsmasq.d/01-kube-dns.conf
|
||||||
state: link
|
state: link
|
||||||
|
|
||||||
|
- name: Create dnsmasq RBAC manifests
|
||||||
|
template:
|
||||||
|
src: "{{ item }}"
|
||||||
|
dest: "{{ kube_config_dir }}/{{ item }}"
|
||||||
|
with_items:
|
||||||
|
- "dnsmasq-clusterrolebinding.yml"
|
||||||
|
- "dnsmasq-serviceaccount.yml"
|
||||||
|
when: rbac_enabled
|
||||||
|
delegate_to: "{{ groups['kube-master'][0] }}"
|
||||||
|
run_once: true
|
||||||
|
|
||||||
|
- name: Apply dnsmasq RBAC manifests
|
||||||
|
command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item }}"
|
||||||
|
with_items:
|
||||||
|
- "dnsmasq-clusterrolebinding.yml"
|
||||||
|
- "dnsmasq-serviceaccount.yml"
|
||||||
|
when: rbac_enabled
|
||||||
|
delegate_to: "{{ groups['kube-master'][0] }}"
|
||||||
|
run_once: true
|
||||||
|
|
||||||
- name: Create dnsmasq manifests
|
- name: Create dnsmasq manifests
|
||||||
template:
|
template:
|
||||||
src: "{{item.file}}"
|
src: "{{item.file}}"
|
||||||
@@ -63,9 +83,10 @@
|
|||||||
with_items:
|
with_items:
|
||||||
- {name: dnsmasq, file: dnsmasq-deploy.yml, type: deployment}
|
- {name: dnsmasq, file: dnsmasq-deploy.yml, type: deployment}
|
||||||
- {name: dnsmasq, file: dnsmasq-svc.yml, type: svc}
|
- {name: dnsmasq, file: dnsmasq-svc.yml, type: svc}
|
||||||
- {name: dnsmasq-autoscaler, file: dnsmasq-autoscaler.yml, type: deployment}
|
- {name: dnsmasq-autoscaler, file: dnsmasq-autoscaler.yml.j2, type: deployment}
|
||||||
register: manifests
|
register: manifests
|
||||||
when: inventory_hostname == groups['kube-master'][0]
|
delegate_to: "{{ groups['kube-master'][0] }}"
|
||||||
|
run_once: true
|
||||||
|
|
||||||
- name: Start Resources
|
- name: Start Resources
|
||||||
kube:
|
kube:
|
||||||
@@ -74,14 +95,14 @@
|
|||||||
kubectl: "{{bin_dir}}/kubectl"
|
kubectl: "{{bin_dir}}/kubectl"
|
||||||
resource: "{{item.item.type}}"
|
resource: "{{item.item.type}}"
|
||||||
filename: "{{kube_config_dir}}/{{item.item.file}}"
|
filename: "{{kube_config_dir}}/{{item.item.file}}"
|
||||||
state: "{{item.changed | ternary('latest','present') }}"
|
state: "latest"
|
||||||
with_items: "{{ manifests.results }}"
|
with_items: "{{ manifests.results }}"
|
||||||
when: inventory_hostname == groups['kube-master'][0]
|
delegate_to: "{{ groups['kube-master'][0] }}"
|
||||||
|
run_once: true
|
||||||
|
|
||||||
- name: Check for dnsmasq port (pulling image and running container)
|
- name: Check for dnsmasq port (pulling image and running container)
|
||||||
wait_for:
|
wait_for:
|
||||||
host: "{{dns_server}}"
|
host: "{{dnsmasq_dns_server}}"
|
||||||
port: 53
|
port: 53
|
||||||
timeout: 180
|
timeout: 180
|
||||||
when: inventory_hostname == groups['kube-node'][0] and groups['kube-node'][0] in ansible_play_hosts
|
when: inventory_hostname == groups['kube-node'][0] and groups['kube-node'][0] in ansible_play_hosts
|
||||||
|
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user