Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2025-12-14 13:54:37 +03:00)

## Compare commits

447 commits are included in this comparison.
Abbreviated SHAs of the compared commits:

01dbc909be 0512c22607 f0d5a96464 361645e8b6 353d44a4a6 680aa60429 6b3cf8c4b8 e41766fd58
e4c820c35e db5f83f8c9 412d560bcf a468954519 a3d3f27aaa 72b68c7f82 28333d4513 ed8c0ee95a
724a316204 d70cafc1e4 18c8e0a14a 3ff6a2e7ff 1ee3ff738e 52edd4c9bc d8345c5eae 98e7a07fba
3d5988577a 69603aed34 299e35ebe4 6674be2572 cf1566e8ed c6d91b89d7 b44f7957d5 aead0e3a69
b0484fe3e5 b8cd9403df d7df577898 09bccc97ba 1c187e9729 8939196f0d 15be42abfd ca45d5ffbe
2bec26dba5 03c8d0113c 536606c2ed 6e29a47784 826a440fa6 baff4e61cf 4d7eca7d2e 32fec3bb74
3134dd4c0d 56a9c7a802 bfa468c771 6318bb9f96 8618a3119b 27a268df33 7930f6fa0a 49bd208026
83fe607f62 ea8b799ff0 e2d6f8d897 0924c2510c 065292f8a4 35f248dff0 b09fe64ff1 54debdbda2
b6341287bb 6a92e34994 00efc63f74 b061cce913 c929b5e82e 58f48500b1 b5125e59ab d316b02d28
7910198b93 7b2f35c7d4 45874a23bb 9c3b573f8e 7d6ef61491 6a7c3c6e3f 883194afec 3a63aa6b1e
337499d772 82123f3c4e 8f3d820664 7d812f8112 473a8beff0 0d675cdd1a 9cce46ea8c 2e67289473
980aeafebe 7d1ab3374e 01b9b263ed c33a049292 7eaa7c957a f055ba7965 157c247563 a35b6dc1af
910a821d0b 2c21e7bd3a 45a177e2a0 0c51352a74 9b1980cfff ae29296e20 75e743bfae 2f19d964f6
0d2990510e e732df56a1 2f92d6bca3 c72903e8d6 ded58d3b66 be9414fabe 033afe1574 c35461a005
a93421019b 4fd3e2ece7 937adec515 bce3f282f1 f8ad44a99f 7ee2f0d918 9cbb373ae2 484df62c5a
79a6b72a13 d439564a7e b0a5f265e3 8800eb3492 09308d6125 a8822e24b0 a60e4c0a3f b2d740dd1f
3237b2702f e8c49b0090 3dd51cd648 e03aa795fa a8a05a21a4 63fa406c3c 6ad6609872 474fbf09c4
47849b8ff7 0379a52f03 bc2eeb0560 81f07c3783 f90926389a dcb97e775e 096de82fd9 db693d46df
b8d628c5f3 0aa22998e2 afe047a77f 1ae794e5e4 a7a204ebca 8774d7e4d5 34e51ac1cb d152dc2e6a
8ce5a9dd19 820d8e6ce6 3cefd60c37 876d4de6be 974902af31 45626a05dc 4b5299bb7a ceab27c97a
03d1b56a8f 64190dfc73 29128eb316 ea9f8b4258 1cb03a184b 158d998ec4 168241df4f f5417032bf
d69db3469e 980a4fa401 027e2e8a11 dcfda9d9d2 ca73e29ec5 0330442c63 221c6a8eef 25a1e5f952
38df80046e 57bb7aa5f6 86996704ce f53ac2a5a0 d0af5979c8 43020bd064 dc00b96f47 71c856878c
19865e81db a4258b1244 e0b76b185a c47f441b13 7c854a18bb 8df2c0a7c6 e60b9f796e 2c8bcc6722
e257d92f41 f697338eec e2ec7c76a4 058d101bf9 833794feef a3dedb68d1 4a463567ac 9f3ed7d855
221b429c24 b937d1cd9a 986c46c2b6 e029216566 2d4595887d 2beffe688a 66408a87ee 62b418cd16
5361cc075d be12164290 588896712e 6221b94fdf efef80f67b 0c1a0ab966 678ed5ced5 7f87ce0362
d1acf7f192 171d2ce59c c6170eb79d d2c44dd4df 9b7090ca1d ee8e88b111 a901b1f0d7 82efd95901
4c803d579b b34ec6c46b 6368c626c5 a5445d9c5c da86457cda eb00693325 a15a0b5eb9 646fd5f47b
277b347604 12bc634ec3 769e54d8f5 ad50bc4ccb 0ca7aa126b d0d9967457 b51b52ac0e 36c1f32ef9
fa245ffdd5 f7c5f45833 579976260f d56e9f6b80 57b0b6a9b1 640190217d a08f485d76 f6b66839bd
26700e7882 d86229dc2b f56171b513 516e9a4de6 765d907ea1 287421e21e 2761fda2c9 339e36fbe6
5e648b96e8 ac2135e450 68c8c05775 14b1cab5d2 e570e2e736 16fd2e5d68 422b25ab1f b7527399b5
89bad11ad8 9d32e2c3b0 099341582a 942c98003f cad3bf3e8c 2ab5cc73cd 9f2dd09628 2798adc837
54d9404c0e f1025dce4e 538f4dad9d 5323e232b2 5d9986ab5f 38688a4486 d640a57f9b 5e9479cded
06ffe44f1f b35b816287 bf15d06568 2c2ffa846c 48c41bcbe7 beb47e1c63 303c3654a1 5fab610fab
3c3ebc05cc 94956ebde9 e716bed11b ccbcad9741 15a8c34717 b815f48803 95c97332bf 9bdf6b00cc
91b23caa19 5df48ef8fd 109078c5e0 c0b262a22a 8bb1af9926 538f1f1a68 b60ab3ae44 370a0635fa
db2ca014cb f0f8379e1b 815eebf1d7 95cf18ff00 696fcaf391 6ff5ccc938 f8a18fcaca 961c1be53e
eda1dcb7f6 5e0140d62c 717fe3cf3a 32d80ca438 2a9aead50e 9fda84b1c9 42702dc1a3 40e35b3fa6
b15d41a96a 7da2083986 37df9a10ff 0f845fb350 23b8998701 401d441c10 f7aea8ed89 a9b67d586b
b1fbead531 b06826e88a 57fef8f75e f599a4a859 e44b0727d5 18cee65c4b f779cb93d6 aec5080a47
80418a44d5 257c20f39e 050de3ab7b f1498d4b53 18d19d9ed4 61d7d1459a 6924c6e5a3 85c851f519
5cd7d1a3c9 8b67159239 4f70da2731 db5040e6ea 97764921ed a6853cb79d 8c15db53b2 0200138a5d
14af98ebdc 8a5434419b 8a406be48a feac802456 076f254a67 bc3a8a0039 45d151a69d bd014c409b
08421aad3d 1c25ed669c a005d19f6f 471589f1f4 b0ee1f6cc6 79128dcd6b dd7e1469e9 186ec13579
2c4e6b65d7 108a6297e9 94d4ce5a6f 81da231b1e 1a87dcd9b9 a1fff30bd9 81d57fe658 3118437e10
65e461a7c0 c672681ce5 d332a254ee f3c072f6b6 3debb8aab5 aada6e7e40 ac60786c6f db33dc6938
9dfb25cafd df8d2285b6 af6456d1ea 6f57f7dd2f fd2ff675c3 bec23c8a41 faaff8bd72 8031c6c1e7
9d8fc8caad d1b1add176 a51b729817 19bc79b1a6 d2fa3c7796 03cac2109c 932935ecc7 e01118d36d
dea9304968 2864e13ff9 a8c5a0afdc 0ba336b04e 89f1223f64 8bc0710073 fb591bf232 a43e0d3f95
f16cc1f08d 8712bddcbe 99dbc6d780 75e4cc2fd9 81cb302399 3bcdf46937 1cf6a99df4 a5d165dc85
f18e77f1db 2fc02ed456 9db61c45ed 8cb54cd74d a3f1ce25f8 3c7f682e90 8984096f35 1ce7831f6d
46f6c09d21 6fe2248314 cb4f797d32 eb40ac163f 27ec548b88 637f09f140 9b0f57a0a6 5f02068f90
7f74906d33 56523812d3 602b2d198a 4d95bb1421 184ac6a4e6 10e0fe86fb 7e1645845f f255ce3f02
07ecef86e3 3bc4b4c174 da089b5fca d4f094cc11 494a6512b8 3732c3a9b1 f6a63d88a7

.ansible-lint

@@ -5,6 +5,8 @@ skip_list:
 # The following rules throw errors.
 # These either still need to be corrected in the repository and the rules re-enabled or documented why they are skipped on purpose.
 - '301'
+- '302'
+- '303'
 - '305'
 - '306'
 - '404'

.github/ISSUE_TEMPLATE/bug-report.md (vendored)

@@ -18,6 +18,8 @@ explain why.
 
 - **Version of Ansible** (`ansible --version`):
 
+- **Version of Python** (`python --version`):
+
 
 **Kubespray version (commit) (`git rev-parse --short HEAD`):**
 
@@ -25,8 +27,8 @@ explain why.
 **Network plugin used**:
 
 
-**Copy of your inventory file:**
-
+**Full inventory with variables (`ansible -i inventory/sample/inventory.ini all -m debug -a "var=hostvars[inventory_hostname]"`):**
+<!-- We recommend using snippets services like https://gist.github.com/ etc. -->
 
 **Command used to invoke ansible**:
 
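The version strings and inventory dump the template asks for can be gathered in one pass; a sketch, where the inventory path and output file are just examples:

```ShellSession
# Versions requested by the bug-report template
ansible --version
python --version
git rev-parse --short HEAD

# Full inventory with variables (replace the inventory path with your own)
ansible -i inventory/sample/inventory.ini all -m debug -a "var=hostvars[inventory_hostname]" > inventory-dump.txt
```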
.github/PULL_REQUEST_TEMPLATE.md (vendored)

@@ -1,9 +1,9 @@
 <!-- Thanks for sending a pull request! Here are some tips for you:
 
-1. If this is your first time, please read our contributor guidelines: https://git.k8s.io/community/contributors/guide#your-first-contribution and developer guide https://git.k8s.io/community/contributors/devel/development.md#development-guide
+1. If this is your first time, please read our contributor guidelines: https://git.k8s.io/community/contributors/guide/first-contribution.md and developer guide https://git.k8s.io/community/contributors/devel/development.md
 2. Please label this pull request according to what type of issue you are addressing, especially if this is a release targeted pull request. For reference on required PR/issue labels, read here:
-https://git.k8s.io/community/contributors/devel/release.md#issue-kind-label
-3. Ensure you have added or ran the appropriate tests for your PR: https://git.k8s.io/community/contributors/devel/testing.md
+https://git.k8s.io/community/contributors/devel/sig-release/release.md#issuepr-kind-label
+3. Ensure you have added or ran the appropriate tests for your PR: https://git.k8s.io/community/contributors/devel/sig-testing/testing.md
 4. If you want *faster* PR reviews, read how: https://git.k8s.io/community/contributors/guide/pull-requests.md#best-practices-for-faster-reviews
 5. Follow the instructions for writing a release note: https://git.k8s.io/community/contributors/guide/release-notes.md
 6. If the PR is unfinished, see how to mark it: https://git.k8s.io/community/contributors/guide/pull-requests.md#marking-unfinished-pull-requests
.gitlab-ci.yml

@@ -4,13 +4,13 @@ stages:
   - deploy-part1
   - moderator
   - deploy-part2
-  - deploy-gce
+  - deploy-part3
   - deploy-special
 
 variables:
+  KUBESPRAY_VERSION: v2.12.6
   FAILFASTCI_NAMESPACE: 'kargo-ci'
   GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
-  # DOCKER_HOST: tcp://localhost:2375
   ANSIBLE_FORCE_COLOR: "true"
   MAGIC: "ci check this"
   TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
@@ -26,31 +26,35 @@ variables:
   IDEMPOT_CHECK: "false"
   RESET_CHECK: "false"
   UPGRADE_TEST: "false"
-  LOG_LEVEL: "-vv"
+  MITOGEN_ENABLE: "false"
+  ANSIBLE_LOG_LEVEL: "-vv"
+  RECOVER_CONTROL_PLANE_TEST: "false"
+  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"
 
 before_script:
   - ./tests/scripts/rebase.sh
-  - /usr/bin/python -m pip install -r tests/requirements.txt
+  - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
+  - python -m pip install -r tests/requirements.txt
   - mkdir -p /.ssh
 
 .job: &job
   tags:
     - packet
-  variables:
-    KUBESPRAY_VERSION: v2.10.0
   image: quay.io/kubespray/kubespray:$KUBESPRAY_VERSION
+  artifacts:
+    paths:
+      - cluster-dump/
 
 .testcases: &testcases
   <<: *job
-  services:
-    - docker:dind
   before_script:
+    - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
    - ./tests/scripts/rebase.sh
    - ./tests/scripts/testcases_prepare.sh
   script:
    - ./tests/scripts/testcases_run.sh
   after_script:
-    - ./tests/scripts/testcases_cleanup.sh
+    - chronic ./tests/scripts/testcases_cleanup.sh
 
 # For failfast, at least 1 job must be defined in .gitlab-ci.yml
 # Premoderated with manual actions
@@ -66,6 +70,6 @@ ci-authorized:
 include:
   - .gitlab-ci/lint.yml
   - .gitlab-ci/shellcheck.yml
-  - .gitlab-ci/digital-ocean.yml
   - .gitlab-ci/terraform.yml
   - .gitlab-ci/packet.yml
+  - .gitlab-ci/vagrant.yml

.gitlab-ci/digital-ocean.yml (deleted)

@@ -1,19 +0,0 @@
----
-.do_variables: &do_variables
-  PRIVATE_KEY: $DO_PRIVATE_KEY
-  CI_PLATFORM: "do"
-  SSH_USER: root
-
-.do: &do
-  extends: .testcases
-  tags:
-    - do
-
-do_ubuntu-canal-ha:
-  stage: deploy-part2
-  extends: .do
-  variables:
-    <<: *do_variables
-  when: manual
-  except: ['triggers']
-  only: ['master', /^pr-.*$/]

.gitlab-ci/gce.yml (deleted)

@@ -1,247 +0,0 @@
----
-.gce_variables: &gce_variables
-  GCE_USER: travis
-  SSH_USER: $GCE_USER
-  CLOUD_MACHINE_TYPE: "g1-small"
-  CI_PLATFORM: "gce"
-  PRIVATE_KEY: $GCE_PRIVATE_KEY
-
-.cache: &cache
-  cache:
-    key: "$CI_BUILD_REF_NAME"
-    paths:
-      - downloads/
-      - $HOME/.cache
-
-.gce: &gce
-  extends: .testcases
-  <<: *cache
-  variables:
-    <<: *gce_variables
-  tags:
-    - gce
-  except: ['triggers']
-  only: [/^pr-.*$/]
-
-.centos_weave_kubeadm_variables: &centos_weave_kubeadm_variables
-  # stage: deploy-part1
-  UPGRADE_TEST: "graceful"
-
-.centos7_multus_calico_variables: &centos7_multus_calico_variables
-  # stage: deploy-gce
-  UPGRADE_TEST: "graceful"
-
-# Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
-### PR JOBS PART1
-
-gce_ubuntu18-flannel-aio:
-  stage: deploy-part1
-  <<: *gce
-  when: manual
-
-### PR JOBS PART2
-
-gce_coreos-calico-aio:
-  stage: deploy-gce
-  <<: *gce
-  when: on_success
-
-gce_centos7-flannel-addons:
-  stage: deploy-gce
-  <<: *gce
-  when: manual
-
-### MANUAL JOBS
-
-gce_centos-weave-kubeadm-sep:
-  stage: deploy-gce
-  extends: .gce
-  variables:
-    <<: *centos_weave_kubeadm_variables
-  when: on_success
-  only: ['triggers']
-  except: []
-
-gce_ubuntu-weave-sep:
-  stage: deploy-gce
-  <<: *gce
-  when: manual
-  only: ['triggers']
-  except: []
-
-gce_coreos-calico-sep-triggers:
-  stage: deploy-gce
-  <<: *gce
-  when: on_success
-  only: ['triggers']
-  except: []
-
-gce_ubuntu-canal-ha-triggers:
-  stage: deploy-special
-  <<: *gce
-  when: on_success
-  only: ['triggers']
-  except: []
-
-gce_centos7-flannel-addons-triggers:
-  stage: deploy-gce
-  <<: *gce
-  when: on_success
-  only: ['triggers']
-  except: []
-
-gce_ubuntu-weave-sep-triggers:
-  stage: deploy-gce
-  <<: *gce
-  when: on_success
-  only: ['triggers']
-  except: []
-
-# More builds for PRs/merges (manual) and triggers (auto)
-
-
-gce_ubuntu-canal-ha:
-  stage: deploy-special
-  <<: *gce
-  when: manual
-
-gce_ubuntu-canal-kubeadm:
-  stage: deploy-gce
-  <<: *gce
-  when: manual
-
-gce_ubuntu-canal-kubeadm-triggers:
-  stage: deploy-gce
-  <<: *gce
-  when: on_success
-  only: ['triggers']
-  except: []
-
-gce_ubuntu-flannel-ha:
-  stage: deploy-gce
-  <<: *gce
-  when: manual
-
-gce_centos-weave-kubeadm-triggers:
-  stage: deploy-gce
-  extends: .gce
-  variables:
-    <<: *centos_weave_kubeadm_variables
-  when: on_success
-  only: ['triggers']
-  except: []
-
-gce_ubuntu-contiv-sep:
-  stage: deploy-special
-  <<: *gce
-  when: manual
-
-gce_coreos-cilium:
-  stage: deploy-special
-  <<: *gce
-  when: manual
-
-gce_ubuntu18-cilium-sep:
-  stage: deploy-special
-  <<: *gce
-  when: manual
-
-gce_rhel7-weave:
-  stage: deploy-gce
-  <<: *gce
-  when: manual
-
-gce_rhel7-weave-triggers:
-  stage: deploy-gce
-  <<: *gce
-  when: on_success
-  only: ['triggers']
-  except: []
-
-gce_debian9-calico-upgrade:
-  stage: deploy-gce
-  <<: *gce
-  when: manual
-
-gce_debian9-calico-triggers:
-  stage: deploy-gce
-  <<: *gce
-  when: on_success
-  only: ['triggers']
-  except: []
-
-gce_coreos-canal:
-  stage: deploy-gce
-  <<: *gce
-  when: manual
-
-gce_coreos-canal-triggers:
-  stage: deploy-gce
-  <<: *gce
-  when: on_success
-  only: ['triggers']
-  except: []
-
-gce_rhel7-canal-sep:
-  stage: deploy-special
-  <<: *gce
-  when: manual
-
-gce_rhel7-canal-sep-triggers:
-  stage: deploy-gce
-  <<: *gce
-  when: on_success
-  only: ['triggers']
-  except: []
-
-gce_centos7-calico-ha:
-  stage: deploy-special
-  <<: *gce
-  when: manual
-
-gce_centos7-calico-ha-triggers:
-  stage: deploy-gce
-  <<: *gce
-  when: on_success
-  only: ['triggers']
-  except: []
-
-gce_centos7-kube-router:
-  stage: deploy-special
-  <<: *gce
-  when: manual
-
-gce_centos7-multus-calico:
-  stage: deploy-gce
-  extends: .gce
-  variables:
-    <<: *centos7_multus_calico_variables
-  when: manual
-
-gce_oracle-canal:
-  stage: deploy-gce
-  <<: *gce
-  when: manual
-  except: ['triggers']
-  only: ['master', /^pr-.*$/]
-
-gce_opensuse-canal:
-  stage: deploy-gce
-  <<: *gce
-  when: manual
-
-# no triggers yet https://github.com/kubernetes-incubator/kargo/issues/613
-gce_coreos-alpha-weave-ha:
-  stage: deploy-special
-  <<: *gce
-  when: manual
-
-gce_coreos-kube-router:
-  stage: deploy-special
-  <<: *gce
-  when: manual
-
-gce_ubuntu-kube-router-sep:
-  stage: deploy-special
-  <<: *gce
-  when: manual

.gitlab-ci/lint.yml

@@ -2,6 +2,9 @@
 yamllint:
   extends: .job
   stage: unit-tests
+  tags: [light]
+  variables:
+    LANG: C.UTF-8
   script:
     - yamllint --strict .
   except: ['triggers', 'master']
@@ -9,15 +12,17 @@ yamllint:
 vagrant-validate:
   extends: .job
   stage: unit-tests
+  tags: [light]
+  variables:
+    VAGRANT_VERSION: 2.2.4
   script:
-    - curl -sL https://releases.hashicorp.com/vagrant/2.2.4/vagrant_2.2.4_x86_64.deb -o /tmp/vagrant_2.2.4_x86_64.deb
-    - dpkg -i /tmp/vagrant_2.2.4_x86_64.deb
-    - vagrant validate --ignore-provider
+    - ./tests/scripts/vagrant-validate.sh
   except: ['triggers', 'master']
 
 ansible-lint:
   extends: .job
   stage: unit-tests
+  tags: [light]
   # lint every yml/yaml file that looks like it contains Ansible plays
   script: |-
     grep -Rl '^- hosts: \|^ hosts: ' --include \*.yml --include \*.yaml . | xargs -P 4 -n 25 ansible-lint -v
@@ -26,6 +31,7 @@ ansible-lint:
 syntax-check:
   extends: .job
   stage: unit-tests
+  tags: [light]
   variables:
     ANSIBLE_INVENTORY: inventory/local-tests.cfg
     ANSIBLE_REMOTE_USER: root
@@ -41,9 +47,30 @@ syntax-check:
 
 tox-inventory-builder:
   stage: unit-tests
+  tags: [light]
   extends: .job
+  before_script:
+    - ./tests/scripts/rebase.sh
+    - apt-get update && apt-get install -y python3-pip
+    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
+    - python -m pip install -r tests/requirements.txt
   script:
-    - pip install tox
+    - pip3 install tox
     - cd contrib/inventory_builder && tox
-  when: manual
   except: ['triggers', 'master']
+
+markdownlint:
+  stage: unit-tests
+  tags: [light]
+  image: node
+  before_script:
+    - npm install -g markdownlint-cli
+  script:
+    - markdownlint README.md docs --ignore docs/_sidebar.md
+
+ci-matrix:
+  stage: unit-tests
+  tags: [light]
+  image: python:3
+  script:
+    - tests/scripts/md-table/test.sh

.gitlab-ci/packet.yml

@@ -1,122 +1,201 @@
 ---
-.packet_variables: &packet_variables
-  CI_PLATFORM: "packet"
-  SSH_USER: "kubespray"
-
 .packet: &packet
   extends: .testcases
   variables:
-    <<: *packet_variables
+    CI_PLATFORM: "packet"
+    SSH_USER: "kubespray"
   tags:
     - packet
   only: [/^pr-.*$/]
   except: ['triggers']
 
-.test-upgrade: &test-upgrade
-  variables:
-    UPGRADE_TEST: "graceful"
-
 packet_ubuntu18-calico-aio:
   stage: deploy-part1
-  <<: *packet
+  extends: .packet
   when: on_success
 
+# Future AIO job
+packet_ubuntu20-calico-aio:
+  stage: deploy-part1
+  extends: .packet
+  when: manual
+
 # ### PR JOBS PART2
 
-packet_centos7-flannel-addons:
+packet_centos7-flannel-containerd-addons-ha:
+  extends: .packet
   stage: deploy-part2
-  <<: *packet
   when: on_success
+  variables:
+    MITOGEN_ENABLE: "true"
+
+packet_ubuntu18-crio:
+  extends: .packet
+  stage: deploy-part2
+  when: on_success
+  variables:
+    MITOGEN_ENABLE: "true"
 
 # ### MANUAL JOBS
 
-packet_centos-weave-kubeadm-sep:
-  stage: deploy-part2
-  <<: *packet
+packet_centos7-weave-upgrade-ha:
+  stage: deploy-part3
+  extends: .packet
   when: on_success
-  only: ['triggers']
-  except: []
+  variables:
+    UPGRADE_TEST: basic
+    MITOGEN_ENABLE: "false"
 
-packet_ubuntu-weave-sep:
+packet_ubuntu16-weave-sep:
   stage: deploy-part2
-  <<: *packet
+  extends: .packet
   when: manual
-  only: ['triggers']
-  except: []
 
 # # More builds for PRs/merges (manual) and triggers (auto)
 
-packet_ubuntu-canal-ha:
+packet_ubuntu16-canal-sep:
   stage: deploy-special
-  <<: *packet
+  extends: .packet
   when: manual
 
-packet_ubuntu-canal-kubeadm:
+packet_ubuntu16-canal-kubeadm-ha:
   stage: deploy-part2
-  <<: *packet
+  extends: .packet
   when: on_success
 
-packet_ubuntu-flannel-ha:
+packet_ubuntu16-flannel-ha:
   stage: deploy-part2
-  <<: *packet
+  extends: .packet
   when: manual
 
-packet_ubuntu-contiv-sep:
-  stage: deploy-part2
-  <<: *packet
-  when: on_success
+# Contiv does not work in k8s v1.16
+# packet_ubuntu16-contiv-sep:
+#   stage: deploy-part2
+#   extends: .packet
+#   when: on_success
 
 packet_ubuntu18-cilium-sep:
   stage: deploy-special
-  <<: *packet
+  extends: .packet
   when: manual
 
-packet_ubuntu18-flannel-containerd:
+packet_ubuntu18-flannel-containerd-ha:
   stage: deploy-part2
-  <<: *packet
+  extends: .packet
   when: manual
 
-packet_debian9-macvlan-sep:
+packet_ubuntu18-flannel-containerd-ha-once:
   stage: deploy-part2
-  <<: *packet
-  when: on_success
+  extends: .packet
+  when: manual
 
-packet_debian9-calico-upgrade:
+packet_debian9-macvlan:
   stage: deploy-part2
-  <<: *packet
+  extends: .packet
+  when: manual
+
+packet_debian9-calico-upgrade-once:
+  stage: deploy-part3
+  extends: .packet
   when: on_success
+  variables:
+    UPGRADE_TEST: graceful
+    MITOGEN_ENABLE: "false"
+
+packet_debian10-containerd:
+  stage: deploy-part2
+  extends: .packet
+  when: on_success
+  variables:
+    MITOGEN_ENABLE: "true"
 
 packet_centos7-calico-ha:
   stage: deploy-part2
-  <<: *packet
+  extends: .packet
   when: manual
 
-packet_centos7-kube-ovn:
+packet_centos7-calico-ha-once-localhost:
   stage: deploy-part2
-  <<: *packet
+  extends: .packet
   when: on_success
+  services:
+    - docker:18.09.9-dind
+
+packet_centos8-kube-ovn:
+  stage: deploy-part2
+  extends: .packet
+  when: on_success
+
+packet_fedora30-weave:
+  stage: deploy-part2
+  extends: .packet
+  when: on_success
+
+packet_fedora31-flannel:
+  stage: deploy-part2
+  extends: .packet
+  when: on_success
+  variables:
+    MITOGEN_ENABLE: "true"
 
 packet_centos7-kube-router:
   stage: deploy-part2
-  <<: *packet
-  when: on_success
+  extends: .packet
+  when: manual
 
 packet_centos7-multus-calico:
   stage: deploy-part2
-  <<: *packet
+  extends: .packet
   when: manual
 
+packet_centos8-calico:
+  stage: deploy-part2
+  extends: .packet
+  when: on_success
+
 packet_opensuse-canal:
   stage: deploy-part2
-  <<: *packet
+  extends: .packet
+  when: on_success
+
+packet_oracle7-canal-ha:
+  stage: deploy-part2
+  extends: .packet
   when: manual
 
-packet_oracle-7-canal:
+packet_ubuntu16-kube-router-sep:
   stage: deploy-part2
-  <<: *packet
+  extends: .packet
   when: manual
 
-packet_ubuntu-kube-router-sep:
+packet_amazon-linux-2-aio:
   stage: deploy-part2
-  <<: *packet
+  extends: .packet
   when: manual
+
+# ### PR JOBS PART3
+# Long jobs (45min+)
+
+packet_debian9-calico-upgrade:
+  stage: deploy-part3
+  extends: .packet
+  when: on_success
+  variables:
+    UPGRADE_TEST: graceful
+    MITOGEN_ENABLE: "false"
+
+packet_ubuntu18-calico-ha-recover:
+  stage: deploy-part3
+  extends: .packet
+  when: on_success
+  variables:
+    RECOVER_CONTROL_PLANE_TEST: "true"
+    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"
+
+packet_ubuntu18-calico-ha-recover-noquorum:
+  stage: deploy-part3
+  extends: .packet
+  when: on_success
+  variables:
+    RECOVER_CONTROL_PLANE_TEST: "true"
+    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube-master[1:]"

.gitlab-ci/shellcheck.yml

@@ -2,6 +2,7 @@
 shellcheck:
   extends: .job
   stage: unit-tests
+  tags: [light]
   variables:
     SHELLCHECK_VERSION: v0.6.0
   before_script:

.gitlab-ci/terraform.yml

@@ -3,14 +3,14 @@
 .terraform_install:
   extends: .job
   before_script:
+    - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
     - ./tests/scripts/rebase.sh
     - ./tests/scripts/testcases_prepare.sh
     - ./tests/scripts/terraform_install.sh
     # Set Ansible config
     - cp ansible.cfg ~/.ansible.cfg
     # Prepare inventory
-    - if [ "$PROVIDER" == "openstack" ]; then VARIABLEFILE="cluster.tfvars"; else VARIABLEFILE="cluster.tf"; fi
-    - cp contrib/terraform/$PROVIDER/sample-inventory/$VARIABLEFILE .
+    - cp contrib/terraform/$PROVIDER/sample-inventory/cluster.tfvars .
     - ln -s contrib/terraform/$PROVIDER/hosts
     - terraform init contrib/terraform/$PROVIDER
     # Copy SSH keypair
@@ -22,17 +22,21 @@
 .terraform_validate:
   extends: .terraform_install
   stage: unit-tests
+  tags: [light]
   only: ['master', /^pr-.*$/]
   script:
-    - if [ "$PROVIDER" == "openstack" ]; then VARIABLEFILE="cluster.tfvars"; else VARIABLEFILE="cluster.tf"; fi
-    - terraform validate -var-file=$VARIABLEFILE contrib/terraform/$PROVIDER
+    - terraform validate -var-file=cluster.tfvars contrib/terraform/$PROVIDER
     - terraform fmt -check -diff contrib/terraform/$PROVIDER
 
 .terraform_apply:
   extends: .terraform_install
-  stage: deploy-part2
+  tags: [light]
+  stage: deploy-part3
   when: manual
   only: [/^pr-.*$/]
+  artifacts:
+    paths:
+      - cluster-dump/
   variables:
     ANSIBLE_INVENTORY_UNPARSED_FAILED: "true"
     ANSIBLE_INVENTORY: hosts
@@ -43,56 +47,56 @@
     - tests/scripts/testcases_run.sh
   after_script:
     # Cleanup regardless of exit code
-    - ./tests/scripts/testcases_cleanup.sh
+    - chronic ./tests/scripts/testcases_cleanup.sh
 
 tf-validate-openstack:
   extends: .terraform_validate
   variables:
-    TF_VERSION: 0.12.6
+    TF_VERSION: 0.12.24
     PROVIDER: openstack
     CLUSTER: $CI_COMMIT_REF_NAME
 
 tf-validate-packet:
   extends: .terraform_validate
   variables:
-    TF_VERSION: 0.11.11
+    TF_VERSION: 0.12.24
     PROVIDER: packet
     CLUSTER: $CI_COMMIT_REF_NAME
 
 tf-validate-aws:
   extends: .terraform_validate
   variables:
-    TF_VERSION: 0.11.11
+    TF_VERSION: 0.12.24
     PROVIDER: aws
     CLUSTER: $CI_COMMIT_REF_NAME
 
-tf-packet-ubuntu16-default:
-  extends: .terraform_apply
-  variables:
-    TF_VERSION: 0.11.11
-    PROVIDER: packet
-    CLUSTER: $CI_COMMIT_REF_NAME
-    TF_VAR_number_of_k8s_masters: "1"
-    TF_VAR_number_of_k8s_nodes: "1"
-    TF_VAR_plan_k8s_masters: t1.small.x86
-    TF_VAR_plan_k8s_nodes: t1.small.x86
-    TF_VAR_facility: ewr1
-    TF_VAR_public_key_path: ""
-    TF_VAR_operating_system: ubuntu_16_04
-
-tf-packet-ubuntu18-default:
-  extends: .terraform_apply
-  variables:
-    TF_VERSION: 0.11.11
-    PROVIDER: packet
-    CLUSTER: $CI_COMMIT_REF_NAME
-    TF_VAR_number_of_k8s_masters: "1"
-    TF_VAR_number_of_k8s_nodes: "1"
-    TF_VAR_plan_k8s_masters: t1.small.x86
-    TF_VAR_plan_k8s_nodes: t1.small.x86
-    TF_VAR_facility: ams1
-    TF_VAR_public_key_path: ""
-    TF_VAR_operating_system: ubuntu_18_04
+# tf-packet-ubuntu16-default:
+# extends: .terraform_apply
+# variables:
+# TF_VERSION: 0.12.24
+# PROVIDER: packet
+# CLUSTER: $CI_COMMIT_REF_NAME
+# TF_VAR_number_of_k8s_masters: "1"
+# TF_VAR_number_of_k8s_nodes: "1"
+# TF_VAR_plan_k8s_masters: t1.small.x86
+# TF_VAR_plan_k8s_nodes: t1.small.x86
+# TF_VAR_facility: ewr1
+# TF_VAR_public_key_path: ""
+# TF_VAR_operating_system: ubuntu_16_04
+#
+# tf-packet-ubuntu18-default:
+# extends: .terraform_apply
+# variables:
+# TF_VERSION: 0.12.24
+# PROVIDER: packet
+# CLUSTER: $CI_COMMIT_REF_NAME
+# TF_VAR_number_of_k8s_masters: "1"
+# TF_VAR_number_of_k8s_nodes: "1"
+# TF_VAR_plan_k8s_masters: t1.small.x86
+# TF_VAR_plan_k8s_nodes: t1.small.x86
+# TF_VAR_facility: ams1
+# TF_VAR_public_key_path: ""
+# TF_VAR_operating_system: ubuntu_18_04
 
 .ovh_variables: &ovh_variables
   OS_AUTH_URL: https://auth.cloud.ovh.net/v3
@@ -105,12 +109,23 @@ tf-packet-ubuntu18-default:
   OS_INTERFACE: public
   OS_IDENTITY_API_VERSION: "3"
 
+tf-ovh_cleanup:
+  stage: unit-tests
+  tags: [light]
+  image: python
+  variables:
+    <<: *ovh_variables
+  before_script:
+    - pip install -r scripts/openstack-cleanup/requirements.txt
+  script:
+    - ./scripts/openstack-cleanup/main.py
+
 tf-ovh_ubuntu18-calico:
   extends: .terraform_apply
   when: on_success
   variables:
     <<: *ovh_variables
-    TF_VERSION: 0.12.6
+    TF_VERSION: 0.12.24
     PROVIDER: openstack
     CLUSTER: $CI_COMMIT_REF_NAME
     ANSIBLE_TIMEOUT: "60"
@@ -138,7 +153,7 @@ tf-ovh_coreos-calico:
   when: on_success
   variables:
     <<: *ovh_variables
-    TF_VERSION: 0.12.6
+    TF_VERSION: 0.12.24
     PROVIDER: openstack
     CLUSTER: $CI_COMMIT_REF_NAME
     ANSIBLE_TIMEOUT: "60"

.gitlab-ci/vagrant.yml (new file, 49 lines)

@@ -0,0 +1,49 @@
+---
+
+molecule_tests:
+  tags: [c3.small.x86]
+  only: [/^pr-.*$/]
+  except: ['triggers']
+  image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
+  services: []
+  stage: deploy-part1
+  before_script:
+    - tests/scripts/rebase.sh
+    - apt-get update && apt-get install -y python3-pip
+    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
+    - python -m pip install -r tests/requirements.txt
+    - ./tests/scripts/vagrant_clean.sh
+  script:
+    - ./tests/scripts/molecule_run.sh
+
+.vagrant:
+  extends: .testcases
+  variables:
+    CI_PLATFORM: "vagrant"
+    SSH_USER: "kubespray"
+    VAGRANT_DEFAULT_PROVIDER: "libvirt"
+    KUBESPRAY_VAGRANT_CONFIG: tests/files/${CI_JOB_NAME}.rb
+  tags: [c3.small.x86]
+  only: [/^pr-.*$/]
+  except: ['triggers']
+  image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
+  services: []
+  before_script:
+    - apt-get update && apt-get install -y python3-pip
+    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
+    - python -m pip install -r tests/requirements.txt
+    - ./tests/scripts/vagrant_clean.sh
+  script:
+    - vagrant up
+  after_script:
+    - vagrant destroy --force
+
+vagrant_ubuntu18-flannel:
+  stage: deploy-part2
+  extends: .vagrant
+  when: on_success
+
+vagrant_ubuntu18-weave-medium:
+  stage: deploy-part2
+  extends: .vagrant
+  when: manual
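A rough local equivalent of the `.vagrant` job above, assuming a libvirt-capable host; the config file name just follows the `tests/files/${CI_JOB_NAME}.rb` pattern from the job definition and is an example:

```ShellSession
# Mirror the variables the CI job sets before calling vagrant
export VAGRANT_DEFAULT_PROVIDER=libvirt
export KUBESPRAY_VAGRANT_CONFIG=tests/files/vagrant_ubuntu18-flannel.rb   # example job config

python -m pip install -r tests/requirements.txt
vagrant up
vagrant destroy --force   # cleanup, as in after_script
```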
.markdownlint.yaml (new file, 2 lines)

@@ -0,0 +1,2 @@
+---
+MD013: false
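With MD013 (line length) disabled by this config, the same check the `markdownlint` CI job above runs can be reproduced locally; a sketch:

```ShellSession
npm install -g markdownlint-cli
markdownlint README.md docs --ignore docs/_sidebar.md
```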
CONTRIBUTING.md

@@ -2,10 +2,30 @@
 
 ## How to become a contributor and submit your own code
 
+### Environment setup
+
+It is recommended to use filter to manage the GitHub email notification, see [examples for setting filters to Kubernetes Github notifications](https://github.com/kubernetes/community/blob/master/communication/best-practices.md#examples-for-setting-filters-to-kubernetes-github-notifications)
+
+To install development dependencies you can use `pip install -r tests/requirements.txt`
+
+#### Linting
+
+Kubespray uses `yamllint` and `ansible-lint`. To run them locally use `yamllint .` and `./tests/scripts/ansible-lint.sh`
+
+#### Molecule
+
+[molecule](https://github.com/ansible-community/molecule) is designed to help the development and testing of Ansible roles. In Kubespray you can run it all for all roles with `./tests/scripts/molecule_run.sh` or for a specific role (that you are working with) with `molecule test` from the role directory (`cd roles/my-role`).
+
+When developing or debugging a role it can be useful to run `molecule create` and `molecule converge` separately. Then you can use `molecule login` to SSH into the test environment.
+
+#### Vagrant
+
+Vagrant with VirtualBox or libvirt driver helps you to quickly spin test clusters to test things end to end. See [README.md#vagrant](README.md)
+
 ### Contributing A Patch
 
 1. Submit an issue describing your proposed change to the repo in question.
 2. The [repo owners](OWNERS) will respond to your issue promptly.
 3. Fork the desired repo, develop and test your code changes.
-4. Sign the CNCF CLA (https://git.k8s.io/community/CLA.md#the-contributor-license-agreement)
+4. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
 5. Submit a pull request.
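A possible local workflow for the linting and Molecule steps described in the CONTRIBUTING.md additions above; the role path is a placeholder:

```ShellSession
# Development dependencies
pip install -r tests/requirements.txt

# Linting, as described above
yamllint .
./tests/scripts/ansible-lint.sh

# Molecule: iterate on a single role
cd roles/my-role     # placeholder role path
molecule create      # bring up the test instance(s)
molecule converge    # apply the role
molecule login       # SSH in for debugging
molecule test        # full test sequence, including destroy
```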
Dockerfile

@@ -4,7 +4,7 @@ RUN mkdir /kubespray
 WORKDIR /kubespray
 RUN apt update -y && \
     apt install -y \
-    libssl-dev python3-dev sshpass apt-transport-https jq \
+    libssl-dev python3-dev sshpass apt-transport-https jq moreutils \
     ca-certificates curl gnupg2 software-properties-common python3-pip rsync
 RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
     add-apt-repository \
@@ -13,6 +13,9 @@ RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - &&
     stable" \
     && apt update -y && apt-get install docker-ce -y
 COPY . .
-RUN /usr/bin/python3 -m pip install pip -U && /usr/bin/python3 -m pip install -r tests/requirements.txt && python3 -m pip install -r requirements.txt
-RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.14.4/bin/linux/amd64/kubectl \
+RUN /usr/bin/python3 -m pip install pip -U && /usr/bin/python3 -m pip install -r tests/requirements.txt && python3 -m pip install -r requirements.txt && update-alternatives --install /usr/bin/python python /usr/bin/python3 1
+RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.17.5/bin/linux/amd64/kubectl \
     && chmod a+x kubectl && cp kubectl /usr/local/bin/kubectl
+
+# Some tools like yamllint need this
+ENV LANG=C.UTF-8
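A hedged example of building and entering the image this Dockerfile produces; the tag and volume mount are illustrative, not part of the repository:

```ShellSession
# The repository is copied into /kubespray inside the image (COPY . . with WORKDIR /kubespray)
docker build -t kubespray-ci .
docker run --rm -it \
  -v "$(pwd)/inventory:/kubespray/inventory" \
  kubespray-ci bash
```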
Makefile

@@ -1,5 +1,5 @@
 mitogen:
-    ansible-playbook -c local mitogen.yaml -vv
+    ansible-playbook -c local mitogen.yml -vv
 clean:
     rm -rf dist/
     rm *.retry

OWNERS_ALIASES

@@ -4,18 +4,16 @@ aliases:
     - mattymo
     - atoms
     - chadswen
-    - rsmitty
-    - bogdando
-    - bradbeam
-    - woopstar
+    - mirwan
+    - miouge1
     - riverzhang
-    - holser
-    - smana
     - verwilst
+    - woopstar
+    - luckysb
   kubespray-reviewers:
     - jjungnickel
     - archifleks
-    - chapsuk
-    - mirwan
-    - miouge1
     - holmsten
+    - bozzo
+    - floryut
+    - eppo

README.md

@@ -1,19 +1,17 @@
+# Deploy a Production Ready Kubernetes Cluster
+
 ![Kubernetes Logo](https://raw.githubusercontent.com/kubernetes-sigs/kubespray/master/docs/img/kubernetes-logo.png)
 
-Deploy a Production Ready Kubernetes Cluster
-============================================
-
-If you have questions, check the [documentation](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
+If you have questions, check the documentation at [kubespray.io](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
 You can get your invite [here](http://slack.k8s.io/)
 
 - Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere, Packet (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal**
 - **Highly available** cluster
 - **Composable** (Choice of the network plugin for instance)
 - Supports most popular **Linux distributions**
 - **Continuous integration tests**
 
-Quick Start
------------
+## Quick Start
 
 To deploy the cluster you can use :
 
@@ -21,31 +19,35 @@ To deploy the cluster you can use :
 
 #### Usage
 
-# Install dependencies from ``requirements.txt``
-sudo pip install -r requirements.txt
+```ShellSession
+# Install dependencies from ``requirements.txt``
+sudo pip3 install -r requirements.txt
 
 # Copy ``inventory/sample`` as ``inventory/mycluster``
 cp -rfp inventory/sample inventory/mycluster
 
 # Update Ansible inventory file with inventory builder
 declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
-CONFIG_FILE=inventory/mycluster/hosts.yml python3 contrib/inventory_builder/inventory.py ${IPS[@]}
+CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py ${IPS[@]}
 
 # Review and change parameters under ``inventory/mycluster/group_vars``
 cat inventory/mycluster/group_vars/all/all.yml
 cat inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml
 
 # Deploy Kubespray with Ansible Playbook - run the playbook as root
 # The option `--become` is required, as for example writing SSL keys in /etc/,
 # installing packages and interacting with various systemd daemons.
 # Without --become the playbook will fail to run!
-ansible-playbook -i inventory/mycluster/hosts.yml --become --become-user=root cluster.yml
+ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
+```
 
 Note: When Ansible is already installed via system packages on the control machine, other python packages installed via `sudo pip install -r requirements.txt` will go to a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on Ubuntu).
 As a consequence, `ansible-playbook` command will fail with:
-```
+```raw
 ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path.
 ```
 
 probably pointing on a task depending on a module present in requirements.txt (i.e. "unseal vault").
 
 One way of solving this would be to uninstall the Ansible package and then, to install it via pip but it is not always possible.
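A sketch of the environment-variable workaround referred to in the section that follows; the site-packages paths are placeholders that depend on your Python version and distribution:

```ShellSession
# Point Ansible at the modules installed by `pip install -r requirements.txt`
export ANSIBLE_LIBRARY=/usr/local/lib/python2.7/dist-packages/ansible/modules
export ANSIBLE_MODULE_UTILS=/usr/local/lib/python2.7/dist-packages/ansible/module_utils
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
```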
@@ -56,155 +58,156 @@ A workaround consists of setting `ANSIBLE_LIBRARY` and `ANSIBLE_MODULE_UTILS` en
|
|||||||
For Vagrant we need to install python dependencies for provisioning tasks.
|
For Vagrant we need to install python dependencies for provisioning tasks.
|
||||||
Check if Python and pip are installed:
|
Check if Python and pip are installed:
|
||||||
|
|
||||||
python -V && pip -V
|
```ShellSession
|
||||||
|
python -V && pip -V
|
||||||
|
```
|
||||||
|
|
||||||
If this returns the version of the software, you're good to go. If not, download and install Python from here <https://www.python.org/downloads/source/>
|
If this returns the version of the software, you're good to go. If not, download and install Python from here <https://www.python.org/downloads/source/>
|
||||||
Install the necessary requirements
|
Install the necessary requirements
|
||||||
|
|
||||||
sudo pip install -r requirements.txt
|
```ShellSession
|
||||||
vagrant up
|
sudo pip install -r requirements.txt
|
||||||
|
vagrant up
|
||||||
|
```
|
||||||
|
|
||||||
## Documents

- [Requirements](#requirements)
- [Kubespray vs ...](docs/comparisons.md)
- [Getting started](docs/getting-started.md)
- [Ansible inventory and tags](docs/ansible.md)
- [Integration with existing ansible repo](docs/integration.md)
- [Deployment data variables](docs/vars.md)
- [DNS stack](docs/dns-stack.md)
- [HA mode](docs/ha-mode.md)
- [Network plugins](#network-plugins)
- [Vagrant install](docs/vagrant.md)
- [CoreOS bootstrap](docs/coreos.md)
- [Fedora CoreOS bootstrap](docs/fcos.md)
- [Debian Jessie setup](docs/debian.md)
- [openSUSE setup](docs/opensuse.md)
- [Downloaded artifacts](docs/downloads.md)
- [Cloud providers](docs/cloud.md)
- [OpenStack](docs/openstack.md)
- [AWS](docs/aws.md)
- [Azure](docs/azure.md)
- [vSphere](docs/vsphere.md)
- [Packet Host](docs/packet.md)
- [Large deployments](docs/large-deployments.md)
- [Adding/replacing a node](docs/nodes.md)
- [Upgrades basics](docs/upgrades.md)
- [Roadmap](docs/roadmap.md)

## Supported Linux Distributions

- **Container Linux by CoreOS**
- **Debian** Buster, Jessie, Stretch, Wheezy
- **Ubuntu** 16.04, 18.04
- **CentOS/RHEL** 7, 8 (experimental: see [centos 8 notes](docs/centos8.md))
- **Fedora** 30, 31
- **Fedora CoreOS** (experimental: see [fcos Note](docs/fcos.md))
- **openSUSE** Leap 42.3/Tumbleweed
- **Oracle Linux** 7

Note: Upstart/SysV init based OS types are not supported.

## Supported Components

- Core
  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.17.5
  - [etcd](https://github.com/coreos/etcd) v3.3.12
  - [docker](https://www.docker.com/) v18.06 (see note)
  - [containerd](https://containerd.io/) v1.2.13
  - [cri-o](http://cri-o.io/) v1.17 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
- Network Plugin
  - [cni-plugins](https://github.com/containernetworking/plugins) v0.8.5
  - [calico](https://github.com/projectcalico/calico) v3.13.2
  - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
  - [cilium](https://github.com/cilium/cilium) v1.7.2
  - [contiv](https://github.com/contiv/install) v1.2.1
  - [flanneld](https://github.com/coreos/flannel) v0.12.0
  - [kube-router](https://github.com/cloudnativelabs/kube-router) v0.4.0
  - [multus](https://github.com/intel/multus-cni) v3.4.1
  - [weave](https://github.com/weaveworks/weave) v2.6.2
- Application
  - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
  - [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
  - [cert-manager](https://github.com/jetstack/cert-manager) v0.11.1
  - [coredns](https://github.com/coredns/coredns) v1.6.5
  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.30.0

Note: The list of validated [docker versions](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.16.md) was updated to 1.13.1, 17.03, 17.06, 17.09, 18.06, 18.09. kubeadm now properly recognizes Docker 18.09.0 and newer, but still treats 18.06 as the default supported version. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. the yum versionlock plugin or apt pin.

## Requirements

- **Minimum required version of Kubernetes is v1.15**
- **Ansible v2.9+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/downloads.md#offline-environment))
- The target servers are configured to allow **IPv4 forwarding**.
- **Your ssh key must be copied** to all the servers that are part of your inventory.
- The **firewalls are not managed**: you'll need to implement your own rules the way you used to.
  In order to avoid any issues during deployment you should disable your firewall.
- If kubespray is run from a non-root user account, the correct privilege escalation method
  should be configured on the target servers. Then the `ansible_become` flag
  or command parameters `--become or -b` should be specified (see the example below).
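For instance (an illustrative invocation; the inventory path is an assumption based on the sample inventory shipped with the repo):

```ShellSession
# Deploy as a non-root user, escalating privileges on the target hosts
ansible-playbook -i inventory/sample/hosts.yaml --become --become-user=root cluster.yml
```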
Hardware:
These limits are safeguarded by Kubespray. Actual requirements for your workload can differ. For a sizing guide go to the [Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/#size-of-master-and-master-components) guide.

- Master
  - Memory: 1500 MB
- Node
  - Memory: 1024 MB

## Network Plugins

You can choose between 10 network plugins. (default: `calico`, except Vagrant uses `flannel`)

- [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.

- [Calico](https://docs.projectcalico.org/latest/introduction/) is a networking and network policy provider. Calico supports a flexible set of networking options
  designed to give you the most efficient networking across a range of situations, including non-overlay
  and overlay networks, with or without BGP. Calico uses the same engine to enforce network policy for hosts,
  pods, and (if using Istio and Envoy) applications at the service mesh layer.

- [canal](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.

- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.

- [contiv](docs/contiv.md): supports vlan, vxlan, bgp and Cisco SDN networking. This plugin is able to
  apply firewall policies, segregate containers in multiple networks and bridge pods onto physical networks.

- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
  (Please refer to `weave` [troubleshooting documentation](https://www.weave.works/docs/net/latest/troubleshooting/)).

- [kube-ovn](docs/kube-ovn.md): Kube-OVN integrates the OVN-based Network Virtualization with Kubernetes. It offers an advanced Container Network Fabric for Enterprises.

- [kube-router](docs/kube-router.md): Kube-router is a L3 CNI for Kubernetes networking aiming to provide operational
  simplicity and high performance: it uses IPVS to provide Kube Services Proxy (if set up to replace kube-proxy),
  iptables for network policies, and BGP for pods L3 networking (with optional BGP peering with out-of-cluster BGP peers).
  It can also optionally advertise routes to Kubernetes cluster Pods CIDRs, ClusterIPs, ExternalIPs and LoadBalancerIPs.

- [macvlan](docs/macvlan.md): Macvlan is a Linux network driver. Pods have their own unique Mac and Ip address, connected directly to the physical (layer 2) network.

- [multus](docs/multus.md): Multus is a meta CNI plugin that provides multiple network interface support to pods. For each interface Multus delegates CNI calls to secondary CNI plugins such as Calico, macvlan, etc.

The choice is defined with the variable `kube_network_plugin`. There is also an
option to leverage built-in cloud provider networking instead.
See also [Network checker](docs/netcheck.md).
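As a hedged sketch (the inventory path is an assumption; in practice this variable is usually set in the inventory group_vars), the plugin can be overridden at run time with an extra var:

```ShellSession
# Deploy with a specific network plugin instead of the default calico
ansible-playbook -i inventory/sample/hosts.yaml cluster.yml --become -e kube_network_plugin=flannel
```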
## Community docs and resources

- [kubernetes.io/docs/setup/production-environment/tools/kubespray/](https://kubernetes.io/docs/setup/production-environment/tools/kubespray/)
- [kubespray, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr
- [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty
- [Deploy a Kubernetes Cluster with Kubespray (video)](https://www.youtube.com/watch?v=CJ5G4GpqDy0)

## Tools and projects on top of Kubespray

- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/v4/doc/integrations/ansible.rst)
- [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform)

## CI Tests

[](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/pipelines)

RELEASE.md
@@ -3,38 +3,46 @@

The Kubespray Project is released on an as-needed basis. The process is as follows:

1. An issue proposes a new release with a changelog since the last release
2. At least one of the [approvers](OWNERS_ALIASES) must approve this release
3. The `kube_version_min_required` variable is set to `n-1`
4. Remove hashes for [EOL versions](https://github.com/kubernetes/sig-release/blob/master/releases/patch-releases.md) of kubernetes from `*_checksums` variables.
5. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes
6. An approver creates a release branch in the form `release-X.Y`
7. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) docker images are built and tagged
8. The `KUBESPRAY_VERSION` variable is updated in `.gitlab-ci.yml`
9. The release issue is closed
10. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
11. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`

## Major/minor releases and milestones

* For major releases (vX.Y) Kubespray maintains one branch (`release-X.Y`). Minor releases (vX.Y.Z) are available only as tags.

* Security patches and bugs might be backported.

* Fixes for major releases (vX.Y) and minor releases (vX.Y.Z) are delivered
  via maintenance releases (vX.Y.Z) and assigned to the corresponding open
  [GitHub milestone](https://github.com/kubernetes-sigs/kubespray/milestones).
  That milestone remains open for the major/minor releases support lifetime,
  which ends once the milestone is closed. Then only a next major or minor release
  can be done.

* Kubespray major and minor releases are bound to the given `kube_version` major/minor
  version numbers and other components' arbitrary versions, like etcd or network plugins.
  Older or newer component versions are not supported and not tested for the given
  release (even if included in the checksum variables, like `kubeadm_checksums`).

* There are no unstable releases and no APIs, thus Kubespray doesn't follow
  [semver](https://semver.org/). Every version describes only a stable release.
  Breaking changes, if any are introduced by changed defaults or non-contrib ansible roles'
  playbooks, shall be described in the release notes. Other breaking changes, if any in
  the contributed addons or bound versions of Kubernetes and other components, are
  considered out of Kubespray scope and are up to the components' teams to deal with and
  document.

* Minor releases can change components' versions, but not the major `kube_version`.
  Greater `kube_version` requires a new major or minor release. For example, if Kubespray v2.0.0
  is bound to `kube_version: 1.4.x`, `calico_version: 0.22.0`, `etcd_version: v3.0.6`,
  then Kubespray v2.1.0 may be bound to only minor changes to `kube_version`, like v1.5.1
  and *any* changes to other components, like etcd v4, or calico 1.2.3.
  And Kubespray v3.x.x shall be bound to `kube_version: 2.x.x` respectively.

@@ -1,13 +1,13 @@

# Defined below are the security contacts for this repo.
#
# They are the contact point for the Product Security Committee to reach out
# to for triaging and handling of incoming issues.
#
# The below names agree to abide by the
# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy)
# and will be removed and replaced if they violate that agreement.
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://kubernetes.io/security/
atoms
mattymo

Vagrantfile
@@ -7,63 +7,72 @@ require 'fileutils'

Vagrant.require_version ">= 2.0.0"

CONFIG = File.join(File.dirname(__FILE__), ENV['KUBESPRAY_VAGRANT_CONFIG'] || 'vagrant/config.rb')

COREOS_URL_TEMPLATE = "https://storage.googleapis.com/%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant.json"
FLATCAR_URL_TEMPLATE = "https://%s.release.flatcar-linux.net/amd64-usr/current/flatcar_production_vagrant.json"

# Uniq disk UUID for libvirt
DISK_UUID = Time.now.utc.to_i

SUPPORTED_OS = {
  "coreos-stable" => {box: "coreos-stable", user: "core", box_url: COREOS_URL_TEMPLATE % ["stable"]},
  "coreos-alpha" => {box: "coreos-alpha", user: "core", box_url: COREOS_URL_TEMPLATE % ["alpha"]},
  "coreos-beta" => {box: "coreos-beta", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
  "flatcar-stable" => {box: "flatcar-stable", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["stable"]},
  "flatcar-beta" => {box: "flatcar-beta", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["beta"]},
  "flatcar-alpha" => {box: "flatcar-alpha", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["alpha"]},
  "flatcar-edge" => {box: "flatcar-edge", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["edge"]},
  "ubuntu1604" => {box: "generic/ubuntu1604", user: "vagrant"},
  "ubuntu1804" => {box: "generic/ubuntu1804", user: "vagrant"},
  "ubuntu2004" => {box: "geerlingguy/ubuntu2004", user: "vagrant"},
  "centos" => {box: "centos/7", user: "vagrant"},
  "centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
  "centos8" => {box: "centos/8", user: "vagrant"},
  "centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
  "fedora30" => {box: "fedora/30-cloud-base", user: "vagrant"},
  "fedora31" => {box: "fedora/31-cloud-base", user: "vagrant"},
  "opensuse" => {box: "bento/opensuse-leap-15.1", user: "vagrant"},
  "opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
  "oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
}

if File.exist?(CONFIG)
  require CONFIG
end

# Defaults for config options defined in CONFIG
$num_instances ||= 3
$instance_name_prefix ||= "k8s"
$vm_gui ||= false
$vm_memory ||= 2048
$vm_cpus ||= 1
$shared_folders ||= {}
$forwarded_ports ||= {}
$subnet ||= "172.18.8"
$os ||= "ubuntu1804"
$network_plugin ||= "flannel"
# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
$multi_networking ||= false
# The first three nodes are etcd servers
$etcd_instances ||= $num_instances
# The first two nodes are kube masters
$kube_master_instances ||= $num_instances == 1 ? $num_instances : ($num_instances - 1)
# All nodes are kube nodes
$kube_node_instances ||= $num_instances
# The following only works when using the libvirt provider
$kube_node_instances_with_disks ||= false
$kube_node_instances_with_disks_size ||= "20G"
$kube_node_instances_with_disks_number ||= 2
$override_disk_size ||= false
$disk_size ||= "20GB"
$local_path_provisioner_enabled ||= false
$local_path_provisioner_claim_root ||= "/opt/local-path-provisioner/"

$playbook = "cluster.yml"

host_vars = {}

$box = SUPPORTED_OS[$os][:box]
# if $inventory is not set, try to use example
$inventory = "inventory/sample" if ! $inventory
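As a sketch of the new `KUBESPRAY_VAGRANT_CONFIG` hook above (the config file name is purely illustrative), an alternative Vagrant config can be selected per invocation:

```ShellSession
# Point the Vagrantfile at an alternative config.rb, relative to the repo root
KUBESPRAY_VAGRANT_CONFIG=vagrant/custom-config.rb vagrant up
```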
@@ -206,7 +215,7 @@ Vagrant.configure("2") do |config|

      ansible.inventory_path = $ansible_inventory_path
    end
    ansible.become = true
    ansible.limit = "all,localhost"
    ansible.host_key_checking = false
    ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache", "-e ansible_become_pass=vagrant"]
    ansible.host_vars = host_vars

@@ -11,11 +11,13 @@ host_key_checking=False

gathering = smart
fact_caching = jsonfile
fact_caching_connection = /tmp
fact_caching_timeout = 7200
stdout_callback = default
display_skipped_hosts = no
library = ./library
callback_whitelist = profile_tasks
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
deprecation_warnings=False
inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds, .gpg
[inventory]
ignore_patterns = artifacts, credentials

ansible_version.yml (new file)
@@ -0,0 +1,15 @@

---
- hosts: localhost
  gather_facts: false
  become: no
  vars:
    minimal_ansible_version: 2.8.0
    ansible_connection: local
  tasks:
    - name: "Check ansible version >={{ minimal_ansible_version }}"
      assert:
        msg: "Ansible must be {{ minimal_ansible_version }} or higher"
        that:
          - ansible_version.string is version(minimal_ansible_version, ">=")
      tags:
        - check
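This check is imported by the main playbooks (see cluster.yml below), but, as a non-authoritative illustration, it can also be run on its own against the implicit localhost inventory:

```ShellSession
ansible-playbook ansible_version.yml
```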
cluster.yml
@@ -1,44 +1,53 @@

---
- name: Check ansible version
  import_playbook: ansible_version.yml

- hosts: all
  gather_facts: false
  tasks:
    - name: "Set up proxy environment"
      set_fact:
        proxy_env:
          http_proxy: "{{ http_proxy | default ('') }}"
          HTTP_PROXY: "{{ http_proxy | default ('') }}"
          https_proxy: "{{ https_proxy | default ('') }}"
          HTTPS_PROXY: "{{ https_proxy | default ('') }}"
          no_proxy: "{{ no_proxy | default ('') }}"
          NO_PROXY: "{{ no_proxy | default ('') }}"
      no_log: true

- hosts: bastion[0]
  gather_facts: False
  roles:
    - { role: kubespray-defaults }
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }

- hosts: k8s-cluster:etcd
  strategy: linear
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  gather_facts: false
  roles:
    - { role: kubespray-defaults }
    - { role: bootstrap-os, tags: bootstrap-os}

- name: Gather facts
  import_playbook: facts.yml

- hosts: k8s-cluster:etcd
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, tags: preinstall }
    - { role: "container-engine", tags: "container-engine", when: deploy_container_engine|default(true) }
    - { role: download, tags: download, when: "not skip_downloads" }
  environment: "{{ proxy_env }}"

- hosts: etcd
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults }
    - role: etcd
      tags: etcd
      vars:
@@ -47,9 +56,10 @@

      when: not etcd_kubeadm_enabled| default(false)

- hosts: k8s-cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults }
    - role: etcd
      tags: etcd
      vars:
@@ -58,58 +68,68 @@

      when: not etcd_kubeadm_enabled| default(false)

- hosts: k8s-cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/node, tags: node }
  environment: "{{ proxy_env }}"

- hosts: kube-master
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/master, tags: master }
    - { role: kubernetes/client, tags: client }
    - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }

- hosts: k8s-cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/kubeadm, tags: kubeadm}
    - { role: network_plugin, tags: network }
    - { role: kubernetes/node-label, tags: node-label }

- hosts: calico-rr
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults }
    - { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }

- hosts: kube-master[0]
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
    - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }

- hosts: kube-master
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
    - { role: kubernetes-apps/network_plugin, tags: network }
    - { role: kubernetes-apps/policy_controller, tags: policy-controller }
    - { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
    - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }

- hosts: kube-master
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes-apps, tags: apps }
  environment: "{{ proxy_env }}"

- hosts: k8s-cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
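The `proxy_env` fact above is built from the `http_proxy`/`https_proxy`/`no_proxy` variables, which are normally defined in the inventory group_vars; as a minimal sketch only (values and inventory path are assumptions), they could also be passed as extra vars:

```ShellSession
# Illustrative proxy settings for a cluster deployed behind an HTTP proxy
ansible-playbook -i inventory/sample/hosts.yaml cluster.yml --become \
  -e http_proxy=http://10.0.0.1:3128 -e https_proxy=http://10.0.0.1:3128 -e no_proxy=localhost,127.0.0.1
```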
@@ -15,8 +15,9 @@ Resource Group. It will not install Kubernetes itself, this has to be done in a

## Configuration through group_vars/all

You have to modify at least two variables in group_vars/all. The first is the **cluster_name** variable; it must be globally
unique due to some restrictions in Azure. The other is the **ssh_public_keys** variable; it must be your ssh public
key to access your Azure virtual machines. Most other variables should be self explanatory if you have some basic Kubernetes
experience.

## Bastion host
@@ -59,6 +60,6 @@ It will create the file ./inventory which can then be used with kubespray, e.g.:

```shell
$ cd kubespray-root-dir
$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all/all.yml" cluster.yml
```

@@ -11,9 +11,9 @@ fi

ansible-playbook generate-templates.yml

az deployment group create --template-file ./.generated/network.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
@@ -7,7 +7,7 @@ cluster_name: example

# node that can be used to access the masters and minions
use_bastion: false

# Set this to a preferred name that will be used as the first part of the dns name for your bastion host. For example: k8s-bastion.<azureregion>.cloudapp.azure.com.
# This is convenient when exceptions have to be configured on a firewall to allow ssh to the given bastion host.
# bastion_domain_prefix: k8s-bastion

@@ -20,6 +20,8 @@

# Add range of hosts: inventory.py 10.10.1.3-10.10.1.5
# Add hosts with different ip and access ip:
# inventory.py 10.0.0.1,192.168.10.1 10.0.0.2,192.168.10.2 10.0.0.3,192.168.1.3
# Add hosts with a specific hostname, ip, and optional access ip:
# inventory.py first,10.0.0.1,192.168.10.1 second,10.0.0.2 last,10.0.0.3
# Delete a host: inventory.py -10.10.1.3
# Delete a host by id: inventory.py -node1
#
@@ -44,7 +46,8 @@ import sys
|
|||||||
ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster',
         'calico-rr']
PROTECTED_NAMES = ROLES
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames',
                      'load']
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
                   '0': False, 'no': False, 'false': False, 'off': False}
yaml = YAML()
@@ -78,8 +81,8 @@ class KubesprayInventory(object):
|
|||||||
if self.config_file:
|
if self.config_file:
|
||||||
try:
|
try:
|
||||||
self.hosts_file = open(config_file, 'r')
|
self.hosts_file = open(config_file, 'r')
|
||||||
self.yaml_config = yaml.load(self.hosts_file)
|
self.yaml_config = yaml.load_all(self.hosts_file)
|
||||||
except FileNotFoundError:
|
except OSError:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS:
|
if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS:
|
||||||
@@ -194,8 +197,21 @@ class KubesprayInventory(object):
|
|||||||
'ip': ip,
|
'ip': ip,
|
||||||
'access_ip': access_ip}
|
'access_ip': access_ip}
|
||||||
elif host[0].isalpha():
|
elif host[0].isalpha():
|
||||||
raise Exception("Adding hosts by hostname is not supported.")
|
if ',' in host:
|
||||||
|
try:
|
||||||
|
hostname, ip, access_ip = host.split(',')
|
||||||
|
except Exception:
|
||||||
|
hostname, ip = host.split(',')
|
||||||
|
access_ip = ip
|
||||||
|
if self.exists_hostname(all_hosts, host):
|
||||||
|
self.debug("Skipping existing host {0}.".format(host))
|
||||||
|
continue
|
||||||
|
elif self.exists_ip(all_hosts, ip):
|
||||||
|
self.debug("Skipping existing host {0}.".format(ip))
|
||||||
|
continue
|
||||||
|
all_hosts[hostname] = {'ansible_host': access_ip,
|
||||||
|
'ip': ip,
|
||||||
|
'access_ip': access_ip}
|
||||||
return all_hosts
|
return all_hosts
|
||||||
|
|
||||||
def range2ips(self, hosts):
|
def range2ips(self, hosts):
|
||||||
@@ -206,10 +222,10 @@ class KubesprayInventory(object):
|
|||||||
# Python 3.x
|
# Python 3.x
|
||||||
start = int(ip_address(start_address))
|
start = int(ip_address(start_address))
|
||||||
end = int(ip_address(end_address))
|
end = int(ip_address(end_address))
|
||||||
except:
|
except Exception:
|
||||||
# Python 2.7
|
# Python 2.7
|
||||||
start = int(ip_address(unicode(start_address)))
|
start = int(ip_address(str(start_address)))
|
||||||
end = int(ip_address(unicode(end_address)))
|
end = int(ip_address(str(end_address)))
|
||||||
return [ip_address(ip).exploded for ip in range(start, end + 1)]
|
return [ip_address(ip).exploded for ip in range(start, end + 1)]
|
||||||
|
|
||||||
for host in hosts:
|
for host in hosts:
|
||||||
@@ -348,6 +364,8 @@ class KubesprayInventory(object):
|
|||||||
self.print_config()
|
self.print_config()
|
||||||
elif command == 'print_ips':
|
elif command == 'print_ips':
|
||||||
self.print_ips()
|
self.print_ips()
|
||||||
|
elif command == 'print_hostnames':
|
||||||
|
self.print_hostnames()
|
||||||
elif command == 'load':
|
elif command == 'load':
|
||||||
self.load_file(args)
|
self.load_file(args)
|
||||||
else:
|
else:
|
||||||
@@ -361,11 +379,13 @@ Available commands:
|
|||||||
help - Display this message
|
help - Display this message
|
||||||
print_cfg - Write inventory file to stdout
|
print_cfg - Write inventory file to stdout
|
||||||
print_ips - Write a space-delimited list of IPs from "all" group
|
print_ips - Write a space-delimited list of IPs from "all" group
|
||||||
|
print_hostnames - Write a space-delimited list of Hostnames from "all" group
|
||||||
|
|
||||||
Advanced usage:
|
Advanced usage:
|
||||||
Add another host after initial creation: inventory.py 10.10.1.5
|
Add another host after initial creation: inventory.py 10.10.1.5
|
||||||
Add range of hosts: inventory.py 10.10.1.3-10.10.1.5
|
Add range of hosts: inventory.py 10.10.1.3-10.10.1.5
|
||||||
Add hosts with different ip and access ip: inventory.py 10.0.0.1,192.168.10.1 10.0.0.2,192.168.10.2 10.0.0.3,192.168.10.3
|
Add hosts with different ip and access ip: inventory.py 10.0.0.1,192.168.10.1 10.0.0.2,192.168.10.2 10.0.0.3,192.168.10.3
|
||||||
|
Add hosts with a specific hostname, ip, and optional access ip: first,10.0.0.1,192.168.10.1 second,10.0.0.2 last,10.0.0.3
|
||||||
Delete a host: inventory.py -10.10.1.3
|
Delete a host: inventory.py -10.10.1.3
|
||||||
Delete a host by id: inventory.py -node1
|
Delete a host by id: inventory.py -node1
|
||||||
|
|
||||||
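As a hedged usage sketch of the new `print_hostnames` command (the `CONFIG_FILE` value and inventory file name are assumptions; adapt them to your inventory layout):

```ShellSession
# List the hostnames known to an inventory previously generated by the builder
CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py print_hostnames
```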
@@ -381,6 +401,9 @@ MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
|
|||||||
def print_config(self):
|
def print_config(self):
|
||||||
yaml.dump(self.yaml_config, sys.stdout)
|
yaml.dump(self.yaml_config, sys.stdout)
|
||||||
|
|
||||||
|
def print_hostnames(self):
|
||||||
|
print(' '.join(self.yaml_config['all']['hosts'].keys()))
|
||||||
|
|
||||||
def print_ips(self):
|
def print_ips(self):
|
||||||
ips = []
|
ips = []
|
||||||
for host, opts in self.yaml_config['all']['hosts'].items():
|
for host, opts in self.yaml_config['all']['hosts'].items():
|
||||||
|
|||||||
@@ -12,6 +12,7 @@
|
|||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
|
import inventory
|
||||||
import mock
|
import mock
|
||||||
import unittest
|
import unittest
|
||||||
|
|
||||||
@@ -22,7 +23,7 @@ path = "./contrib/inventory_builder/"
|
|||||||
if path not in sys.path:
|
if path not in sys.path:
|
||||||
sys.path.append(path)
|
sys.path.append(path)
|
||||||
|
|
||||||
import inventory
|
import inventory # noqa
|
||||||
|
|
||||||
|
|
||||||
class TestInventory(unittest.TestCase):
|
class TestInventory(unittest.TestCase):
|
||||||
@@ -43,8 +44,8 @@ class TestInventory(unittest.TestCase):
|

    def test_get_ip_from_opts_invalid(self):
        optstring = "notanaddr=value something random!chars:D"
        self.assertRaisesRegex(ValueError, "IP parameter not found",
                               self.inv.get_ip_from_opts, optstring)

    def test_ensure_required_groups(self):
        groups = ['group1', 'group2']
||||||
@@ -63,8 +64,8 @@ class TestInventory(unittest.TestCase):
|

    def test_get_host_id_invalid(self):
        bad_hostnames = ['node', 'no99de', '01node', 'node.111111']
        for hostname in bad_hostnames:
            self.assertRaisesRegex(ValueError, "Host name must end in an",
                                   self.inv.get_host_id, hostname)

    def test_build_hostnames_add_one(self):
        changed_hosts = ['10.90.0.2']
@@ -192,8 +193,8 @@ class TestInventory(unittest.TestCase):
|
|||||||
('node2', {'ansible_host': '10.90.0.3',
|
('node2', {'ansible_host': '10.90.0.3',
|
||||||
'ip': '10.90.0.3',
|
'ip': '10.90.0.3',
|
||||||
'access_ip': '10.90.0.3'})])
|
'access_ip': '10.90.0.3'})])
|
||||||
self.assertRaisesRegexp(ValueError, "Unable to find host",
|
self.assertRaisesRegex(ValueError, "Unable to find host",
|
||||||
self.inv.delete_host_by_ip, existing_hosts, ip)
|
self.inv.delete_host_by_ip, existing_hosts, ip)
|
||||||
|
|
||||||
def test_purge_invalid_hosts(self):
|
def test_purge_invalid_hosts(self):
|
||||||
proper_hostnames = ['node1', 'node2']
|
proper_hostnames = ['node1', 'node2']
|
||||||
@@ -309,8 +310,8 @@ class TestInventory(unittest.TestCase):
|
|||||||

    def test_range2ips_incorrect_range(self):
        host_range = ['10.90.0.4-a.9b.c.e']
        self.assertRaisesRegex(Exception, "Range of ip_addresses isn't valid",
                               self.inv.range2ips, host_range)

    def test_build_hostnames_different_ips_add_one(self):
        changed_hosts = ['10.90.0.2,192.168.0.2']
@@ -1,7 +1,7 @@
|
|||||||
[tox]
|
[tox]
|
||||||
minversion = 1.6
|
minversion = 1.6
|
||||||
skipsdist = True
|
skipsdist = True
|
||||||
envlist = pep8, py27
|
envlist = pep8, py33
|
||||||
|
|
||||||
[testenv]
|
[testenv]
|
||||||
whitelist_externals = py.test
|
whitelist_externals = py.test
|
||||||
|
|||||||
@@ -1,6 +1,12 @@
|

---
- hosts: bastion[0]
  gather_facts: False
  roles:
    - { role: kubespray-defaults}
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}

- hosts: kube-master[0]
  tags:
    - "provision"
  roles:
    - { role: kubespray-defaults}
    - { role: provision }
|||||||
@@ -1,10 +1,12 @@
|

---
metallb:
  ip_range:
    - "10.5.0.50-10.5.0.99"
  protocol: "layer2"
  # additional_address_pools:
  #   kube_service_pool:
  #     ip_range:
  #       - "10.5.1.50-10.5.1.99"
  #     protocol: "layer2"
  #     auto_assign: false
  limits:
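Note that `ip_range` is now a list. These values are normally set in the inventory group_vars for the MetalLB addon; as a non-authoritative sketch only (paths and values assumed, and the addon still has to be enabled in your inventory), the override could also be passed inline:

```ShellSession
# Illustrative only: override the MetalLB address pool as a JSON extra var
ansible-playbook -i inventory/sample/hosts.yaml cluster.yml --become \
  -e '{"metallb": {"ip_range": ["10.5.0.50-10.5.0.99"], "protocol": "layer2"}}'
```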
|||||||
@@ -1,4 +1,25 @@
|

---
- name: "Kubernetes Apps | Check cluster settings for MetalLB"
  fail:
    msg: "MetalLB require kube_proxy_strict_arp = true, see https://github.com/danderson/metallb/issues/153#issuecomment-518651132"
  when:
    - "kube_proxy_mode == 'ipvs' and not kube_proxy_strict_arp"

- name: Kubernetes Apps | Check AppArmor status
  command: which apparmor_parser
  register: apparmor_status
  when:
    - podsecuritypolicy_enabled
    - inventory_hostname == groups['kube-master'][0]
  failed_when: false

- name: Kubernetes Apps | Set apparmor_enabled
  set_fact:
    apparmor_enabled: "{{ apparmor_status.rc == 0 }}"
  when:
    - podsecuritypolicy_enabled
    - inventory_hostname == groups['kube-master'][0]

- name: "Kubernetes Apps | Lay Down MetalLB"
  become: true
  template: { src: "{{ item }}.j2", dest: "{{ kube_config_dir }}/{{ item }}" }
||||||
@@ -6,6 +27,7 @@
|
|||||||
register: "rendering"
|
register: "rendering"
|
||||||
when:
|
when:
|
||||||
- "inventory_hostname == groups['kube-master'][0]"
|
- "inventory_hostname == groups['kube-master'][0]"
|
||||||
|
|
||||||
- name: "Kubernetes Apps | Install and configure MetalLB"
|
- name: "Kubernetes Apps | Install and configure MetalLB"
|
||||||
kube:
|
kube:
|
||||||
name: "MetalLB"
|
name: "MetalLB"
|
||||||
|
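The new pre-flight task above fails the play when kube-proxy runs in IPVS mode without strict ARP, which MetalLB's layer 2 mode relies on. A minimal sketch of the matching inventory settings (variable names taken from the check itself; the file path is an assumption about a typical inventory layout):

```yaml
# group_vars/k8s-cluster/k8s-cluster.yml
kube_proxy_mode: ipvs
kube_proxy_strict_arp: true
```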
@@ -10,12 +10,16 @@ data:
     - name: loadbalanced
       protocol: {{ metallb.protocol }}
      addresses:
-        - {{ metallb.ip_range }}
+        {% for ip_range in metallb.ip_range %}
+        - {{ ip_range }}
+        {% endfor %}
     {% if metallb.additional_address_pools is defined %}{% for pool in metallb.additional_address_pools %}
     - name: {{ pool }}
       protocol: {{ metallb.additional_address_pools[pool].protocol }}
       addresses:
-        - {{ metallb.additional_address_pools[pool].ip_range }}
+        {% for ip_range in metallb.additional_address_pools[pool].ip_range %}
+        - {{ ip_range }}
+        {% endfor %}
       auto-assign: {{ metallb.additional_address_pools[pool].auto_assign }}
     {% endfor %}
     {% endif %}
@@ -50,6 +50,48 @@ rules:
 - apiGroups: [""]
   resources: ["services", "endpoints", "nodes"]
   verbs: ["get", "list", "watch"]
+{% if podsecuritypolicy_enabled %}
+- apiGroups: ["policy"]
+  resourceNames: ["metallb"]
+  resources: ["podsecuritypolicies"]
+  verbs: ["use"]
+---
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: metallb
+  annotations:
+    seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+    seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
+{% if apparmor_enabled %}
+    apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+    apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
+{% endif %}
+  labels:
+    app: metallb
+spec:
+  privileged: true
+  allowPrivilegeEscalation: false
+  allowedCapabilities:
+    - net_raw
+  volumes:
+    - secret
+  hostNetwork: true
+  hostPorts:
+    - min: {{ metallb.port }}
+      max: {{ metallb.port }}
+  hostIPC: false
+  hostPID: false
+  runAsUser:
+    rule: 'RunAsAny'
+  seLinux:
+    rule: 'RunAsAny'
+  supplementalGroups:
+    rule: 'RunAsAny'
+  fsGroup:
+    rule: 'RunAsAny'
+  readOnlyRootFilesystem: true
+{% endif %}
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: Role
@@ -115,7 +157,7 @@ roleRef:
   kind: Role
   name: config-watcher
 ---
-apiVersion: apps/v1beta2
+apiVersion: apps/v1
 kind: DaemonSet
 metadata:
   namespace: metallb-system
@@ -169,7 +211,7 @@ spec:
         - net_raw

 ---
-apiVersion: apps/v1beta2
+apiVersion: apps/v1
 kind: Deployment
 metadata:
   namespace: metallb-system
@@ -1,5 +1,5 @@
 ---
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
   name: kubernetes-dashboard
@@ -21,7 +21,7 @@ You can specify a `default_release` for apt on Debian/Ubuntu by overriding this
 glusterfs_ppa_use: yes
 glusterfs_ppa_version: "3.5"

-For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](http://www.gluster.org/community/documentation/index.php/Getting_started_install) for more info.
+For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](https://docs.gluster.org/en/latest/Quick-Start-Guide/Quickstart/) for more info.

 ## Dependencies

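As the README excerpt notes, the PPA behaviour is driven by two role variables; a short sketch of overriding them for Ubuntu hosts (values mirror the documented defaults shown above):

```yaml
glusterfs_ppa_use: yes
glusterfs_ppa_version: "3.5"
```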
@@ -3,7 +3,7 @@
 - name: Include OS-specific variables.
   include_vars: "{{ ansible_os_family }}.yml"

-# Instal xfs package
+# Install xfs package
 - name: install xfs Debian
   apt: name=xfsprogs state=present
   when: ansible_os_family == "Debian"
@@ -36,7 +36,7 @@
     - "{{ gluster_brick_dir }}"
     - "{{ gluster_mount_dir }}"

-- name: Configure Gluster volume.
+- name: Configure Gluster volume with replicas
   gluster_volume:
     state: present
     name: "{{ gluster_brick_name }}"
@@ -46,6 +46,18 @@
     host: "{{ inventory_hostname }}"
     force: yes
   run_once: true
+  when: groups['gfs-cluster']|length > 1
+
+- name: Configure Gluster volume without replicas
+  gluster_volume:
+    state: present
+    name: "{{ gluster_brick_name }}"
+    brick: "{{ gluster_brick_dir }}"
+    cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
+    host: "{{ inventory_hostname }}"
+    force: yes
+  run_once: true
+  when: groups['gfs-cluster']|length <= 1

 - name: Mount glusterfs to retrieve disk size
   mount:
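The split into "with replicas" and "without replicas" tasks keys off the size of the `gfs-cluster` group, and both build the cluster string from each host's `ip` variable (falling back to the default IPv4 address). A hypothetical inventory sketch showing the shape this expects, with host names and addresses as examples only:

```yaml
all:
  hosts:
    gfs01: { ip: 10.0.0.11 }   # example host/address
    gfs02: { ip: 10.0.0.12 }   # two members -> the replicated-volume task runs
  children:
    gfs-cluster:
      hosts:
        gfs01:
        gfs02:
```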
@@ -1,6 +1,6 @@
 {
   "kind": "DaemonSet",
-  "apiVersion": "extensions/v1beta1",
+  "apiVersion": "apps/v1",
   "metadata": {
     "name": "glusterfs",
     "labels": {
@@ -12,6 +12,11 @@
     }
   },
   "spec": {
+    "selector": {
+      "matchLabels": {
+        "glusterfs-node": "daemonset"
+      }
+    },
     "template": {
       "metadata": {
         "name": "glusterfs",
@@ -30,7 +30,7 @@
   },
   {
     "kind": "Deployment",
-    "apiVersion": "extensions/v1beta1",
+    "apiVersion": "apps/v1",
     "metadata": {
       "name": "deploy-heketi",
       "labels": {
@@ -42,6 +42,11 @@
       }
     },
     "spec": {
+      "selector": {
+        "matchLabels": {
+          "name": "deploy-heketi"
+        }
+      },
       "replicas": 1,
       "template": {
         "metadata": {
@@ -44,7 +44,7 @@
   },
   {
     "kind": "Deployment",
-    "apiVersion": "extensions/v1beta1",
+    "apiVersion": "apps/v1",
     "metadata": {
       "name": "heketi",
       "labels": {
@@ -55,6 +55,11 @@
       }
     },
     "spec": {
+      "selector": {
+        "matchLabels": {
+          "name": "heketi"
+        }
+      },
       "replicas": 1,
       "template": {
         "metadata": {
@@ -16,7 +16,7 @@
     {
       "addresses": [
         {
-          "ip": "{{ hostvars[node]['ansible_facts']['default_ipv4']['address'] }}"
+          "ip": "{{ hostvars[node].ip }}"
         }
       ],
       "ports": [
@@ -12,7 +12,7 @@
         "{{ node }}"
       ],
       "storage": [
-        "{{ hostvars[node]['ansible_facts']['default_ipv4']['address'] }}"
+        "{{ hostvars[node].ip }}"
       ]
     },
     "zone": 1
@@ -10,7 +10,7 @@ This project will create:
 * AWS ELB in the Public Subnet for accessing the Kubernetes API from the internet

 **Requirements**
-- Terraform 0.8.7 or newer
+- Terraform 0.12.0 or newer

 **How to Use:**

@@ -1,5 +1,5 @@
 terraform {
-  required_version = ">= 0.8.7"
+  required_version = ">= 0.12.0"
 }

 provider "aws" {
@@ -16,22 +16,22 @@ data "aws_availability_zones" "available" {}
 */

 module "aws-vpc" {
-  source = "modules/vpc"
+  source = "./modules/vpc"

   aws_cluster_name = "${var.aws_cluster_name}"
   aws_vpc_cidr_block = "${var.aws_vpc_cidr_block}"
-  aws_avail_zones = "${slice(data.aws_availability_zones.available.names,0,2)}"
+  aws_avail_zones = "${slice(data.aws_availability_zones.available.names, 0, 2)}"
   aws_cidr_subnets_private = "${var.aws_cidr_subnets_private}"
   aws_cidr_subnets_public = "${var.aws_cidr_subnets_public}"
   default_tags = "${var.default_tags}"
 }

 module "aws-elb" {
-  source = "modules/elb"
+  source = "./modules/elb"

   aws_cluster_name = "${var.aws_cluster_name}"
   aws_vpc_id = "${module.aws-vpc.aws_vpc_id}"
-  aws_avail_zones = "${slice(data.aws_availability_zones.available.names,0,2)}"
+  aws_avail_zones = "${slice(data.aws_availability_zones.available.names, 0, 2)}"
   aws_subnet_ids_public = "${module.aws-vpc.aws_subnet_ids_public}"
   aws_elb_api_port = "${var.aws_elb_api_port}"
   k8s_secure_api_port = "${var.k8s_secure_api_port}"
@@ -39,7 +39,7 @@ module "aws-elb" {
 }

 module "aws-iam" {
-  source = "modules/iam"
+  source = "./modules/iam"

   aws_cluster_name = "${var.aws_cluster_name}"
 }
@@ -54,18 +54,18 @@ resource "aws_instance" "bastion-server" {
   instance_type = "${var.aws_bastion_size}"
   count = "${length(var.aws_cidr_subnets_public)}"
   associate_public_ip_address = true
-  availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
+  availability_zone = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}"
-  subnet_id = "${element(module.aws-vpc.aws_subnet_ids_public,count.index)}"
+  subnet_id = "${element(module.aws-vpc.aws_subnet_ids_public, count.index)}"

-  vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
+  vpc_security_group_ids = "${module.aws-vpc.aws_security_group}"

   key_name = "${var.AWS_SSH_KEY_NAME}"

   tags = "${merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-bastion-${count.index}",
     "Cluster", "${var.aws_cluster_name}",
     "Role", "bastion-${var.aws_cluster_name}-${count.index}"
   ))}"
 }

 /*
@@ -79,25 +79,25 @@ resource "aws_instance" "k8s-master" {

   count = "${var.aws_kube_master_num}"

-  availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
+  availability_zone = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}"
-  subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
+  subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private, count.index)}"

-  vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
+  vpc_security_group_ids = "${module.aws-vpc.aws_security_group}"

   iam_instance_profile = "${module.aws-iam.kube-master-profile}"
   key_name = "${var.AWS_SSH_KEY_NAME}"

   tags = "${merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-master${count.index}",
     "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
     "Role", "master"
   ))}"
 }

 resource "aws_elb_attachment" "attach_master_nodes" {
   count = "${var.aws_kube_master_num}"
   elb = "${module.aws-elb.aws_elb_api_id}"
-  instance = "${element(aws_instance.k8s-master.*.id,count.index)}"
+  instance = "${element(aws_instance.k8s-master.*.id, count.index)}"
 }

 resource "aws_instance" "k8s-etcd" {
@@ -106,18 +106,18 @@ resource "aws_instance" "k8s-etcd" {

   count = "${var.aws_etcd_num}"

-  availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
+  availability_zone = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}"
-  subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
+  subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private, count.index)}"

-  vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
+  vpc_security_group_ids = "${module.aws-vpc.aws_security_group}"

   key_name = "${var.AWS_SSH_KEY_NAME}"

   tags = "${merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-etcd${count.index}",
     "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
     "Role", "etcd"
   ))}"
 }

 resource "aws_instance" "k8s-worker" {
@@ -126,19 +126,19 @@ resource "aws_instance" "k8s-worker" {

   count = "${var.aws_kube_worker_num}"

-  availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
+  availability_zone = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}"
-  subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
+  subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private, count.index)}"

-  vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
+  vpc_security_group_ids = "${module.aws-vpc.aws_security_group}"

   iam_instance_profile = "${module.aws-iam.kube-worker-profile}"
   key_name = "${var.AWS_SSH_KEY_NAME}"

   tags = "${merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-worker${count.index}",
     "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
     "Role", "worker"
   ))}"
 }

 /*
@@ -148,14 +148,14 @@ resource "aws_instance" "k8s-worker" {
 data "template_file" "inventory" {
   template = "${file("${path.module}/templates/inventory.tpl")}"

-  vars {
+  vars = {
-    public_ip_address_bastion = "${join("\n",formatlist("bastion ansible_host=%s" , aws_instance.bastion-server.*.public_ip))}"
+    public_ip_address_bastion = "${join("\n", formatlist("bastion ansible_host=%s", aws_instance.bastion-server.*.public_ip))}"
-    connection_strings_master = "${join("\n",formatlist("%s ansible_host=%s",aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}"
+    connection_strings_master = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-master.*.private_dns, aws_instance.k8s-master.*.private_ip))}"
-    connection_strings_node = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.tags.Name, aws_instance.k8s-worker.*.private_ip))}"
+    connection_strings_node = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.private_dns, aws_instance.k8s-worker.*.private_ip))}"
-    connection_strings_etcd = "${join("\n",formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.tags.Name, aws_instance.k8s-etcd.*.private_ip))}"
+    connection_strings_etcd = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.private_dns, aws_instance.k8s-etcd.*.private_ip))}"
-    list_master = "${join("\n",aws_instance.k8s-master.*.tags.Name)}"
+    list_master = "${join("\n", aws_instance.k8s-master.*.private_dns)}"
-    list_node = "${join("\n",aws_instance.k8s-worker.*.tags.Name)}"
+    list_node = "${join("\n", aws_instance.k8s-worker.*.private_dns)}"
-    list_etcd = "${join("\n",aws_instance.k8s-etcd.*.tags.Name)}"
+    list_etcd = "${join("\n", aws_instance.k8s-etcd.*.private_dns)}"
     elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
   }
 }
@@ -165,7 +165,7 @@ resource "null_resource" "inventories" {
     command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}"
   }

-  triggers {
+  triggers = {
     template = "${data.template_file.inventory.rendered}"
   }
 }
@@ -3,8 +3,8 @@ resource "aws_security_group" "aws-elb" {
   vpc_id = "${var.aws_vpc_id}"

   tags = "${merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
   ))}"
 }

 resource "aws_security_group_rule" "aws-allow-api-access" {
@@ -28,7 +28,7 @@ resource "aws_security_group_rule" "aws-allow-api-egress" {
 # Create a new AWS ELB for K8S API
 resource "aws_elb" "aws-elb-api" {
   name = "kubernetes-elb-${var.aws_cluster_name}"
-  subnets = ["${var.aws_subnet_ids_public}"]
+  subnets = var.aws_subnet_ids_public
   security_groups = ["${aws_security_group.aws-elb.id}"]

   listener {
@@ -1,7 +1,7 @@
 output "kube-master-profile" {
-  value = "${aws_iam_instance_profile.kube-master.name }"
+  value = "${aws_iam_instance_profile.kube-master.name}"
 }

 output "kube-worker-profile" {
-  value = "${aws_iam_instance_profile.kube-worker.name }"
+  value = "${aws_iam_instance_profile.kube-worker.name}"
 }
@@ -6,8 +6,8 @@ resource "aws_vpc" "cluster-vpc" {
   enable_dns_hostnames = true

   tags = "${merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-vpc"
   ))}"
 }

 resource "aws_eip" "cluster-nat-eip" {
@@ -30,9 +30,9 @@ resource "aws_subnet" "cluster-vpc-subnets-public" {
   cidr_block = "${element(var.aws_cidr_subnets_public, count.index)}"

   tags = "${merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public",
     "kubernetes.io/cluster/${var.aws_cluster_name}", "member"
   ))}"
 }

 resource "aws_nat_gateway" "cluster-nat-gateway" {
@@ -48,8 +48,8 @@ resource "aws_subnet" "cluster-vpc-subnets-private" {
   cidr_block = "${element(var.aws_cidr_subnets_private, count.index)}"

   tags = "${merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
   ))}"
 }

 #Routing in VPC
@@ -65,8 +65,8 @@ resource "aws_route_table" "kubernetes-public" {
   }

   tags = "${merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-routetable-public"
   ))}"
 }

 resource "aws_route_table" "kubernetes-private" {
@@ -79,20 +79,20 @@ resource "aws_route_table" "kubernetes-private" {
   }

   tags = "${merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
   ))}"
 }

 resource "aws_route_table_association" "kubernetes-public" {
   count = "${length(var.aws_cidr_subnets_public)}"
-  subnet_id = "${element(aws_subnet.cluster-vpc-subnets-public.*.id,count.index)}"
+  subnet_id = "${element(aws_subnet.cluster-vpc-subnets-public.*.id, count.index)}"
   route_table_id = "${aws_route_table.kubernetes-public.id}"
 }

 resource "aws_route_table_association" "kubernetes-private" {
   count = "${length(var.aws_cidr_subnets_private)}"
-  subnet_id = "${element(aws_subnet.cluster-vpc-subnets-private.*.id,count.index)}"
+  subnet_id = "${element(aws_subnet.cluster-vpc-subnets-private.*.id, count.index)}"
-  route_table_id = "${element(aws_route_table.kubernetes-private.*.id,count.index)}"
+  route_table_id = "${element(aws_route_table.kubernetes-private.*.id, count.index)}"
 }

 #Kubernetes Security Groups
@@ -102,8 +102,8 @@ resource "aws_security_group" "kubernetes" {
   vpc_id = "${aws_vpc.cluster-vpc.id}"

   tags = "${merge(var.default_tags, map(
     "Name", "kubernetes-${var.aws_cluster_name}-securitygroup"
   ))}"
 }

 resource "aws_security_group_rule" "allow-all-ingress" {
@@ -3,15 +3,15 @@ output "aws_vpc_id" {
 }

 output "aws_subnet_ids_private" {
-  value = ["${aws_subnet.cluster-vpc-subnets-private.*.id}"]
+  value = aws_subnet.cluster-vpc-subnets-private.*.id
 }

 output "aws_subnet_ids_public" {
-  value = ["${aws_subnet.cluster-vpc-subnets-public.*.id}"]
+  value = aws_subnet.cluster-vpc-subnets-public.*.id
 }

 output "aws_security_group" {
-  value = ["${aws_security_group.kubernetes.*.id}"]
+  value = aws_security_group.kubernetes.*.id
 }

 output "default_tags" {
@@ -2,9 +2,9 @@
 aws_cluster_name = "devtest"

 #VPC Vars
 aws_vpc_cidr_block = "10.250.192.0/18"
-aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"]
+aws_cidr_subnets_private = ["10.250.192.0/20", "10.250.208.0/20"]
-aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"]
+aws_cidr_subnets_public = ["10.250.224.0/20", "10.250.240.0/20"]

 #Bastion Host
 aws_bastion_size = "t2.medium"
@@ -12,24 +12,24 @@ aws_bastion_size = "t2.medium"

 #Kubernetes Cluster

 aws_kube_master_num = 3
 aws_kube_master_size = "t2.medium"

 aws_etcd_num = 3
 aws_etcd_size = "t2.medium"

 aws_kube_worker_num = 4
 aws_kube_worker_size = "t2.medium"

 #Settings AWS ELB

 aws_elb_api_port = 6443
 k8s_secure_api_port = 6443
 kube_insecure_apiserver_address = "0.0.0.0"

 default_tags = {
   # Env = "devtest"
   # Product = "kubernetes"
 }

 inventory_file = "../../../inventory/hosts"
@@ -38,6 +38,16 @@ hosts where that makes sense. You have the option of creating bastion hosts
 inside the private subnet to access the nodes there. Alternatively, a node with
 a floating IP can be used as a jump host to nodes without.

+#### Using an existing router
+It is possible to use an existing router instead of creating one. To use an
+existing router set the router\_id variable to the uuid of the router you wish
+to use.
+
+For example:
+```
+router_id = "00c542e7-6f46-4535-ae95-984c7f0391a3"
+```
+
 ### Kubernetes Nodes
 You can create many different kubernetes topologies by setting the number of
 different classes of hosts. For each class there are options for allocating
@@ -224,7 +234,9 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
 |Variable | Description |
 |---------|-------------|
 |`cluster_name` | All OpenStack resources will use the Terraform variable`cluster_name` (default`example`) in their name to make it easier to track. For example the first compute resource will be named`example-kubernetes-1`. |
+|`az_list` | List of Availability Zones available in your OpenStack cluster. |
 |`network_name` | The name to be given to the internal network that will be generated |
+|`network_dns_domain` | (Optional) The dns_domain for the internal network that will be generated |
 |`dns_nameservers`| An array of DNS name server names to be used by hosts in the internal subnet. |
 |`floatingip_pool` | Name of the pool from which floating IPs will be allocated |
 |`external_net` | UUID of the external network that will be routed to |
@@ -246,6 +258,113 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
 |`k8s_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, empty by default |
 |`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
 |`wait_for_floatingip` | Let Terraform poll the instance until the floating IP has been associated, `false` by default. |
+|`node_root_volume_size_in_gb` | Size of the root volume for nodes, 0 to use ephemeral storage |
+|`master_root_volume_size_in_gb` | Size of the root volume for masters, 0 to use ephemeral storage |
+|`gfs_root_volume_size_in_gb` | Size of the root volume for gluster, 0 to use ephemeral storage |
+|`etcd_root_volume_size_in_gb` | Size of the root volume for etcd nodes, 0 to use ephemeral storage |
+|`bastion_root_volume_size_in_gb` | Size of the root volume for bastions, 0 to use ephemeral storage |
+|`use_server_group` | Create and use openstack nova servergroups, default: false |
+|`k8s_nodes` | Map containing worker node definition, see explanation below |
+
+##### k8s_nodes
+Allows a custom definition of worker nodes giving the operator full control over individual node flavor and
+availability zone placement. To enable the use of this mode set the `number_of_k8s_nodes` and
+`number_of_k8s_nodes_no_floating_ip` variables to 0. Then define your desired worker node configuration
+using the `k8s_nodes` variable.
+
+For example:
+```
+k8s_nodes = {
+  "1" = {
+    "az" = "sto1"
+    "flavor" = "83d8b44a-26a0-4f02-a981-079446926445"
+    "floating_ip" = true
+  },
+  "2" = {
+    "az" = "sto2"
+    "flavor" = "83d8b44a-26a0-4f02-a981-079446926445"
+    "floating_ip" = true
+  },
+  "3" = {
+    "az" = "sto3"
+    "flavor" = "83d8b44a-26a0-4f02-a981-079446926445"
+    "floating_ip" = true
+  }
+}
+```
+
+Would result in the same configuration as:
+```
+number_of_k8s_nodes = 3
+flavor_k8s_node = "83d8b44a-26a0-4f02-a981-079446926445"
+az_list = ["sto1", "sto2", "sto3"]
+```
+
+And:
+```
+k8s_nodes = {
+  "ing-1" = {
+    "az" = "sto1"
+    "flavor" = "83d8b44a-26a0-4f02-a981-079446926445"
+    "floating_ip" = true
+  },
+  "ing-2" = {
+    "az" = "sto2"
+    "flavor" = "83d8b44a-26a0-4f02-a981-079446926445"
+    "floating_ip" = true
+  },
+  "ing-3" = {
+    "az" = "sto3"
+    "flavor" = "83d8b44a-26a0-4f02-a981-079446926445"
+    "floating_ip" = true
+  },
+  "big-1" = {
+    "az" = "sto1"
+    "flavor" = "3f73fc93-ec61-4808-88df-2580d94c1a9b"
+    "floating_ip" = false
+  },
+  "big-2" = {
+    "az" = "sto2"
+    "flavor" = "3f73fc93-ec61-4808-88df-2580d94c1a9b"
+    "floating_ip" = false
+  },
+  "big-3" = {
+    "az" = "sto3"
+    "flavor" = "3f73fc93-ec61-4808-88df-2580d94c1a9b"
+    "floating_ip" = false
+  },
+  "small-1" = {
+    "az" = "sto1"
+    "flavor" = "7a6a998f-ac7f-4fb8-a534-2175b254f75e"
+    "floating_ip" = false
+  },
+  "small-2" = {
+    "az" = "sto2"
+    "flavor" = "7a6a998f-ac7f-4fb8-a534-2175b254f75e"
+    "floating_ip" = false
+  },
+  "small-3" = {
+    "az" = "sto3"
+    "flavor" = "7a6a998f-ac7f-4fb8-a534-2175b254f75e"
+    "floating_ip" = false
+  }
+}
+```
+
+Would result in three nodes in each availability zone each with their own separate naming,
+flavor and floating ip configuration.
+
+The "schema":
+```
+k8s_nodes = {
+  "key | node name suffix, must be unique" = {
+    "az" = string
+    "flavor" = string
+    "floating_ip" = bool
+  },
+}
+```
+All values are required.

 #### Terraform state files

@@ -419,7 +538,10 @@ resolvconf_mode: host_resolvconf
 ```
 node_volume_attach_limit: 26
 ```
+- Disable access_ip, this will make all internal cluster traffic be sent over the local network when a floating IP is attached (by default this value is set to 1)
+```
+use_access_ip: 0
+```

 ### Deploy Kubernetes

@@ -483,3 +605,81 @@ $ ansible-playbook --become -i inventory/$CLUSTER/hosts ./contrib/network-storag
 ## What's next

 Try out your new Kubernetes cluster with the [Hello Kubernetes service](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster/).
+
+## Appendix
+
+### Migration from `number_of_k8s_nodes*` to `k8s_nodes`
+If you currently have a cluster defined using the `number_of_k8s_nodes*` variables and wish
+to migrate to the `k8s_nodes` style you can do it like so:
+
+```ShellSession
+$ terraform state list
+module.compute.data.openstack_images_image_v2.gfs_image
+module.compute.data.openstack_images_image_v2.vm_image
+module.compute.openstack_compute_floatingip_associate_v2.k8s_master[0]
+module.compute.openstack_compute_floatingip_associate_v2.k8s_node[0]
+module.compute.openstack_compute_floatingip_associate_v2.k8s_node[1]
+module.compute.openstack_compute_floatingip_associate_v2.k8s_node[2]
+module.compute.openstack_compute_instance_v2.k8s_master[0]
+module.compute.openstack_compute_instance_v2.k8s_node[0]
+module.compute.openstack_compute_instance_v2.k8s_node[1]
+module.compute.openstack_compute_instance_v2.k8s_node[2]
+module.compute.openstack_compute_keypair_v2.k8s
+module.compute.openstack_compute_servergroup_v2.k8s_etcd[0]
+module.compute.openstack_compute_servergroup_v2.k8s_master[0]
+module.compute.openstack_compute_servergroup_v2.k8s_node[0]
+module.compute.openstack_networking_secgroup_rule_v2.bastion[0]
+module.compute.openstack_networking_secgroup_rule_v2.egress[0]
+module.compute.openstack_networking_secgroup_rule_v2.k8s
+module.compute.openstack_networking_secgroup_rule_v2.k8s_allowed_remote_ips[0]
+module.compute.openstack_networking_secgroup_rule_v2.k8s_allowed_remote_ips[1]
+module.compute.openstack_networking_secgroup_rule_v2.k8s_allowed_remote_ips[2]
+module.compute.openstack_networking_secgroup_rule_v2.k8s_master[0]
+module.compute.openstack_networking_secgroup_rule_v2.worker[0]
+module.compute.openstack_networking_secgroup_rule_v2.worker[1]
+module.compute.openstack_networking_secgroup_rule_v2.worker[2]
+module.compute.openstack_networking_secgroup_rule_v2.worker[3]
+module.compute.openstack_networking_secgroup_rule_v2.worker[4]
+module.compute.openstack_networking_secgroup_v2.bastion[0]
+module.compute.openstack_networking_secgroup_v2.k8s
+module.compute.openstack_networking_secgroup_v2.k8s_master
+module.compute.openstack_networking_secgroup_v2.worker
+module.ips.null_resource.dummy_dependency
+module.ips.openstack_networking_floatingip_v2.k8s_master[0]
+module.ips.openstack_networking_floatingip_v2.k8s_node[0]
+module.ips.openstack_networking_floatingip_v2.k8s_node[1]
+module.ips.openstack_networking_floatingip_v2.k8s_node[2]
+module.network.openstack_networking_network_v2.k8s[0]
+module.network.openstack_networking_router_interface_v2.k8s[0]
+module.network.openstack_networking_router_v2.k8s[0]
+module.network.openstack_networking_subnet_v2.k8s[0]
+$ terraform state mv 'module.compute.openstack_compute_floatingip_associate_v2.k8s_node[0]' 'module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes["1"]'
+Move "module.compute.openstack_compute_floatingip_associate_v2.k8s_node[0]" to "module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes[\"1\"]"
+Successfully moved 1 object(s).
+$ terraform state mv 'module.compute.openstack_compute_floatingip_associate_v2.k8s_node[1]' 'module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes["2"]'
+Move "module.compute.openstack_compute_floatingip_associate_v2.k8s_node[1]" to "module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes[\"2\"]"
+Successfully moved 1 object(s).
+$ terraform state mv 'module.compute.openstack_compute_floatingip_associate_v2.k8s_node[2]' 'module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes["3"]'
+Move "module.compute.openstack_compute_floatingip_associate_v2.k8s_node[2]" to "module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes[\"3\"]"
+Successfully moved 1 object(s).
+$ terraform state mv 'module.compute.openstack_compute_instance_v2.k8s_node[0]' 'module.compute.openstack_compute_instance_v2.k8s_node["1"]'
+Move "module.compute.openstack_compute_instance_v2.k8s_node[0]" to "module.compute.openstack_compute_instance_v2.k8s_node[\"1\"]"
+Successfully moved 1 object(s).
+$ terraform state mv 'module.compute.openstack_compute_instance_v2.k8s_node[1]' 'module.compute.openstack_compute_instance_v2.k8s_node["2"]'
+Move "module.compute.openstack_compute_instance_v2.k8s_node[1]" to "module.compute.openstack_compute_instance_v2.k8s_node[\"2\"]"
+Successfully moved 1 object(s).
+$ terraform state mv 'module.compute.openstack_compute_instance_v2.k8s_node[2]' 'module.compute.openstack_compute_instance_v2.k8s_node["3"]'
+Move "module.compute.openstack_compute_instance_v2.k8s_node[2]" to "module.compute.openstack_compute_instance_v2.k8s_node[\"3\"]"
+Successfully moved 1 object(s).
+$ terraform state mv 'module.ips.openstack_networking_floatingip_v2.k8s_node[0]' 'module.ips.openstack_networking_floatingip_v2.k8s_node["1"]'
+Move "module.ips.openstack_networking_floatingip_v2.k8s_node[0]" to "module.ips.openstack_networking_floatingip_v2.k8s_node[\"1\"]"
+Successfully moved 1 object(s).
+$ terraform state mv 'module.ips.openstack_networking_floatingip_v2.k8s_node[1]' 'module.ips.openstack_networking_floatingip_v2.k8s_node["2"]'
+Move "module.ips.openstack_networking_floatingip_v2.k8s_node[1]" to "module.ips.openstack_networking_floatingip_v2.k8s_node[\"2\"]"
+Successfully moved 1 object(s).
+$ terraform state mv 'module.ips.openstack_networking_floatingip_v2.k8s_node[2]' 'module.ips.openstack_networking_floatingip_v2.k8s_node["3"]'
+Move "module.ips.openstack_networking_floatingip_v2.k8s_node[2]" to "module.ips.openstack_networking_floatingip_v2.k8s_node[\"3\"]"
+Successfully moved 1 object(s).
+```
+
+Of course for nodes without floating ips those steps can be omitted.
@@ -5,12 +5,14 @@ provider "openstack" {
 module "network" {
   source = "./modules/network"

   external_net = "${var.external_net}"
   network_name = "${var.network_name}"
   subnet_cidr = "${var.subnet_cidr}"
   cluster_name = "${var.cluster_name}"
   dns_nameservers = "${var.dns_nameservers}"
-  use_neutron = "${var.use_neutron}"
+  network_dns_domain = "${var.network_dns_domain}"
+  use_neutron = "${var.use_neutron}"
+  router_id = "${var.router_id}"
 }

 module "ips" {
@@ -24,6 +26,7 @@ module "ips" {
   external_net = "${var.external_net}"
   network_name = "${var.network_name}"
   router_id = "${module.network.router_id}"
+  k8s_nodes = "${var.k8s_nodes}"
 }

 module "compute" {
@@ -31,6 +34,7 @@ module "compute" {

   cluster_name = "${var.cluster_name}"
   az_list = "${var.az_list}"
+  az_list_node = "${var.az_list_node}"
   number_of_k8s_masters = "${var.number_of_k8s_masters}"
   number_of_k8s_masters_no_etcd = "${var.number_of_k8s_masters_no_etcd}"
   number_of_etcd = "${var.number_of_etcd}"
@@ -40,6 +44,12 @@ module "compute" {
   number_of_bastions = "${var.number_of_bastions}"
   number_of_k8s_nodes_no_floating_ip = "${var.number_of_k8s_nodes_no_floating_ip}"
   number_of_gfs_nodes_no_floating_ip = "${var.number_of_gfs_nodes_no_floating_ip}"
+  k8s_nodes = "${var.k8s_nodes}"
+  bastion_root_volume_size_in_gb = "${var.bastion_root_volume_size_in_gb}"
+  etcd_root_volume_size_in_gb = "${var.etcd_root_volume_size_in_gb}"
+  master_root_volume_size_in_gb = "${var.master_root_volume_size_in_gb}"
+  node_root_volume_size_in_gb = "${var.node_root_volume_size_in_gb}"
+  gfs_root_volume_size_in_gb = "${var.gfs_root_volume_size_in_gb}"
   gfs_volume_size_in_gb = "${var.gfs_volume_size_in_gb}"
   public_key_path = "${var.public_key_path}"
   image = "${var.image}"
@@ -55,6 +65,7 @@ module "compute" {
   k8s_master_fips = "${module.ips.k8s_master_fips}"
   k8s_master_no_etcd_fips = "${module.ips.k8s_master_no_etcd_fips}"
   k8s_node_fips = "${module.ips.k8s_node_fips}"
+  k8s_nodes_fips = "${module.ips.k8s_nodes_fips}"
   bastion_fips = "${module.ips.bastion_fips}"
   bastion_allowed_remote_ips = "${var.bastion_allowed_remote_ips}"
   master_allowed_remote_ips = "${var.master_allowed_remote_ips}"
@@ -64,6 +75,8 @@ module "compute" {
   supplementary_node_groups = "${var.supplementary_node_groups}"
   worker_allowed_ports = "${var.worker_allowed_ports}"
   wait_for_floatingip = "${var.wait_for_floatingip}"
+  use_access_ip = "${var.use_access_ip}"
+  use_server_groups = "${var.use_server_groups}"

   network_id = "${module.network.router_id}"
 }
@@ -85,7 +98,7 @@ output "k8s_master_fips" {
 }

 output "k8s_node_fips" {
-  value = "${module.ips.k8s_node_fips}"
+  value = "${var.number_of_k8s_nodes > 0 ? module.ips.k8s_node_fips : [for key, value in module.ips.k8s_nodes_fips : value.address]}"
 }

 output "bastion_fips" {
@@ -1,3 +1,11 @@
+data "openstack_images_image_v2" "vm_image" {
+  name = "${var.image}"
+}
+
+data "openstack_images_image_v2" "gfs_image" {
+  name = "${var.image_gfs == "" ? var.image : var.image_gfs}"
+}
+
 resource "openstack_compute_keypair_v2" "k8s" {
   name = "kubernetes-${var.cluster_name}"
   public_key = "${chomp(file(var.public_key_path))}"
@@ -35,7 +43,7 @@ resource "openstack_networking_secgroup_rule_v2" "bastion" {
   port_range_min = "22"
   port_range_max = "22"
   remote_ip_prefix = "${var.bastion_allowed_remote_ips[count.index]}"
-  security_group_id = "${openstack_networking_secgroup_v2.bastion[count.index].id}"
+  security_group_id = "${openstack_networking_secgroup_v2.bastion[0].id}"
 }

 resource "openstack_networking_secgroup_v2" "k8s" {
@@ -87,13 +95,43 @@ resource "openstack_networking_secgroup_rule_v2" "worker" {
   security_group_id = "${openstack_networking_secgroup_v2.worker.id}"
 }

+resource "openstack_compute_servergroup_v2" "k8s_master" {
+  count = "%{if var.use_server_groups}1%{else}0%{endif}"
+  name = "k8s-master-srvgrp"
+  policies = ["anti-affinity"]
+}
+
+resource "openstack_compute_servergroup_v2" "k8s_node" {
+  count = "%{if var.use_server_groups}1%{else}0%{endif}"
+  name = "k8s-node-srvgrp"
+  policies = ["anti-affinity"]
+}
+
+resource "openstack_compute_servergroup_v2" "k8s_etcd" {
+  count = "%{if var.use_server_groups}1%{else}0%{endif}"
+  name = "k8s-etcd-srvgrp"
+  policies = ["anti-affinity"]
+}
+
 resource "openstack_compute_instance_v2" "bastion" {
-  name = "${var.cluster_name}-bastion-${count.index+1}"
+  name = "${var.cluster_name}-bastion-${count.index + 1}"
   count = "${var.number_of_bastions}"
   image_name = "${var.image}"
   flavor_id = "${var.flavor_bastion}"
   key_pair = "${openstack_compute_keypair_v2.k8s.name}"

+  dynamic "block_device" {
+    for_each = var.bastion_root_volume_size_in_gb > 0 ? [var.image] : []
+    content {
+      uuid = "${data.openstack_images_image_v2.vm_image.id}"
+      source_type = "image"
+      volume_size = "${var.bastion_root_volume_size_in_gb}"
+      boot_index = 0
+      destination_type = "volume"
+      delete_on_termination = true
+    }
+  }
+
   network {
     name = "${var.network_name}"
   }
@@ -106,6 +144,7 @@ resource "openstack_compute_instance_v2" "bastion" {
     ssh_user = "${var.ssh_user}"
     kubespray_groups = "bastion"
     depends_on = "${var.network_id}"
+    use_access_ip = "${var.use_access_ip}"
   }

   provisioner "local-exec" {
@@ -114,13 +153,26 @@ resource "openstack_compute_instance_v2" "bastion" {
 }

 resource "openstack_compute_instance_v2" "k8s_master" {
-  name = "${var.cluster_name}-k8s-master-${count.index+1}"
+  name = "${var.cluster_name}-k8s-master-${count.index + 1}"
   count = "${var.number_of_k8s_masters}"
   availability_zone = "${element(var.az_list, count.index)}"
   image_name = "${var.image}"
   flavor_id = "${var.flavor_k8s_master}"
   key_pair = "${openstack_compute_keypair_v2.k8s.name}"

+
+  dynamic "block_device" {
+    for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
+    content {
+      uuid = "${data.openstack_images_image_v2.vm_image.id}"
+      source_type = "image"
+      volume_size = "${var.master_root_volume_size_in_gb}"
+      boot_index = 0
+      destination_type = "volume"
+      delete_on_termination = true
+    }
+  }
+
   network {
     name = "${var.network_name}"
   }
@@ -129,25 +181,46 @@ resource "openstack_compute_instance_v2" "k8s_master" {
     "${openstack_networking_secgroup_v2.k8s.name}",
   ]

+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
+    }
+  }
+
   metadata = {
     ssh_user = "${var.ssh_user}"
     kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
     depends_on = "${var.network_id}"
+    use_access_ip = "${var.use_access_ip}"
   }

   provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
+    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
   }
 }

 resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
-  name = "${var.cluster_name}-k8s-master-ne-${count.index+1}"
+  name = "${var.cluster_name}-k8s-master-ne-${count.index + 1}"
   count = "${var.number_of_k8s_masters_no_etcd}"
   availability_zone = "${element(var.az_list, count.index)}"
   image_name = "${var.image}"
   flavor_id = "${var.flavor_k8s_master}"
   key_pair = "${openstack_compute_keypair_v2.k8s.name}"

+
+  dynamic "block_device" {
+    for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
+    content {
+      uuid = "${data.openstack_images_image_v2.vm_image.id}"
+      source_type = "image"
+      volume_size = "${var.master_root_volume_size_in_gb}"
+      boot_index = 0
+      destination_type = "volume"
+      delete_on_termination = true
+    }
+  }
+
   network {
     name = "${var.network_name}"
   }
@@ -156,46 +229,86 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
     "${openstack_networking_secgroup_v2.k8s.name}",
   ]

+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
+    }
+  }
+
   metadata = {
     ssh_user = "${var.ssh_user}"
     kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
     depends_on = "${var.network_id}"
+    use_access_ip = "${var.use_access_ip}"
   }

   provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
+    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
   }
 }

 resource "openstack_compute_instance_v2" "etcd" {
-  name = "${var.cluster_name}-etcd-${count.index+1}"
+  name = "${var.cluster_name}-etcd-${count.index + 1}"
   count = "${var.number_of_etcd}"
   availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
|
image_name = "${var.image}"
|
||||||
flavor_id = "${var.flavor_etcd}"
|
flavor_id = "${var.flavor_etcd}"
|
||||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||||
|
|
||||||
|
dynamic "block_device" {
|
||||||
|
for_each = var.etcd_root_volume_size_in_gb > 0 ? [var.image] : []
|
||||||
|
content {
|
||||||
|
uuid = "${data.openstack_images_image_v2.vm_image.id}"
|
||||||
|
source_type = "image"
|
||||||
|
volume_size = "${var.etcd_root_volume_size_in_gb}"
|
||||||
|
boot_index = 0
|
||||||
|
destination_type = "volume"
|
||||||
|
delete_on_termination = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
network {
|
network {
|
||||||
name = "${var.network_name}"
|
name = "${var.network_name}"
|
||||||
}
|
}
|
||||||
|
|
||||||
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]
|
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]
|
||||||
|
|
||||||
|
dynamic "scheduler_hints" {
|
||||||
|
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : []
|
||||||
|
content {
|
||||||
|
group = "${openstack_compute_servergroup_v2.k8s_etcd[0].id}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
metadata = {
|
metadata = {
|
||||||
ssh_user = "${var.ssh_user}"
|
ssh_user = "${var.ssh_user}"
|
||||||
kubespray_groups = "etcd,vault,no-floating"
|
kubespray_groups = "etcd,vault,no-floating"
|
||||||
depends_on = "${var.network_id}"
|
depends_on = "${var.network_id}"
|
||||||
|
use_access_ip = "${var.use_access_ip}"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
||||||
name = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
|
name = "${var.cluster_name}-k8s-master-nf-${count.index + 1}"
|
||||||
count = "${var.number_of_k8s_masters_no_floating_ip}"
|
count = "${var.number_of_k8s_masters_no_floating_ip}"
|
||||||
availability_zone = "${element(var.az_list, count.index)}"
|
availability_zone = "${element(var.az_list, count.index)}"
|
||||||
image_name = "${var.image}"
|
image_name = "${var.image}"
|
||||||
flavor_id = "${var.flavor_k8s_master}"
|
flavor_id = "${var.flavor_k8s_master}"
|
||||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||||
|
|
||||||
|
dynamic "block_device" {
|
||||||
|
for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
|
||||||
|
content {
|
||||||
|
uuid = "${data.openstack_images_image_v2.vm_image.id}"
|
||||||
|
source_type = "image"
|
||||||
|
volume_size = "${var.master_root_volume_size_in_gb}"
|
||||||
|
boot_index = 0
|
||||||
|
destination_type = "volume"
|
||||||
|
delete_on_termination = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
network {
|
network {
|
||||||
name = "${var.network_name}"
|
name = "${var.network_name}"
|
||||||
}
|
}
|
||||||
@@ -204,21 +317,41 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
|||||||
"${openstack_networking_secgroup_v2.k8s.name}",
|
"${openstack_networking_secgroup_v2.k8s.name}",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
dynamic "scheduler_hints" {
|
||||||
|
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||||
|
content {
|
||||||
|
group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
metadata = {
|
metadata = {
|
||||||
ssh_user = "${var.ssh_user}"
|
ssh_user = "${var.ssh_user}"
|
||||||
kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
|
kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
|
||||||
depends_on = "${var.network_id}"
|
depends_on = "${var.network_id}"
|
||||||
|
use_access_ip = "${var.use_access_ip}"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
|
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
|
||||||
name = "${var.cluster_name}-k8s-master-ne-nf-${count.index+1}"
|
name = "${var.cluster_name}-k8s-master-ne-nf-${count.index + 1}"
|
||||||
count = "${var.number_of_k8s_masters_no_floating_ip_no_etcd}"
|
count = "${var.number_of_k8s_masters_no_floating_ip_no_etcd}"
|
||||||
availability_zone = "${element(var.az_list, count.index)}"
|
availability_zone = "${element(var.az_list, count.index)}"
|
||||||
image_name = "${var.image}"
|
image_name = "${var.image}"
|
||||||
flavor_id = "${var.flavor_k8s_master}"
|
flavor_id = "${var.flavor_k8s_master}"
|
||||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||||
|
|
||||||
|
dynamic "block_device" {
|
||||||
|
for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
|
||||||
|
content {
|
||||||
|
uuid = "${data.openstack_images_image_v2.vm_image.id}"
|
||||||
|
source_type = "image"
|
||||||
|
volume_size = "${var.master_root_volume_size_in_gb}"
|
||||||
|
boot_index = 0
|
||||||
|
destination_type = "volume"
|
||||||
|
delete_on_termination = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
network {
|
network {
|
||||||
name = "${var.network_name}"
|
name = "${var.network_name}"
|
||||||
}
|
}
|
||||||
@@ -227,21 +360,41 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
|
|||||||
"${openstack_networking_secgroup_v2.k8s.name}",
|
"${openstack_networking_secgroup_v2.k8s.name}",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
dynamic "scheduler_hints" {
|
||||||
|
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||||
|
content {
|
||||||
|
group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
metadata = {
|
metadata = {
|
||||||
ssh_user = "${var.ssh_user}"
|
ssh_user = "${var.ssh_user}"
|
||||||
kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
|
kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
|
||||||
depends_on = "${var.network_id}"
|
depends_on = "${var.network_id}"
|
||||||
|
use_access_ip = "${var.use_access_ip}"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "openstack_compute_instance_v2" "k8s_node" {
|
resource "openstack_compute_instance_v2" "k8s_node" {
|
||||||
name = "${var.cluster_name}-k8s-node-${count.index+1}"
|
name = "${var.cluster_name}-k8s-node-${count.index + 1}"
|
||||||
count = "${var.number_of_k8s_nodes}"
|
count = "${var.number_of_k8s_nodes}"
|
||||||
availability_zone = "${element(var.az_list, count.index)}"
|
availability_zone = "${element(var.az_list_node, count.index)}"
|
||||||
image_name = "${var.image}"
|
image_name = "${var.image}"
|
||||||
flavor_id = "${var.flavor_k8s_node}"
|
flavor_id = "${var.flavor_k8s_node}"
|
||||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||||
|
|
||||||
|
dynamic "block_device" {
|
||||||
|
for_each = var.node_root_volume_size_in_gb > 0 ? [var.image] : []
|
||||||
|
content {
|
||||||
|
uuid = "${data.openstack_images_image_v2.vm_image.id}"
|
||||||
|
source_type = "image"
|
||||||
|
volume_size = "${var.node_root_volume_size_in_gb}"
|
||||||
|
boot_index = 0
|
||||||
|
destination_type = "volume"
|
||||||
|
delete_on_termination = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
network {
|
network {
|
||||||
name = "${var.network_name}"
|
name = "${var.network_name}"
|
||||||
}
|
}
|
||||||
@@ -250,25 +403,45 @@ resource "openstack_compute_instance_v2" "k8s_node" {
|
|||||||
"${openstack_networking_secgroup_v2.worker.name}",
|
"${openstack_networking_secgroup_v2.worker.name}",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
dynamic "scheduler_hints" {
|
||||||
|
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||||
|
content {
|
||||||
|
group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
metadata = {
|
metadata = {
|
||||||
ssh_user = "${var.ssh_user}"
|
ssh_user = "${var.ssh_user}"
|
||||||
kubespray_groups = "kube-node,k8s-cluster,${var.supplementary_node_groups}"
|
kubespray_groups = "kube-node,k8s-cluster,${var.supplementary_node_groups}"
|
||||||
depends_on = "${var.network_id}"
|
depends_on = "${var.network_id}"
|
||||||
|
use_access_ip = "${var.use_access_ip}"
|
||||||
}
|
}
|
||||||
|
|
||||||
provisioner "local-exec" {
|
provisioner "local-exec" {
|
||||||
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > group_vars/no-floating.yml"
|
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > group_vars/no-floating.yml"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||||
name = "${var.cluster_name}-k8s-node-nf-${count.index+1}"
|
name = "${var.cluster_name}-k8s-node-nf-${count.index + 1}"
|
||||||
count = "${var.number_of_k8s_nodes_no_floating_ip}"
|
count = "${var.number_of_k8s_nodes_no_floating_ip}"
|
||||||
availability_zone = "${element(var.az_list, count.index)}"
|
availability_zone = "${element(var.az_list_node, count.index)}"
|
||||||
image_name = "${var.image}"
|
image_name = "${var.image}"
|
||||||
flavor_id = "${var.flavor_k8s_node}"
|
flavor_id = "${var.flavor_k8s_node}"
|
||||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||||
|
|
||||||
|
dynamic "block_device" {
|
||||||
|
for_each = var.node_root_volume_size_in_gb > 0 ? [var.image] : []
|
||||||
|
content {
|
||||||
|
uuid = "${data.openstack_images_image_v2.vm_image.id}"
|
||||||
|
source_type = "image"
|
||||||
|
volume_size = "${var.node_root_volume_size_in_gb}"
|
||||||
|
boot_index = 0
|
||||||
|
destination_type = "volume"
|
||||||
|
delete_on_termination = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
network {
|
network {
|
||||||
name = "${var.network_name}"
|
name = "${var.network_name}"
|
||||||
}
|
}
|
||||||
@@ -277,10 +450,106 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
|||||||
"${openstack_networking_secgroup_v2.worker.name}",
|
"${openstack_networking_secgroup_v2.worker.name}",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
dynamic "scheduler_hints" {
|
||||||
|
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||||
|
content {
|
||||||
|
group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
metadata = {
|
metadata = {
|
||||||
ssh_user = "${var.ssh_user}"
|
ssh_user = "${var.ssh_user}"
|
||||||
kubespray_groups = "kube-node,k8s-cluster,no-floating,${var.supplementary_node_groups}"
|
kubespray_groups = "kube-node,k8s-cluster,no-floating,${var.supplementary_node_groups}"
|
||||||
depends_on = "${var.network_id}"
|
depends_on = "${var.network_id}"
|
||||||
|
use_access_ip = "${var.use_access_ip}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_compute_instance_v2" "k8s_nodes" {
|
||||||
|
for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {}
|
||||||
|
name = "${var.cluster_name}-k8s-node-${each.key}"
|
||||||
|
availability_zone = "${each.value.az}"
|
||||||
|
image_name = "${var.image}"
|
||||||
|
flavor_id = "${each.value.flavor}"
|
||||||
|
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||||
|
|
||||||
|
dynamic "block_device" {
|
||||||
|
for_each = var.node_root_volume_size_in_gb > 0 ? [var.image] : []
|
||||||
|
content {
|
||||||
|
uuid = "${data.openstack_images_image_v2.vm_image.id}"
|
||||||
|
source_type = "image"
|
||||||
|
volume_size = "${var.node_root_volume_size_in_gb}"
|
||||||
|
boot_index = 0
|
||||||
|
destination_type = "volume"
|
||||||
|
delete_on_termination = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
network {
|
||||||
|
name = "${var.network_name}"
|
||||||
|
}
|
||||||
|
|
||||||
|
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
|
||||||
|
"${openstack_networking_secgroup_v2.worker.name}",
|
||||||
|
]
|
||||||
|
|
||||||
|
dynamic "scheduler_hints" {
|
||||||
|
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||||
|
content {
|
||||||
|
group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
metadata = {
|
||||||
|
ssh_user = "${var.ssh_user}"
|
||||||
|
kubespray_groups = "kube-node,k8s-cluster,%{if each.value.floating_ip == false}no-floating,%{endif}${var.supplementary_node_groups}"
|
||||||
|
depends_on = "${var.network_id}"
|
||||||
|
use_access_ip = "${var.use_access_ip}"
|
||||||
|
}
|
||||||
|
|
||||||
|
provisioner "local-exec" {
|
||||||
|
command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_nodes_fips : value.address]), 0)}/ > group_vars/no-floating.yml%{else}true%{endif}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
||||||
|
name = "${var.cluster_name}-gfs-node-nf-${count.index + 1}"
|
||||||
|
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||||
|
availability_zone = "${element(var.az_list, count.index)}"
|
||||||
|
image_name = "${var.image_gfs}"
|
||||||
|
flavor_id = "${var.flavor_gfs_node}"
|
||||||
|
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||||
|
|
||||||
|
dynamic "block_device" {
|
||||||
|
for_each = var.gfs_root_volume_size_in_gb > 0 ? [var.image] : []
|
||||||
|
content {
|
||||||
|
uuid = "${data.openstack_images_image_v2.vm_image.id}"
|
||||||
|
source_type = "image"
|
||||||
|
volume_size = "${var.gfs_root_volume_size_in_gb}"
|
||||||
|
boot_index = 0
|
||||||
|
destination_type = "volume"
|
||||||
|
delete_on_termination = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
network {
|
||||||
|
name = "${var.network_name}"
|
||||||
|
}
|
||||||
|
|
||||||
|
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]
|
||||||
|
|
||||||
|
dynamic "scheduler_hints" {
|
||||||
|
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||||
|
content {
|
||||||
|
group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
metadata = {
|
||||||
|
ssh_user = "${var.ssh_user_gfs}"
|
||||||
|
kubespray_groups = "gfs-cluster,network-storage,no-floating"
|
||||||
|
depends_on = "${var.network_id}"
|
||||||
|
use_access_ip = "${var.use_access_ip}"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -291,6 +560,7 @@ resource "openstack_compute_floatingip_associate_v2" "bastion" {
|
|||||||
wait_until_associated = "${var.wait_for_floatingip}"
|
wait_until_associated = "${var.wait_for_floatingip}"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
resource "openstack_compute_floatingip_associate_v2" "k8s_master" {
|
resource "openstack_compute_floatingip_associate_v2" "k8s_master" {
|
||||||
count = "${var.number_of_k8s_masters}"
|
count = "${var.number_of_k8s_masters}"
|
||||||
instance_id = "${element(openstack_compute_instance_v2.k8s_master.*.id, count.index)}"
|
instance_id = "${element(openstack_compute_instance_v2.k8s_master.*.id, count.index)}"
|
||||||
@@ -299,48 +569,34 @@ resource "openstack_compute_floatingip_associate_v2" "k8s_master" {
|
|||||||
}
|
}
|
||||||
|
|
||||||
resource "openstack_compute_floatingip_associate_v2" "k8s_master_no_etcd" {
|
resource "openstack_compute_floatingip_associate_v2" "k8s_master_no_etcd" {
|
||||||
count = "${var.number_of_k8s_masters_no_etcd}"
|
count = "${var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_etcd : 0}"
|
||||||
instance_id = "${element(openstack_compute_instance_v2.k8s_master_no_etcd.*.id, count.index)}"
|
instance_id = "${element(openstack_compute_instance_v2.k8s_master_no_etcd.*.id, count.index)}"
|
||||||
floating_ip = "${var.k8s_master_no_etcd_fips[count.index]}"
|
floating_ip = "${var.k8s_master_no_etcd_fips[count.index]}"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "openstack_compute_floatingip_associate_v2" "k8s_node" {
|
resource "openstack_compute_floatingip_associate_v2" "k8s_node" {
|
||||||
count = "${var.number_of_k8s_nodes}"
|
count = "${var.node_root_volume_size_in_gb == 0 ? var.number_of_k8s_nodes : 0}"
|
||||||
floating_ip = "${var.k8s_node_fips[count.index]}"
|
floating_ip = "${var.k8s_node_fips[count.index]}"
|
||||||
instance_id = "${element(openstack_compute_instance_v2.k8s_node.*.id, count.index)}"
|
instance_id = "${element(openstack_compute_instance_v2.k8s_node[*].id, count.index)}"
|
||||||
|
wait_until_associated = "${var.wait_for_floatingip}"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_compute_floatingip_associate_v2" "k8s_nodes" {
|
||||||
|
for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? { for key, value in var.k8s_nodes : key => value if value.floating_ip } : {}
|
||||||
|
floating_ip = "${var.k8s_nodes_fips[each.key].address}"
|
||||||
|
instance_id = "${openstack_compute_instance_v2.k8s_nodes[each.key].id}"
|
||||||
wait_until_associated = "${var.wait_for_floatingip}"
|
wait_until_associated = "${var.wait_for_floatingip}"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
|
resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
|
||||||
name = "${var.cluster_name}-glusterfs_volume-${count.index+1}"
|
name = "${var.cluster_name}-glusterfs_volume-${count.index + 1}"
|
||||||
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
count = "${var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0}"
|
||||||
description = "Non-ephemeral volume for GlusterFS"
|
description = "Non-ephemeral volume for GlusterFS"
|
||||||
size = "${var.gfs_volume_size_in_gb}"
|
size = "${var.gfs_volume_size_in_gb}"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
|
||||||
name = "${var.cluster_name}-gfs-node-nf-${count.index+1}"
|
|
||||||
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
|
||||||
availability_zone = "${element(var.az_list, count.index)}"
|
|
||||||
image_name = "${var.image_gfs}"
|
|
||||||
flavor_id = "${var.flavor_gfs_node}"
|
|
||||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
|
||||||
|
|
||||||
network {
|
|
||||||
name = "${var.network_name}"
|
|
||||||
}
|
|
||||||
|
|
||||||
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]
|
|
||||||
|
|
||||||
metadata = {
|
|
||||||
ssh_user = "${var.ssh_user_gfs}"
|
|
||||||
kubespray_groups = "gfs-cluster,network-storage,no-floating"
|
|
||||||
depends_on = "${var.network_id}"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "openstack_compute_volume_attach_v2" "glusterfs_volume" {
|
resource "openstack_compute_volume_attach_v2" "glusterfs_volume" {
|
||||||
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
count = "${var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0}"
|
||||||
instance_id = "${element(openstack_compute_instance_v2.glusterfs_node_no_floating_ip.*.id, count.index)}"
|
instance_id = "${element(openstack_compute_instance_v2.glusterfs_node_no_floating_ip.*.id, count.index)}"
|
||||||
volume_id = "${element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index)}"
|
volume_id = "${element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index)}"
|
||||||
}
|
}
|
||||||
|
|||||||
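For orientation, the new `k8s_nodes` resource above is driven by a map keyed by node name, with `az`, `flavor` and `floating_ip` per entry, and only takes effect when both plain node counts are zero. A minimal sketch of how such a map might look in a `cluster.tfvars` file; the node keys, availability zone and flavor UUID below are illustrative placeholders, not values from this changeset:

```hcl
# Illustrative only; keys and values are placeholders.
number_of_k8s_nodes                = 0
number_of_k8s_nodes_no_floating_ip = 0

k8s_nodes = {
  "1" = {
    "az"          = "nova"
    "flavor"      = "<flavor-uuid>"
    "floating_ip" = true
  }
  "2" = {
    "az"          = "nova"
    "flavor"      = "<flavor-uuid>"
    "floating_ip" = false
  }
}
```

With both counts set to 0, the `for_each` guards shown above switch node provisioning over to this map, and the `floating_ip` flag per entry decides whether the node joins the `no-floating` group.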
@@ -1,7 +1,11 @@
variable "cluster_name" {}

variable "az_list" {
-  type = "list"
+  type = list(string)
+}
+
+variable "az_list_node" {
+  type = list(string)
}

variable "number_of_k8s_masters" {}

@@ -22,6 +26,16 @@ variable "number_of_bastions" {}

variable "number_of_gfs_nodes_no_floating_ip" {}

+variable "bastion_root_volume_size_in_gb" {}
+
+variable "etcd_root_volume_size_in_gb" {}
+
+variable "master_root_volume_size_in_gb" {}
+
+variable "node_root_volume_size_in_gb" {}
+
+variable "gfs_root_volume_size_in_gb" {}
+
variable "gfs_volume_size_in_gb" {}

variable "public_key_path" {}

@@ -62,6 +76,10 @@ variable "k8s_node_fips" {
  type = "list"
}

+variable "k8s_nodes_fips" {
+  type = "map"
+}
+
variable "bastion_fips" {
  type = "list"
}

@@ -82,6 +100,8 @@ variable "k8s_allowed_egress_ips" {
  type = "list"
}

+variable "k8s_nodes" {}
+
variable "wait_for_floatingip" {}

variable "supplementary_master_groups" {

@@ -95,3 +115,9 @@ variable "supplementary_node_groups" {
variable "worker_allowed_ports" {
  type = "list"
}
+
+variable "use_access_ip" {}
+
+variable "use_server_groups" {
+  type = bool
+}
@@ -27,3 +27,10 @@ resource "openstack_networking_floatingip_v2" "bastion" {
  pool = "${var.floatingip_pool}"
  depends_on = ["null_resource.dummy_dependency"]
}
+
+resource "openstack_networking_floatingip_v2" "k8s_nodes" {
+  for_each = var.number_of_k8s_nodes == 0 ? { for key, value in var.k8s_nodes : key => value if value.floating_ip } : {}
+  pool = "${var.floatingip_pool}"
+  depends_on = ["null_resource.dummy_dependency"]
+}

@@ -10,6 +10,10 @@ output "k8s_node_fips" {
  value = "${openstack_networking_floatingip_v2.k8s_node[*].address}"
}

+output "k8s_nodes_fips" {
+  value = "${openstack_networking_floatingip_v2.k8s_nodes}"
+}
+
output "bastion_fips" {
  value = "${openstack_networking_floatingip_v2.bastion[*].address}"
}

@@ -15,3 +15,5 @@ variable "network_name" {}
variable "router_id" {
  default = ""
}
+
+variable "k8s_nodes" {}
@@ -1,13 +1,19 @@
resource "openstack_networking_router_v2" "k8s" {
  name = "${var.cluster_name}-router"
-  count = "${var.use_neutron}"
+  count = "${var.use_neutron}" == 1 && "${var.router_id}" == null ? 1 : 0
  admin_state_up = "true"
  external_network_id = "${var.external_net}"
}

+data "openstack_networking_router_v2" "k8s" {
+  router_id = "${var.router_id}"
+  count = "${var.use_neutron}" == 1 && "${var.router_id}" != null ? 1 : 0
+}
+
resource "openstack_networking_network_v2" "k8s" {
  name = "${var.network_name}"
  count = "${var.use_neutron}"
+  dns_domain = var.network_dns_domain != null ? "${var.network_dns_domain}" : null
  admin_state_up = "true"
}

@@ -22,6 +28,6 @@ resource "openstack_networking_subnet_v2" "k8s" {

resource "openstack_networking_router_interface_v2" "k8s" {
  count = "${var.use_neutron}"
-  router_id = "${openstack_networking_router_v2.k8s[count.index].id}"
+  router_id = "%{if openstack_networking_router_v2.k8s != []}${openstack_networking_router_v2.k8s[count.index].id}%{else}${var.router_id}%{endif}"
  subnet_id = "${openstack_networking_subnet_v2.k8s[count.index].id}"
}

@@ -1,11 +1,11 @@
output "router_id" {
-  value = "${element(concat(openstack_networking_router_v2.k8s.*.id, list("")), 0)}"
+  value = "%{if var.use_neutron == 1} ${var.router_id == null ? element(concat(openstack_networking_router_v2.k8s.*.id, [""]), 0) : var.router_id} %{else} %{endif}"
}

output "router_internal_port_id" {
-  value = "${element(concat(openstack_networking_router_interface_v2.k8s.*.id, list("")), 0)}"
+  value = "${element(concat(openstack_networking_router_interface_v2.k8s.*.id, [""]), 0)}"
}

output "subnet_id" {
-  value = "${element(concat(openstack_networking_subnet_v2.k8s.*.id, list("")), 0)}"
+  value = "${element(concat(openstack_networking_subnet_v2.k8s.*.id, [""]), 0)}"
}

@@ -2,6 +2,8 @@ variable "external_net" {}

variable "network_name" {}

+variable "network_dns_domain" {}
+
variable "cluster_name" {}

variable "dns_nameservers" {

@@ -11,3 +13,5 @@ variable "dns_nameservers" {
variable "subnet_cidr" {}

variable "use_neutron" {}
+
+variable "router_id" {}
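For orientation, the `router_id` and `network_dns_domain` variables added above let a deployment attach the cluster subnet to an existing Neutron router instead of creating a new one, and set a DNS domain on the internal network. A hypothetical `cluster.tfvars` excerpt; the UUID and domain below are placeholders, not values from this changeset:

```hcl
# Illustrative only; the router UUID and DNS domain are placeholders.
use_neutron        = 1
router_id          = "<existing-router-uuid>"  # reuse this router instead of creating one
network_dns_domain = "k8s.example.tld."
```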
@@ -1,6 +1,9 @@
# your Kubernetes cluster name here
cluster_name = "i-didnt-read-the-docs"

+# list of availability zones available in your OpenStack cluster
+#az_list = ["nova"]
+
# SSH key to use for access to nodes
public_key_path = "~/.ssh/id_rsa.pub"
@@ -3,8 +3,14 @@ variable "cluster_name" {
}

variable "az_list" {
-  description = "List of Availability Zones available in your OpenStack cluster"
+  description = "List of Availability Zones to use for masters in your OpenStack cluster"
-  type = "list"
+  type = list(string)
+  default = ["nova"]
+}
+
+variable "az_list_node" {
+  description = "List of Availability Zones to use for nodes in your OpenStack cluster"
+  type = list(string)
  default = ["nova"]
}

@@ -44,6 +50,26 @@ variable "number_of_gfs_nodes_no_floating_ip" {
  default = 0
}

+variable "bastion_root_volume_size_in_gb" {
+  default = 0
+}
+
+variable "etcd_root_volume_size_in_gb" {
+  default = 0
+}
+
+variable "master_root_volume_size_in_gb" {
+  default = 0
+}
+
+variable "node_root_volume_size_in_gb" {
+  default = 0
+}
+
+variable "gfs_root_volume_size_in_gb" {
+  default = 0
+}
+
variable "gfs_volume_size_in_gb" {
  default = 75
}

@@ -55,12 +81,12 @@ variable "public_key_path" {

variable "image" {
  description = "the image to use"
-  default = "ubuntu-14.04"
+  default = ""
}

variable "image_gfs" {
  description = "Glance image to use for GlusterFS"
-  default = "ubuntu-16.04"
+  default = ""
}

variable "ssh_user" {

@@ -103,6 +129,12 @@ variable "network_name" {
  default = "internal"
}

+variable "network_dns_domain" {
+  description = "dns_domain for the internal network"
+  type = "string"
+  default = null
+}
+
variable "use_neutron" {
  description = "Use neutron"
  default = 1

@@ -180,3 +212,21 @@ variable "worker_allowed_ports" {
    },
  ]
}
+
+variable "use_access_ip" {
+  default = 1
+}
+
+variable "use_server_groups" {
+  default = false
+}
+
+variable "router_id" {
+  description = "uuid of an externally defined router to use"
+  default = null
+}
+
+variable "k8s_nodes" {
+  default = {}
+}
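For orientation, the new top-level variables above all default to 0, false, null or an empty map, so existing deployments keep their current behaviour. A hypothetical `cluster.tfvars` excerpt opting into boot-from-volume roots, server groups and a separate availability-zone list for nodes; the sizes and zone names below are illustrative, not values from this changeset:

```hcl
# Illustrative values only.
az_list      = ["nova"]
az_list_node = ["nova"]

master_root_volume_size_in_gb = 50
node_root_volume_size_in_gb   = 100

use_server_groups = true
```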
@@ -38,7 +38,7 @@ now six total etcd replicas.

## SSH Key Setup

-An SSH keypair is required so Ansible can access the newly provisioned nodes (bare metal Packet hosts). By default, the public SSH key defined in cluster.tf will be installed in authorized_key on the newly provisioned nodes (~/.ssh/id_rsa.pub). Terraform will upload this public key and then it will be distributed out to all the nodes. If you have already set this public key in Packet (i.e. via the portal), then set the public keyfile name in cluster.tf to blank to prevent the duplicate key from being uploaded which will cause an error.
+An SSH keypair is required so Ansible can access the newly provisioned nodes (bare metal Packet hosts). By default, the public SSH key defined in cluster.tfvars will be installed in authorized_key on the newly provisioned nodes (~/.ssh/id_rsa.pub). Terraform will upload this public key and then it will be distributed out to all the nodes. If you have already set this public key in Packet (i.e. via the portal), then set the public keyfile name in cluster.tfvars to blank to prevent the duplicate key from being uploaded which will cause an error.

If you don't already have a keypair generated (~/.ssh/id_rsa and ~/.ssh/id_rsa.pub), then a new keypair can be generated with the command:

@@ -72,7 +72,7 @@ If someone gets this key, they can startup/shutdown hosts in your project!
For more information on how to generate an API key or find your project ID, please see:
https://support.packet.com/kb/articles/api-integrations

-The Packet Project ID associated with the key will be set later in cluster.tf.
+The Packet Project ID associated with the key will be set later in cluster.tfvars.

For more information about the API, please see:
https://www.packet.com/developers/api/

@@ -88,7 +88,7 @@ Note that to deploy several clusters within the same project you need to use [te
The construction of the cluster is driven by values found in
[variables.tf](variables.tf).

-For your cluster, edit `inventory/$CLUSTER/cluster.tf`.
+For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.

The `cluster_name` is used to set a tag on each server deployed as part of this cluster.
This helps when identifying which hosts are associated with each cluster.

@@ -138,7 +138,7 @@ This should finish fairly quickly telling you Terraform has successfully initial
You can apply the Terraform configuration to your cluster with the following command
issued from your cluster's inventory directory (`inventory/$CLUSTER`):
```ShellSession
-$ terraform apply -var-file=cluster.tf ../../contrib/terraform/packet
+$ terraform apply -var-file=cluster.tfvars ../../contrib/terraform/packet
$ export ANSIBLE_HOST_KEY_CHECKING=False
$ ansible-playbook -i hosts ../../cluster.yml
```

@@ -147,7 +147,7 @@ $ ansible-playbook -i hosts ../../cluster.yml
You can destroy your new cluster with the following command issued from the cluster's inventory directory:

```ShellSession
-$ terraform destroy -var-file=cluster.tf ../../contrib/terraform/packet
+$ terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/packet
```

If you've started the Ansible run, it may also be a good idea to do some manual cleanup:
@@ -4,59 +4,60 @@ provider "packet" {
}

resource "packet_ssh_key" "k8s" {
-  count = "${var.public_key_path != "" ? 1 : 0}"
+  count = var.public_key_path != "" ? 1 : 0
  name = "kubernetes-${var.cluster_name}"
-  public_key = "${chomp(file(var.public_key_path))}"
+  public_key = chomp(file(var.public_key_path))
}

resource "packet_device" "k8s_master" {
-  depends_on = ["packet_ssh_key.k8s"]
+  depends_on = [packet_ssh_key.k8s]

-  count = "${var.number_of_k8s_masters}"
+  count = var.number_of_k8s_masters
-  hostname = "${var.cluster_name}-k8s-master-${count.index+1}"
+  hostname = "${var.cluster_name}-k8s-master-${count.index + 1}"
-  plan = "${var.plan_k8s_masters}"
+  plan = var.plan_k8s_masters
-  facilities = ["${var.facility}"]
+  facilities = [var.facility]
-  operating_system = "${var.operating_system}"
+  operating_system = var.operating_system
-  billing_cycle = "${var.billing_cycle}"
+  billing_cycle = var.billing_cycle
-  project_id = "${var.packet_project_id}"
+  project_id = var.packet_project_id
  tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-master", "etcd", "kube-node"]
}

resource "packet_device" "k8s_master_no_etcd" {
-  depends_on = ["packet_ssh_key.k8s"]
+  depends_on = [packet_ssh_key.k8s]

-  count = "${var.number_of_k8s_masters_no_etcd}"
+  count = var.number_of_k8s_masters_no_etcd
-  hostname = "${var.cluster_name}-k8s-master-${count.index+1}"
+  hostname = "${var.cluster_name}-k8s-master-${count.index + 1}"
-  plan = "${var.plan_k8s_masters_no_etcd}"
+  plan = var.plan_k8s_masters_no_etcd
-  facilities = ["${var.facility}"]
+  facilities = [var.facility]
-  operating_system = "${var.operating_system}"
+  operating_system = var.operating_system
-  billing_cycle = "${var.billing_cycle}"
+  billing_cycle = var.billing_cycle
-  project_id = "${var.packet_project_id}"
+  project_id = var.packet_project_id
  tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-master"]
}

resource "packet_device" "k8s_etcd" {
-  depends_on = ["packet_ssh_key.k8s"]
+  depends_on = [packet_ssh_key.k8s]

-  count = "${var.number_of_etcd}"
+  count = var.number_of_etcd
-  hostname = "${var.cluster_name}-etcd-${count.index+1}"
+  hostname = "${var.cluster_name}-etcd-${count.index + 1}"
-  plan = "${var.plan_etcd}"
+  plan = var.plan_etcd
-  facilities = ["${var.facility}"]
+  facilities = [var.facility]
-  operating_system = "${var.operating_system}"
+  operating_system = var.operating_system
-  billing_cycle = "${var.billing_cycle}"
+  billing_cycle = var.billing_cycle
-  project_id = "${var.packet_project_id}"
+  project_id = var.packet_project_id
  tags = ["cluster-${var.cluster_name}", "etcd"]
}

resource "packet_device" "k8s_node" {
-  depends_on = ["packet_ssh_key.k8s"]
+  depends_on = [packet_ssh_key.k8s]

-  count = "${var.number_of_k8s_nodes}"
+  count = var.number_of_k8s_nodes
-  hostname = "${var.cluster_name}-k8s-node-${count.index+1}"
+  hostname = "${var.cluster_name}-k8s-node-${count.index + 1}"
-  plan = "${var.plan_k8s_nodes}"
+  plan = var.plan_k8s_nodes
-  facilities = ["${var.facility}"]
+  facilities = [var.facility]
-  operating_system = "${var.operating_system}"
+  operating_system = var.operating_system
-  billing_cycle = "${var.billing_cycle}"
+  billing_cycle = var.billing_cycle
-  project_id = "${var.packet_project_id}"
+  project_id = var.packet_project_id
  tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-node"]
}

@@ -1,15 +1,16 @@
output "k8s_masters" {
-  value = "${packet_device.k8s_master.*.access_public_ipv4}"
+  value = packet_device.k8s_master.*.access_public_ipv4
}

output "k8s_masters_no_etc" {
-  value = "${packet_device.k8s_master_no_etcd.*.access_public_ipv4}"
+  value = packet_device.k8s_master_no_etcd.*.access_public_ipv4
}

output "k8s_etcds" {
-  value = "${packet_device.k8s_etcd.*.access_public_ipv4}"
+  value = packet_device.k8s_etcd.*.access_public_ipv4
}

output "k8s_nodes" {
-  value = "${packet_device.k8s_node.*.access_public_ipv4}"
+  value = packet_device.k8s_node.*.access_public_ipv4
}

@@ -54,3 +54,4 @@ variable "number_of_etcd" {
variable "number_of_k8s_nodes" {
  default = 0
}

contrib/terraform/packet/versions.tf (new file)
@@ -0,0 +1,4 @@
+
+terraform {
+  required_version = ">= 0.12"
+}
@@ -73,7 +73,7 @@ def iterresources(filenames):
            # In version 4 the structure changes so we need to iterate
            # each instance inside the resource branch.
            for resource in state['resources']:
-                name = resource['module'].split('.')[-1]
+                name = resource['provider'].split('.')[-1]
                for instance in resource['instances']:
                    key = "{}.{}".format(resource['type'], resource['name'])
                    if 'index_key' in instance:

@@ -182,6 +182,9 @@ def parse_list(source, prefix, sep='.'):


def parse_bool(string_form):
+    if type(string_form) is bool:
+        return string_form
+
    token = string_form.lower()[0]

    if token == 't':

@@ -210,7 +213,7 @@ def packet_device(resource, tfvars=None):
        'state': raw_attrs['state'],
        # ansible
        'ansible_ssh_host': raw_attrs['network.0.address'],
-        'ansible_ssh_user': 'root',  # it's always "root" on Packet
+        'ansible_ssh_user': 'root',  # Use root by default in packet
        # generic
        'ipv4_address': raw_attrs['network.0.address'],
        'public_ipv4': raw_attrs['network.0.address'],

@@ -220,6 +223,10 @@ def packet_device(resource, tfvars=None):
        'provider': 'packet',
    }

+    if raw_attrs['operating_system'] == 'coreos_stable':
+        # For CoreOS set the ssh_user to core
+        attrs.update({'ansible_ssh_user': 'core'})
+
    # add groups based on attrs
    groups.append('packet_operating_system=' + attrs['operating_system'])
    groups.append('packet_locked=%s' % attrs['locked'])

@@ -339,14 +346,20 @@ def iter_host_ips(hosts, ips):
    '''Update hosts that have an entry in the floating IP list'''
    for host in hosts:
        host_id = host[1]['id']

        if host_id in ips:
            ip = ips[host_id]

            host[1].update({
                'access_ip_v4': ip,
                'access_ip': ip,
                'public_ipv4': ip,
                'ansible_ssh_host': ip,
            })

+        if 'use_access_ip' in host[1]['metadata'] and host[1]['metadata']['use_access_ip'] == "0":
+            host[1].pop('access_ip')
+
        yield host
@@ -13,7 +13,7 @@
    /usr/local/share/ca-certificates/vault-ca.crt
  {%- elif ansible_os_family == "RedHat" -%}
    /etc/pki/ca-trust/source/anchors/vault-ca.crt
-  {%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%}
+  {%- elif ansible_os_family in ["Coreos", "Container Linux by CoreOS", "Flatcar", "Flatcar Container Linux by Kinvolk"] -%}
    /etc/ssl/certs/vault-ca.pem
  {%- endif %}

@@ -25,7 +25,7 @@

- name: bootstrap/ca_trust | update ca-certificates (Debian/Ubuntu/CoreOS)
  command: update-ca-certificates
-  when: vault_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"]
+  when: vault_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Coreos", "Container Linux by CoreOS", "Flatcar", "Flatcar Container Linux by Kinvolk"]

- name: bootstrap/ca_trust | update ca-certificates (RedHat)
  command: update-ca-trust extract

@@ -21,7 +21,7 @@
- name: bootstrap/sync_secrets | Print out warning message if secrets are not available and vault is initialized
  pause:
    prompt: >
-      Vault orchestration may not be able to proceed. The Vault cluster is initialzed, but
+      Vault orchestration may not be able to proceed. The Vault cluster is initialized, but
      'root_token' or 'unseal_keys' were not found in {{ vault_secrets_dir }}. These are
      needed for many vault orchestration steps.
  when: vault_cluster_is_initialized and not vault_secrets_available

@@ -36,6 +36,7 @@
    {{ etcd_access_addresses.split(',') | first }}/v3alpha/kv/range
  register: vault_etcd_exists
  retries: 4
+  until: vault_etcd_exists.status == 200
  delay: "{{ retry_stagger | random + 3 }}"
  run_once: true
  when: not vault_is_running and vault_etcd_available
@@ -3,11 +3,11 @@
|
|||||||
* [Getting started](/docs/getting-started.md)
|
* [Getting started](/docs/getting-started.md)
|
||||||
* [Ansible](docs/ansible.md)
|
* [Ansible](docs/ansible.md)
|
||||||
* [Variables](/docs/vars.md)
|
* [Variables](/docs/vars.md)
|
||||||
* [Ansible](/docs/ansible.md)
|
|
||||||
* Operations
|
* Operations
|
||||||
* [Integration](docs/integration.md)
|
* [Integration](docs/integration.md)
|
||||||
* [Upgrades](/docs/upgrades.md)
|
* [Upgrades](/docs/upgrades.md)
|
||||||
* [HA Mode](docs/ha-mode.md)
|
* [HA Mode](docs/ha-mode.md)
|
||||||
|
* [Adding/replacing a node](docs/nodes.md)
|
||||||
* [Large deployments](docs/large-deployments.md)
|
* [Large deployments](docs/large-deployments.md)
|
||||||
* CNI
|
* CNI
|
||||||
* [Calico](docs/calico.md)
|
* [Calico](docs/calico.md)
|
||||||
@@ -23,9 +23,9 @@
|
|||||||
* [Packet](/docs/packet.md)
|
* [Packet](/docs/packet.md)
|
||||||
* [vSphere](/docs/vsphere.md)
|
* [vSphere](/docs/vsphere.md)
|
||||||
* Operating Systems
|
* Operating Systems
|
||||||
* [Atomic](docs/atomic.md)
|
|
||||||
* [Debian](docs/debian.md)
|
* [Debian](docs/debian.md)
|
||||||
* [Coreos](docs/coreos.md)
|
* [Coreos](docs/coreos.md)
|
||||||
|
* [Fedora CoreOS](docs/fcos.md)
|
||||||
* [OpenSUSE](docs/opensuse.md)
|
* [OpenSUSE](docs/opensuse.md)
|
||||||
* Advanced
|
* Advanced
|
||||||
* [Proxy](/docs/proxy.md)
|
* [Proxy](/docs/proxy.md)
|
||||||
@@ -37,4 +37,5 @@
|
|||||||
* Developers
|
* Developers
|
||||||
* [Test cases](docs/test_cases.md)
|
* [Test cases](docs/test_cases.md)
|
||||||
* [Vagrant](docs/vagrant.md)
|
* [Vagrant](docs/vagrant.md)
|
||||||
|
* [CI Matrix](docs/ci.md)
|
||||||
* [Roadmap](docs/roadmap.md)
|
* [Roadmap](docs/roadmap.md)
|
||||||
|
|||||||
@@ -1,9 +1,7 @@
|
|||||||
Ansible variables
|
# Ansible variables
|
||||||
===============
|
|
||||||
|
|
||||||
|
## Inventory
|
||||||
|
|
||||||
Inventory
|
|
||||||
-------------
|
|
||||||
The inventory is composed of 3 groups:
|
The inventory is composed of 3 groups:
|
||||||
|
|
||||||
* **kube-node** : list of kubernetes nodes where the pods will run.
|
* **kube-node** : list of kubernetes nodes where the pods will run.
|
||||||
@@ -14,7 +12,7 @@ Note: do not modify the children of _k8s-cluster_, like putting
|
|||||||
the _etcd_ group into the _k8s-cluster_, unless you are certain
|
the _etcd_ group into the _k8s-cluster_, unless you are certain
|
||||||
to do that and you have it fully contained in the latter:
|
to do that and you have it fully contained in the latter:
|
||||||
|
|
||||||
```
|
```ShellSession
|
||||||
k8s-cluster ⊂ etcd => kube-node ∩ etcd = etcd
|
k8s-cluster ⊂ etcd => kube-node ∩ etcd = etcd
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -32,7 +30,7 @@ There are also two special groups:
|
|||||||
|
|
||||||
Below is a complete inventory example:
|
Below is a complete inventory example:
|
||||||
|
|
||||||
```
|
```ini
|
||||||
## Configure 'ip' variable to bind kubernetes services on a
|
## Configure 'ip' variable to bind kubernetes services on a
|
||||||
## different ip than the default iface
|
## different ip than the default iface
|
||||||
node1 ansible_host=95.54.0.12 ip=10.3.0.1
|
node1 ansible_host=95.54.0.12 ip=10.3.0.1
|
||||||
@@ -63,17 +61,16 @@ kube-node
|
|||||||
kube-master
|
kube-master
|
||||||
```
|
```
|
||||||
|
|
||||||
Group vars and overriding variables precedence
|
## Group vars and overriding variables precedence
|
||||||
----------------------------------------------
|
|
||||||
|
|
||||||
The group variables to control main deployment options are located in the directory ``inventory/sample/group_vars``.
|
The group variables to control main deployment options are located in the directory ``inventory/sample/group_vars``.
|
||||||
Optional variables are located in the `inventory/sample/group_vars/all.yml`.
|
Optional variables are located in the `inventory/sample/group_vars/all.yml`.
|
||||||
Mandatory variables that are common for at least one role (or a node group) can be found in the
|
Mandatory variables that are common for at least one role (or a node group) can be found in the
|
||||||
`inventory/sample/group_vars/k8s-cluster.yml`.
|
`inventory/sample/group_vars/k8s-cluster.yml`.
|
||||||
There are also role vars for docker, kubernetes preinstall and master roles.
|
There are also role vars for docker, kubernetes preinstall and master roles.
|
||||||
According to the [ansible docs](http://docs.ansible.com/ansible/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable),
|
According to the [ansible docs](https://docs.ansible.com/ansible/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable),
|
||||||
those cannot be overridden from the group vars. In order to override, one should use
|
those cannot be overridden from the group vars. In order to override, one should use
|
||||||
the `-e ` runtime flags (most simple way) or other layers described in the docs.
|
the `-e` runtime flags (most simple way) or other layers described in the docs.
|
||||||
Kubespray uses only a few layers to override things (or expect them to
be overridden for roles):

@@ -97,8 +94,8 @@ block vars (only for tasks in block) | Kubespray overrides for internal roles' l
task vars (only for the task) | Unused for roles, but only for helper scripts
**extra vars** (always win precedence) | override with ``ansible-playbook -e @foo.yml``

## Ansible tags

The following tags are defined in playbooks:

| Tag name | Used for
@@ -140,26 +137,31 @@ The following tags are defined in playbooks:
| upgrade | Upgrading, e.g. container images/binaries
| upload | Distributing images/binaries across hosts
| weave | Network plugin Weave
| ingress_alb | AWS ALB Ingress Controller

Note: Use the ``bash scripts/gen_tags.sh`` command to generate a list of all
tags found in the codebase. New tags will be listed with the empty "Used for"
field.

## Example commands

Example command to filter and apply only DNS configuration tasks and skip
everything else related to host OS configuration and downloading images of containers:

```ShellSession
ansible-playbook -i inventory/sample/hosts.ini cluster.yml --tags preinstall,facts --skip-tags=download,bootstrap-os
```

And this play only removes the K8s cluster DNS resolver IP from hosts' /etc/resolv.conf files:

```ShellSession
ansible-playbook -i inventory/sample/hosts.ini -e dns_mode='none' cluster.yml --tags resolvconf
```

And this prepares all container images locally (at the ansible runner node) without installing
or upgrading related stuff or trying to upload containers to K8s cluster nodes:

```ShellSession
ansible-playbook -i inventory/sample/hosts.ini cluster.yml \
    -e download_run_once=true -e download_localhost=true \
    --tags download --skip-tags upload,upgrade
```
@@ -167,17 +169,21 @@ ansible-playbook -i inventory/sample/hosts.ini cluster.yml \

Note: use `--tags` and `--skip-tags` wisely and only if you're 100% sure what you're doing.

## Bastion host

If you prefer to not make your nodes publicly accessible (nodes with private IPs only),
you can use a so-called *bastion* host to connect to your nodes. To specify and use a bastion,
simply add a line to your inventory, where you have to replace x.x.x.x with the public IP of the
bastion host.

```ShellSession
[bastion]
bastion ansible_host=x.x.x.x
```

For more information about Ansible and bastion hosts, read
[Running Ansible Through an SSH Bastion Host](https://blog.scottlowe.org/2015/12/24/running-ansible-through-ssh-bastion-host/)

## Mitogen

You can use [mitogen](mitogen.md) to speed up kubespray.

@@ -1,6 +1,7 @@
# Architecture compatibility

The following table shows the impact of the CPU architecture on compatible features:

- amd64: Cluster using only x86/amd64 CPUs
- arm64: Cluster using only arm64 CPUs
- amd64 + arm64: Cluster with a mix of x86/amd64 and arm64 CPUs

@@ -1,23 +0,0 @@
Atomic host bootstrap
=====================

Atomic host testing has been done with the network plugin flannel. Change the inventory var `kube_network_plugin: flannel`.

Note: Flannel is the only plugin that has currently been tested with atomic.

### Vagrant

* For bootstrapping with Vagrant, use box centos/atomic-host or fedora/atomic-host
* Update VagrantFile variable `local_release_dir` to `/var/vagrant/temp`.
* Update `vm_memory = 2048` and `vm_cpus = 2`
* Networking on vagrant hosts has to be brought up manually once they are booted.

```ShellSession
vagrant ssh
sudo /sbin/ifup enp0s8
```

* For users of vagrant-libvirt download centos/atomic-host qcow2 format from <https://wiki.centos.org/SpecialInterestGroup/Atomic/Download/>
* For users of vagrant-libvirt download fedora/atomic-host qcow2 format from <https://getfedora.org/en/atomic/download/>

Then you can proceed to [cluster deployment](#run-deployment)

docs/aws-ebs-csi.md (new file)
@@ -0,0 +1,87 @@
# AWS EBS CSI Driver

AWS EBS CSI driver allows you to provision EBS volumes for pods in EC2 instances. The old in-tree AWS cloud provider is deprecated and will be removed in future versions of Kubernetes, so transitioning to the CSI driver is advised.

To enable AWS EBS CSI driver, uncomment the `aws_ebs_csi_enabled` option in `group_vars/all/aws.yml` and set it to `true`.

To set the number of replicas for the AWS CSI controller, you can change the `aws_ebs_csi_controller_replicas` option in `group_vars/all/aws.yml`.
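For example, a minimal sketch of those two settings in `group_vars/all/aws.yml` (the replica count is illustrative):

```yml
# group_vars/all/aws.yml -- illustrative values
aws_ebs_csi_enabled: true
aws_ebs_csi_controller_replicas: 1
```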
Make sure to add a role for your EC2 instances hosting Kubernetes that allows them to perform the actions necessary to request a volume and attach it: [AWS CSI Policy](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/docs/example-iam-policy.json)

If you want to deploy the AWS EBS storage class used with the CSI Driver, you should set `persistent_volumes_enabled` in `group_vars/k8s-cluster/k8s-cluster.yml` to `true`.

You can now run the kubespray playbook (cluster.yml) to deploy Kubernetes over AWS EC2 with EBS CSI Driver enabled.

## Usage example

To check if AWS EBS CSI Driver is deployed properly, check that the ebs-csi pods are running:

```ShellSession
$ kubectl -n kube-system get pods | grep ebs
ebs-csi-controller-85d86bccc5-8gtq5   4/4   Running   4   40s
ebs-csi-node-n4b99                    3/3   Running   3   40s
```

Check the associated storage class (if you enabled persistent_volumes):

```ShellSession
$ kubectl get storageclass
NAME     PROVISIONER       AGE
ebs-sc   ebs.csi.aws.com   45s
```

You can run a PVC and an example Pod using this file `ebs-pod.yml`:

```yml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ebs-claim
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ebs-sc
  resources:
    requests:
      storage: 1Gi
---
apiVersion: v1
kind: Pod
metadata:
  name: app
spec:
  containers:
    - name: app
      image: centos
      command: ["/bin/sh"]
      args: ["-c", "while true; do echo $(date -u) >> /data/out.txt; sleep 5; done"]
      volumeMounts:
        - name: persistent-storage
          mountPath: /data
  volumes:
    - name: persistent-storage
      persistentVolumeClaim:
        claimName: ebs-claim
```

Apply this configuration to your cluster: `kubectl apply -f ebs-pod.yml`

You should see the PVC provisioned and bound:

```ShellSession
$ kubectl get pvc
NAME        STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
ebs-claim   Bound    pvc-0034cb9e-1ddd-4b3f-bb9e-0b5edbf5194c   1Gi        RWO            ebs-sc         50s
```

And the volume mounted to the example Pod (wait until the Pod is Running):

```ShellSession
$ kubectl exec -it app -- df -h | grep data
/dev/nvme1n1   1014M   34M   981M   4% /data
```

## More info

For further information about the AWS EBS CSI Driver, you can refer to this page: [AWS EBS Driver](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/).

docs/aws.md
@@ -1,11 +1,10 @@
# AWS

To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`. Refer to the [Kubespray Configuration](#kubespray-configuration) for customizing the provider.
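In other words, a one-line sketch of that setting in `group_vars/all.yml`:

```yml
# group_vars/all.yml
cloud_provider: 'aws'
```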
Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.

You would also need to tag the resources in your VPC accordingly for the aws provider to utilize them. Tag the subnets, route tables and all instances that kubernetes will be run on with key `kubernetes.io/cluster/$cluster_name` (`$cluster_name` must be a unique identifier for the cluster). Tag the subnets that must be targeted by external ELBs with the key `kubernetes.io/role/elb` and internal ELBs with the key `kubernetes.io/role/internal-elb`.

Make sure your VPC has both DNS Hostnames support and Private DNS enabled.

@@ -13,11 +12,13 @@ The next step is to make sure the hostnames in your `inventory` file are identic

You can now create your cluster!

## Dynamic Inventory

There is also a dynamic inventory script for AWS that can be used if desired. However, be aware that it makes certain assumptions about how you'll create your inventory. It also does not handle all use cases and groups that we may use as part of more advanced deployments. Additions welcome.

This will produce an inventory that is passed into Ansible that looks like the following:

```json
{
  "_meta": {
    "hostvars": {

@@ -48,15 +49,18 @@ This will produce an inventory that is passed into Ansible that looks like the f
```

Guide:

- Create instances in AWS as needed.
- Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube-master`, `etcd`, or `kube-node`. You can also share roles like `kube-master, etcd`
- Copy the `kubespray-aws-inventory.py` script from `kubespray/contrib/aws_inventory` to the `kubespray/inventory` directory.
- Set the following AWS credentials and info as environment variables in your terminal:

```ShellSession
export AWS_ACCESS_KEY_ID="xxxxx"
export AWS_SECRET_ACCESS_KEY="yyyyy"
export REGION="us-east-2"
```

- We will now create our cluster. There will be either one or two small changes. The first is that we will specify `-i inventory/kubespray-aws-inventory.py` as our inventory script. The other is conditional. If your AWS instances are public facing, you can set the `VPC_VISIBILITY` variable to `public` and that will result in public IP and DNS names being passed into the inventory. This causes your cluster.yml command to look like `VPC_VISIBILITY="public" ansible-playbook ... cluster.yml`

## Kubespray configuration

@@ -75,4 +79,3 @@ aws_kubernetes_cluster_id|string|KubernetesClusterID is the cluster id we'll use
aws_disable_security_group_ingress|bool|The aws provider creates an inbound rule per load balancer on the node security group. However, this can run into the AWS security group rule limit of 50 if many LoadBalancers are created. This flag disables the automatic ingress creation. It requires that the user has set up a rule that allows inbound traffic on kubelet ports from the local VPC subnet (so load balancers can access it). E.g. 10.82.0.0/16 30000-32000.
aws_elb_security_group|string|Only in Kubelet version >= 1.7: AWS has a hard limit of 500 security groups. For large clusters creating a security group for each ELB can cause the max number of security groups to be reached. If this is set, instead of creating a new security group for each ELB this security group will be used.
aws_disable_strict_zone_check|bool|During the instantiation of a new AWS cloud provider, the detected region is validated against a known set of regions. In a non-standard, AWS-like environment (e.g. Eucalyptus), this check may be undesirable. Setting this to true will disable the check and provide a warning that the check was skipped. Please note that this is an experimental feature and work-in-progress for the moment.
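Taken together, a hedged sketch of how a few of the options from this table might look in your group vars (every value below is a placeholder):

```yml
# e.g. group_vars/all/aws.yml -- placeholder values only
aws_kubernetes_cluster_id: "my-cluster"
aws_elb_security_group: "sg-0123456789abcdef0"
aws_disable_security_group_ingress: false
aws_disable_strict_zone_check: false
```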
docs/azure-csi.md (new file)
@@ -0,0 +1,119 @@
# Azure Disk CSI Driver

The Azure Disk CSI driver allows you to provision volumes for pods with a Kubernetes deployment over Azure Cloud. The CSI driver replaces the volume provisioning done by the in-tree azure cloud provider, which is deprecated.

This documentation is an updated version of the in-tree Azure cloud provider documentation (azure.md).

To deploy the Azure Disk CSI driver, uncomment the `azure_csi_enabled` option in `group_vars/all/azure.yml` and set it to `true`.

## Azure Disk CSI Storage Class

If you want to deploy the Azure Disk storage class to provision volumes dynamically, you should set `persistent_volumes_enabled` in `group_vars/k8s-cluster/k8s-cluster.yml` to `true`.
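Put together, a minimal sketch of the two settings mentioned above (file paths as stated in this document):

```yml
# group_vars/all/azure.yml
azure_csi_enabled: true

# group_vars/k8s-cluster/k8s-cluster.yml
persistent_volumes_enabled: true
```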
## Parameters

Before creating the instances you must first set the `azure_csi_` variables in the `group_vars/all.yml` file.

All of the values can be retrieved using the azure cli tool which can be downloaded here: <https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest>

After installation you have to run `az login` to get access to your account.

### azure\_csi\_tenant\_id + azure\_csi\_subscription\_id

Run `az account show` to retrieve your subscription id and tenant id:
`azure_csi_tenant_id` -> tenantId field
`azure_csi_subscription_id` -> id field

### azure\_csi\_location

The region your instances are located in, it can be something like `francecentral` or `norwayeast`. A full list of region names can be retrieved via `az account list-locations`

### azure\_csi\_resource\_group

The name of the resource group your instances are in, a list of your resource groups can be retrieved via `az group list`

Or you can do `az vm list | grep resourceGroup` and get the resource group corresponding to the VMs of your cluster.

The resource group name is not case sensitive.

### azure\_csi\_vnet\_name

The name of the virtual network your instances are in, can be retrieved via `az network vnet list`

### azure\_csi\_vnet\_resource\_group

The name of the resource group your vnet is in, can be retrieved via `az network vnet list | grep resourceGroup` and get the resource group corresponding to the vnet of your cluster.

### azure\_csi\_subnet\_name

The name of the subnet your instances are in, can be retrieved via `az network vnet subnet list --resource-group RESOURCE_GROUP --vnet-name VNET_NAME`

### azure\_csi\_security\_group\_name

The name of the network security group your instances are in, can be retrieved via `az network nsg list`

### azure\_csi\_aad\_client\_id + azure\_csi\_aad\_client\_secret

These will have to be generated first:

- Create an Azure AD Application with:
  `az ad app create --display-name kubespray --identifier-uris http://kubespray --homepage http://kubespray.com --password CLIENT_SECRET`

  Display name, identifier-uri, homepage and the password can be chosen.

  Note the AppId in the output.

- Create Service principal for the application with:
  `az ad sp create --id AppId`

  This is the AppId from the last command.

- Create the role assignment with:
  `az role assignment create --role "Owner" --assignee http://kubespray --subscription SUBSCRIPTION_ID`

azure\_csi\_aad\_client\_id must be set to the AppId, azure\_csi\_aad\_client\_secret is your chosen secret.

### azure\_csi\_use\_instance\_metadata

Use instance metadata service where possible. Boolean value.

## Test the Azure Disk CSI driver

To test the dynamic provisioning using Azure CSI driver, make sure to have the storage class deployed (through persistent volumes), and apply the following manifest:

```yml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-azuredisk
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: disk.csi.azure.com
---
kind: Pod
apiVersion: v1
metadata:
  name: nginx-azuredisk
spec:
  nodeSelector:
    kubernetes.io/os: linux
  containers:
    - image: nginx
      name: nginx-azuredisk
      command:
        - "/bin/sh"
        - "-c"
        - while true; do echo $(date) >> /mnt/azuredisk/outfile; sleep 1; done
      volumeMounts:
        - name: azuredisk
          mountPath: "/mnt/azuredisk"
  volumes:
    - name: azuredisk
      persistentVolumeClaim:
        claimName: pvc-azuredisk
```
@@ -1,74 +1,86 @@
# Azure

To deploy Kubernetes on [Azure](https://azure.microsoft.com) uncomment the `cloud_provider` option in `group_vars/all/all.yml` and set it to `'azure'`.

All your instances are required to run in a resource group and a routing table has to be attached to the subnet your instances are in.

Not all features are supported yet though, for a list of the current status have a look [here](https://github.com/Azure/AKS)

## Parameters

Before creating the instances you must first set the `azure_` variables in the `group_vars/all/all.yml` file.

All of the values can be retrieved using the azure cli tool which can be downloaded here: <https://docs.microsoft.com/en-gb/azure/xplat-cli-install>
After installation you have to run `az login` to get access to your account.

### azure\_tenant\_id + azure\_subscription\_id

Run `az account show` to retrieve your subscription id and tenant id:
`azure_tenant_id` -> Tenant ID field
`azure_subscription_id` -> ID field

### azure\_location

The region your instances are located in, can be something like `westeurope` or `westcentralus`. A full list of region names can be retrieved via `az account list-locations`

### azure\_resource\_group

The name of the resource group your instances are in, can be retrieved via `az group list`

### azure\_vmtype

The type of the vm. Supported values are `standard` or `vmss`. If the vm is a `Virtual Machine` then the value is `standard`. If the vm is part of a `Virtual Machine Scale Set` then the value is `vmss`.

### azure\_vnet\_name

The name of the virtual network your instances are in, can be retrieved via `az network vnet list`

### azure\_subnet\_name

The name of the subnet your instances are in, can be retrieved via `az network vnet subnet list --resource-group RESOURCE_GROUP --vnet-name VNET_NAME`

### azure\_security\_group\_name

The name of the network security group your instances are in, can be retrieved via `az network nsg list`

### azure\_aad\_client\_id + azure\_aad\_client\_secret

These will have to be generated first:

- Create an Azure AD Application with:
  `az ad app create --display-name kubernetes --identifier-uris http://kubernetes --homepage http://example.com --password CLIENT_SECRET`

  Display name, identifier-uri, homepage and the password can be chosen.

  Note the AppId in the output.

- Create Service principal for the application with:
  `az ad sp create --id AppId`

  This is the AppId from the last command.

- Create the role assignment with:
  `az role assignment create --role "Owner" --assignee http://kubernetes --subscription SUBSCRIPTION_ID`

azure\_aad\_client\_id must be set to the AppId, azure\_aad\_client\_secret is your chosen secret.

### azure\_loadbalancer\_sku

Sku of Load Balancer and Public IP. Candidate values are: basic and standard.

### azure\_exclude\_master\_from\_standard\_lb

azure\_exclude\_master\_from\_standard\_lb excludes master nodes from `standard` load balancer.

### azure\_disable\_outbound\_snat

azure\_disable\_outbound\_snat disables the outbound SNAT for public load balancer rules. It should only be set when azure\_exclude\_master\_from\_standard\_lb is `standard`.

### azure\_primary\_availability\_set\_name

(Optional) The name of the availability set that should be used as the load balancer backend. If this is set, the Azure
cloudprovider will only add nodes from that availability set to the load balancer backend pool. If this is not set, and
multiple agent pools (availability sets) are used, then the cloudprovider will try to add all nodes to a single backend
pool which is forbidden. In other words, if you use multiple agent pools (availability sets), you MUST set this field.

### azure\_use\_instance\_metadata

Use instance metadata service where possible
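As a quick orientation, a hedged sketch of the `azure_` block in `group_vars/all/all.yml` (every value below is a placeholder to be filled in from the az commands above):

```yml
# group_vars/all/all.yml -- placeholder values only
azure_tenant_id: "xxxxx-xxxxx"
azure_subscription_id: "xxxxx-xxxxx"
azure_location: "westeurope"
azure_resource_group: "my-resource-group"
azure_vnet_name: "my-vnet"
azure_subnet_name: "my-subnet"
azure_security_group_name: "my-nsg"
azure_aad_client_id: "AppId"
azure_aad_client_secret: "CLIENT_SECRET"
```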
## Provisioning Azure with Resource Group Templates

docs/calico.md
@@ -1,82 +1,83 @@
# Calico

N.B. **Version 2.6.5 upgrade to 3.1.1 is upgrading etcd store to etcdv3**
If you create automated backups of etcdv2 please switch to creating etcdv3 backups, as kubernetes and calico now use etcdv3
After migration you can check `/tmp/calico_upgrade/` directory for converted items to etcdv3.
**PLEASE TEST upgrade before upgrading production cluster.**

Check if the calico-node container is running

```ShellSession
docker ps | grep calico
```

**calicoctl.sh** is a wrapper script around the **calicoctl** command with access credentials pre-configured; it allows you to check the status of the network workloads.

* Check the status of Calico nodes

```ShellSession
calicoctl.sh node status
```

or for versions prior to *v1.0.0*:

```ShellSession
calicoctl.sh status
```

* Show the configured network subnet for containers

```ShellSession
calicoctl.sh get ippool -o wide
```

or for versions prior to *v1.0.0*:

```ShellSession
calicoctl.sh pool show
```

* Show the workloads (IP addresses of containers and where they are located)

```ShellSession
calicoctl.sh get workloadEndpoint -o wide
```

and

```ShellSession
calicoctl.sh get hostEndpoint -o wide
```

or for versions prior to *v1.0.0*:

```ShellSession
calicoctl.sh endpoint show --detail
```

## Configuration

### Optional : Define network backend

In some cases you may want to define the Calico network backend. Allowed values are `bird`, `vxlan` or `none`. Bird is the default value.

To re-define it you need to edit the inventory and add a group variable `calico_network_backend`

```yml
calico_network_backend: none
```

### Optional : Define the default pool CIDR

By default, `kube_pods_subnet` is used as the IP range CIDR for the default IP Pool.
In some cases you may want to add several pools and not have them considered by Kubernetes as external (which means that they must be within or equal to the range defined in `kube_pods_subnet`); it starts with the default IP Pool, whose IP range CIDR can be defined in group_vars (k8s-cluster/k8s-net-calico.yml):

```yml
calico_pool_cidr: 10.233.64.0/20
```

### Optional : BGP Peering with border routers

In some cases you may want to route the pods subnet so that NAT is not needed on the nodes.
For instance if you have a cluster spread on different locations and you want your pods to talk to each other no matter where they are located.

@@ -84,11 +85,11 @@ The following variables need to be set:
`peer_with_router` to enable the peering with the datacenter's border router (default value: false).
you'll need to edit the inventory and add a hostvar `local_as` per node.

```ShellSession
node1 ansible_ssh_host=95.54.0.12 local_as=xxxxxx
```

### Optional : Defining BGP peers

Peers can be defined using the `peers` variable (see the docs/calico_peer_example examples).
In order to define global peers, the `peers` variable can be defined in group_vars with the "scope" attribute of each global peer set to "global".
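For orientation only, a hypothetical sketch of what such a global peer entry could look like (field names follow docs/calico_peer_example; check that file for the authoritative format, the values here are made up):

```yml
# group_vars/k8s-cluster/k8s-net-calico.yml -- hypothetical values
peers:
  - router_id: "10.99.0.1"
    as: "65xxx"
    scope: "global"
```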
@@ -97,16 +98,17 @@ NB: Ansible's `hash_behaviour` is by default set to "replace", thus defining bot

Since calico 3.4, Calico supports advertising Kubernetes service cluster IPs over BGP, just as it advertises pod IPs.
This can be enabled by setting the following variable as follows in group_vars (k8s-cluster/k8s-net-calico.yml)

```yml
calico_advertise_cluster_ips: true
```

### Optional : Define global AS number

Optional parameter `global_as_num` defines Calico global AS number (`/calico/bgp/v1/global/as_num` etcd key).
It defaults to "64512".

### Optional : BGP Peering with route reflectors

At large scale you may want to disable full node-to-node mesh in order to
optimize your BGP topology and improve `calico-node` containers' start times.

@@ -114,8 +116,8 @@ optimize your BGP topology and improve `calico-node` containers' start times.
To do so you can deploy BGP route reflectors and peer `calico-node` with them as
recommended here:

* <https://hub.docker.com/r/calico/routereflector/>
* <https://docs.projectcalico.org/v3.1/reference/private-cloud/l3-interconnect-fabric>

You need to edit your inventory and add:

@@ -127,7 +129,7 @@ You need to edit your inventory and add:

Here's an example of Kubespray inventory with standalone route reflectors:

```ini
[all]
rr0 ansible_ssh_host=10.210.1.10 ip=10.210.1.10
rr1 ansible_ssh_host=10.210.1.11 ip=10.210.1.11

@@ -177,35 +179,55 @@ The inventory above will deploy the following topology assuming that calico's



### Optional : Define default endpoint to host action

By default Calico blocks traffic from endpoints to the host itself by using an iptables DROP action. When using it in kubernetes the action has to be changed to RETURN (default in kubespray) or ACCEPT (see <https://github.com/projectcalico/felix/issues/660> and <https://github.com/projectcalico/calicoctl/issues/1389>). Otherwise all network packets from pods (with hostNetwork=False) to services endpoints (with hostNetwork=True) within the same node are dropped.

To re-define the default action please set the following variable in your inventory:

```yml
calico_endpoint_to_host_action: "ACCEPT"
```

## Optional : Define address on which Felix will respond to health requests

Since Calico 3.2.0, HealthCheck default behavior changed from listening on all interfaces to just listening on localhost.

To re-define the health host please set the following variable in your inventory:

```yml
calico_healthhost: "0.0.0.0"
```

## Config encapsulation for cross server traffic

Calico supports two types of encapsulation: [VXLAN and IP in IP](https://docs.projectcalico.org/v3.11/networking/vxlan-ipip). VXLAN is supported in some environments where IP in IP is not (for example, Azure).

*IP in IP* and *VXLAN* are mutually exclusive modes.

Configure IP in IP mode. Possible values are `Always`, `CrossSubnet`, `Never`.

```yml
calico_ipip_mode: 'Always'
```

Configure VXLAN mode. Possible values are `Always`, `CrossSubnet`, `Never`.

```yml
calico_vxlan_mode: 'Never'
```

If you use VXLAN mode, BGP networking is not required. You can disable BGP to reduce the moving parts in your cluster by setting `calico_network_backend: vxlan`

## Cloud providers configuration

Please refer to the official documentation, for example [GCE configuration](http://docs.projectcalico.org/v1.5/getting-started/docker/installation/gce) requires a security rule for calico ip-ip tunnels. Note, calico is always configured with ``calico_ipip_mode: Always`` if the cloud provider was defined.

### Optional : Ignore kernel's RPF check setting

By default the felix agent (calico-node) will abort if the Kernel RPF setting is not 'strict'. If you want Calico to ignore the Kernel setting:

```yml
calico_node_ignorelooserpf: true
```

@@ -213,7 +235,7 @@ Note that in OpenStack you must allow `ipip` traffic in your security groups,
otherwise you will experience timeouts.
To do this you must add a rule which allows it, for example:

```ShellSession
neutron security-group-rule-create --protocol 4 --direction egress k8s-a0tp4t
neutron security-group-rule-create --protocol 4 --direction ingress k8s-a0tp4t
```
9
docs/centos8.md
Normal file
9
docs/centos8.md
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
# RHEL / CentOS 8
|
||||||
|
|
||||||
|
RHEL / CentOS 8 ships only with iptables-nft (ie without iptables-legacy)
|
||||||
|
The only tested configuration for now is using Calico CNI
|
||||||
|
You need to use K8S 1.17+ and to add `calico_iptables_backend: "NFT"` to your configuration
|
||||||
|
|
||||||
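For instance, a minimal sketch of that setting in your Calico group vars (the file location is an assumption; any group_vars file that reaches your nodes works):

```yml
# e.g. group_vars/k8s-cluster/k8s-net-calico.yml
calico_iptables_backend: "NFT"
```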
|
If you have containers that are using iptables in the host network namespace (`hostNetwork=true`),
|
||||||
|
you need to ensure they are using iptables-nft.
|
||||||
|
An exemple how k8s do the autodetection can be found [in this PR](https://github.com/kubernetes/kubernetes/pull/82966)
|
||||||
57
docs/ci.md
Normal file
57
docs/ci.md
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
# CI test coverage
|
||||||
|
|
||||||
|
To generate this Matrix run `./tests/scripts/md-table/main.py`
|
||||||
|
|
||||||
|
## docker
|
||||||
|
|
||||||
|
| OS / CNI | calico | canal | cilium | contiv | flannel | kube-ovn | kube-router | macvlan | weave |
|
||||||
|
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- |
|
||||||
|
amazon | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :white_check_mark: |
|
||||||
|
centos8 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
|
||||||
|
coreos | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
debian9 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: |
|
||||||
|
fedora30 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
|
||||||
|
fedora31 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
|
||||||
|
opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
oracle7 | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
ubuntu16 | :x: | :white_check_mark: | :x: | :white_check_mark: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: |
|
||||||
|
ubuntu18 | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
|
||||||
|
## crio
|
||||||
|
|
||||||
|
| OS / CNI | calico | canal | cilium | contiv | flannel | kube-ovn | kube-router | macvlan | weave |
|
||||||
|
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- |
|
||||||
|
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
centos7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
centos8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
coreos | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
fedora30 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
fedora31 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
oracle7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
|
||||||
|
## containerd
|
||||||
|
|
||||||
|
| OS / CNI | calico | canal | cilium | contiv | flannel | kube-ovn | kube-router | macvlan | weave |
|
||||||
|
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- |
|
||||||
|
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
centos7 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
|
||||||
|
centos8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
coreos | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
debian10 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
fedora30 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
fedora31 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
oracle7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
|
ubuntu18 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
|
||||||
|
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||||
102
docs/cinder-csi.md
Normal file
102
docs/cinder-csi.md
Normal file
@@ -0,0 +1,102 @@
|
|||||||
|
# Cinder CSI Driver
|
||||||
|
|
||||||
|
Cinder CSI driver allows you to provision volumes over an OpenStack deployment. The Kubernetes historic in-tree cloud provider is deprecated and will be removed in future versions.
|
||||||
|
|
||||||
|
To enable Cinder CSI driver, uncomment the `cinder_csi_enabled` option in `group_vars/all/openstack.yml` and set it to `true`.
|
||||||
|
|
||||||
|
To set the number of replicas for the Cinder CSI controller, you can change `cinder_csi_controller_replicas` option in `group_vars/all/openstack.yml`.
|
||||||
|
|
||||||
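A minimal sketch of those two options in `group_vars/all/openstack.yml` (the replica count is illustrative):

```yml
# group_vars/all/openstack.yml -- illustrative values
cinder_csi_enabled: true
cinder_csi_controller_replicas: 1
```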
You need to source the OpenStack credentials you use to deploy your machines that will host Kubernetes: `source path/to/your/openstack-rc` or `. path/to/your/openstack-rc`.

Make sure the hostnames in your `inventory` file are identical to your instance names in OpenStack. Otherwise [cinder](https://docs.openstack.org/cinder/latest/) won't work as expected.

If you want to deploy the cinder provisioner used with Cinder CSI Driver, you should set `persistent_volumes_enabled` in `group_vars/k8s-cluster/k8s-cluster.yml` to `true`.

You can now run the kubespray playbook (cluster.yml) to deploy Kubernetes over OpenStack with Cinder CSI Driver enabled.

## Usage example

To check if Cinder CSI Driver works properly, see first that the cinder-csi pods are running:

```ShellSession
$ kubectl -n kube-system get pods | grep cinder
csi-cinder-controllerplugin-7f8bf99785-cpb5v   5/5   Running   0   100m
csi-cinder-nodeplugin-rm5x2                    2/2   Running   0   100m
```

Check the associated storage class (if you enabled persistent_volumes):

```ShellSession
$ kubectl get storageclass
NAME         PROVISIONER                AGE
cinder-csi   cinder.csi.openstack.org   100m
```

You can run a PVC and an Nginx Pod using this file `nginx.yml`:

```yml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: csi-pvc-cinderplugin
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: cinder-csi

---
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
    - image: nginx
      imagePullPolicy: IfNotPresent
      name: nginx
      ports:
        - containerPort: 80
          protocol: TCP
      volumeMounts:
        - mountPath: /var/lib/www/html
          name: csi-data-cinderplugin
  volumes:
    - name: csi-data-cinderplugin
      persistentVolumeClaim:
        claimName: csi-pvc-cinderplugin
        readOnly: false
```

Apply this configuration to your cluster: `kubectl apply -f nginx.yml`

You should see the PVC provisioned and bound:

```ShellSession
$ kubectl get pvc
NAME                   STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
csi-pvc-cinderplugin   Bound    pvc-f21ad0a1-5b7b-405e-a462-48da5cb76beb   1Gi        RWO            cinder-csi     8s
```

And the volume mounted to the Nginx Pod (wait until the Pod is Running):

```ShellSession
kubectl exec -it nginx -- df -h | grep /var/lib/www/html
/dev/vdb   976M   2.6M   958M   1% /var/lib/www/html
```

## Compatibility with in-tree cloud provider

It is not necessary to enable OpenStack as a cloud provider for Cinder CSI Driver to work.
Though, you can run both the in-tree openstack cloud provider and the Cinder CSI Driver at the same time. The storage class provisioners associated with each of them are named differently.

## Cinder v2 support

For the moment, only Cinder v3 is supported by the CSI Driver.

## More info

For further information about the Cinder CSI Driver, you can refer to this page: [Cloud Provider OpenStack](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/using-cinder-csi-plugin.md).


@@ -1,13 +1,13 @@
# Cloud providers

## Provisioning

You can deploy instances in your cloud environment in several different ways. Examples include Terraform, Ansible (ec2 and gce modules), and manual creation.

## Deploy kubernetes

With the ansible-playbook command:

```ShellSession
ansible-playbook -u smana -e ansible_ssh_user=admin -e cloud_provider=[aws|gce] -b --become-user=root -i inventory/single.cfg cluster.yml
```

@@ -7,4 +7,4 @@ This network plugin only unpacks CNI plugins version `cni_version` into `/opt/cn

Its intended usage is for custom CNI configurations, e.g. manual routing tables + bridge + loopback CNI plugins outside the kubespray scope. Furthermore, it's used for CNI plugins not supported by kubespray, which you can install afterward.

You are required to fill `/etc/cni/net.d` with valid CNI configuration after using kubespray.

@@ -1,5 +1,6 @@
# Comparison

## Kubespray vs [Kops](https://github.com/kubernetes/kops)

Kubespray runs on bare metal and most clouds, using Ansible as its substrate for
provisioning and orchestration. Kops performs the provisioning and orchestration
@@ -10,8 +11,7 @@ however, is more tightly integrated with the unique features of the clouds it
supports so it could be a better choice if you know that you will only be using
one platform for the foreseeable future.

## Kubespray vs [Kubeadm](https://github.com/kubernetes/kubeadm)

Kubeadm provides domain knowledge of Kubernetes clusters' life cycle
management, including self-hosted layouts, dynamic discovery services and so
@@ -19,9 +19,9 @@ on. Had it belonged to the new [operators world](https://coreos.com/blog/introdu
it may have been named a "Kubernetes cluster operator". Kubespray however,
does generic configuration management tasks from the "OS operators" ansible
world, plus some initial K8s clustering (with networking plugins included) and
control plane bootstrapping.

Kubespray supports `kubeadm` for cluster creation since v2.3
(and deprecated non-kubeadm deployment starting from v2.8)
in order to consume life cycle management domain knowledge from it
and offload generic OS configuration things from it, which hopefully benefits both sides.

@@ -1,7 +1,6 @@
# Contiv

Here is the [Contiv documentation](https://contiv.github.io/documents/).

## Administrate Contiv

@@ -10,7 +9,6 @@ There are two ways to manage Contiv:
* a web UI managed by the api proxy service
* a CLI named `netctl`

### Interfaces

#### The Web Interface

@@ -27,7 +25,6 @@ contiv_generate_certificate: true

The default credentials to log in are: admin/admin.

#### The Command Line Interface

The second way to modify the Contiv configuration is to use the CLI. To do this, you have to connect to the server and export an environment variable to tell netctl how to connect to the cluster:

@@ -44,7 +41,6 @@ contiv_netmaster_port: 9999

The CLI doesn't use the authentication process needed by the web interface.

### Network configuration

The default configuration uses VXLAN to create an overlay. Two networks are created by default:

@@ -6,6 +6,7 @@ Example with Ansible:
Before running the cluster playbook you must satisfy the following requirements:

General CoreOS Pre-Installation Notes:

- Ensure that the bin_dir is set to `/opt/bin`
- ansible_python_interpreter should be `/opt/bin/python`. This will be laid down by the bootstrap task.
- The default resolvconf_mode setting of `docker_dns` **does not** work for CoreOS. This is because we do not edit the systemd service file for docker on CoreOS nodes. Instead, just use the `host_resolvconf` mode. It should work out of the box (see the sketch after this list).
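
A minimal group_vars sketch reflecting these notes (the file placement is an assumption; adjust to your inventory layout):

```yaml
# e.g. group_vars/all/all.yml for CoreOS hosts
bin_dir: /opt/bin
ansible_python_interpreter: /opt/bin/python
resolvconf_mode: host_resolvconf   # the default docker_dns mode does not work on CoreOS
```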

@@ -1,5 +1,4 @@
# CRI-O

[CRI-O] is a lightweight container runtime for Kubernetes.
Kubespray supports basic functionality for using CRI-O as the default container runtime in a cluster.

@@ -10,19 +9,24 @@ Kubespray supports basic functionality for using CRI-O as the default container

_To use CRI-O instead of Docker, set the following variables:_

## all.yml

```yaml
download_container: false
skip_downloads: false
```

## k8s-cluster.yml

```yaml
kubelet_deployment_type: host
container_manager: crio
```

## etcd.yml

```yaml
etcd_deployment_type: host
```

[CRI-O]: https://cri-o.io/

@@ -1,15 +1,14 @@
# Debian Jessie

Debian Jessie installation Notes:

- Add

  ```GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"```

  to /etc/default/grub. Then update with

  ```ShellSession
  sudo update-grub
  sudo update-grub2
  sudo reboot
  ```

@@ -23,7 +22,7 @@ Debian Jessie installation Notes:

- Add the Ansible repository and install Ansible to get a proper version

  ```ShellSession
  sudo add-apt-repository ppa:ansible/ansible
  sudo apt-get update
  sudo apt-get install ansible
  ```

@@ -34,5 +33,4 @@ Debian Jessie installation Notes:

  ```sudo apt-get install python-jinja2=2.8-1~bpo8+1 python-netaddr```

Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment)

@@ -1,86 +1,126 @@
# K8s DNS stack by Kubespray

For K8s cluster nodes, Kubespray configures a [Kubernetes DNS](https://kubernetes.io/docs/admin/dns/)
[cluster add-on](https://releases.k8s.io/master/cluster/addons/README.md)
to serve as an authoritative DNS server for a given ``dns_domain`` and its
``svc, default.svc`` default subdomains (a total of ``ndots: 5`` max levels).

Other nodes in the inventory, like external storage nodes or a separate etcd cluster
node group, are considered non-cluster and are left up to the user to configure DNS resolution.

## DNS variables

There are several global variables which can be used to modify DNS settings:

### ndots

ndots value to be used in ``/etc/resolv.conf``

It is important to note that multiple search domains combined with high ``ndots``
values lead to poor performance of the DNS stack, so please choose it wisely.

### searchdomains

Custom search domains to be added in addition to the cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``).

Most Linux systems limit the total number of search domains to 6 and the total length of all search domains
to 256 characters. Depending on the length of ``dns_domain``, you're limited to less than the total limit.

Please note that ``resolvconf_mode: docker_dns`` will automatically add your system's search domains as
additional search domains. Please take this into account for the limits.

### nameservers

This variable is only used by ``resolvconf_mode: host_resolvconf``. These nameservers are added to the hosts
``/etc/resolv.conf`` *after* ``upstream_dns_servers`` and thus serve as backup nameservers. If this variable
is not set, a default resolver is chosen (depending on cloud provider or 8.8.8.8 when no cloud provider is specified).

### upstream_dns_servers

DNS servers to be added *after* the cluster DNS. Used by all ``resolvconf_mode`` modes. These serve as backup
DNS servers in early cluster deployment when no cluster DNS is available yet.
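
As a hedged illustration, these variables could be combined in your inventory group vars like this (all values below are placeholders, not defaults):

```yaml
ndots: 2
searchdomains:
  - corp.example.com
upstream_dns_servers:
  - 10.0.0.10
nameservers:              # only used with resolvconf_mode: host_resolvconf
  - 8.8.8.8
```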

## DNS modes supported by Kubespray

### coredns_external_zones

Array of optional external zones to forward coredns queries to. It's injected into
`coredns`' config file before the default kubernetes zone. Use it as an optimization for well-known zones and/or internal-only
domains, i.e. VPN for internal networks (default is unset)

Example:

```yaml
coredns_external_zones:
- zones:
  - example.com
  - example.io:1053
  nameservers:
  - 1.1.1.1
  - 2.2.2.2
  cache: 5
- zones:
  - https://mycompany.local:4453
  nameservers:
  - 192.168.0.53
  cache: 0
```

or as INI

```ini
coredns_external_zones='[{"cache": 30,"zones":["example.com","example.io:453"],"nameservers":["1.1.1.1","2.2.2.2"]}]'
```

You can modify how Kubespray sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``.

### dns_mode

``dns_mode`` configures how Kubespray will setup cluster DNS. There are four modes available:

#### dns_mode: coredns (default)

This installs CoreDNS as the default cluster DNS for all queries.

#### dns_mode: coredns_dual

This installs CoreDNS as the default cluster DNS for all queries, plus a secondary CoreDNS stack.

#### dns_mode: manual

This does not install coredns, but allows you to specify
`manual_dns_server`, which will be configured on nodes for handling Pod DNS.
Use this method if you plan to install your own DNS server in the cluster after
initial deployment.

#### dns_mode: none

This does not install any DNS solution at all. This basically disables cluster DNS completely and
leaves you with a non functional cluster.

## resolvconf_mode

``resolvconf_mode`` configures how Kubespray will setup DNS for ``hostNetwork: true`` PODs and non-k8s containers.
There are three modes available:

### resolvconf_mode: docker_dns (default)

This sets up the docker daemon with additional --dns/--dns-search/--dns-opt flags.

The following nameservers are added to the docker daemon (in the same order as listed here):

* cluster nameserver (depends on dns_mode)
* content of optional upstream_dns_servers variable
* host system nameservers (read from hosts /etc/resolv.conf)

The following search domains are added to the docker daemon (in the same order as listed here):

* cluster domains (``default.svc.{{ dns_domain }}``, ``svc.{{ dns_domain }}``)
* content of optional searchdomains variable
* host system search domains (read from hosts /etc/resolv.conf)

The following dns options are added to the docker daemon

* ndots:{{ ndots }}
* timeout:2
* attempts:2

@@ -96,8 +136,9 @@ DNS queries to the cluster DNS will timeout after a few seconds, resulting in th
used as a backup nameserver. After cluster DNS is running, all queries will be answered by the cluster DNS
servers, which in turn will forward queries to the system nameserver if required.

#### resolvconf_mode: host_resolvconf

This activates the classic Kubespray behavior that modifies the hosts ``/etc/resolv.conf`` file and dhclient
configuration to point to the cluster dns server (either coredns or coredns_dual, depending on dns_mode).

As cluster DNS is not available on early deployment stage, this mode is split into 2 stages. In the first
@@ -108,21 +149,40 @@ the other nameservers as backups.
Also note, existing records will be purged from the `/etc/resolv.conf`,
including resolvconf's base/head/cloud-init config files and those that come from dhclient.

#### resolvconf_mode: none

Does nothing regarding ``/etc/resolv.conf``. This leaves you with a cluster that works as expected in most cases.
The only exception is that ``hostNetwork: true`` PODs and non-k8s managed containers will not be able to resolve
cluster service names.
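
For reference, the default combination described above corresponds to the following group vars (both values shown are already the defaults):

```yaml
dns_mode: coredns
resolvconf_mode: docker_dns
```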

## Nodelocal DNS cache

Setting ``enable_nodelocaldns`` to ``true`` will make pods reach out to the dns (core-dns) caching agent running on the same node, thereby avoiding iptables DNAT rules and connection tracking. The local caching agent will query core-dns (depending on what main DNS plugin is configured in your cluster) for cache misses of cluster hostnames (cluster.local suffix by default).

More information on the rationale behind this implementation can be found [here](https://github.com/kubernetes/enhancements/blob/master/keps/sig-network/0030-nodelocal-dns-cache.md).

**As per the 2.10 release, Nodelocal DNS cache is enabled by default.**
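
The cache is controlled by a single variable; to disable it (or re-enable it explicitly), set:

```yaml
enable_nodelocaldns: true   # enabled by default since the 2.10 release; set to false to disable
```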

### External zones

It's possible to extend the `nodelocaldns` configuration by adding an array of external zones. For example:

```yaml
nodelocaldns_external_zones:
- zones:
  - example.com
  - example.io:1053
  nameservers:
  - 1.1.1.1
  - 2.2.2.2
  cache: 5
- zones:
  - https://mycompany.local:4453
  nameservers:
  - 192.168.0.53
```

## Limitations

* Kubespray does not yet provide a way to configure the Kubedns addon to forward requests SkyDns can
not answer with authority to arbitrary recursive resolvers. This task is left

@@ -1,5 +1,4 @@
# Downloading binaries and containers

Kubespray supports several download/upload modes. The default is:

@@ -14,11 +13,13 @@ There is also a "pull once, push many" mode as well:

NOTE: When `download_run_once` is true and `download_localhost` is false, all downloads will be done on the delegate node, including downloads for container images that are not required on that node. As a consequence, the storage required on that node will probably be more than if download_run_once was false, because all images will be loaded into the docker instance on that node, instead of just the images required for that node.

:warning: [`download_run_once: true` support only for `container_manager: docker`](https://github.com/containerd/containerd/issues/4075) :warning:

On caching:

* When `download_run_once` is `True`, all downloaded files will be cached locally in `download_cache_dir`, which defaults to `/tmp/kubespray_cache`. On subsequent provisioning runs, this local cache will be used to provision the nodes, minimizing bandwidth usage and improving provisioning time. Expect about 800MB of disk space to be used on the ansible node for the cache. Disk space required for the image cache on the kubernetes nodes is as much as is needed for the largest image, which is currently slightly less than 150MB.
* By default, if `download_run_once` is false, kubespray will not retrieve the downloaded images and files from the download delegate node to the local cache, or use that cache to pre-provision those nodes. If you have a full cache with container images and files and you don't need to download anything, but want to use a cache - set `download_force_cache` to `True`.
* By default, cached images that are used to pre-provision the remote nodes will be deleted from the remote nodes after use, to save disk space. Setting `download_keep_remote_cache` will prevent the files from being deleted. This can be useful while developing kubespray, as it can decrease provisioning times. As a consequence, the required storage for images on the remote nodes will increase from 150MB to about 550MB, which is currently the combined size of all required container images (see the example after this list).
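
A sketch of how these caching variables might be combined in group vars (the values are illustrative, not necessarily the defaults):

```yaml
download_run_once: true              # pull once on the delegate node, then push to the other nodes
download_cache_dir: /tmp/kubespray_cache
download_force_cache: true           # provision from the local cache even when nothing new is downloaded
download_keep_remote_cache: false    # set to true to keep cached images on the remote nodes
```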

Container images and binary files are described by the vars like ``foo_version``,
``foo_download_url``, ``foo_checksum`` for binaries and ``foo_image_repo``,
@@ -30,11 +31,13 @@ Container images may be defined by its repo and tag, for example:

Note, the SHA256 digest and the image tag must be both specified and correspond
to each other. The given example above is represented by the following vars:

```yaml
dnsmasq_digest_checksum: 7c883354f6ea9876d176fe1d30132515478b2859d6fc0cbf9223ffdc09168193
dnsmasq_image_repo: andyshinn/dnsmasq
dnsmasq_image_tag: '2.72'
```

The full list of available vars may be found in the download's ansible role defaults. Those also allow to specify custom urls and local repositories for binaries and container
images as well. See also the DNS stack docs for the related intranet configuration,
so the hosts can resolve those urls and repos.

docs/fcos.md (new file)

@@ -0,0 +1,90 @@
# Fedora CoreOS

Tested with stable version 31.20200223.3.0.

Because package installation with `rpm-ostree` requires a reboot, the playbook may fail during bootstrap.
Restart the playbook again.

## Containers

Tested with

- docker
- crio

### docker

The OS base packages contain docker.

### cri-o

To use `cri-o`, disable the docker service with ignition:

```yaml
# workaround, see https://github.com/coreos/fedora-coreos-tracker/issues/229
systemd:
  units:
    - name: docker.service
      enabled: false
      contents: |
        [Unit]
        Description=disable docker

        [Service]

        [Install]
        WantedBy=multi-user.target
```

## Network

### calico

To use calico, create a sysctl file with ignition:

```yaml
files:
  - path: /etc/sysctl.d/reverse-path-filter.conf
    contents:
      inline: |
        net.ipv4.conf.all.rp_filter=1
```

## libvirt setup

### Prepare

Prepare the ignition file and serve it via http (e.g. `python -m http.server`):

```json
{
  "ignition": {
    "version": "3.0.0"
  },

  "passwd": {
    "users": [
      {
        "name": "ansibleUser",
        "sshAuthorizedKeys": [
          "ssh-rsa ..publickey.."
        ],
        "groups": [ "wheel" ]
      }
    ]
  }
}
```

### create guest

```shell script
fcos_version=31.20200223.3.0
kernel=https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/${fcos_version}/x86_64/fedora-coreos-${fcos_version}-live-kernel-x86_64
initrd=https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/${fcos_version}/x86_64/fedora-coreos-${fcos_version}-live-initramfs.x86_64.img
ignition_url=http://mywebserver/fcos.ign
kernel_args="ip=dhcp rd.neednet=1 console=tty0 coreos.liveiso=/ console=ttyS0 coreos.inst.install_dev=/dev/sda coreos.inst.stream=stable coreos.inst.ignition_url=${ignition_url}"

sudo virt-install --name ${machine_name} --ram 4048 --graphics=none --vcpus 2 --disk size=20 \
  --network bridge=virbr0 \
  --install kernel=${kernel},initrd=${initrd},kernel_args_overwrite=yes,kernel_args="${kernel_args}"
```

@@ -1,9 +1,14 @@
# Flannel

Flannel is a network fabric for containers, designed for Kubernetes

**Warning:** You may encounter this [bug](https://github.com/coreos/flannel/pull/1282) with the `VXLAN` backend. While waiting on a newer Flannel version, the current workaround (`ethtool --offload flannel.1 rx off tx off`) is showcased in the kubespray [networking test](tests/testcases/040_check-network-adv.yml:31).

## Verifying flannel install

* Flannel configuration file should have been created there

```ShellSession
cat /run/flannel/subnet.env
FLANNEL_NETWORK=10.233.0.0/18
FLANNEL_SUBNET=10.233.16.1/24
```

@@ -13,7 +18,7 @@ FLANNEL_IPMASQ=false

* Check if the network interface has been created

```ShellSession
ip a show dev flannel.1
4: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default
    link/ether e2:f3:a7:0f:bf:cb brd ff:ff:ff:ff:ff:ff
```

@@ -25,7 +30,7 @@ ip a show dev flannel.1

* Try to run a container and check its ip address

```ShellSession
kubectl run test --image=busybox --command -- tail -f /dev/null
replicationcontroller "test" created

kubectl describe po test-34ozs | grep ^IP
IP: 10.233.16.2
```

```ShellSession
kubectl exec test-34ozs -- ip a show dev eth0
8: eth0@if9: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1450 qdisc noqueue
    link/ether 02:42:0a:e9:2b:03 brd ff:ff:ff:ff:ff:ff
```

docs/gcp-pd-csi.md (new file)

@@ -0,0 +1,77 @@
# GCP Persistent Disk CSI Driver

The GCP Persistent Disk CSI driver allows you to provision volumes for pods with a Kubernetes deployment over Google Cloud Platform. The CSI driver replaces the volume provisioning done by the in-tree GCE cloud provider, which is deprecated.

To deploy the GCP Persistent Disk CSI driver, uncomment the `gcp_pd_csi_enabled` option in `group_vars/all/gcp.yml` and set it to `true`.

## GCP Persistent Disk Storage Class

If you want to deploy the GCP Persistent Disk storage class to provision volumes dynamically, you should set `persistent_volumes_enabled` in `group_vars/k8s-cluster/k8s-cluster.yml` to `true`.

## GCP credentials

In order for the CSI driver to provision disks, you need to create a service account on GCP for it with the appropriate permissions.

Follow these steps to configure it:

```ShellSession
# This will open a web page for you to authenticate
gcloud auth login
export PROJECT=nameofmyproject
gcloud config set project $PROJECT

git clone https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver $GOPATH/src/sigs.k8s.io/gcp-compute-persistent-disk-csi-driver

export GCE_PD_SA_NAME=my-gce-pd-csi-sa
export GCE_PD_SA_DIR=/my/safe/credentials/directory

./deploy/setup-project.sh
```

The above will create a file named `cloud-sa.json` in the specified `GCE_PD_SA_DIR`. This file contains the service account with the appropriate credentials for the CSI driver to perform actions on GCP to request disks for pods.

You need to provide this file's path through the variable `gcp_pd_csi_sa_cred_file` in `inventory/mycluster/group_vars/all/gcp.yml`.
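
Putting the pieces together, the relevant entries in `inventory/mycluster/group_vars/all/gcp.yml` would look roughly like this (the credentials path is only an example):

```yaml
gcp_pd_csi_enabled: true
gcp_pd_csi_sa_cred_file: /my/safe/credentials/directory/cloud-sa.json   # example path
```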

You can now deploy Kubernetes with Kubespray over GCP.

## GCP PD CSI Driver test

To test dynamic provisioning with the GCP PD CSI driver, make sure the storage class is deployed (through persistent volumes), and apply the following manifest:

```yml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: podpvc
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: csi-gce-pd
  resources:
    requests:
      storage: 1Gi

---
apiVersion: v1
kind: Pod
metadata:
  name: web-server
spec:
  containers:
  - name: web-server
    image: nginx
    volumeMounts:
    - mountPath: /var/lib/www/html
      name: mypvc
  volumes:
  - name: mypvc
    persistentVolumeClaim:
      claimName: podpvc
      readOnly: false
```

## GCP PD documentation

You can find the official GCP Persistent Disk CSI driver installation documentation here: [GCP PD CSI Driver](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver/blob/master/docs/kubernetes/user-guides/driver-install.md)

@@ -1,8 +1,6 @@
# Getting started

## Building your own inventory

Ansible inventory can be stored in 3 formats: YAML, JSON, or INI-like. There is
an example inventory located
@@ -18,38 +16,41 @@ certain threshold. Run `python3 contrib/inventory_builder/inventory.py help` hel

Example inventory generator usage:

```ShellSession
cp -r inventory/sample inventory/mycluster
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
CONFIG_FILE=inventory/mycluster/hosts.yml python3 contrib/inventory_builder/inventory.py ${IPS[@]}
```

Then use `inventory/mycluster/hosts.yml` as inventory file.

## Starting custom deployment

Once you have an inventory, you may want to customize deployment data vars
and start the deployment:

**IMPORTANT**: Edit my\_inventory/groups\_vars/\*.yaml to override data vars:

```ShellSession
ansible-playbook -i inventory/mycluster/hosts.yml cluster.yml -b -v \
  --private-key=~/.ssh/private_key
```

See more details in the [ansible guide](docs/ansible.md).

### Adding nodes

You may want to add worker, master or etcd nodes to your existing cluster. This can be done by re-running the `cluster.yml` playbook, or you can target the bare minimum needed to get kubelet installed on the worker and talking to your masters. This is especially helpful when doing something like autoscaling your clusters.

- Add the new worker node to your inventory in the appropriate group (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
- Run the ansible-playbook command, substituting `cluster.yml` for `scale.yml`:

```ShellSession
ansible-playbook -i inventory/mycluster/hosts.yml scale.yml -b -v \
  --private-key=~/.ssh/private_key
```

### Remove nodes

You may want to remove **master**, **worker**, or **etcd** nodes from your
existing cluster. This can be done by re-running the `remove-node.yml`
@@ -61,10 +62,11 @@ when doing something like autoscaling your clusters. Of course, if a node
is not working, you can remove the node and install it again.

Use `--extra-vars "node=<nodename>,<nodename2>"` to select the node(s) you want to delete.

```ShellSession
ansible-playbook -i inventory/mycluster/hosts.yml remove-node.yml -b -v \
  --private-key=~/.ssh/private_key \
  --extra-vars "node=nodename,nodename2"
```

If a node is completely unreachable by ssh, add `--extra-vars reset_nodes=no`
@@ -72,60 +74,64 @@ to skip the node reset step. If one node is unavailable, but others you wish
to remove are able to connect via SSH, you could set reset_nodes=no as a host
var in inventory.
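
As a sketch, setting `reset_nodes=no` as a host var in a YAML inventory could look like this (the hostname is hypothetical):

```yaml
all:
  hosts:
    node5:                 # the unreachable node you want to remove
      reset_nodes: no
```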

## Connecting to Kubernetes

By default, Kubespray configures kube-master hosts with insecure access to
kube-apiserver via port 8080. A kubeconfig file is not necessary in this case,
because kubectl will use <http://localhost:8080> to connect. The kubeconfig files
generated will point to localhost (on kube-masters) and kube-node hosts will
connect either to a localhost nginx proxy or to a loadbalancer if configured.
More details on this process are in the [HA guide](docs/ha-mode.md).

Kubespray permits connecting to the cluster remotely on any IP of any
kube-master host on port 6443 by default. However, this requires
authentication. One can get a kubeconfig from kube-master hosts
(see [below](#accessing-kubernetes-api)) or connect with a [username and password](vars.md#user-accounts).

For more information on kubeconfig and accessing a Kubernetes cluster, refer to
the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/).

## Accessing Kubernetes Dashboard

The supported version is kubernetes-dashboard v2.0.x:

- Login options are token/kubeconfig by default; basic auth can be enabled with the `kube_basic_auth: true` inventory variable - not recommended because this requires an ABAC api-server, which is not tested by the kubespray team
- Deployed by default in the "kube-system" namespace; this can be overridden with `dashboard_namespace: kubernetes-dashboard` in inventory
- Only serves over https

Access is described in the [dashboard docs](https://github.com/kubernetes/dashboard/blob/master/docs/user/accessing-dashboard/1.7.x-and-above.md). With kubespray's default deployment in the kube-system namespace (instead of kubernetes-dashboard):

- Proxy URL is <http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#/login>
- kubectl commands must be run with "-n kube-system"

Accessing through Ingress is highly recommended. For proxy access, please note that the proxy must listen on [localhost](https://github.com/kubernetes/dashboard/issues/692#issuecomment-220492484) (`proxy --address="x.x.x.x"` will not work)

For token authentication, a guide to creating a Service Account is provided in the [dashboard sample user](https://github.com/kubernetes/dashboard/blob/master/docs/user/access-control/creating-sample-user.md) doc. Still take care of the default namespace.

Access can also be achieved via an ssh tunnel on a master:

```bash
# localhost:8001 will be sent to master-1's own localhost:8001
ssh -L8001:localhost:8001 user@master-1
sudo -i
kubectl proxy
```

## Accessing Kubernetes API

The main client of Kubernetes is `kubectl`. It is installed on each kube-master
host and can optionally be configured on your ansible host by setting
`kubectl_localhost: true` and `kubeconfig_localhost: true` in the configuration:

- If `kubectl_localhost` enabled, `kubectl` will download onto `/usr/local/bin/` and setup with bash completion. A helper script `inventory/mycluster/artifacts/kubectl.sh` is also created for use with the `admin.conf` below.
- If `kubeconfig_localhost` enabled, `admin.conf` will appear in the `inventory/mycluster/artifacts/` directory after deployment.
- The location where these files are downloaded to can be configured via the `artifacts_dir` variable (see the example below).
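
A hedged example of these settings in your k8s-cluster group vars (the `artifacts_dir` value is assumed to be the default that produces the `inventory/mycluster/artifacts/` path used above):

```yaml
kubectl_localhost: true
kubeconfig_localhost: true
artifacts_dir: "{{ inventory_dir }}/artifacts"   # assumed default; change to download elsewhere
```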

You can see a list of nodes by running the following commands:

```ShellSession
cd inventory/mycluster/artifacts
./kubectl.sh get nodes
```

If desired, copy admin.conf to ~/.kube/config.
|||||||
@@ -1,19 +1,18 @@
|
|||||||
HA endpoints for K8s
|
# HA endpoints for K8s
|
||||||
====================
|
|
||||||
|
|
||||||
The following components require a highly available endpoints:
|
The following components require a highly available endpoints:
|
||||||
|
|
||||||
* etcd cluster,
|
* etcd cluster,
|
||||||
* kube-apiserver service instances.
|
* kube-apiserver service instances.
|
||||||
|
|
||||||
The latter relies on a 3rd side reverse proxy, like Nginx or HAProxy, to
|
The latter relies on a 3rd side reverse proxy, like Nginx or HAProxy, to
|
||||||
achieve the same goal.
|
achieve the same goal.
|
||||||
|
|
||||||
Etcd
|
## Etcd
|
||||||
----
|
|
||||||
The etcd clients (kube-api-masters) are configured with the list of all etcd peers. If the etcd-cluster has multiple instances, it's configured in HA already.
|
The etcd clients (kube-api-masters) are configured with the list of all etcd peers. If the etcd-cluster has multiple instances, it's configured in HA already.
|
||||||
|
|
||||||
Kube-apiserver
|
## Kube-apiserver
|
||||||
--------------
|
|
||||||
|
|
||||||
K8s components require a loadbalancer to access the apiservers via a reverse
|
K8s components require a loadbalancer to access the apiservers via a reverse
|
||||||
proxy. Kubespray includes support for an nginx-based proxy that resides on each
|
proxy. Kubespray includes support for an nginx-based proxy that resides on each
|
||||||
@@ -50,15 +49,16 @@ provides access for external clients, while the internal LB accepts client
|
|||||||
connections only to the localhost.
|
connections only to the localhost.
|
||||||
Given a frontend `VIP` address and `IP1, IP2` addresses of backends, here is
|
Given a frontend `VIP` address and `IP1, IP2` addresses of backends, here is
|
||||||
an example configuration for a HAProxy service acting as an external LB:
|
an example configuration for a HAProxy service acting as an external LB:
|
||||||
```
|
|
||||||
|
```raw
|
||||||
listen kubernetes-apiserver-https
|
listen kubernetes-apiserver-https
|
||||||
bind <VIP>:8383
|
bind <VIP>:8383
|
||||||
option ssl-hello-chk
|
|
||||||
mode tcp
|
mode tcp
|
||||||
|
option log-health-checks
|
||||||
timeout client 3h
|
timeout client 3h
|
||||||
timeout server 3h
|
timeout server 3h
|
||||||
server master1 <IP1>:6443
|
server master1 <IP1>:6443 check check-ssl verify none inter 10000
|
||||||
server master2 <IP2>:6443
|
server master2 <IP2>:6443 check check-ssl verify none inter 10000
|
||||||
balance roundrobin
|
balance roundrobin
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -66,7 +66,8 @@ listen kubernetes-apiserver-https
|
|||||||
|
|
||||||
And the corresponding example global vars for such a "cluster-aware"
|
And the corresponding example global vars for such a "cluster-aware"
|
||||||
external LB with the cluster API access modes configured in Kubespray:
|
external LB with the cluster API access modes configured in Kubespray:
|
||||||
```
|
|
||||||
|
```yml
|
||||||
apiserver_loadbalancer_domain_name: "my-apiserver-lb.example.com"
|
apiserver_loadbalancer_domain_name: "my-apiserver-lb.example.com"
|
||||||
loadbalancer_apiserver:
|
loadbalancer_apiserver:
|
||||||
address: <VIP>
|
address: <VIP>
|
||||||
@@ -101,14 +102,15 @@ exclusive to `loadbalancer_apiserver_localhost`.
|
|||||||

API access endpoints are evaluated automatically, as follows:

| Endpoint type                | kube-master      | non-master            | external              |
|------------------------------|------------------|-----------------------|-----------------------|
| Local LB (default)           | `https://bip:sp` | `https://lc:nsp`      | `https://m[0].aip:sp` |
| Local LB + Unmanaged here LB | `https://bip:sp` | `https://lc:nsp`      | `https://ext`         |
| External LB, no internal     | `https://bip:sp` | `https://lb:lp`       | `https://lb:lp`       |
| No ext/int LB                | `https://bip:sp` | `https://m[0].aip:sp` | `https://m[0].aip:sp` |

Where:

* `m[0]` - the first node in the `kube-master` group;
* `lb` - LB FQDN, `apiserver_loadbalancer_domain_name`;
* `ext` - Externally load balanced VIP:port and FQDN, not managed by Kubespray;

@@ -132,16 +134,19 @@ Kubespray, the masters' APIs are accessed via the insecure endpoint, which
consists of the local `kube_apiserver_insecure_bind_address` and
`kube_apiserver_insecure_port`.

## Optional configurations

### ETCD with a LB

In order to use an external load balancer (L4/TCP or L7 with SSL passthrough to a VIP), the following variables need to be overridden in group_vars:

* `etcd_access_addresses`
* `etcd_client_url`
* `etcd_cert_alt_names`
* `etcd_cert_alt_ips`

#### Example of a VIP w/ FQDN

```yaml
etcd_access_addresses: https://etcd.example.com:2379
etcd_client_url: https://etcd.example.com:2379
```
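
Once those variables point at the fronting FQDN, you can verify that etcd answers through it from one of the masters. A minimal sketch, assuming the FQDN above and the certificate paths kubespray typically generates under `/etc/ssl/etcd/ssl/` (the `node-master1` file names are placeholders for your own host names):

```ShellSession
# Check etcd health through the load-balanced endpoint rather than a single peer.
ETCDCTL_API=3 etcdctl \
  --endpoints=https://etcd.example.com:2379 \
  --cacert=/etc/ssl/etcd/ssl/ca.pem \
  --cert=/etc/ssl/etcd/ssl/node-master1.pem \
  --key=/etc/ssl/etcd/ssl/node-master1-key.pem \
  endpoint health
```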

@@ -3,12 +3,13 @@
1. Fork the [kubespray repo](https://github.com/kubernetes-sigs/kubespray) to your personal/organisation account on github.
Note:
* All forked public repos at github will also be public, so **never commit sensitive data to your public forks**.
* The list of all forks can be retrieved from the github page of the original project.

2. Add the **forked repo** as a submodule to the desired folder in your existing ansible repo (for example 3d/kubespray):
```git submodule add https://github.com/YOUR_GITHUB/kubespray.git kubespray```
Git will create a _.gitmodules_ file in your existing ansible repo:

```ini
[submodule "3d/kubespray"]
  path = 3d/kubespray
  url = https://github.com/YOUR_GITHUB/kubespray.git
```

@@ -21,7 +22,8 @@
```git remote add upstream https://github.com/kubernetes-sigs/kubespray.git```

5. Sync your master branch with upstream:

```ShellSession
git checkout master
git fetch upstream
git merge upstream/master
```

@@ -33,19 +35,21 @@
***Never*** use the master branch of your repository for your commits.

7. Modify the path to library and roles in your ansible.cfg file (role names should be unique; you may have to rename your existing roles if they have the same names as roles in the kubespray project):

```ini
...
library = 3d/kubespray/library/
roles_path = 3d/kubespray/roles/
...
```

8. Copy and modify configs from the kubespray `group_vars` folder to the corresponding `group_vars` folder in your existing project.
You could rename the *all.yml* config to something else, e.g. *kubespray.yml*, and create a corresponding group in your inventory file, which will include all host groups related to the kubernetes setup.
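
For example, one way to seed your project with kubespray's sample group vars before editing them (the `inventory/my-cluster/` path is only an assumption, and newer kubespray versions may ship `all`/`k8s-cluster` as directories rather than single files):

```ShellSession
# Copy the sample group_vars from the submodule into your own inventory tree
# and edit the copies there; the files inside 3d/kubespray stay untouched.
mkdir -p inventory/my-cluster/group_vars
cp -r 3d/kubespray/inventory/sample/group_vars/* inventory/my-cluster/group_vars/
```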

9. Modify your ansible inventory file by adding a mapping of your existing groups (if any) to the kubespray naming.
For example:

```ini
...
#Kargo groups:
[kube-node:children]
```

@@ -65,54 +69,62 @@ You could rename *all.yml* config to something else, i.e. *kubespray.yml* and cr

```ini
[kubespray:children]
kubernetes
```

* The last entry here is needed to apply the kubespray.yml config file, renamed from all.yml of the kubespray project.

10. Now you can include kubespray tasks in your existing playbooks by including the cluster.yml file:

```yml
- name: Include kubespray tasks
  include: 3d/kubespray/cluster.yml
```

Or you could copy separate tasks from cluster.yml into your ansible repository.

11. Commit the changes to your ansible repo. Keep in mind that the submodule folder is just a link to the git commit hash of your forked repo.
When you update your "work" branch you need to commit changes to the ansible repo as well.
Other members of your team should use ```git submodule sync```, ```git submodule update --init``` to get the current code from the submodule, as shown below.
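
Put together, the teammate-side update amounts to the two commands from the previous step, run from the top of the ansible repo after pulling its latest commits:

```ShellSession
# Refresh the submodule URL (in case it changed) and check out the pinned commit.
git submodule sync
git submodule update --init
```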

## Contributing

If you made useful changes or fixed a bug in the existing kubespray repo, use this flow for PRs to the original kubespray repo.

1. Sign the [CNCF CLA](https://git.k8s.io/community/CLA.md).

2. Change the working directory to the git submodule directory (3d/kubespray).

3. Set up the desired user.name and user.email for the submodule.
If kubespray is the only submodule in your repo you could use something like:
```git submodule foreach --recursive 'git config user.name "First Last" && git config user.email "your-email-address@used.for.cncf"'```

4. Sync with upstream master:

```ShellSession
git fetch upstream
git merge upstream/master
git push origin master
```

5. Create a new branch for the specific fixes that you want to contribute:
```git checkout -b fixes-name-date-index```
The branch name should be self-explanatory; adding a date and/or index will help you track/delete your old PRs.

6. Find the git hash of your commit in the "work" repo and apply it to the newly created "fix" branch:

```ShellSession
git cherry-pick <COMMIT_HASH>
```

7. If you have several temporary-stage commits, squash them using [```git rebase -i```](https://eli.thegreenplace.net/2014/02/19/squashing-github-pull-requests-into-a-single-commit).
You can also use interactive rebase (```git rebase -i HEAD~10```) to delete commits which you don't want to contribute to the original repo.

8. When your changes are in place, check the upstream repo one more time because it could have changed during your work.
Check that you're on the correct branch:
```git status```
And pull changes from upstream (if any):
```git pull --rebase upstream master```

9. Now push your changes to your **fork** repo with ```git push```. If your branch doesn't exist on github, git will propose something like ```git push --set-upstream origin fixes-name-date-index```.

10. Open your forked repo in a browser; on the main page you will see a prompt to create a pull request for your newly created branch. Check the proposed diff of your PR. If something is wrong you can safely delete the "fix" branch on github using ```git push origin --delete fixes-name-date-index```, ```git branch -D fixes-name-date-index``` and start the whole process from the beginning.
If everything is fine, add a description of your changes (what they do and why they're needed) and confirm pull request creation.

@@ -1,13 +1,20 @@
# Kube-OVN

Kube-OVN integrates OVN-based network virtualization with Kubernetes. It offers an advanced Container Network Fabric for enterprises.

For more information please check the [Kube-OVN documentation](https://github.com/alauda/kube-ovn).

**Warning:** The kernel version (`cat /proc/version`) needs to be different from `3.10.0-862`, or kube-ovn won't start and will print this message:

```bash
kernel version 3.10.0-862 has a nat related bug that will affect ovs function, please update to a version greater than 3.10.0-898
```
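
To check a node before deploying, compare its running kernel against the versions named in that message. A small sketch (the sample output string is illustrative):

```ShellSession
# Print the running kernel release on the target node.
uname -r
# e.g. 3.10.0-862.el7.x86_64  -> affected, upgrade the kernel first
# a release greater than 3.10.0-898 (or a newer kernel series) avoids the bug
```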

## How to use it

Enable kube-ovn in `group_vars/k8s-cluster/k8s-cluster.yml`:

```yml
...
kube_network_plugin: kube-ovn
...
```
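
To apply the setting, run the cluster playbook against your inventory. A minimal sketch, assuming an inventory at `inventory/mycluster/hosts.yml`; note that switching the network plugin on an already-deployed cluster is disruptive, so this is normally chosen at initial deploy time:

```ShellSession
# Deploy (or redeploy) the cluster with kube-ovn selected as the network plugin.
ansible-playbook -i inventory/mycluster/hosts.yml --become cluster.yml
```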

@@ -19,7 +26,7 @@ Kube-OVN run ovn and controller in `kube-ovn` namespace

* Check the status of kube-ovn pods

```ShellSession
# From the CLI
kubectl get pod -n kube-ovn
```

@@ -37,7 +44,7 @@ ovs-ovn-r5frh 1/1 Running 0 4d16h

* Check the default and node subnet

```ShellSession
# From the CLI
kubectl get subnet
```

@@ -45,4 +52,4 @@ kubectl get subnet

```ShellSession
NAME          PROTOCOL   CIDR            PRIVATE   NAT
join          IPv4       100.64.0.0/16   false     false
ovn-default   IPv4       10.16.0.0/16    false     true
```