mirror of
https://github.com/kubernetes-sigs/kubespray.git
synced 2025-12-14 13:54:37 +03:00
Compare commits
255 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a923f4e7c0 | ||
|
|
82af8e455e | ||
|
|
1baee488ab | ||
|
|
7433b70d95 | ||
|
|
de6c71a426 | ||
|
|
16a34548ea | ||
|
|
b2f3ab77cd | ||
|
|
b2f6ed7dee | ||
|
|
09e34d29cd | ||
|
|
667a6981ea | ||
|
|
cf1d9f5612 | ||
|
|
55b03a41b2 | ||
|
|
81b4ffa6b4 | ||
|
|
8c1821228d | ||
|
|
9c5c1a09a1 | ||
|
|
09fa99fdc6 | ||
|
|
8331939aed | ||
|
|
02213d6e07 | ||
|
|
387df0ee1f | ||
|
|
b59035df06 | ||
|
|
5517e62c86 | ||
|
|
5dca5225dc | ||
|
|
c005c90746 | ||
|
|
8bdd0bb82f | ||
|
|
a790935d02 | ||
|
|
1fcbbd3b9d | ||
|
|
b9077d3ea2 | ||
|
|
1d7d84540f | ||
|
|
6f471d1c5e | ||
|
|
ff95292435 | ||
|
|
e8a8a7b8cc | ||
|
|
b0ad8ec023 | ||
|
|
ab2bfd7f8c | ||
|
|
29f1c40580 | ||
|
|
2585e72a30 | ||
|
|
837fca1368 | ||
|
|
0c995c1ea7 | ||
|
|
ad244ab744 | ||
|
|
308ceee46c | ||
|
|
e0195da80d | ||
|
|
b02f40b392 | ||
|
|
c0fe32c4ec | ||
|
|
e9f93a1de9 | ||
|
|
c14388629a | ||
|
|
3c1f84a9e9 | ||
|
|
398a995798 | ||
|
|
dc86b2063a | ||
|
|
bbab1013c5 | ||
|
|
1945499e2f | ||
|
|
c971debd15 | ||
|
|
161c7e9fce | ||
|
|
fd3ebc13f7 | ||
|
|
9db4b949f2 | ||
|
|
5b5726bdd4 | ||
|
|
1347bb2e4b | ||
|
|
286191ecb7 | ||
|
|
096bcdd078 | ||
|
|
7d7739e031 | ||
|
|
3470810709 | ||
|
|
98b43bb24f | ||
|
|
275c54e810 | ||
|
|
9a05037352 | ||
|
|
143f9c78be | ||
|
|
75f0aaf4a1 | ||
|
|
c36df6a78b | ||
|
|
10a6bd67de | ||
|
|
db17ba54b4 | ||
|
|
c2f64a52da | ||
|
|
0b81c6a6c4 | ||
|
|
36bd4cdc43 | ||
|
|
87eea16d7b | ||
|
|
0aa6d3d4bc | ||
|
|
43dbff938e | ||
|
|
54aebb92fd | ||
|
|
f0c7649158 | ||
|
|
93445b4dbc | ||
|
|
aeaa876d57 | ||
|
|
9c1e08249d | ||
|
|
33a60fe919 | ||
|
|
85982dc8e9 | ||
|
|
dbe02d398a | ||
|
|
e022e2e13c | ||
|
|
7084d38767 | ||
|
|
00e0f3bd2b | ||
|
|
cd7212453e | ||
|
|
a69f2b09da | ||
|
|
878fe80ca3 | ||
|
|
8331c1f858 | ||
|
|
f4a69d2827 | ||
|
|
ed6cef85d8 | ||
|
|
d315f73080 | ||
|
|
06ec5393d7 | ||
|
|
1a491fc10c | ||
|
|
488db81e36 | ||
|
|
f377d9f057 | ||
|
|
db4e942b0d | ||
|
|
68b96bdf1a | ||
|
|
4f7a760a94 | ||
|
|
da5077fa5f | ||
|
|
f1231bb97d | ||
|
|
80eb1ad936 | ||
|
|
cc5303e1c8 | ||
|
|
f6a5948f58 | ||
|
|
f6eed8091e | ||
|
|
4a8a52bad9 | ||
|
|
c09aabab0c | ||
|
|
d47ba2b2ef | ||
|
|
17fb1ceed8 | ||
|
|
97ff67e54a | ||
|
|
d4204a42fd | ||
|
|
c6f6940459 | ||
|
|
d739a6bb2f | ||
|
|
0982c66051 | ||
|
|
d40701463f | ||
|
|
405692d793 | ||
|
|
7938748d77 | ||
|
|
e909f84966 | ||
|
|
8a153ed38e | ||
|
|
eb16986f32 | ||
|
|
bd801de236 | ||
|
|
9c3bcd48ee | ||
|
|
ee23b947aa | ||
|
|
0f7341bdde | ||
|
|
602b5aaf01 | ||
|
|
70bbb3e280 | ||
|
|
a27eebb225 | ||
|
|
1b0326f773 | ||
|
|
93a1693040 | ||
|
|
df7ed24389 | ||
|
|
544aa00c17 | ||
|
|
fc22453618 | ||
|
|
fefcb8c9f8 | ||
|
|
9cf5dd0291 | ||
|
|
7a1f033c1d | ||
|
|
4a5acad414 | ||
|
|
227e96469c | ||
|
|
c93fa6effe | ||
|
|
102fb94524 | ||
|
|
c25d624524 | ||
|
|
12ab8b7af3 | ||
|
|
097bec473c | ||
|
|
d36b5d7d55 | ||
|
|
4b858b6466 | ||
|
|
e03e3c4582 | ||
|
|
91f1edbdd4 | ||
|
|
c6e2a4ebd8 | ||
|
|
3eefb5f2ad | ||
|
|
04b19359cb | ||
|
|
f2ef781efd | ||
|
|
60b0fb3e88 | ||
|
|
f323d70c0f | ||
|
|
03f316e7a2 | ||
|
|
79b7f0d592 | ||
|
|
d25aebdaf5 | ||
|
|
1454ba4a9e | ||
|
|
4781df587c | ||
|
|
e49330d6ee | ||
|
|
dbe6eb20c8 | ||
|
|
8bec5beb4b | ||
|
|
e6effb8245 | ||
|
|
5e32655830 | ||
|
|
270f91e577 | ||
|
|
07858e8f71 | ||
|
|
4cb5a4f609 | ||
|
|
cb57c3c916 | ||
|
|
92b1166dd0 | ||
|
|
e6c28982dd | ||
|
|
64f69718fb | ||
|
|
1301e69c7d | ||
|
|
99b8f0902e | ||
|
|
6a4d322a7c | ||
|
|
9d7f358d4b | ||
|
|
b1bb5a4796 | ||
|
|
f8ae086334 | ||
|
|
c49bda7319 | ||
|
|
aa9022d638 | ||
|
|
2994ba33ac | ||
|
|
87c0f135dc | ||
|
|
a687013fbe | ||
|
|
b0097fd0c1 | ||
|
|
9729b6b75a | ||
|
|
58959ae82f | ||
|
|
4ffc106c58 | ||
|
|
a374301570 | ||
|
|
bc8e16fc69 | ||
|
|
947162452d | ||
|
|
7a730d42dd | ||
|
|
109391031b | ||
|
|
aba63f0f9a | ||
|
|
e67886bf9d | ||
|
|
c2ac3b51c1 | ||
|
|
081a9e7bd8 | ||
|
|
55d8ed093a | ||
|
|
77149e5d89 | ||
|
|
28839f6b71 | ||
|
|
49bcf91aaf | ||
|
|
28073c76ac | ||
|
|
50e8a52c74 | ||
|
|
5c448b6896 | ||
|
|
c0fd5b2e84 | ||
|
|
6141b98bf8 | ||
|
|
2eae207435 | ||
|
|
9a8e4381be | ||
|
|
5f034330c5 | ||
|
|
edea63511d | ||
|
|
80df4f8b01 | ||
|
|
68118c2653 | ||
|
|
1e79dcfcaa | ||
|
|
1805e95b69 | ||
|
|
0d0cc8cf9c | ||
|
|
5bd937ece0 | ||
|
|
8908a70c19 | ||
|
|
5ec2467268 | ||
|
|
e489e70031 | ||
|
|
05c9169c70 | ||
|
|
bd49c993de | ||
|
|
5989680967 | ||
|
|
e1265b2e7b | ||
|
|
1721460dcd | ||
|
|
861bf967a7 | ||
|
|
09b8314057 | ||
|
|
151b142d30 | ||
|
|
b7c4136702 | ||
|
|
e666fe5a8d | ||
|
|
9ce34be217 | ||
|
|
79226d0870 | ||
|
|
686316b390 | ||
|
|
6da385de9d | ||
|
|
0cc5e3ef03 | ||
|
|
47194c1fe4 | ||
|
|
3bf40d5db9 | ||
|
|
a9e11623cd | ||
|
|
a870dd368e | ||
|
|
b6b26c710f | ||
|
|
705ad84ce7 | ||
|
|
04932f496f | ||
|
|
dffbd58671 | ||
|
|
152e0162a9 | ||
|
|
2fa7faa75a | ||
|
|
12f514f752 | ||
|
|
e2886f37a2 | ||
|
|
03dff09b8a | ||
|
|
a556f8f2bf | ||
|
|
1765c9125a | ||
|
|
ab28192d50 | ||
|
|
ad15721677 | ||
|
|
a2d4dbeee4 | ||
|
|
1712ba1198 | ||
|
|
040dda37ed | ||
|
|
a99ba3bb16 | ||
|
|
05ff4a527d | ||
|
|
ae5328c500 | ||
|
|
34ff39e654 | ||
|
|
8e3915f5bf | ||
|
|
6019a1006c |
2
.github/ISSUE_TEMPLATE/support.md
vendored
2
.github/ISSUE_TEMPLATE/support.md
vendored
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: Support Request
|
||||
about: Support request or question relating to Kubespray
|
||||
labels: triage/support
|
||||
labels: kind/support
|
||||
|
||||
---
|
||||
|
||||
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -15,6 +15,7 @@ contrib/terraform/aws/credentials.tfvars
|
||||
**/*.sw[pon]
|
||||
*~
|
||||
vagrant/
|
||||
plugins/mitogen
|
||||
|
||||
# Ansible inventory
|
||||
inventory/*
|
||||
|
||||
@@ -8,13 +8,14 @@ stages:
|
||||
- deploy-special
|
||||
|
||||
variables:
|
||||
KUBESPRAY_VERSION: v2.13.3
|
||||
KUBESPRAY_VERSION: v2.14.1
|
||||
FAILFASTCI_NAMESPACE: 'kargo-ci'
|
||||
GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
|
||||
ANSIBLE_FORCE_COLOR: "true"
|
||||
MAGIC: "ci check this"
|
||||
TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
|
||||
CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
|
||||
CI_TEST_REGISTRY_MIRROR: "./tests/common/_docker_hub_registry_mirror.yml"
|
||||
GS_ACCESS_KEY_ID: $GS_KEY
|
||||
GS_SECRET_ACCESS_KEY: $GS_SECRET
|
||||
CONTAINER_ENGINE: docker
|
||||
|
||||
@@ -14,7 +14,7 @@ vagrant-validate:
|
||||
stage: unit-tests
|
||||
tags: [light]
|
||||
variables:
|
||||
VAGRANT_VERSION: 2.2.4
|
||||
VAGRANT_VERSION: 2.2.10
|
||||
script:
|
||||
- ./tests/scripts/vagrant-validate.sh
|
||||
except: ['triggers', 'master']
|
||||
@@ -64,9 +64,9 @@ markdownlint:
|
||||
tags: [light]
|
||||
image: node
|
||||
before_script:
|
||||
- npm install -g markdownlint-cli
|
||||
- npm install -g markdownlint-cli@0.22.0
|
||||
script:
|
||||
- markdownlint README.md docs --ignore docs/_sidebar.md
|
||||
- markdownlint $(find . -name '*.md' | grep -vF './.git') --ignore docs/_sidebar.md --ignore contrib/dind/README.md
|
||||
|
||||
ci-matrix:
|
||||
stage: unit-tests
|
||||
|
||||
@@ -1,43 +1,56 @@
|
||||
---
|
||||
.packet: &packet
|
||||
.packet:
|
||||
extends: .testcases
|
||||
variables:
|
||||
CI_PLATFORM: "packet"
|
||||
SSH_USER: "kubespray"
|
||||
CI_PLATFORM: packet
|
||||
SSH_USER: kubespray
|
||||
tags:
|
||||
- packet
|
||||
except: [triggers]
|
||||
|
||||
# CI template for PRs
|
||||
.packet_pr:
|
||||
only: [/^pr-.*$/]
|
||||
except: ['triggers']
|
||||
extends: .packet
|
||||
|
||||
# CI template for periodic CI jobs
|
||||
# Enabled when PERIODIC_CI_ENABLED var is set
|
||||
.packet_periodic:
|
||||
only:
|
||||
variables:
|
||||
- $PERIODIC_CI_ENABLED
|
||||
allow_failure: true
|
||||
extends: .packet
|
||||
|
||||
packet_ubuntu18-calico-aio:
|
||||
stage: deploy-part1
|
||||
extends: .packet
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
# Future AIO job
|
||||
packet_ubuntu20-calico-aio:
|
||||
stage: deploy-part1
|
||||
extends: .packet
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
# ### PR JOBS PART2
|
||||
|
||||
packet_centos7-flannel-containerd-addons-ha:
|
||||
extends: .packet
|
||||
extends: .packet_pr
|
||||
stage: deploy-part2
|
||||
when: on_success
|
||||
variables:
|
||||
MITOGEN_ENABLE: "true"
|
||||
|
||||
packet_centos7-crio:
|
||||
extends: .packet
|
||||
extends: .packet_pr
|
||||
stage: deploy-part2
|
||||
when: on_success
|
||||
variables:
|
||||
MITOGEN_ENABLE: "true"
|
||||
|
||||
packet_ubuntu18-crio:
|
||||
extends: .packet
|
||||
extends: .packet_pr
|
||||
stage: deploy-part2
|
||||
when: manual
|
||||
variables:
|
||||
@@ -45,44 +58,44 @@ packet_ubuntu18-crio:
|
||||
|
||||
packet_ubuntu16-canal-kubeadm-ha:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
|
||||
packet_ubuntu16-canal-sep:
|
||||
stage: deploy-special
|
||||
extends: .packet
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_ubuntu16-flannel-ha:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_ubuntu16-kube-router-sep:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_ubuntu16-kube-router-svc-proxy:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_debian10-cilium-svc-proxy:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
when: manual
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
|
||||
packet_debian10-containerd:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
variables:
|
||||
MITOGEN_ENABLE: "true"
|
||||
|
||||
packet_centos7-calico-ha-once-localhost:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
variables:
|
||||
# This will instruct Docker not to start over TLS.
|
||||
@@ -92,97 +105,91 @@ packet_centos7-calico-ha-once-localhost:
|
||||
|
||||
packet_centos8-kube-ovn:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
|
||||
packet_centos8-calico:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
packet_fedora32-weave:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
packet_opensuse-canal:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
|
||||
packet_ubuntu18-ovn4nfv:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
|
||||
# Contiv does not work in k8s v1.16
|
||||
# packet_ubuntu16-contiv-sep:
|
||||
# stage: deploy-part2
|
||||
# extends: .packet
|
||||
# when: on_success
|
||||
|
||||
# ### MANUAL JOBS
|
||||
|
||||
packet_ubuntu16-weave-sep:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_ubuntu18-cilium-sep:
|
||||
stage: deploy-special
|
||||
extends: .packet
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_ubuntu18-flannel-containerd-ha:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_ubuntu18-flannel-containerd-ha-once:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_debian9-macvlan:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_centos7-calico-ha:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_centos7-kube-router:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_centos7-multus-calico:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_oracle7-canal-ha:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_fedora31-flannel:
|
||||
packet_fedora33-calico:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
variables:
|
||||
MITOGEN_ENABLE: "true"
|
||||
|
||||
packet_amazon-linux-2-aio:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_fedora32-kube-ovn-containerd:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
|
||||
# ### PR JOBS PART3
|
||||
@@ -190,7 +197,7 @@ packet_fedora32-kube-ovn-containerd:
|
||||
|
||||
packet_centos7-weave-upgrade-ha:
|
||||
stage: deploy-part3
|
||||
extends: .packet
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
variables:
|
||||
UPGRADE_TEST: basic
|
||||
@@ -198,7 +205,7 @@ packet_centos7-weave-upgrade-ha:
|
||||
|
||||
packet_debian9-calico-upgrade:
|
||||
stage: deploy-part3
|
||||
extends: .packet
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
variables:
|
||||
UPGRADE_TEST: graceful
|
||||
@@ -206,7 +213,7 @@ packet_debian9-calico-upgrade:
|
||||
|
||||
packet_debian9-calico-upgrade-once:
|
||||
stage: deploy-part3
|
||||
extends: .packet
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
variables:
|
||||
UPGRADE_TEST: graceful
|
||||
@@ -214,7 +221,7 @@ packet_debian9-calico-upgrade-once:
|
||||
|
||||
packet_ubuntu18-calico-ha-recover:
|
||||
stage: deploy-part3
|
||||
extends: .packet
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
variables:
|
||||
RECOVER_CONTROL_PLANE_TEST: "true"
|
||||
@@ -222,7 +229,7 @@ packet_ubuntu18-calico-ha-recover:
|
||||
|
||||
packet_ubuntu18-calico-ha-recover-noquorum:
|
||||
stage: deploy-part3
|
||||
extends: .packet
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
variables:
|
||||
RECOVER_CONTROL_PLANE_TEST: "true"
|
||||
|
||||
@@ -4,7 +4,7 @@ shellcheck:
|
||||
stage: unit-tests
|
||||
tags: [light]
|
||||
variables:
|
||||
SHELLCHECK_VERSION: v0.6.0
|
||||
SHELLCHECK_VERSION: v0.7.1
|
||||
before_script:
|
||||
- ./tests/scripts/rebase.sh
|
||||
- curl --silent --location "https://github.com/koalaman/shellcheck/releases/download/"${SHELLCHECK_VERSION}"/shellcheck-"${SHELLCHECK_VERSION}".linux.x86_64.tar.xz" | tar -xJv
|
||||
@@ -12,5 +12,5 @@ shellcheck:
|
||||
- shellcheck --version
|
||||
script:
|
||||
# Run shellcheck for all *.sh except contrib/
|
||||
- find . -name '*.sh' -not -path './contrib/*' | xargs shellcheck --severity error
|
||||
- find . -name '*.sh' -not -path './contrib/*' -not -path './.git/*' | xargs shellcheck --severity error
|
||||
except: ['triggers', 'master']
|
||||
|
||||
@@ -56,28 +56,70 @@
|
||||
tf-validate-openstack:
|
||||
extends: .terraform_validate
|
||||
variables:
|
||||
TF_VERSION: 0.12.24
|
||||
TF_VERSION: 0.12.29
|
||||
PROVIDER: openstack
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
|
||||
tf-validate-packet:
|
||||
extends: .terraform_validate
|
||||
variables:
|
||||
TF_VERSION: 0.12.24
|
||||
TF_VERSION: 0.12.29
|
||||
PROVIDER: packet
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
|
||||
tf-validate-aws:
|
||||
extends: .terraform_validate
|
||||
variables:
|
||||
TF_VERSION: 0.12.24
|
||||
TF_VERSION: 0.12.29
|
||||
PROVIDER: aws
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
|
||||
tf-0.13.x-validate-openstack:
|
||||
extends: .terraform_validate
|
||||
variables:
|
||||
TF_VERSION: 0.13.5
|
||||
PROVIDER: openstack
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
|
||||
tf-0.13.x-validate-packet:
|
||||
extends: .terraform_validate
|
||||
variables:
|
||||
TF_VERSION: 0.13.5
|
||||
PROVIDER: packet
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
|
||||
tf-0.13.x-validate-aws:
|
||||
extends: .terraform_validate
|
||||
variables:
|
||||
TF_VERSION: 0.13.5
|
||||
PROVIDER: aws
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
|
||||
tf-0.14.x-validate-openstack:
|
||||
extends: .terraform_validate
|
||||
variables:
|
||||
TF_VERSION: 0.14.3
|
||||
PROVIDER: openstack
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
|
||||
tf-0.14.x-validate-packet:
|
||||
extends: .terraform_validate
|
||||
variables:
|
||||
TF_VERSION: 0.14.3
|
||||
PROVIDER: packet
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
|
||||
tf-0.14.x-validate-aws:
|
||||
extends: .terraform_validate
|
||||
variables:
|
||||
TF_VERSION: 0.14.3
|
||||
PROVIDER: aws
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
|
||||
# tf-packet-ubuntu16-default:
|
||||
# extends: .terraform_apply
|
||||
# variables:
|
||||
# TF_VERSION: 0.12.24
|
||||
# TF_VERSION: 0.12.29
|
||||
# PROVIDER: packet
|
||||
# CLUSTER: $CI_COMMIT_REF_NAME
|
||||
# TF_VAR_number_of_k8s_masters: "1"
|
||||
@@ -91,7 +133,7 @@ tf-validate-aws:
|
||||
# tf-packet-ubuntu18-default:
|
||||
# extends: .terraform_apply
|
||||
# variables:
|
||||
# TF_VERSION: 0.12.24
|
||||
# TF_VERSION: 0.12.29
|
||||
# PROVIDER: packet
|
||||
# CLUSTER: $CI_COMMIT_REF_NAME
|
||||
# TF_VAR_number_of_k8s_masters: "1"
|
||||
@@ -148,7 +190,7 @@ tf-elastx_ubuntu18-calico:
|
||||
when: on_success
|
||||
variables:
|
||||
<<: *elastx_variables
|
||||
TF_VERSION: 0.12.24
|
||||
TF_VERSION: 0.12.29
|
||||
PROVIDER: openstack
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
ANSIBLE_TIMEOUT: "60"
|
||||
@@ -193,7 +235,7 @@ tf-ovh_ubuntu18-calico:
|
||||
environment: ovh
|
||||
variables:
|
||||
<<: *ovh_variables
|
||||
TF_VERSION: 0.12.24
|
||||
TF_VERSION: 0.12.29
|
||||
PROVIDER: openstack
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
ANSIBLE_TIMEOUT: "60"
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
---
|
||||
extends: default
|
||||
|
||||
ignore: |
|
||||
.git/
|
||||
|
||||
rules:
|
||||
braces:
|
||||
min-spaces-inside: 0
|
||||
|
||||
@@ -10,7 +10,7 @@ To install development dependencies you can use `pip install -r tests/requiremen
|
||||
|
||||
#### Linting
|
||||
|
||||
Kubespray uses `yamllint` and `ansible-lint`. To run them locally use `yamllint .` and `./tests/scripts/ansible-lint.sh`
|
||||
Kubespray uses `yamllint` and `ansible-lint`. To run them locally use `yamllint .` and `ansible-lint`
|
||||
|
||||
#### Molecule
|
||||
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
FROM ubuntu:18.04
|
||||
# Use imutable image tags rather than mutable tags (like ubuntu:18.04)
|
||||
FROM ubuntu:bionic-20200807
|
||||
|
||||
ENV KUBE_VERSION=v1.19.7
|
||||
|
||||
RUN mkdir /kubespray
|
||||
WORKDIR /kubespray
|
||||
@@ -14,7 +17,8 @@ RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
|
||||
&& apt update -y && apt-get install docker-ce -y
|
||||
COPY . .
|
||||
RUN /usr/bin/python3 -m pip install pip -U && /usr/bin/python3 -m pip install -r tests/requirements.txt && python3 -m pip install -r requirements.txt && update-alternatives --install /usr/bin/python python /usr/bin/python3 1
|
||||
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.17.5/bin/linux/amd64/kubectl \
|
||||
|
||||
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/amd64/kubectl \
|
||||
&& chmod a+x kubectl && cp kubectl /usr/local/bin/kubectl
|
||||
|
||||
# Some tools like yamllint need this
|
||||
|
||||
2
OWNERS
2
OWNERS
@@ -4,3 +4,5 @@ approvers:
|
||||
- kubespray-approvers
|
||||
reviewers:
|
||||
- kubespray-reviewers
|
||||
emeritus_approvers:
|
||||
- kubespray-emeritus_approvers
|
||||
@@ -1,19 +1,18 @@
|
||||
aliases:
|
||||
kubespray-approvers:
|
||||
- ant31
|
||||
- mattymo
|
||||
- atoms
|
||||
- chadswen
|
||||
- mirwan
|
||||
- miouge1
|
||||
- riverzhang
|
||||
- verwilst
|
||||
- woopstar
|
||||
- luckysb
|
||||
- floryut
|
||||
kubespray-reviewers:
|
||||
- jjungnickel
|
||||
- archifleks
|
||||
- holmsten
|
||||
- bozzo
|
||||
- floryut
|
||||
- eppo
|
||||
- oomichi
|
||||
kubespray-emeritus_approvers:
|
||||
- riverzhang
|
||||
- atoms
|
||||
- ant31
|
||||
|
||||
37
README.md
37
README.md
@@ -106,7 +106,7 @@ vagrant up
|
||||
- **Debian** Buster, Jessie, Stretch, Wheezy
|
||||
- **Ubuntu** 16.04, 18.04, 20.04
|
||||
- **CentOS/RHEL** 7, 8 (experimental: see [centos 8 notes](docs/centos8.md))
|
||||
- **Fedora** 31, 32
|
||||
- **Fedora** 32, 33
|
||||
- **Fedora CoreOS** (experimental: see [fcos Note](docs/fcos.md))
|
||||
- **openSUSE** Leap 42.3/Tumbleweed
|
||||
- **Oracle Linux** 7, 8 (experimental: [centos 8 notes](docs/centos8.md) apply)
|
||||
@@ -116,20 +116,19 @@ Note: Upstart/SysV init based OS types are not supported.
|
||||
## Supported Components
|
||||
|
||||
- Core
|
||||
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.18.9
|
||||
- [etcd](https://github.com/coreos/etcd) v3.4.3
|
||||
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.19.7
|
||||
- [etcd](https://github.com/coreos/etcd) v3.4.13
|
||||
- [docker](https://www.docker.com/) v19.03 (see note)
|
||||
- [containerd](https://containerd.io/) v1.2.13
|
||||
- [cri-o](http://cri-o.io/) v1.17 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
|
||||
- [containerd](https://containerd.io/) v1.3.9
|
||||
- [cri-o](http://cri-o.io/) v1.19 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
|
||||
- Network Plugin
|
||||
- [cni-plugins](https://github.com/containernetworking/plugins) v0.8.6
|
||||
- [calico](https://github.com/projectcalico/calico) v3.15.2
|
||||
- [cni-plugins](https://github.com/containernetworking/plugins) v0.9.0
|
||||
- [calico](https://github.com/projectcalico/calico) v3.16.5
|
||||
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
|
||||
- [cilium](https://github.com/cilium/cilium) v1.8.3
|
||||
- [contiv](https://github.com/contiv/install) v1.2.1
|
||||
- [flanneld](https://github.com/coreos/flannel) v0.12.0
|
||||
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.3.0
|
||||
- [kube-router](https://github.com/cloudnativelabs/kube-router) v1.0.1
|
||||
- [cilium](https://github.com/cilium/cilium) v1.8.6
|
||||
- [flanneld](https://github.com/coreos/flannel) v0.13.0
|
||||
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.5.2
|
||||
- [kube-router](https://github.com/cloudnativelabs/kube-router) v1.1.1
|
||||
- [multus](https://github.com/intel/multus-cni) v3.6.0
|
||||
- [ovn4nfv](https://github.com/opnfv/ovn4nfv-k8s-plugin) v1.1.0
|
||||
- [weave](https://github.com/weaveworks/weave) v2.7.0
|
||||
@@ -138,18 +137,17 @@ Note: Upstart/SysV init based OS types are not supported.
|
||||
- [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
|
||||
- [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
|
||||
- [cert-manager](https://github.com/jetstack/cert-manager) v0.16.1
|
||||
- [coredns](https://github.com/coredns/coredns) v1.6.7
|
||||
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.35.0
|
||||
- [coredns](https://github.com/coredns/coredns) v1.7.0
|
||||
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.41.2
|
||||
|
||||
Note: The list of validated [docker versions](https://kubernetes.io/docs/setup/production-environment/container-runtimes/#docker) is 1.13.1, 17.03, 17.06, 17.09, 18.06, 18.09 and 19.03. The recommended docker version is 19.03. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
|
||||
Note: The list of available docker version is 18.09, 19.03 and 20.10. The recommended docker version is 19.03. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
|
||||
|
||||
## Requirements
|
||||
|
||||
- **Minimum required version of Kubernetes is v1.17**
|
||||
- **Ansible v2.9+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
|
||||
- **Minimum required version of Kubernetes is v1.18**
|
||||
- **Ansible v2.9.x, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands, Ansible 2.10.x is not supported for now**
|
||||
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
|
||||
- The target servers are configured to allow **IPv4 forwarding**.
|
||||
- **Your ssh key must be copied** to all the servers part of your inventory.
|
||||
- The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
|
||||
in order to avoid any issue during deployment you should disable your firewall.
|
||||
- If kubespray is ran from non-root user account, correct privilege escalation method
|
||||
@@ -179,9 +177,6 @@ You can choose between 10 network plugins. (default: `calico`, except Vagrant us
|
||||
|
||||
- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.
|
||||
|
||||
- [contiv](docs/contiv.md): supports vlan, vxlan, bgp and Cisco SDN networking. This plugin is able to
|
||||
apply firewall policies, segregate containers in multiple network and bridging pods onto physical networks.
|
||||
|
||||
- [ovn4nfv](docs/ovn4nfv.md): [ovn4nfv-k8s-plugins](https://github.com/opnfv/ovn4nfv-k8s-plugin) is the network controller, OVS agent and CNI server to offer basic SFC and OVN overlay networking.
|
||||
|
||||
- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
|
||||
|
||||
18
Vagrantfile
vendored
18
Vagrantfile
vendored
@@ -26,12 +26,14 @@ SUPPORTED_OS = {
|
||||
"centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
|
||||
"centos8" => {box: "centos/8", user: "vagrant"},
|
||||
"centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
|
||||
"fedora31" => {box: "fedora/31-cloud-base", user: "vagrant"},
|
||||
"fedora32" => {box: "fedora/32-cloud-base", user: "vagrant"},
|
||||
"fedora33" => {box: "fedora/33-cloud-base", user: "vagrant"},
|
||||
"opensuse" => {box: "bento/opensuse-leap-15.1", user: "vagrant"},
|
||||
"opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
|
||||
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
|
||||
"oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
|
||||
"rhel7" => {box: "generic/rhel7", user: "vagrant"},
|
||||
"rhel8" => {box: "generic/rhel8", user: "vagrant"},
|
||||
}
|
||||
|
||||
if File.exist?(CONFIG)
|
||||
@@ -142,6 +144,7 @@ Vagrant.configure("2") do |config|
|
||||
vb.gui = $vm_gui
|
||||
vb.linked_clone = true
|
||||
vb.customize ["modifyvm", :id, "--vram", "8"] # ubuntu defaults to 256 MB which is a waste of precious RAM
|
||||
vb.customize ["modifyvm", :id, "--audio", "none"]
|
||||
end
|
||||
|
||||
node.vm.provider :libvirt do |lv|
|
||||
@@ -176,10 +179,19 @@ Vagrant.configure("2") do |config|
|
||||
node.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true
|
||||
end
|
||||
|
||||
if ["rhel7","rhel8"].include? $os
|
||||
# Vagrant synced_folder rsync options cannot be used for RHEL boxes as Rsync package cannot
|
||||
# be installed until the host is registered with a valid Red Hat support subscription
|
||||
node.vm.synced_folder ".", "/vagrant", disabled: false
|
||||
$shared_folders.each do |src, dst|
|
||||
node.vm.synced_folder src, dst
|
||||
end
|
||||
else
|
||||
node.vm.synced_folder ".", "/vagrant", disabled: false, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] , rsync__exclude: ['.git','venv']
|
||||
$shared_folders.each do |src, dst|
|
||||
node.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
|
||||
end
|
||||
end
|
||||
|
||||
ip = "#{$subnet}.#{i+100}"
|
||||
node.vm.network :private_network, ip: ip
|
||||
@@ -187,8 +199,8 @@ Vagrant.configure("2") do |config|
|
||||
# Disable swap for each vm
|
||||
node.vm.provision "shell", inline: "swapoff -a"
|
||||
|
||||
# Disable firewalld on oraclelinux vms
|
||||
if ["oraclelinux","oraclelinux8"].include? $os
|
||||
# Disable firewalld on oraclelinux/redhat vms
|
||||
if ["oraclelinux","oraclelinux8","rhel7","rhel8"].include? $os
|
||||
node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld"
|
||||
end
|
||||
|
||||
|
||||
@@ -3,13 +3,15 @@
|
||||
gather_facts: false
|
||||
become: no
|
||||
vars:
|
||||
minimal_ansible_version: 2.8.0
|
||||
minimal_ansible_version: 2.9.0
|
||||
maximal_ansible_version: 2.10.0
|
||||
ansible_connection: local
|
||||
tasks:
|
||||
- name: "Check ansible version >={{ minimal_ansible_version }}"
|
||||
- name: "Check {{ minimal_ansible_version }} <= Ansible version < {{ maximal_ansible_version }}"
|
||||
assert:
|
||||
msg: "Ansible must be {{ minimal_ansible_version }} or higher"
|
||||
msg: "Ansible must be between {{ minimal_ansible_version }} and {{ maximal_ansible_version }}"
|
||||
that:
|
||||
- ansible_version.string is version(minimal_ansible_version, ">=")
|
||||
- ansible_version.string is version(maximal_ansible_version, "<")
|
||||
tags:
|
||||
- check
|
||||
|
||||
18
cluster.yml
18
cluster.yml
@@ -2,21 +2,6 @@
|
||||
- name: Check ansible version
|
||||
import_playbook: ansible_version.yml
|
||||
|
||||
- hosts: all
|
||||
gather_facts: false
|
||||
tags: always
|
||||
tasks:
|
||||
- name: "Set up proxy environment"
|
||||
set_fact:
|
||||
proxy_env:
|
||||
http_proxy: "{{ http_proxy | default ('') }}"
|
||||
HTTP_PROXY: "{{ http_proxy | default ('') }}"
|
||||
https_proxy: "{{ https_proxy | default ('') }}"
|
||||
HTTPS_PROXY: "{{ https_proxy | default ('') }}"
|
||||
no_proxy: "{{ no_proxy | default ('') }}"
|
||||
NO_PROXY: "{{ no_proxy | default ('') }}"
|
||||
no_log: true
|
||||
|
||||
- hosts: bastion[0]
|
||||
gather_facts: False
|
||||
roles:
|
||||
@@ -43,7 +28,6 @@
|
||||
- { role: kubernetes/preinstall, tags: preinstall }
|
||||
- { role: "container-engine", tags: "container-engine", when: deploy_container_engine|default(true) }
|
||||
- { role: download, tags: download, when: "not skip_downloads" }
|
||||
environment: "{{ proxy_env }}"
|
||||
|
||||
- hosts: etcd
|
||||
gather_facts: False
|
||||
@@ -75,7 +59,6 @@
|
||||
roles:
|
||||
- { role: kubespray-defaults }
|
||||
- { role: kubernetes/node, tags: node }
|
||||
environment: "{{ proxy_env }}"
|
||||
|
||||
- hosts: kube-master
|
||||
gather_facts: False
|
||||
@@ -127,7 +110,6 @@
|
||||
roles:
|
||||
- { role: kubespray-defaults }
|
||||
- { role: kubernetes-apps, tags: apps }
|
||||
environment: "{{ proxy_env }}"
|
||||
|
||||
- hosts: k8s-cluster
|
||||
gather_facts: False
|
||||
|
||||
1
contrib/aws_inventory/requirements.txt
Normal file
1
contrib/aws_inventory/requirements.txt
Normal file
@@ -0,0 +1 @@
|
||||
boto3 # Apache-2.0
|
||||
@@ -31,7 +31,7 @@ also removes all public IPs from all other VMs.
|
||||
To generate and apply the templates, call:
|
||||
|
||||
```shell
|
||||
$ ./apply-rg.sh <resource_group_name>
|
||||
./apply-rg.sh <resource_group_name>
|
||||
```
|
||||
|
||||
If you change something in the configuration (e.g. number of nodes) later, you can call this again and Azure will
|
||||
@@ -42,25 +42,23 @@ take care about creating/modifying whatever is needed.
|
||||
If you need to delete all resources from a resource group, simply call:
|
||||
|
||||
```shell
|
||||
$ ./clear-rg.sh <resource_group_name>
|
||||
./clear-rg.sh <resource_group_name>
|
||||
```
|
||||
|
||||
**WARNING** this really deletes everything from your resource group, including everything that was later created by you!
|
||||
|
||||
|
||||
## Generating an inventory for kubespray
|
||||
|
||||
After you have applied the templates, you can generate an inventory with this call:
|
||||
|
||||
```shell
|
||||
$ ./generate-inventory.sh <resource_group_name>
|
||||
./generate-inventory.sh <resource_group_name>
|
||||
```
|
||||
|
||||
It will create the file ./inventory which can then be used with kubespray, e.g.:
|
||||
|
||||
```shell
|
||||
$ cd kubespray-root-dir
|
||||
$ sudo pip3 install -r requirements.txt
|
||||
$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all/all.yml" cluster.yml
|
||||
cd kubespray-root-dir
|
||||
sudo pip3 install -r requirements.txt
|
||||
ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all/all.yml" cluster.yml
|
||||
```
|
||||
|
||||
|
||||
@@ -6,6 +6,7 @@ to serve as Kubernetes "nodes", which in turn will run
|
||||
called DIND (Docker-IN-Docker).
|
||||
|
||||
The playbook has two roles:
|
||||
|
||||
- dind-host: creates the "nodes" as containers in localhost, with
|
||||
appropriate settings for DIND (privileged, volume mapping for dind
|
||||
storage, etc).
|
||||
@@ -27,7 +28,7 @@ See below for a complete successful run:
|
||||
|
||||
1. Create the node containers
|
||||
|
||||
~~~~
|
||||
```shell
|
||||
# From the kubespray root dir
|
||||
cd contrib/dind
|
||||
pip install -r requirements.txt
|
||||
@@ -36,15 +37,15 @@ ansible-playbook -i hosts dind-cluster.yaml
|
||||
|
||||
# Back to kubespray root
|
||||
cd ../..
|
||||
~~~~
|
||||
```
|
||||
|
||||
NOTE: if the playbook run fails with something like below error
|
||||
message, you may need to specifically set `ansible_python_interpreter`,
|
||||
see `./hosts` file for an example expanded localhost entry.
|
||||
|
||||
~~~
|
||||
```shell
|
||||
failed: [localhost] (item=kube-node1) => {"changed": false, "item": "kube-node1", "msg": "Failed to import docker or docker-py - No module named requests.exceptions. Try `pip install docker` or `pip install docker-py` (Python 2.6)"}
|
||||
~~~
|
||||
```
|
||||
|
||||
2. Customize kubespray-dind.yaml
|
||||
|
||||
@@ -52,33 +53,33 @@ Note that there's coupling between above created node containers
|
||||
and `kubespray-dind.yaml` settings, in particular regarding selected `node_distro`
|
||||
(as set in `group_vars/all/all.yaml`), and docker settings.
|
||||
|
||||
~~~
|
||||
```shell
|
||||
$EDITOR contrib/dind/kubespray-dind.yaml
|
||||
~~~
|
||||
```
|
||||
|
||||
3. Prepare the inventory and run the playbook
|
||||
|
||||
~~~
|
||||
```shell
|
||||
INVENTORY_DIR=inventory/local-dind
|
||||
mkdir -p ${INVENTORY_DIR}
|
||||
rm -f ${INVENTORY_DIR}/hosts.ini
|
||||
CONFIG_FILE=${INVENTORY_DIR}/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
|
||||
|
||||
ansible-playbook --become -e ansible_ssh_user=debian -i ${INVENTORY_DIR}/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml
|
||||
~~~
|
||||
```
|
||||
|
||||
NOTE: You could also test other distros without editing files by
|
||||
passing `--extra-vars` as per below commandline,
|
||||
replacing `DISTRO` by either `debian`, `ubuntu`, `centos`, `fedora`:
|
||||
|
||||
~~~
|
||||
```shell
|
||||
cd contrib/dind
|
||||
ansible-playbook -i hosts dind-cluster.yaml --extra-vars node_distro=DISTRO
|
||||
|
||||
cd ../..
|
||||
CONFIG_FILE=inventory/local-dind/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
|
||||
ansible-playbook --become -e ansible_ssh_user=DISTRO -i inventory/local-dind/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml --extra-vars bootstrap_os=DISTRO
|
||||
~~~
|
||||
```
|
||||
|
||||
## Resulting deployment
|
||||
|
||||
@@ -89,7 +90,7 @@ from the host where you ran kubespray playbooks.
|
||||
|
||||
Running from an Ubuntu Xenial host:
|
||||
|
||||
~~~
|
||||
```shell
|
||||
$ uname -a
|
||||
Linux ip-xx-xx-xx-xx 4.4.0-1069-aws #79-Ubuntu SMP Mon Sep 24
|
||||
15:01:41 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
|
||||
@@ -149,14 +150,14 @@ kube-system weave-net-xr46t 2/2 Running 0
|
||||
|
||||
$ docker exec kube-node1 curl -s http://localhost:31081/api/v1/connectivity_check
|
||||
{"Message":"All 10 pods successfully reported back to the server","Absent":null,"Outdated":null}
|
||||
~~~
|
||||
```
|
||||
|
||||
## Using ./run-test-distros.sh
|
||||
|
||||
You can use `./run-test-distros.sh` to run a set of tests via DIND,
|
||||
and excerpt from this script, to get an idea:
|
||||
|
||||
~~~
|
||||
```shell
|
||||
# The SPEC file(s) must have two arrays as e.g.
|
||||
# DISTROS=(debian centos)
|
||||
# EXTRAS=(
|
||||
@@ -169,7 +170,7 @@ and excerpt from this script, to get an idea:
|
||||
#
|
||||
# Each $EXTRAS element will be whitespace split, and passed as --extra-vars
|
||||
# to main kubespray ansible-playbook run.
|
||||
~~~
|
||||
```
|
||||
|
||||
See e.g. `test-some_distros-most_CNIs.env` and
|
||||
`test-some_distros-kube_router_combo.env` in particular for a richer
|
||||
|
||||
@@ -63,10 +63,10 @@ def get_var_as_bool(name, default):
|
||||
|
||||
|
||||
CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory/sample/hosts.yaml")
|
||||
KUBE_MASTERS = int(os.environ.get("KUBE_MASTERS_MASTERS", 2))
|
||||
KUBE_MASTERS = int(os.environ.get("KUBE_MASTERS", 2))
|
||||
# Reconfigures cluster distribution at scale
|
||||
SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
|
||||
MASSIVE_SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 200))
|
||||
MASSIVE_SCALE_THRESHOLD = int(os.environ.get("MASSIVE_SCALE_THRESHOLD", 200))
|
||||
|
||||
DEBUG = get_var_as_bool("DEBUG", True)
|
||||
HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")
|
||||
@@ -402,6 +402,7 @@ Configurable env vars:
|
||||
DEBUG Enable debug printing. Default: True
|
||||
CONFIG_FILE File to write config to Default: ./inventory/sample/hosts.yaml
|
||||
HOST_PREFIX Host prefix for generated hosts. Default: node
|
||||
KUBE_MASTERS Set the number of kube-masters. Default: 2
|
||||
SCALE_THRESHOLD Separate ETCD role if # of nodes >= 50
|
||||
MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
|
||||
''' # noqa
|
||||
|
||||
@@ -51,7 +51,7 @@ class TestInventory(unittest.TestCase):
|
||||
groups = ['group1', 'group2']
|
||||
self.inv.ensure_required_groups(groups)
|
||||
for group in groups:
|
||||
self.assertTrue(group in self.inv.yaml_config['all']['children'])
|
||||
self.assertIn(group, self.inv.yaml_config['all']['children'])
|
||||
|
||||
def test_get_host_id(self):
|
||||
hostnames = ['node99', 'no99de01', '01node01', 'node1.domain',
|
||||
@@ -209,8 +209,8 @@ class TestInventory(unittest.TestCase):
|
||||
('doesnotbelong2', {'whateveropts=ilike'})])
|
||||
self.inv.yaml_config['all']['hosts'] = existing_hosts
|
||||
self.inv.purge_invalid_hosts(proper_hostnames)
|
||||
self.assertTrue(
|
||||
bad_host not in self.inv.yaml_config['all']['hosts'].keys())
|
||||
self.assertNotIn(
|
||||
bad_host, self.inv.yaml_config['all']['hosts'].keys())
|
||||
|
||||
def test_add_host_to_group(self):
|
||||
group = 'etcd'
|
||||
@@ -227,8 +227,8 @@ class TestInventory(unittest.TestCase):
|
||||
host = 'node1'
|
||||
|
||||
self.inv.set_kube_master([host])
|
||||
self.assertTrue(
|
||||
host in self.inv.yaml_config['all']['children'][group]['hosts'])
|
||||
self.assertIn(
|
||||
host, self.inv.yaml_config['all']['children'][group]['hosts'])
|
||||
|
||||
def test_set_all(self):
|
||||
hosts = OrderedDict([
|
||||
@@ -246,8 +246,8 @@ class TestInventory(unittest.TestCase):
|
||||
|
||||
self.inv.set_k8s_cluster()
|
||||
for host in expected_hosts:
|
||||
self.assertTrue(
|
||||
host in
|
||||
self.assertIn(
|
||||
host,
|
||||
self.inv.yaml_config['all']['children'][group]['children'])
|
||||
|
||||
def test_set_kube_node(self):
|
||||
@@ -255,16 +255,16 @@ class TestInventory(unittest.TestCase):
|
||||
host = 'node1'
|
||||
|
||||
self.inv.set_kube_node([host])
|
||||
self.assertTrue(
|
||||
host in self.inv.yaml_config['all']['children'][group]['hosts'])
|
||||
self.assertIn(
|
||||
host, self.inv.yaml_config['all']['children'][group]['hosts'])
|
||||
|
||||
def test_set_etcd(self):
|
||||
group = 'etcd'
|
||||
host = 'node1'
|
||||
|
||||
self.inv.set_etcd([host])
|
||||
self.assertTrue(
|
||||
host in self.inv.yaml_config['all']['children'][group]['hosts'])
|
||||
self.assertIn(
|
||||
host, self.inv.yaml_config['all']['children'][group]['hosts'])
|
||||
|
||||
def test_scale_scenario_one(self):
|
||||
num_nodes = 50
|
||||
|
||||
@@ -5,7 +5,7 @@ deployment on VMs.
|
||||
|
||||
This playbook does not create Virtual Machines, nor does it run Kubespray itself.
|
||||
|
||||
### User creation
|
||||
## User creation
|
||||
|
||||
If you want to create a user for running Kubespray deployment, you should specify
|
||||
both `k8s_deployment_user` and `k8s_deployment_user_pkey_path`.
|
||||
|
||||
@@ -8,19 +8,19 @@ In the same directory of this ReadMe file you should find a file named `inventor
|
||||
|
||||
Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/sample/k8s_gfs_inventory`. Make sure that the settings on `inventory/sample/group_vars/all.yml` make sense with your deployment. Then execute change to the kubespray root folder, and execute (supposing that the machines are all using ubuntu):
|
||||
|
||||
```
|
||||
```shell
|
||||
ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./cluster.yml
|
||||
```
|
||||
|
||||
This will provision your Kubernetes cluster. Then, to provision and configure the GlusterFS cluster, from the same directory execute:
|
||||
|
||||
```
|
||||
```shell
|
||||
ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml
|
||||
```
|
||||
|
||||
If your machines are not using Ubuntu, you need to change the `--user=ubuntu` to the correct user. Alternatively, if your Kubernetes machines are using one OS and your GlusterFS a different one, you can instead specify the `ansible_ssh_user=<correct-user>` variable in the inventory file that you just created, for each machine/VM:
|
||||
|
||||
```
|
||||
```shell
|
||||
k8s-master-1 ansible_ssh_host=192.168.0.147 ip=192.168.0.147 ansible_ssh_user=core
|
||||
k8s-master-node-1 ansible_ssh_host=192.168.0.148 ip=192.168.0.148 ansible_ssh_user=core
|
||||
k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_user=core
|
||||
@@ -30,7 +30,7 @@ k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_us
|
||||
|
||||
First step is to fill in a `my-kubespray-gluster-cluster.tfvars` file with the specification desired for your cluster. An example with all required variables would look like:
|
||||
|
||||
```
|
||||
```ini
|
||||
cluster_name = "cluster1"
|
||||
number_of_k8s_masters = "1"
|
||||
number_of_k8s_masters_no_floating_ip = "2"
|
||||
@@ -54,7 +54,7 @@ ssh_user_gfs = "ubuntu"
|
||||
|
||||
As explained in the general terraform/openstack guide, you need to source your OpenStack credentials file, add your ssh-key to the ssh-agent and setup environment variables for terraform:
|
||||
|
||||
```
|
||||
```shell
|
||||
$ source ~/.stackrc
|
||||
$ eval $(ssh-agent -s)
|
||||
$ ssh-add ~/.ssh/my-desired-key
|
||||
@@ -67,7 +67,7 @@ $ echo Setting up Terraform creds && \
|
||||
|
||||
Then, standing on the kubespray directory (root base of the Git checkout), issue the following terraform command to create the VMs for the cluster:
|
||||
|
||||
```
|
||||
```shell
|
||||
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
|
||||
```
|
||||
|
||||
@@ -75,18 +75,18 @@ This will create both your Kubernetes and Gluster VMs. Make sure that the ansibl
|
||||
|
||||
Then, provision your Kubernetes (kubespray) cluster with the following ansible call:
|
||||
|
||||
```
|
||||
```shell
|
||||
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml
|
||||
```
|
||||
|
||||
Finally, provision the glusterfs nodes and add the Persistent Volume setup for GlusterFS in Kubernetes through the following ansible call:
|
||||
|
||||
```
|
||||
```shell
|
||||
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
|
||||
```
|
||||
|
||||
If you need to destroy the cluster, you can run:
|
||||
|
||||
```
|
||||
```shell
|
||||
terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
|
||||
```
|
||||
|
||||
@@ -1,17 +1,26 @@
|
||||
# Deploy Heketi/Glusterfs into Kubespray/Kubernetes
|
||||
|
||||
This playbook aims to automate [this](https://github.com/heketi/heketi/blob/master/docs/admin/install-kubernetes.md) tutorial. It deploys heketi/glusterfs into kubernetes and sets up a storageclass.
|
||||
|
||||
## Important notice
|
||||
|
||||
> Due to resource limits on the current project maintainers and general lack of contributions we are considering placing Heketi into a [near-maintenance mode](https://github.com/heketi/heketi#important-notice)
|
||||
|
||||
## Client Setup
|
||||
|
||||
Heketi provides a CLI that provides users with a means to administer the deployment and configuration of GlusterFS in Kubernetes. [Download and install the heketi-cli](https://github.com/heketi/heketi/releases) on your client machine.
|
||||
|
||||
## Install
|
||||
|
||||
Copy the inventory.yml.sample over to inventory/sample/k8s_heketi_inventory.yml and change it according to your setup.
|
||||
```
|
||||
|
||||
```shell
|
||||
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi.yml
|
||||
```
|
||||
|
||||
## Tear down
|
||||
```
|
||||
|
||||
```shell
|
||||
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi-tear-down.yml
|
||||
```
|
||||
|
||||
|
||||
21
contrib/offline/README.md
Normal file
21
contrib/offline/README.md
Normal file
@@ -0,0 +1,21 @@
|
||||
# Container image collecting script for offline deployment
|
||||
|
||||
This script has two features:
|
||||
(1) Get container images from an environment which is deployed online.
|
||||
(2) Deploy local container registry and register the container images to the registry.
|
||||
|
||||
Step(1) should be done online site as a preparation, then we bring the gotten images
|
||||
to the target offline environment.
|
||||
Then we will run step(2) for registering the images to local registry.
|
||||
|
||||
Step(1) can be operated with:
|
||||
|
||||
```shell
|
||||
manage-offline-container-images.sh create
|
||||
```
|
||||
|
||||
Step(2) can be operated with:
|
||||
|
||||
```shell
|
||||
manage-offline-container-images.sh register
|
||||
```
|
||||
1
contrib/offline/docker-daemon.json
Normal file
1
contrib/offline/docker-daemon.json
Normal file
@@ -0,0 +1 @@
|
||||
{ "insecure-registries":["HOSTNAME:5000"] }
|
||||
150
contrib/offline/manage-offline-container-images.sh
Executable file
150
contrib/offline/manage-offline-container-images.sh
Executable file
@@ -0,0 +1,150 @@
|
||||
#!/bin/bash
|
||||
|
||||
OPTION=$1
|
||||
CURRENT_DIR=$(cd $(dirname $0); pwd)
|
||||
TEMP_DIR="${CURRENT_DIR}/temp"
|
||||
|
||||
IMAGE_TAR_FILE="${CURRENT_DIR}/container-images.tar.gz"
|
||||
IMAGE_DIR="${CURRENT_DIR}/container-images"
|
||||
IMAGE_LIST="${IMAGE_DIR}/container-images.txt"
|
||||
RETRY_COUNT=5
|
||||
|
||||
function create_container_image_tar() {
|
||||
set -e
|
||||
|
||||
IMAGES=$(kubectl describe pods --all-namespaces | grep " Image:" | awk '{print $2}' | sort | uniq)
|
||||
# NOTE: etcd and pause cannot be seen as pods.
|
||||
# The pause image is used for --pod-infra-container-image option of kubelet.
|
||||
EXT_IMAGES=$(kubectl cluster-info dump | egrep "quay.io/coreos/etcd:|k8s.gcr.io/pause:" | sed s@\"@@g)
|
||||
IMAGES="${IMAGES} ${EXT_IMAGES}"
|
||||
|
||||
rm -f ${IMAGE_TAR_FILE}
|
||||
rm -rf ${IMAGE_DIR}
|
||||
mkdir ${IMAGE_DIR}
|
||||
cd ${IMAGE_DIR}
|
||||
|
||||
sudo docker pull registry:latest
|
||||
sudo docker save -o registry-latest.tar registry:latest
|
||||
|
||||
for image in ${IMAGES}
|
||||
do
|
||||
FILE_NAME="$(echo ${image} | sed s@"/"@"-"@g | sed s/":"/"-"/g)".tar
|
||||
set +e
|
||||
for step in $(seq 1 ${RETRY_COUNT})
|
||||
do
|
||||
sudo docker pull ${image}
|
||||
if [ $? -eq 0 ]; then
|
||||
break
|
||||
fi
|
||||
echo "Failed to pull ${image} at step ${step}"
|
||||
if [ ${step} -eq ${RETRY_COUNT} ]; then
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
set -e
|
||||
sudo docker save -o ${FILE_NAME} ${image}
|
||||
|
||||
# NOTE: Here removes the following repo parts from each image
|
||||
# so that these parts will be replaced with Kubespray.
|
||||
# - kube_image_repo: "k8s.gcr.io"
|
||||
# - gcr_image_repo: "gcr.io"
|
||||
# - docker_image_repo: "docker.io"
|
||||
# - quay_image_repo: "quay.io"
|
||||
FIRST_PART=$(echo ${image} | awk -F"/" '{print $1}')
|
||||
if [ "${FIRST_PART}" = "k8s.gcr.io" ] ||
|
||||
[ "${FIRST_PART}" = "gcr.io" ] ||
|
||||
[ "${FIRST_PART}" = "docker.io" ] ||
|
||||
[ "${FIRST_PART}" = "quay.io" ]; then
|
||||
image=$(echo ${image} | sed s@"${FIRST_PART}/"@@)
|
||||
fi
|
||||
echo "${FILE_NAME} ${image}" >> ${IMAGE_LIST}
|
||||
done
|
||||
|
||||
cd ..
|
||||
sudo chown ${USER} ${IMAGE_DIR}/*
|
||||
tar -zcvf ${IMAGE_TAR_FILE} ./container-images
|
||||
rm -rf ${IMAGE_DIR}
|
||||
|
||||
echo ""
|
||||
echo "${IMAGE_TAR_FILE} is created to contain your container images."
|
||||
echo "Please keep this file and bring it to your offline environment."
|
||||
}
|
||||
|
||||
function register_container_images() {
|
||||
if [ ! -f ${IMAGE_TAR_FILE} ]; then
|
||||
echo "${IMAGE_TAR_FILE} should exist."
|
||||
exit 1
|
||||
fi
|
||||
if [ ! -d ${TEMP_DIR} ]; then
|
||||
mkdir ${TEMP_DIR}
|
||||
fi
|
||||
|
||||
# To avoid "http: server gave http response to https client" error.
|
||||
LOCALHOST_NAME=$(hostname)
|
||||
if [ -d /etc/docker/ ]; then
|
||||
set -e
|
||||
# Ubuntu18.04, RHEL7/CentOS7
|
||||
cp ${CURRENT_DIR}/docker-daemon.json ${TEMP_DIR}/docker-daemon.json
|
||||
sed -i s@"HOSTNAME"@"${LOCALHOST_NAME}"@ ${TEMP_DIR}/docker-daemon.json
|
||||
sudo cp ${TEMP_DIR}/docker-daemon.json /etc/docker/daemon.json
|
||||
elif [ -d /etc/containers/ ]; then
|
||||
set -e
|
||||
# RHEL8/CentOS8
|
||||
cp ${CURRENT_DIR}/registries.conf ${TEMP_DIR}/registries.conf
|
||||
sed -i s@"HOSTNAME"@"${LOCALHOST_NAME}"@ ${TEMP_DIR}/registries.conf
|
||||
sudo cp ${TEMP_DIR}/registries.conf /etc/containers/registries.conf
|
||||
else
|
||||
echo "docker package(docker-ce, etc.) should be installed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
tar -zxvf ${IMAGE_TAR_FILE}
|
||||
sudo docker load -i ${IMAGE_DIR}/registry-latest.tar
|
||||
sudo docker run --restart=always -d -p 5000:5000 --name registry registry:latest
|
||||
set +e
|
||||
|
||||
set -e
|
||||
while read -r line; do
|
||||
file_name=$(echo ${line} | awk '{print $1}')
|
||||
org_image=$(echo ${line} | awk '{print $2}')
|
||||
new_image="${LOCALHOST_NAME}:5000/${org_image}"
|
||||
image_id=$(tar -tf ${IMAGE_DIR}/${file_name} | grep "\.json" | grep -v manifest.json | sed s/"\.json"//)
|
||||
sudo docker load -i ${IMAGE_DIR}/${file_name}
|
||||
sudo docker tag ${image_id} ${new_image}
|
||||
sudo docker push ${new_image}
|
||||
done <<< "$(cat ${IMAGE_LIST})"
|
||||
|
||||
echo "Succeeded to register container images to local registry."
|
||||
echo "Please specify ${LOCALHOST_NAME}:5000 for the following options in your inventry:"
|
||||
echo "- kube_image_repo"
|
||||
echo "- gcr_image_repo"
|
||||
echo "- docker_image_repo"
|
||||
echo "- quay_image_repo"
|
||||
}
|
||||
|
||||
if [ "${OPTION}" == "create" ]; then
|
||||
create_container_image_tar
|
||||
elif [ "${OPTION}" == "register" ]; then
|
||||
register_container_images
|
||||
else
|
||||
echo "This script has two features:"
|
||||
echo "(1) Get container images from an environment which is deployed online."
|
||||
echo "(2) Deploy local container registry and register the container images to the registry."
|
||||
echo ""
|
||||
echo "Step(1) should be done online site as a preparation, then we bring"
|
||||
echo "the gotten images to the target offline environment."
|
||||
echo "Then we will run step(2) for registering the images to local registry."
|
||||
echo ""
|
||||
echo "${IMAGE_TAR_FILE} is created to contain your container images."
|
||||
echo "Please keep this file and bring it to your offline environment."
|
||||
echo ""
|
||||
echo "Step(1) can be operated with:"
|
||||
echo " $ ./manage-offline-container-images.sh create"
|
||||
echo ""
|
||||
echo "Step(2) can be operated with:"
|
||||
echo " $ ./manage-offline-container-images.sh register"
|
||||
echo ""
|
||||
echo "Please specify 'create' or 'register'."
|
||||
echo ""
|
||||
exit 1
|
||||
fi
|
||||
8
contrib/offline/registries.conf
Normal file
8
contrib/offline/registries.conf
Normal file
@@ -0,0 +1,8 @@
|
||||
[registries.search]
|
||||
registries = ['registry.access.redhat.com', 'registry.redhat.io', 'docker.io']
|
||||
|
||||
[registries.insecure]
|
||||
registries = ['HOSTNAME:5000']
|
||||
|
||||
[registries.block]
|
||||
registries = []
|
||||
@@ -1,40 +1,44 @@
|
||||
## Kubernetes on AWS with Terraform
|
||||
# Kubernetes on AWS with Terraform
|
||||
|
||||
**Overview:**
|
||||
## Overview
|
||||
|
||||
This project will create:
|
||||
* VPC with Public and Private Subnets in # Availability Zones
|
||||
* Bastion Hosts and NAT Gateways in the Public Subnet
|
||||
* A dynamic number of masters, etcd, and worker nodes in the Private Subnet
|
||||
* even distributed over the # of Availability Zones
|
||||
* AWS ELB in the Public Subnet for accessing the Kubernetes API from the internet
|
||||
|
||||
**Requirements**
|
||||
- VPC with Public and Private Subnets in # Availability Zones
|
||||
- Bastion Hosts and NAT Gateways in the Public Subnet
|
||||
- A dynamic number of masters, etcd, and worker nodes in the Private Subnet
|
||||
- even distributed over the # of Availability Zones
|
||||
- AWS ELB in the Public Subnet for accessing the Kubernetes API from the internet
|
||||
|
||||
## Requirements
|
||||
|
||||
- Terraform 0.12.0 or newer
|
||||
|
||||
**How to Use:**
|
||||
## How to Use
|
||||
|
||||
- Export the variables for your AWS credentials or edit `credentials.tfvars`:
|
||||
|
||||
```
|
||||
```commandline
|
||||
export TF_VAR_AWS_ACCESS_KEY_ID="www"
|
||||
export TF_VAR_AWS_SECRET_ACCESS_KEY ="xxx"
|
||||
export TF_VAR_AWS_SSH_KEY_NAME="yyy"
|
||||
export TF_VAR_AWS_DEFAULT_REGION="zzz"
|
||||
```
|
||||
|
||||
- Update `contrib/terraform/aws/terraform.tfvars` with your data. By default, the Terraform scripts use Ubuntu 18.04 LTS (Bionic) as base image. If you want to change this behaviour, see note "Using other distrib than Ubuntu" below.
|
||||
- Create an AWS EC2 SSH Key
|
||||
- Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply` depending if you exported your AWS credentials
|
||||
|
||||
Example:
|
||||
|
||||
```commandline
|
||||
terraform apply -var-file=credentials.tfvars
|
||||
```
|
||||
|
||||
- Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`
|
||||
|
||||
- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts with ssh using bastion host use generated ssh-bastion.conf.
|
||||
Ansible automatically detects bastion and changes ssh_args
|
||||
|
||||
```commandline
|
||||
ssh -F ./ssh-bastion.conf user@$ip
|
||||
```
|
||||
@@ -42,14 +46,19 @@ ssh -F ./ssh-bastion.conf user@$ip
|
||||
- Once the infrastructure is created, you can run the kubespray playbooks and supply inventory/hosts with the `-i` flag.
|
||||
|
||||
Example (this one assumes you are using Ubuntu)
|
||||
|
||||
```commandline
|
||||
ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=ubuntu -b --become-user=root --flush-cache
|
||||
```
|
||||
|
||||
***Using other distrib than Ubuntu***
|
||||
If you want to use another distribution than Ubuntu 18.04 (Bionic) LTS, you can modify the search filters of the 'data "aws_ami" "distro"' in variables.tf.
|
||||
|
||||
For example, to use:
|
||||
|
||||
- Debian Jessie, replace 'data "aws_ami" "distro"' in variables.tf with
|
||||
|
||||
```ini
|
||||
data "aws_ami" "distro" {
|
||||
most_recent = true
|
||||
|
||||
@@ -65,8 +74,11 @@ data "aws_ami" "distro" {
|
||||
|
||||
owners = ["379101102735"]
|
||||
}
|
||||
```
|
||||
|
||||
- Ubuntu 16.04, replace 'data "aws_ami" "distro"' in variables.tf with
|
||||
|
||||
```ini
|
||||
data "aws_ami" "distro" {
|
||||
most_recent = true
|
||||
|
||||
@@ -82,8 +94,11 @@ data "aws_ami" "distro" {
|
||||
|
||||
owners = ["099720109477"]
|
||||
}
|
||||
```
|
||||
|
||||
- Centos 7, replace 'data "aws_ami" "distro"' in variables.tf with
|
||||
|
||||
```ini
|
||||
data "aws_ami" "distro" {
|
||||
most_recent = true
|
||||
|
||||
@@ -99,23 +114,49 @@ data "aws_ami" "distro" {
|
||||
|
||||
owners = ["688023202711"]
|
||||
}
|
||||
```
|
||||
|
||||
**Troubleshooting**
|
||||
## Connecting to Kubernetes
|
||||
|
||||
***Remaining AWS IAM Instance Profile***:
|
||||
You can use the following set of commands to get the kubeconfig file from your newly created cluster. Before running the commands, make sure you are in the project's root folder.
|
||||
|
||||
```commandline
|
||||
# Get the controller's IP address.
|
||||
CONTROLLER_HOST_NAME=$(cat ./inventory/hosts | grep "\[kube-master\]" -A 1 | tail -n 1)
|
||||
CONTROLLER_IP=$(cat ./inventory/hosts | grep $CONTROLLER_HOST_NAME | grep ansible_host | cut -d'=' -f2)
|
||||
|
||||
# Get the hostname of the load balancer.
|
||||
LB_HOST=$(cat inventory/hosts | grep apiserver_loadbalancer_domain_name | cut -d'"' -f2)
|
||||
|
||||
# Get the controller's SSH fingerprint.
|
||||
ssh-keygen -R $CONTROLLER_IP > /dev/null 2>&1
|
||||
ssh-keyscan -H $CONTROLLER_IP >> ~/.ssh/known_hosts 2>/dev/null
|
||||
|
||||
# Get the kubeconfig from the controller.
|
||||
mkdir -p ~/.kube
|
||||
ssh -F ssh-bastion.conf centos@$CONTROLLER_IP "sudo chmod 644 /etc/kubernetes/admin.conf"
|
||||
scp -F ssh-bastion.conf centos@$CONTROLLER_IP:/etc/kubernetes/admin.conf ~/.kube/config
|
||||
sed -i "s^server:.*^server: https://$LB_HOST:6443^" ~/.kube/config
|
||||
kubectl get nodes
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Remaining AWS IAM Instance Profile
|
||||
|
||||
If the cluster was destroyed without using Terraform it is possible that
|
||||
the AWS IAM Instance Profiles still remain. To delete them you can use
|
||||
the `AWS CLI` with the following command:
|
||||
```
|
||||
|
||||
```commandline
|
||||
aws iam delete-instance-profile --region <region_name> --instance-profile-name <profile_name>
|
||||
```
|
||||
|
||||
***Ansible Inventory doesn't get created:***
|
||||
### Ansible Inventory doesn't get created
|
||||
|
||||
It could happen that Terraform doesn't create an Ansible Inventory file automatically. If this is the case copy the output after `inventory=` and create a file named `hosts`in the directory `inventory` and paste the inventory into the file.
|
||||
|
||||
**Architecture**
|
||||
## Architecture
|
||||
|
||||
Pictured is an AWS Infrastructure created with this Terraform project distributed over two Availability Zones.
|
||||
|
||||
|
||||
@@ -63,7 +63,7 @@ resource "aws_instance" "bastion-server" {
|
||||
|
||||
tags = merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-bastion-${count.index}",
|
||||
"Cluster", "${var.aws_cluster_name}",
|
||||
"Cluster", var.aws_cluster_name,
|
||||
"Role", "bastion-${var.aws_cluster_name}-${count.index}"
|
||||
))
|
||||
}
|
||||
|
||||
@@ -42,7 +42,7 @@ resource "aws_elb" "aws-elb-api" {
|
||||
healthy_threshold = 2
|
||||
unhealthy_threshold = 2
|
||||
timeout = 3
|
||||
target = "TCP:${var.k8s_secure_api_port}"
|
||||
target = "HTTPS:${var.k8s_secure_api_port}/healthz"
|
||||
interval = 30
|
||||
}
|
||||
|
||||
|
||||
@@ -16,15 +16,15 @@ variable "k8s_secure_api_port" {
|
||||
|
||||
variable "aws_avail_zones" {
|
||||
description = "Availability Zones Used"
|
||||
type = "list"
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "aws_subnet_ids_public" {
|
||||
description = "IDs of Public Subnets"
|
||||
type = "list"
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "default_tags" {
|
||||
description = "Tags for all resources"
|
||||
type = "map"
|
||||
type = map(string)
|
||||
}
|
||||
|
||||
@@ -8,20 +8,20 @@ variable "aws_cluster_name" {
|
||||
|
||||
variable "aws_avail_zones" {
|
||||
description = "AWS Availability Zones Used"
|
||||
type = "list"
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "aws_cidr_subnets_private" {
|
||||
description = "CIDR Blocks for private subnets in Availability zones"
|
||||
type = "list"
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "aws_cidr_subnets_public" {
|
||||
description = "CIDR Blocks for public subnets in Availability zones"
|
||||
type = "list"
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "default_tags" {
|
||||
description = "Default tags for all resources"
|
||||
type = "map"
|
||||
type = map(string)
|
||||
}
|
||||
|
||||
@@ -44,12 +44,12 @@ variable "aws_vpc_cidr_block" {
|
||||
|
||||
variable "aws_cidr_subnets_private" {
|
||||
description = "CIDR Blocks for private subnets in Availability Zones"
|
||||
type = "list"
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "aws_cidr_subnets_public" {
|
||||
description = "CIDR Blocks for public subnets in Availability Zones"
|
||||
type = "list"
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
//AWS EC2 Settings
|
||||
@@ -101,7 +101,7 @@ variable "k8s_secure_api_port" {
|
||||
|
||||
variable "default_tags" {
|
||||
description = "Default tags for all resources"
|
||||
type = "map"
|
||||
type = map(string)
|
||||
}
|
||||
|
||||
variable "inventory_file" {
|
||||
|
||||
90
contrib/terraform/gcp/README.md
Normal file
90
contrib/terraform/gcp/README.md
Normal file
@@ -0,0 +1,90 @@
|
||||
# Kubernetes on GCP with Terraform
|
||||
|
||||
Provision a Kubernetes cluster on GCP using Terraform and Kubespray
|
||||
|
||||
## Overview
|
||||
|
||||
The setup looks like following
|
||||
|
||||
```text
|
||||
Kubernetes cluster
|
||||
+-----------------------+
|
||||
+---------------+ | +--------------+ |
|
||||
| | | | +--------------+ |
|
||||
| API server LB +---------> | | | |
|
||||
| | | | | Master/etcd | |
|
||||
+---------------+ | | | node(s) | |
|
||||
| +-+ | |
|
||||
| +--------------+ |
|
||||
| ^ |
|
||||
| | |
|
||||
| v |
|
||||
+---------------+ | +--------------+ |
|
||||
| | | | +--------------+ |
|
||||
| Ingress LB +---------> | | | |
|
||||
| | | | | Worker | |
|
||||
+---------------+ | | | node(s) | |
|
||||
| +-+ | |
|
||||
| +--------------+ |
|
||||
+-----------------------+
|
||||
```
|
||||
|
||||
## Requirements
|
||||
|
||||
* Terraform 0.12.0 or newer
|
||||
|
||||
## Quickstart
|
||||
|
||||
To get a cluster up and running you'll need a JSON keyfile.
|
||||
Set the path to the file in the `tfvars.json` file and run the following:
|
||||
|
||||
```bash
|
||||
terraform apply -var-file tfvars.json -state dev-cluster.tfstate -var gcp_project_id=<ID of your GCP project> -var keyfile_location=<location of the json keyfile>
|
||||
```
|
||||
|
||||
To generate kubespray inventory based on the terraform state file you can run the following:
|
||||
|
||||
```bash
|
||||
./generate-inventory.sh dev-cluster.tfstate > inventory.ini
|
||||
```
|
||||
|
||||
You should now have a inventory file named `inventory.ini` that you can use with kubespray, e.g.
|
||||
|
||||
```bash
|
||||
ansible-playbook -i contrib/terraform/gcs/inventory.ini cluster.yml -b -v
|
||||
```
|
||||
|
||||
## Variables
|
||||
|
||||
### Required
|
||||
|
||||
* `keyfile_location`: Location to the keyfile to use as credentials for the google terraform provider
|
||||
* `gcp_project_id`: ID of the GCP project to deploy the cluster in
|
||||
* `ssh_pub_key`: Path to public ssh key to use for all machines
|
||||
* `region`: The region where to run the cluster
|
||||
* `machines`: Machines to provision. Key of this object will be used as the name of the machine
|
||||
* `node_type`: The role of this node *(master|worker)*
|
||||
* `size`: The size to use
|
||||
* `zone`: The zone the machine should run in
|
||||
* `additional_disks`: Extra disks to add to the machine. Key of this object will be used as the disk name
|
||||
* `size`: Size of the disk (in GB)
|
||||
* `boot_disk`: The boot disk to use
|
||||
* `image_name`: Name of the image
|
||||
* `size`: Size of the boot disk (in GB)
|
||||
* `ssh_whitelist`: List of IP ranges (CIDR) that will be allowed to ssh to the nodes
|
||||
* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server
|
||||
* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on port 30000-32767 (kubernetes nodeports)
|
||||
|
||||
### Optional
|
||||
|
||||
* `prefix`: Prefix to use for all resources, required to be unique for all clusters in the same project *(Defaults to `default`)*
|
||||
* `master_sa_email`: Service account email to use for the master nodes *(Defaults to `""`, auto generate one)*
|
||||
* `master_sa_scopes`: Service account email to use for the master nodes *(Defaults to `["https://www.googleapis.com/auth/cloud-platform"]`)*
|
||||
* `worker_sa_email`: Service account email to use for the worker nodes *(Defaults to `""`, auto generate one)*
|
||||
* `worker_sa_scopes`: Service account email to use for the worker nodes *(Defaults to `["https://www.googleapis.com/auth/cloud-platform"]`)*
|
||||
|
||||
An example variables file can be found `tfvars.json`
|
||||
|
||||
## Known limitations
|
||||
|
||||
This solution does not provide a solution to use a bastion host. Thus all the nodes must expose a public IP for kubespray to work.
|
||||
76
contrib/terraform/gcp/generate-inventory.sh
Executable file
76
contrib/terraform/gcp/generate-inventory.sh
Executable file
@@ -0,0 +1,76 @@
|
||||
#!/bin/bash
|
||||
|
||||
#
|
||||
# Generates a inventory file based on the terraform output.
|
||||
# After provisioning a cluster, simply run this command and supply the terraform state file
|
||||
# Default state file is terraform.tfstate
|
||||
#
|
||||
|
||||
set -e
|
||||
|
||||
usage () {
|
||||
echo "Usage: $0 <state file>" >&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
if [[ $# -ne 1 ]]; then
|
||||
usage
|
||||
fi
|
||||
|
||||
TF_STATE_FILE=${1}
|
||||
|
||||
if [[ ! -f "${TF_STATE_FILE}" ]]; then
|
||||
echo "ERROR: state file ${TF_STATE_FILE} doesn't exist" >&2
|
||||
usage
|
||||
fi
|
||||
|
||||
TF_OUT=$(terraform output -state "${TF_STATE_FILE}" -json)
|
||||
|
||||
MASTERS=$(jq -r '.master_ips.value | to_entries[]' <(echo "${TF_OUT}"))
|
||||
WORKERS=$(jq -r '.worker_ips.value | to_entries[]' <(echo "${TF_OUT}"))
|
||||
mapfile -t MASTER_NAMES < <(jq -r '.key' <(echo "${MASTERS}"))
|
||||
mapfile -t WORKER_NAMES < <(jq -r '.key' <(echo "${WORKERS}"))
|
||||
|
||||
API_LB=$(jq -r '.control_plane_lb_ip_address.value' <(echo "${TF_OUT}"))
|
||||
|
||||
# Generate master hosts
|
||||
i=1
|
||||
for name in "${MASTER_NAMES[@]}"; do
|
||||
private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${MASTERS}"))
|
||||
public_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.public_ip' <(echo "${MASTERS}"))
|
||||
echo "${name} ansible_user=ubuntu ansible_host=${public_ip} ip=${private_ip} etcd_member_name=etcd${i}"
|
||||
i=$(( i + 1 ))
|
||||
done
|
||||
|
||||
# Generate worker hosts
|
||||
for name in "${WORKER_NAMES[@]}"; do
|
||||
private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${WORKERS}"))
|
||||
public_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.public_ip' <(echo "${WORKERS}"))
|
||||
echo "${name} ansible_user=ubuntu ansible_host=${public_ip} ip=${private_ip}"
|
||||
done
|
||||
|
||||
echo ""
|
||||
echo "[kube-master]"
|
||||
for name in "${MASTER_NAMES[@]}"; do
|
||||
echo "${name}"
|
||||
done
|
||||
|
||||
echo ""
|
||||
echo "[kube-master:vars]"
|
||||
echo "supplementary_addresses_in_ssl_keys = [ '${API_LB}' ]" # Add LB address to API server certificate
|
||||
echo ""
|
||||
echo "[etcd]"
|
||||
for name in "${MASTER_NAMES[@]}"; do
|
||||
echo "${name}"
|
||||
done
|
||||
|
||||
echo ""
|
||||
echo "[kube-node]"
|
||||
for name in "${WORKER_NAMES[@]}"; do
|
||||
echo "${name}"
|
||||
done
|
||||
|
||||
echo ""
|
||||
echo "[k8s-cluster:children]"
|
||||
echo "kube-master"
|
||||
echo "kube-node"
|
||||
24
contrib/terraform/gcp/main.tf
Normal file
24
contrib/terraform/gcp/main.tf
Normal file
@@ -0,0 +1,24 @@
|
||||
provider "google" {
|
||||
credentials = file(var.keyfile_location)
|
||||
region = var.region
|
||||
project = var.gcp_project_id
|
||||
version = "~> 3.48"
|
||||
}
|
||||
|
||||
module "kubernetes" {
|
||||
source = "./modules/kubernetes-cluster"
|
||||
region = var.region
|
||||
prefix = var.prefix
|
||||
|
||||
machines = var.machines
|
||||
ssh_pub_key = var.ssh_pub_key
|
||||
|
||||
master_sa_email = var.master_sa_email
|
||||
master_sa_scopes = var.master_sa_scopes
|
||||
worker_sa_email = var.worker_sa_email
|
||||
worker_sa_scopes = var.worker_sa_scopes
|
||||
|
||||
ssh_whitelist = var.ssh_whitelist
|
||||
api_server_whitelist = var.api_server_whitelist
|
||||
nodeport_whitelist = var.nodeport_whitelist
|
||||
}
|
||||
360
contrib/terraform/gcp/modules/kubernetes-cluster/main.tf
Normal file
360
contrib/terraform/gcp/modules/kubernetes-cluster/main.tf
Normal file
@@ -0,0 +1,360 @@
|
||||
#################################################
|
||||
##
|
||||
## General
|
||||
##
|
||||
|
||||
resource "google_compute_network" "main" {
|
||||
name = "${var.prefix}-network"
|
||||
}
|
||||
|
||||
resource "google_compute_subnetwork" "main" {
|
||||
name = "${var.prefix}-subnet"
|
||||
network = google_compute_network.main.name
|
||||
ip_cidr_range = var.private_network_cidr
|
||||
region = var.region
|
||||
}
|
||||
|
||||
resource "google_compute_firewall" "deny_all" {
|
||||
name = "${var.prefix}-default-firewall"
|
||||
network = google_compute_network.main.name
|
||||
|
||||
priority = 1000
|
||||
|
||||
deny {
|
||||
protocol = "all"
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_firewall" "allow_internal" {
|
||||
name = "${var.prefix}-internal-firewall"
|
||||
network = google_compute_network.main.name
|
||||
|
||||
priority = 500
|
||||
|
||||
source_ranges = [var.private_network_cidr]
|
||||
|
||||
allow {
|
||||
protocol = "all"
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_firewall" "ssh" {
|
||||
name = "${var.prefix}-ssh-firewall"
|
||||
network = google_compute_network.main.name
|
||||
|
||||
priority = 100
|
||||
|
||||
source_ranges = var.ssh_whitelist
|
||||
|
||||
allow {
|
||||
protocol = "tcp"
|
||||
ports = ["22"]
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_firewall" "api_server" {
|
||||
name = "${var.prefix}-api-server-firewall"
|
||||
network = google_compute_network.main.name
|
||||
|
||||
priority = 100
|
||||
|
||||
source_ranges = var.api_server_whitelist
|
||||
|
||||
allow {
|
||||
protocol = "tcp"
|
||||
ports = ["6443"]
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_firewall" "nodeport" {
|
||||
name = "${var.prefix}-nodeport-firewall"
|
||||
network = google_compute_network.main.name
|
||||
|
||||
priority = 100
|
||||
|
||||
source_ranges = var.nodeport_whitelist
|
||||
|
||||
allow {
|
||||
protocol = "tcp"
|
||||
ports = ["30000-32767"]
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_firewall" "ingress_http" {
|
||||
name = "${var.prefix}-http-ingress-firewall"
|
||||
network = google_compute_network.main.name
|
||||
|
||||
priority = 100
|
||||
|
||||
allow {
|
||||
protocol = "tcp"
|
||||
ports = ["80"]
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_firewall" "ingress_https" {
|
||||
name = "${var.prefix}-https-ingress-firewall"
|
||||
network = google_compute_network.main.name
|
||||
|
||||
priority = 100
|
||||
|
||||
allow {
|
||||
protocol = "tcp"
|
||||
ports = ["443"]
|
||||
}
|
||||
}
|
||||
|
||||
#################################################
|
||||
##
|
||||
## Local variables
|
||||
##
|
||||
|
||||
locals {
|
||||
master_target_list = [
|
||||
for name, machine in google_compute_instance.master :
|
||||
"${machine.zone}/${machine.name}"
|
||||
]
|
||||
|
||||
worker_target_list = [
|
||||
for name, machine in google_compute_instance.worker :
|
||||
"${machine.zone}/${machine.name}"
|
||||
]
|
||||
|
||||
master_disks = flatten([
|
||||
for machine_name, machine in var.machines : [
|
||||
for disk_name, disk in machine.additional_disks : {
|
||||
"${machine_name}-${disk_name}" = {
|
||||
"machine_name": machine_name,
|
||||
"machine": machine,
|
||||
"disk_size": disk.size,
|
||||
"disk_name": disk_name
|
||||
}
|
||||
}
|
||||
]
|
||||
if machine.node_type == "master"
|
||||
])
|
||||
|
||||
worker_disks = flatten([
|
||||
for machine_name, machine in var.machines : [
|
||||
for disk_name, disk in machine.additional_disks : {
|
||||
"${machine_name}-${disk_name}" = {
|
||||
"machine_name": machine_name,
|
||||
"machine": machine,
|
||||
"disk_size": disk.size,
|
||||
"disk_name": disk_name
|
||||
}
|
||||
}
|
||||
]
|
||||
if machine.node_type == "worker"
|
||||
])
|
||||
}
|
||||
|
||||
#################################################
|
||||
##
|
||||
## Master
|
||||
##
|
||||
|
||||
resource "google_compute_address" "master" {
|
||||
for_each = {
|
||||
for name, machine in var.machines :
|
||||
name => machine
|
||||
if machine.node_type == "master"
|
||||
}
|
||||
|
||||
name = "${var.prefix}-${each.key}-pip"
|
||||
address_type = "EXTERNAL"
|
||||
region = var.region
|
||||
}
|
||||
|
||||
resource "google_compute_disk" "master" {
|
||||
for_each = {
|
||||
for item in local.master_disks :
|
||||
keys(item)[0] => values(item)[0]
|
||||
}
|
||||
|
||||
name = "${var.prefix}-${each.key}"
|
||||
type = "pd-ssd"
|
||||
zone = each.value.machine.zone
|
||||
size = each.value.disk_size
|
||||
|
||||
physical_block_size_bytes = 4096
|
||||
}
|
||||
|
||||
resource "google_compute_attached_disk" "master" {
|
||||
for_each = {
|
||||
for item in local.master_disks :
|
||||
keys(item)[0] => values(item)[0]
|
||||
}
|
||||
|
||||
disk = google_compute_disk.master[each.key].id
|
||||
instance = google_compute_instance.master[each.value.machine_name].id
|
||||
}
|
||||
|
||||
resource "google_compute_instance" "master" {
|
||||
for_each = {
|
||||
for name, machine in var.machines :
|
||||
name => machine
|
||||
if machine.node_type == "master"
|
||||
}
|
||||
|
||||
name = "${var.prefix}-${each.key}"
|
||||
machine_type = each.value.size
|
||||
zone = each.value.zone
|
||||
|
||||
tags = ["master"]
|
||||
|
||||
boot_disk {
|
||||
initialize_params {
|
||||
image = each.value.boot_disk.image_name
|
||||
size = each.value.boot_disk.size
|
||||
}
|
||||
}
|
||||
|
||||
network_interface {
|
||||
subnetwork = google_compute_subnetwork.main.name
|
||||
|
||||
access_config {
|
||||
nat_ip = google_compute_address.master[each.key].address
|
||||
}
|
||||
}
|
||||
|
||||
metadata = {
|
||||
ssh-keys = "ubuntu:${trimspace(file(pathexpand(var.ssh_pub_key)))}"
|
||||
}
|
||||
|
||||
service_account {
|
||||
email = var.master_sa_email
|
||||
scopes = var.master_sa_scopes
|
||||
}
|
||||
|
||||
# Since we use google_compute_attached_disk we need to ignore this
|
||||
lifecycle {
|
||||
ignore_changes = ["attached_disk"]
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_forwarding_rule" "master_lb" {
|
||||
name = "${var.prefix}-master-lb-forward-rule"
|
||||
|
||||
port_range = "6443"
|
||||
|
||||
target = google_compute_target_pool.master_lb.id
|
||||
}
|
||||
|
||||
resource "google_compute_target_pool" "master_lb" {
|
||||
name = "${var.prefix}-master-lb-pool"
|
||||
instances = local.master_target_list
|
||||
}
|
||||
|
||||
#################################################
|
||||
##
|
||||
## Worker
|
||||
##
|
||||
|
||||
resource "google_compute_disk" "worker" {
|
||||
for_each = {
|
||||
for item in local.worker_disks :
|
||||
keys(item)[0] => values(item)[0]
|
||||
}
|
||||
|
||||
name = "${var.prefix}-${each.key}"
|
||||
type = "pd-ssd"
|
||||
zone = each.value.machine.zone
|
||||
size = each.value.disk_size
|
||||
|
||||
physical_block_size_bytes = 4096
|
||||
}
|
||||
|
||||
resource "google_compute_attached_disk" "worker" {
|
||||
for_each = {
|
||||
for item in local.worker_disks :
|
||||
keys(item)[0] => values(item)[0]
|
||||
}
|
||||
|
||||
disk = google_compute_disk.worker[each.key].id
|
||||
instance = google_compute_instance.worker[each.value.machine_name].id
|
||||
}
|
||||
|
||||
resource "google_compute_address" "worker" {
|
||||
for_each = {
|
||||
for name, machine in var.machines :
|
||||
name => machine
|
||||
if machine.node_type == "worker"
|
||||
}
|
||||
|
||||
name = "${var.prefix}-${each.key}-pip"
|
||||
address_type = "EXTERNAL"
|
||||
region = var.region
|
||||
}
|
||||
|
||||
resource "google_compute_instance" "worker" {
|
||||
for_each = {
|
||||
for name, machine in var.machines :
|
||||
name => machine
|
||||
if machine.node_type == "worker"
|
||||
}
|
||||
|
||||
name = "${var.prefix}-${each.key}"
|
||||
machine_type = each.value.size
|
||||
zone = each.value.zone
|
||||
|
||||
tags = ["worker"]
|
||||
|
||||
boot_disk {
|
||||
initialize_params {
|
||||
image = each.value.boot_disk.image_name
|
||||
size = each.value.boot_disk.size
|
||||
}
|
||||
}
|
||||
|
||||
network_interface {
|
||||
subnetwork = google_compute_subnetwork.main.name
|
||||
|
||||
access_config {
|
||||
nat_ip = google_compute_address.worker[each.key].address
|
||||
}
|
||||
}
|
||||
|
||||
metadata = {
|
||||
ssh-keys = "ubuntu:${trimspace(file(pathexpand(var.ssh_pub_key)))}"
|
||||
}
|
||||
|
||||
service_account {
|
||||
email = var.worker_sa_email
|
||||
scopes = var.worker_sa_scopes
|
||||
}
|
||||
|
||||
# Since we use google_compute_attached_disk we need to ignore this
|
||||
lifecycle {
|
||||
ignore_changes = ["attached_disk"]
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_address" "worker_lb" {
|
||||
name = "${var.prefix}-worker-lb-address"
|
||||
address_type = "EXTERNAL"
|
||||
region = var.region
|
||||
}
|
||||
|
||||
resource "google_compute_forwarding_rule" "worker_http_lb" {
|
||||
name = "${var.prefix}-worker-http-lb-forward-rule"
|
||||
|
||||
ip_address = google_compute_address.worker_lb.address
|
||||
port_range = "80"
|
||||
|
||||
target = google_compute_target_pool.worker_lb.id
|
||||
}
|
||||
|
||||
resource "google_compute_forwarding_rule" "worker_https_lb" {
|
||||
name = "${var.prefix}-worker-https-lb-forward-rule"
|
||||
|
||||
ip_address = google_compute_address.worker_lb.address
|
||||
port_range = "443"
|
||||
|
||||
target = google_compute_target_pool.worker_lb.id
|
||||
}
|
||||
|
||||
resource "google_compute_target_pool" "worker_lb" {
|
||||
name = "${var.prefix}-worker-lb-pool"
|
||||
instances = local.worker_target_list
|
||||
}
|
||||
27
contrib/terraform/gcp/modules/kubernetes-cluster/output.tf
Normal file
27
contrib/terraform/gcp/modules/kubernetes-cluster/output.tf
Normal file
@@ -0,0 +1,27 @@
|
||||
output "master_ip_addresses" {
|
||||
value = {
|
||||
for key, instance in google_compute_instance.master :
|
||||
instance.name => {
|
||||
"private_ip" = instance.network_interface.0.network_ip
|
||||
"public_ip" = instance.network_interface.0.access_config.0.nat_ip
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
output "worker_ip_addresses" {
|
||||
value = {
|
||||
for key, instance in google_compute_instance.worker :
|
||||
instance.name => {
|
||||
"private_ip" = instance.network_interface.0.network_ip
|
||||
"public_ip" = instance.network_interface.0.access_config.0.nat_ip
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
output "ingress_controller_lb_ip_address" {
|
||||
value = google_compute_address.worker_lb.address
|
||||
}
|
||||
|
||||
output "control_plane_lb_ip_address" {
|
||||
value = google_compute_forwarding_rule.master_lb.ip_address
|
||||
}
|
||||
@@ -0,0 +1,54 @@
|
||||
variable "region" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "prefix" {}
|
||||
|
||||
variable "machines" {
|
||||
type = map(object({
|
||||
node_type = string
|
||||
size = string
|
||||
zone = string
|
||||
additional_disks = map(object({
|
||||
size = number
|
||||
}))
|
||||
boot_disk = object({
|
||||
image_name = string
|
||||
size = number
|
||||
})
|
||||
}))
|
||||
}
|
||||
|
||||
variable "master_sa_email" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "master_sa_scopes" {
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "worker_sa_email" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "worker_sa_scopes" {
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "ssh_pub_key" {}
|
||||
|
||||
variable "ssh_whitelist" {
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "api_server_whitelist" {
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "nodeport_whitelist" {
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "private_network_cidr" {
|
||||
default = "10.0.10.0/24"
|
||||
}
|
||||
15
contrib/terraform/gcp/output.tf
Normal file
15
contrib/terraform/gcp/output.tf
Normal file
@@ -0,0 +1,15 @@
|
||||
output "master_ips" {
|
||||
value = module.kubernetes.master_ip_addresses
|
||||
}
|
||||
|
||||
output "worker_ips" {
|
||||
value = module.kubernetes.worker_ip_addresses
|
||||
}
|
||||
|
||||
output "ingress_controller_lb_ip_address" {
|
||||
value = module.kubernetes.ingress_controller_lb_ip_address
|
||||
}
|
||||
|
||||
output "control_plane_lb_ip_address" {
|
||||
value = module.kubernetes.control_plane_lb_ip_address
|
||||
}
|
||||
60
contrib/terraform/gcp/tfvars.json
Normal file
60
contrib/terraform/gcp/tfvars.json
Normal file
@@ -0,0 +1,60 @@
|
||||
{
|
||||
"gcp_project_id": "GCP_PROJECT_ID",
|
||||
"region": "us-central1",
|
||||
"ssh_pub_key": "~/.ssh/id_rsa.pub",
|
||||
|
||||
"keyfile_location": "service-account.json",
|
||||
|
||||
"prefix": "development",
|
||||
|
||||
"ssh_whitelist": [
|
||||
"1.2.3.4/32"
|
||||
],
|
||||
"api_server_whitelist": [
|
||||
"1.2.3.4/32"
|
||||
],
|
||||
"nodeport_whitelist": [
|
||||
"1.2.3.4/32"
|
||||
],
|
||||
|
||||
"machines": {
|
||||
"master-0": {
|
||||
"node_type": "master",
|
||||
"size": "n1-standard-2",
|
||||
"zone": "us-central1-a",
|
||||
"additional_disks": {},
|
||||
"boot_disk": {
|
||||
"image_name": "ubuntu-os-cloud/ubuntu-1804-bionic-v20201116",
|
||||
"size": 50
|
||||
}
|
||||
},
|
||||
"worker-0": {
|
||||
"node_type": "worker",
|
||||
"size": "n1-standard-8",
|
||||
"zone": "us-central1-a",
|
||||
"additional_disks": {
|
||||
"extra-disk-1": {
|
||||
"size": 100
|
||||
}
|
||||
},
|
||||
"boot_disk": {
|
||||
"image_name": "ubuntu-os-cloud/ubuntu-1804-bionic-v20201116",
|
||||
"size": 50
|
||||
}
|
||||
},
|
||||
"worker-1": {
|
||||
"node_type": "worker",
|
||||
"size": "n1-standard-8",
|
||||
"zone": "us-central1-a",
|
||||
"additional_disks": {
|
||||
"extra-disk-1": {
|
||||
"size": 100
|
||||
}
|
||||
},
|
||||
"boot_disk": {
|
||||
"image_name": "ubuntu-os-cloud/ubuntu-1804-bionic-v20201116",
|
||||
"size": 50
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
72
contrib/terraform/gcp/variables.tf
Normal file
72
contrib/terraform/gcp/variables.tf
Normal file
@@ -0,0 +1,72 @@
|
||||
variable keyfile_location {
|
||||
description = "Location of the json keyfile to use with the google provider"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable region {
|
||||
description = "Region of all resources"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable gcp_project_id {
|
||||
description = "ID of the project"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable prefix {
|
||||
description = "Prefix for resource names"
|
||||
default = "default"
|
||||
}
|
||||
|
||||
variable machines {
|
||||
description = "Cluster machines"
|
||||
type = map(object({
|
||||
node_type = string
|
||||
size = string
|
||||
zone = string
|
||||
additional_disks = map(object({
|
||||
size = number
|
||||
}))
|
||||
boot_disk = object({
|
||||
image_name = string
|
||||
size = number
|
||||
})
|
||||
}))
|
||||
}
|
||||
|
||||
variable "master_sa_email" {
|
||||
type = string
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "master_sa_scopes" {
|
||||
type = list(string)
|
||||
default = ["https://www.googleapis.com/auth/cloud-platform"]
|
||||
}
|
||||
|
||||
variable "worker_sa_email" {
|
||||
type = string
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "worker_sa_scopes" {
|
||||
type = list(string)
|
||||
default = ["https://www.googleapis.com/auth/cloud-platform"]
|
||||
}
|
||||
|
||||
variable ssh_pub_key {
|
||||
description = "Path to public SSH key file which is injected into the VMs."
|
||||
type = string
|
||||
}
|
||||
|
||||
variable ssh_whitelist {
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable api_server_whitelist {
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable nodeport_whitelist {
|
||||
type = list(string)
|
||||
}
|
||||
@@ -9,6 +9,7 @@ This will install a Kubernetes cluster on an OpenStack Cloud. It should work on
|
||||
most modern installs of OpenStack that support the basic services.
|
||||
|
||||
### Known compatible public clouds
|
||||
|
||||
- [Auro](https://auro.io/)
|
||||
- [Betacloud](https://www.betacloud.io/)
|
||||
- [CityCloud](https://www.citycloud.com/)
|
||||
@@ -23,8 +24,8 @@ most modern installs of OpenStack that support the basic services.
|
||||
- [VexxHost](https://vexxhost.com/)
|
||||
- [Zetta](https://www.zetta.io/)
|
||||
|
||||
|
||||
## Approach
|
||||
|
||||
The terraform configuration inspects variables found in
|
||||
[variables.tf](variables.tf) to create resources in your OpenStack cluster.
|
||||
There is a [python script](../terraform.py) that reads the generated`.tfstate`
|
||||
@@ -32,6 +33,7 @@ file to generate a dynamic inventory that is consumed by the main ansible script
|
||||
to actually install kubernetes and stand up the cluster.
|
||||
|
||||
### Networking
|
||||
|
||||
The configuration includes creating a private subnet with a router to the
|
||||
external net. It will allocate floating IPs from a pool and assign them to the
|
||||
hosts where that makes sense. You have the option of creating bastion hosts
|
||||
@@ -39,19 +41,23 @@ inside the private subnet to access the nodes there. Alternatively, a node with
|
||||
a floating IP can be used as a jump host to nodes without.
|
||||
|
||||
#### Using an existing router
|
||||
|
||||
It is possible to use an existing router instead of creating one. To use an
|
||||
existing router set the router\_id variable to the uuid of the router you wish
|
||||
to use.
|
||||
|
||||
For example:
|
||||
```
|
||||
|
||||
```ShellSession
|
||||
router_id = "00c542e7-6f46-4535-ae95-984c7f0391a3"
|
||||
```
|
||||
|
||||
### Kubernetes Nodes
|
||||
|
||||
You can create many different kubernetes topologies by setting the number of
|
||||
different classes of hosts. For each class there are options for allocating
|
||||
floating IP addresses or not.
|
||||
|
||||
- Master nodes with etcd
|
||||
- Master nodes without etcd
|
||||
- Standalone etcd hosts
|
||||
@@ -64,10 +70,12 @@ master nodes with etcd replicas. As an example, if you have three master nodes w
|
||||
etcd replicas and three standalone etcd nodes, the script will fail since there are
|
||||
now six total etcd replicas.
|
||||
|
||||
### GlusterFS
|
||||
### GlusterFS shared file system
|
||||
|
||||
The Terraform configuration supports provisioning of an optional GlusterFS
|
||||
shared file system based on a separate set of VMs. To enable this, you need to
|
||||
specify:
|
||||
|
||||
- the number of Gluster hosts (minimum 2)
|
||||
- Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks
|
||||
- Other properties related to provisioning the hosts
|
||||
@@ -87,7 +95,9 @@ binaries available on hyperkube v1.4.3_coreos.0 or higher.
|
||||
- you have a pair of keys generated that can be used to secure the new hosts
|
||||
|
||||
## Module Architecture
|
||||
|
||||
The configuration is divided into three modules:
|
||||
|
||||
- Network
|
||||
- IPs
|
||||
- Compute
|
||||
@@ -100,12 +110,13 @@ to be updated.
|
||||
You can force your existing IPs by modifying the compute variables in
|
||||
`kubespray.tf` as follows:
|
||||
|
||||
```
|
||||
```ini
|
||||
k8s_master_fips = ["151.101.129.67"]
|
||||
k8s_node_fips = ["151.101.129.68"]
|
||||
```
|
||||
|
||||
## Terraform
|
||||
|
||||
Terraform will be used to provision all of the OpenStack resources with base software as appropriate.
|
||||
|
||||
### Configuration
|
||||
@@ -115,10 +126,10 @@ Terraform will be used to provision all of the OpenStack resources with base sof
|
||||
Create an inventory directory for your cluster by copying the existing sample and linking the `hosts` script (used to build the inventory based on Terraform state):
|
||||
|
||||
```ShellSession
|
||||
$ cp -LRp contrib/terraform/openstack/sample-inventory inventory/$CLUSTER
|
||||
$ cd inventory/$CLUSTER
|
||||
$ ln -s ../../contrib/terraform/openstack/hosts
|
||||
$ ln -s ../../contrib
|
||||
cp -LRp contrib/terraform/openstack/sample-inventory inventory/$CLUSTER
|
||||
cd inventory/$CLUSTER
|
||||
ln -s ../../contrib/terraform/openstack/hosts
|
||||
ln -s ../../contrib
|
||||
```
|
||||
|
||||
This will be the base for subsequent Terraform commands.
|
||||
@@ -138,13 +149,13 @@ please read the [OpenStack provider documentation](https://www.terraform.io/docs
|
||||
|
||||
The recommended authentication method is to describe credentials in a YAML file `clouds.yaml` that can be stored in:
|
||||
|
||||
* the current directory
|
||||
* `~/.config/openstack`
|
||||
* `/etc/openstack`
|
||||
- the current directory
|
||||
- `~/.config/openstack`
|
||||
- `/etc/openstack`
|
||||
|
||||
`clouds.yaml`:
|
||||
|
||||
```
|
||||
```yaml
|
||||
clouds:
|
||||
mycloud:
|
||||
auth:
|
||||
@@ -162,7 +173,7 @@ clouds:
|
||||
If you have multiple clouds defined in your `clouds.yaml` file you can choose
|
||||
the one you want to use with the environment variable `OS_CLOUD`:
|
||||
|
||||
```
|
||||
```ShellSession
|
||||
export OS_CLOUD=mycloud
|
||||
```
|
||||
|
||||
@@ -174,7 +185,7 @@ from Horizon under *Project* -> *Compute* -> *Access & Security* -> *API Access*
|
||||
|
||||
With identity v2:
|
||||
|
||||
```
|
||||
```ShellSession
|
||||
source openrc
|
||||
|
||||
env | grep OS
|
||||
@@ -191,7 +202,7 @@ OS_IDENTITY_API_VERSION=2
|
||||
|
||||
With identity v3:
|
||||
|
||||
```
|
||||
```ShellSession
|
||||
source openrc
|
||||
|
||||
env | grep OS
|
||||
@@ -208,24 +219,24 @@ OS_IDENTITY_API_VERSION=3
|
||||
OS_USER_DOMAIN_NAME=Default
|
||||
```
|
||||
|
||||
Terraform does not support a mix of DomainName and DomainID, choose one or the
|
||||
other:
|
||||
Terraform does not support a mix of DomainName and DomainID, choose one or the other:
|
||||
|
||||
```
|
||||
* provider.openstack: You must provide exactly one of DomainID or DomainName to authenticate by Username
|
||||
```
|
||||
- provider.openstack: You must provide exactly one of DomainID or DomainName to authenticate by Username
|
||||
|
||||
```
|
||||
```ShellSession
|
||||
unset OS_USER_DOMAIN_NAME
|
||||
export OS_USER_DOMAIN_ID=default
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
```ShellSession
|
||||
unset OS_PROJECT_DOMAIN_ID
|
||||
set OS_PROJECT_DOMAIN_NAME=Default
|
||||
```
|
||||
|
||||
#### Cluster variables
|
||||
|
||||
The construction of the cluster is driven by values found in
|
||||
[variables.tf](variables.tf).
|
||||
|
||||
@@ -239,6 +250,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|
||||
|`network_dns_domain` | (Optional) The dns_domain for the internal network that will be generated |
|
||||
|`dns_nameservers`| An array of DNS name server names to be used by hosts in the internal subnet. |
|
||||
|`floatingip_pool` | Name of the pool from which floating IPs will be allocated |
|
||||
|`k8s_master_fips` | A list of floating IPs that you have already pre-allocated; they will be attached to master nodes instead of creating new random floating IPs. |
|
||||
|`external_net` | UUID of the external network that will be routed to |
|
||||
|`flavor_k8s_master`,`flavor_k8s_node`,`flavor_etcd`, `flavor_bastion`,`flavor_gfs_node` | Flavor depends on your openstack installation, you can get available flavor IDs through `openstack flavor list` |
|
||||
|`image`,`image_gfs` | Name of the image to use in provisioning the compute resources. Should already be loaded into glance. |
|
||||
@@ -264,16 +276,19 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|
||||
|`etcd_root_volume_size_in_gb` | Size of the root volume for etcd nodes, 0 to use ephemeral storage |
|
||||
|`bastion_root_volume_size_in_gb` | Size of the root volume for bastions, 0 to use ephemeral storage |
|
||||
|`use_server_group` | Create and use openstack nova servergroups, default: false |
|
||||
|`use_access_ip` | If 1, nodes with floating IPs will transmit internal cluster traffic via floating IPs; if 0 private IPs will be used instead. Default value is 1. |
|
||||
|`k8s_nodes` | Map containing worker node definition, see explanation below |
|
||||
|
||||
##### k8s_nodes
|
||||
|
||||
Allows a custom defintion of worker nodes giving the operator full control over individual node flavor and
|
||||
availability zone placement. To enable the use of this mode set the `number_of_k8s_nodes` and
|
||||
`number_of_k8s_nodes_no_floating_ip` variables to 0. Then define your desired worker node configuration
|
||||
using the `k8s_nodes` variable.
|
||||
|
||||
For example:
|
||||
```
|
||||
|
||||
```ini
|
||||
k8s_nodes = {
|
||||
"1" = {
|
||||
"az" = "sto1"
|
||||
@@ -294,14 +309,16 @@ k8s_nodes = {
|
||||
```
|
||||
|
||||
Would result in the same configuration as:
|
||||
```
|
||||
|
||||
```ini
|
||||
number_of_k8s_nodes = 3
|
||||
flavor_k8s_node = "83d8b44a-26a0-4f02-a981-079446926445"
|
||||
az_list = ["sto1", "sto2", "sto3"]
|
||||
```
|
||||
|
||||
And:
|
||||
```
|
||||
|
||||
```ini
|
||||
k8s_nodes = {
|
||||
"ing-1" = {
|
||||
"az" = "sto1"
|
||||
@@ -355,7 +372,8 @@ Would result in three nodes in each availability zone each with their own separa
|
||||
flavor and floating ip configuration.
|
||||
|
||||
The "schema":
|
||||
```
|
||||
|
||||
```ini
|
||||
k8s_nodes = {
|
||||
"key | node name suffix, must be unique" = {
|
||||
"az" = string
|
||||
@@ -364,6 +382,7 @@ k8s_nodes = {
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
All values are required.
|
||||
|
||||
#### Terraform state files
|
||||
@@ -372,10 +391,10 @@ In the cluster's inventory folder, the following files might be created (either
|
||||
or manually), to prevent you from pushing them accidentally they are in a
|
||||
`.gitignore` file in the `terraform/openstack` directory :
|
||||
|
||||
* `.terraform`
|
||||
* `.tfvars`
|
||||
* `.tfstate`
|
||||
* `.tfstate.backup`
|
||||
- `.terraform`
|
||||
- `.tfvars`
|
||||
- `.tfstate`
|
||||
- `.tfstate.backup`
|
||||
|
||||
You can still add them manually if you want to.
|
||||
|
||||
@@ -385,17 +404,19 @@ Before Terraform can operate on your cluster you need to install the required
|
||||
plugins. This is accomplished as follows:
|
||||
|
||||
```ShellSession
|
||||
$ cd inventory/$CLUSTER
|
||||
$ terraform init ../../contrib/terraform/openstack
|
||||
cd inventory/$CLUSTER
|
||||
terraform init ../../contrib/terraform/openstack
|
||||
```
|
||||
|
||||
This should finish fairly quickly telling you Terraform has successfully initialized and loaded necessary modules.
|
||||
|
||||
### Provisioning cluster
|
||||
|
||||
You can apply the Terraform configuration to your cluster with the following command
|
||||
issued from your cluster's inventory directory (`inventory/$CLUSTER`):
|
||||
|
||||
```ShellSession
|
||||
$ terraform apply -var-file=cluster.tfvars ../../contrib/terraform/openstack
|
||||
terraform apply -var-file=cluster.tfvars ../../contrib/terraform/openstack
|
||||
```
|
||||
|
||||
if you chose to create a bastion host, this script will create
|
||||
@@ -406,18 +427,20 @@ or move that file. If you want to use this, just leave it there, as ansible will
|
||||
pick it up automatically.
|
||||
|
||||
### Destroying cluster
|
||||
|
||||
You can destroy your new cluster with the following command issued from the cluster's inventory directory:
|
||||
|
||||
```ShellSession
|
||||
$ terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/openstack
|
||||
terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/openstack
|
||||
```
|
||||
|
||||
If you've started the Ansible run, it may also be a good idea to do some manual cleanup:
|
||||
|
||||
* remove SSH keys from the destroyed cluster from your `~/.ssh/known_hosts` file
|
||||
* clean up any temporary cache files: `rm /tmp/$CLUSTER-*`
|
||||
- remove SSH keys from the destroyed cluster from your `~/.ssh/known_hosts` file
|
||||
- clean up any temporary cache files: `rm /tmp/$CLUSTER-*`
|
||||
|
||||
### Debugging
|
||||
|
||||
You can enable debugging output from Terraform by setting
|
||||
`OS_DEBUG` to 1 and`TF_LOG` to`DEBUG` before running the Terraform command.
|
||||
|
||||
@@ -425,8 +448,8 @@ You can enable debugging output from Terraform by setting
|
||||
|
||||
Terraform can output values that are useful for configure Neutron/Octavia LBaaS or Cinder persistent volume provisioning as part of your Kubernetes deployment:
|
||||
|
||||
- `private_subnet_id`: the subnet where your instances are running is used for `openstack_lbaas_subnet_id`
|
||||
- `floating_network_id`: the network_id where the floating IP are provisioned is used for `openstack_lbaas_floating_network_id`
|
||||
- `private_subnet_id`: the subnet where your instances are running is used for `openstack_lbaas_subnet_id`
|
||||
- `floating_network_id`: the network_id where the floating IP are provisioned is used for `openstack_lbaas_floating_network_id`
|
||||
|
||||
## Ansible
|
||||
|
||||
@@ -437,9 +460,9 @@ Terraform can output values that are useful for configure Neutron/Octavia LBaaS
|
||||
Ensure your local ssh-agent is running and your ssh key has been added. This
|
||||
step is required by the terraform provisioner:
|
||||
|
||||
```
|
||||
$ eval $(ssh-agent -s)
|
||||
$ ssh-add ~/.ssh/id_rsa
|
||||
```ShellSession
|
||||
eval $(ssh-agent -s)
|
||||
ssh-add ~/.ssh/id_rsa
|
||||
```
|
||||
|
||||
If you have deployed and destroyed a previous iteration of your cluster, you will need to clear out any stale keys from your SSH "known hosts" file ( `~/.ssh/known_hosts`).
|
||||
@@ -451,7 +474,7 @@ generated`.tfstate` file to generate a dynamic inventory recognizes
|
||||
some variables within a "metadata" block, defined in a "resource"
|
||||
block (example):
|
||||
|
||||
```
|
||||
```ini
|
||||
resource "openstack_compute_instance_v2" "example" {
|
||||
...
|
||||
metadata {
|
||||
@@ -472,8 +495,8 @@ instance should be preferred over IPv4.
|
||||
|
||||
Bastion access will be determined by:
|
||||
|
||||
- Your choice on the amount of bastion hosts (set by `number_of_bastions` terraform variable).
|
||||
- The existence of nodes/masters with floating IPs (set by `number_of_k8s_masters`, `number_of_k8s_nodes`, `number_of_k8s_masters_no_etcd` terraform variables).
|
||||
- Your choice on the amount of bastion hosts (set by `number_of_bastions` terraform variable).
|
||||
- The existence of nodes/masters with floating IPs (set by `number_of_k8s_masters`, `number_of_k8s_nodes`, `number_of_k8s_masters_no_etcd` terraform variables).
|
||||
|
||||
If you have a bastion host, your ssh traffic will be directly routed through it. This is regardless of whether you have masters/nodes with a floating IP assigned.
|
||||
If you don't have a bastion host, but at least one of your masters/nodes have a floating IP, then ssh traffic will be tunneled by one of these machines.
|
||||
@@ -484,7 +507,7 @@ So, either a bastion host, or at least master/node with a floating IP are requir
|
||||
|
||||
Make sure you can connect to the hosts. Note that Flatcar Container Linux by Kinvolk will have a state `FAILED` due to Python not being present. This is okay, because Python will be installed during bootstrapping, so long as the hosts are not `UNREACHABLE`.
|
||||
|
||||
```
|
||||
```ShellSession
|
||||
$ ansible -i inventory/$CLUSTER/hosts -m ping all
|
||||
example-k8s_node-1 | SUCCESS => {
|
||||
"changed": false,
|
||||
@@ -505,48 +528,55 @@ If it fails try to connect manually via SSH. It could be something as simple as
|
||||
### Configure cluster variables
|
||||
|
||||
Edit `inventory/$CLUSTER/group_vars/all/all.yml`:
|
||||
|
||||
- **bin_dir**:
|
||||
```
|
||||
|
||||
```yml
|
||||
# Directory where the binaries will be installed
|
||||
# Default:
|
||||
# bin_dir: /usr/local/bin
|
||||
# For Flatcar Container Linux by Kinvolk:
|
||||
bin_dir: /opt/bin
|
||||
```
|
||||
|
||||
- and **cloud_provider**:
|
||||
```
|
||||
|
||||
```yml
|
||||
cloud_provider: openstack
|
||||
```
|
||||
|
||||
Edit `inventory/$CLUSTER/group_vars/k8s-cluster/k8s-cluster.yml`:
|
||||
|
||||
- Set variable **kube_network_plugin** to your desired networking plugin.
|
||||
- **flannel** works out-of-the-box
|
||||
- **calico** requires [configuring OpenStack Neutron ports](/docs/openstack.md) to allow service and pod subnets
|
||||
```
|
||||
|
||||
```yml
|
||||
# Choose network plugin (calico, weave or flannel)
|
||||
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
|
||||
kube_network_plugin: flannel
|
||||
```
|
||||
|
||||
- Set variable **resolvconf_mode**
|
||||
```
|
||||
|
||||
```yml
|
||||
# Can be docker_dns, host_resolvconf or none
|
||||
# Default:
|
||||
# resolvconf_mode: docker_dns
|
||||
# For Flatcar Container Linux by Kinvolk:
|
||||
resolvconf_mode: host_resolvconf
|
||||
```
|
||||
|
||||
- Set max amount of attached cinder volume per host (default 256)
|
||||
```
|
||||
|
||||
```yml
|
||||
node_volume_attach_limit: 26
|
||||
```
|
||||
- Disable access_ip, this will make all innternal cluster traffic to be sent over local network when a floating IP is attached (default this value is set to 1)
|
||||
```
|
||||
use_access_ip: 0
|
||||
```
|
||||
|
||||
### Deploy Kubernetes
|
||||
|
||||
```
|
||||
$ ansible-playbook --become -i inventory/$CLUSTER/hosts cluster.yml
|
||||
```ShellSession
|
||||
ansible-playbook --become -i inventory/$CLUSTER/hosts cluster.yml
|
||||
```
|
||||
|
||||
This will take some time as there are many tasks to run.
|
||||
@@ -554,26 +584,36 @@ This will take some time as there are many tasks to run.
|
||||
## Kubernetes
|
||||
|
||||
### Set up kubectl
|
||||
|
||||
1. [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your workstation
|
||||
2. Add a route to the internal IP of a master node (if needed):
|
||||
```
|
||||
|
||||
```ShellSession
|
||||
sudo route add [master-internal-ip] gw [router-ip]
|
||||
```
|
||||
|
||||
or
|
||||
```
|
||||
|
||||
```ShellSession
|
||||
sudo route add -net [internal-subnet]/24 gw [router-ip]
|
||||
```
|
||||
3. List Kubernetes certificates & keys:
|
||||
```
|
||||
|
||||
1. List Kubernetes certificates & keys:
|
||||
|
||||
```ShellSession
|
||||
ssh [os-user]@[master-ip] sudo ls /etc/kubernetes/ssl/
|
||||
```
|
||||
4. Get `admin`'s certificates and keys:
|
||||
```
|
||||
|
||||
1. Get `admin`'s certificates and keys:
|
||||
|
||||
```ShellSession
|
||||
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-kube-master-1-key.pem > admin-key.pem
|
||||
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-kube-master-1.pem > admin.pem
|
||||
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/ca.pem > ca.pem
|
||||
```
|
||||
5. Configure kubectl:
|
||||
|
||||
1. Configure kubectl:
|
||||
|
||||
```ShellSession
|
||||
$ kubectl config set-cluster default-cluster --server=https://[master-internal-ip]:6443 \
|
||||
--certificate-authority=ca.pem
|
||||
@@ -586,21 +626,24 @@ $ kubectl config set-credentials default-admin \
|
||||
$ kubectl config set-context default-system --cluster=default-cluster --user=default-admin
|
||||
$ kubectl config use-context default-system
|
||||
```
|
||||
7. Check it:
|
||||
```
|
||||
|
||||
1. Check it:
|
||||
|
||||
```ShellSession
|
||||
kubectl version
|
||||
```
|
||||
|
||||
## GlusterFS
|
||||
GlusterFS is not deployed by the standard`cluster.yml` playbook, see the
|
||||
|
||||
GlusterFS is not deployed by the standard `cluster.yml` playbook, see the
|
||||
[GlusterFS playbook documentation](../../network-storage/glusterfs/README.md)
|
||||
for instructions.
|
||||
|
||||
Basically you will install Gluster as
|
||||
```ShellSession
|
||||
$ ansible-playbook --become -i inventory/$CLUSTER/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
|
||||
```
|
||||
|
||||
```ShellSession
|
||||
ansible-playbook --become -i inventory/$CLUSTER/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
|
||||
```
|
||||
|
||||
## What's next
|
||||
|
||||
@@ -609,6 +652,7 @@ Try out your new Kubernetes cluster with the [Hello Kubernetes service](https://
|
||||
## Appendix
|
||||
|
||||
### Migration from `number_of_k8s_nodes*` to `k8s_nodes`
|
||||
|
||||
If you currently have a cluster defined using the `number_of_k8s_nodes*` variables and wish
|
||||
to migrate to the `k8s_nodes` style you can do it like so:
|
||||
|
||||
|
||||
@@ -1,7 +1,3 @@
|
||||
provider "openstack" {
|
||||
version = "~> 1.17"
|
||||
}
|
||||
|
||||
module "network" {
|
||||
source = "./modules/network"
|
||||
|
||||
@@ -27,6 +23,8 @@ module "ips" {
|
||||
network_name = var.network_name
|
||||
router_id = module.network.router_id
|
||||
k8s_nodes = var.k8s_nodes
|
||||
k8s_master_fips = var.k8s_master_fips
|
||||
router_internal_port_id = module.network.router_internal_port_id
|
||||
}
|
||||
|
||||
module "compute" {
|
||||
@@ -79,6 +77,8 @@ module "compute" {
|
||||
wait_for_floatingip = var.wait_for_floatingip
|
||||
use_access_ip = var.use_access_ip
|
||||
use_server_groups = var.use_server_groups
|
||||
extra_sec_groups = var.extra_sec_groups
|
||||
extra_sec_groups_name = var.extra_sec_groups_name
|
||||
|
||||
network_id = module.network.router_id
|
||||
}
|
||||
|
||||
@@ -17,6 +17,13 @@ resource "openstack_networking_secgroup_v2" "k8s_master" {
|
||||
delete_default_rules = true
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_v2" "k8s_master_extra" {
|
||||
count = "%{if var.extra_sec_groups}1%{else}0%{endif}"
|
||||
name = "${var.cluster_name}-k8s-master-${var.extra_sec_groups_name}"
|
||||
description = "${var.cluster_name} - Kubernetes Master nodes - rules not managed by terraform"
|
||||
delete_default_rules = true
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "k8s_master" {
|
||||
count = length(var.master_allowed_remote_ips)
|
||||
direction = "ingress"
|
||||
@@ -95,6 +102,13 @@ resource "openstack_networking_secgroup_v2" "worker" {
|
||||
delete_default_rules = true
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_v2" "worker_extra" {
|
||||
count = "%{if var.extra_sec_groups}1%{else}0%{endif}"
|
||||
name = "${var.cluster_name}-k8s-worker-${var.extra_sec_groups_name}"
|
||||
description = "${var.cluster_name} - Kubernetes worker nodes - rules not managed by terraform"
|
||||
delete_default_rules = true
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "worker" {
|
||||
count = length(var.worker_allowed_ports)
|
||||
direction = "ingress"
|
||||
@@ -124,6 +138,21 @@ resource "openstack_compute_servergroup_v2" "k8s_etcd" {
|
||||
policies = ["anti-affinity"]
|
||||
}
|
||||
|
||||
locals {
|
||||
# master groups
|
||||
master_sec_groups = compact([
|
||||
openstack_networking_secgroup_v2.k8s_master.name,
|
||||
openstack_networking_secgroup_v2.k8s.name,
|
||||
var.extra_sec_groups ?openstack_networking_secgroup_v2.k8s_master_extra[0].name : "",
|
||||
])
|
||||
# worker groups
|
||||
worker_sec_groups = compact([
|
||||
openstack_networking_secgroup_v2.k8s.name,
|
||||
openstack_networking_secgroup_v2.worker.name,
|
||||
var.extra_sec_groups ? openstack_networking_secgroup_v2.k8s_master_extra[0].name : "",
|
||||
])
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "bastion" {
|
||||
name = "${var.cluster_name}-bastion-${count.index + 1}"
|
||||
count = var.number_of_bastions
|
||||
@@ -189,9 +218,7 @@ resource "openstack_compute_instance_v2" "k8s_master" {
|
||||
name = var.network_name
|
||||
}
|
||||
|
||||
security_groups = [openstack_networking_secgroup_v2.k8s_master.name,
|
||||
openstack_networking_secgroup_v2.k8s.name,
|
||||
]
|
||||
security_groups = local.master_sec_groups
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
@@ -238,9 +265,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
|
||||
name = var.network_name
|
||||
}
|
||||
|
||||
security_groups = [openstack_networking_secgroup_v2.k8s_master.name,
|
||||
openstack_networking_secgroup_v2.k8s.name,
|
||||
]
|
||||
security_groups = local.master_sec_groups
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
@@ -327,9 +352,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
||||
name = var.network_name
|
||||
}
|
||||
|
||||
security_groups = [openstack_networking_secgroup_v2.k8s_master.name,
|
||||
openstack_networking_secgroup_v2.k8s.name,
|
||||
]
|
||||
security_groups = local.master_sec_groups
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
@@ -371,9 +394,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
|
||||
name = var.network_name
|
||||
}
|
||||
|
||||
security_groups = [openstack_networking_secgroup_v2.k8s_master.name,
|
||||
openstack_networking_secgroup_v2.k8s.name,
|
||||
]
|
||||
security_groups = local.master_sec_groups
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
@@ -414,9 +435,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {
|
||||
name = var.network_name
|
||||
}
|
||||
|
||||
security_groups = [openstack_networking_secgroup_v2.k8s.name,
|
||||
openstack_networking_secgroup_v2.worker.name,
|
||||
]
|
||||
security_groups = local.worker_sec_groups
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
@@ -461,9 +480,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
name = var.network_name
|
||||
}
|
||||
|
||||
security_groups = [openstack_networking_secgroup_v2.k8s.name,
|
||||
openstack_networking_secgroup_v2.worker.name,
|
||||
]
|
||||
security_groups = local.worker_sec_groups
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
@@ -504,9 +521,7 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
|
||||
name = var.network_name
|
||||
}
|
||||
|
||||
security_groups = [openstack_networking_secgroup_v2.k8s.name,
|
||||
openstack_networking_secgroup_v2.worker.name,
|
||||
]
|
||||
security_groups = local.worker_sec_groups
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
|
||||
@@ -127,3 +127,11 @@ variable "use_access_ip" {}
|
||||
variable "use_server_groups" {
|
||||
type = bool
|
||||
}
|
||||
|
||||
variable "extra_sec_groups" {
|
||||
type = bool
|
||||
}
|
||||
|
||||
variable "extra_sec_groups_name" {
|
||||
type = string
|
||||
}
|
||||
8
contrib/terraform/openstack/modules/compute/versions.tf
Normal file
8
contrib/terraform/openstack/modules/compute/versions.tf
Normal file
@@ -0,0 +1,8 @@
|
||||
terraform {
|
||||
required_providers {
|
||||
openstack = {
|
||||
source = "terraform-provider-openstack/openstack"
|
||||
}
|
||||
}
|
||||
required_version = ">= 0.12.26"
|
||||
}
|
||||
@@ -2,16 +2,21 @@ resource "null_resource" "dummy_dependency" {
|
||||
triggers = {
|
||||
dependency_id = var.router_id
|
||||
}
|
||||
depends_on = [
|
||||
var.router_internal_port_id
|
||||
]
|
||||
}
|
||||
|
||||
# If user specifies pre-existing IPs to use in k8s_master_fips, do not create new ones.
|
||||
resource "openstack_networking_floatingip_v2" "k8s_master" {
|
||||
count = var.number_of_k8s_masters
|
||||
count = length(var.k8s_master_fips) > 0 ? 0 : var.number_of_k8s_masters
|
||||
pool = var.floatingip_pool
|
||||
depends_on = [null_resource.dummy_dependency]
|
||||
}
|
||||
|
||||
# If user specifies pre-existing IPs to use in k8s_master_fips, do not create new ones.
|
||||
resource "openstack_networking_floatingip_v2" "k8s_master_no_etcd" {
|
||||
count = var.number_of_k8s_masters_no_etcd
|
||||
count = length(var.k8s_master_fips) > 0 ? 0 : var.number_of_k8s_masters_no_etcd
|
||||
pool = var.floatingip_pool
|
||||
depends_on = [null_resource.dummy_dependency]
|
||||
}
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
# If k8s_master_fips is already defined as input, keep the same value since new FIPs have not been created.
|
||||
output "k8s_master_fips" {
|
||||
value = openstack_networking_floatingip_v2.k8s_master[*].address
|
||||
value = length(var.k8s_master_fips) > 0 ? var.k8s_master_fips : openstack_networking_floatingip_v2.k8s_master[*].address
|
||||
}
|
||||
|
||||
# If k8s_master_fips is already defined as input, keep the same value since new FIPs have not been created.
|
||||
output "k8s_master_no_etcd_fips" {
|
||||
value = openstack_networking_floatingip_v2.k8s_master_no_etcd[*].address
|
||||
value = length(var.k8s_master_fips) > 0 ? var.k8s_master_fips : openstack_networking_floatingip_v2.k8s_master_no_etcd[*].address
|
||||
}
|
||||
|
||||
output "k8s_node_fips" {
|
||||
|
||||
@@ -17,3 +17,7 @@ variable "router_id" {
|
||||
}
|
||||
|
||||
variable "k8s_nodes" {}
|
||||
|
||||
variable "k8s_master_fips" {}
|
||||
|
||||
variable "router_internal_port_id" {}
|
||||
|
||||
11
contrib/terraform/openstack/modules/ips/versions.tf
Normal file
11
contrib/terraform/openstack/modules/ips/versions.tf
Normal file
@@ -0,0 +1,11 @@
|
||||
terraform {
|
||||
required_providers {
|
||||
null = {
|
||||
source = "hashicorp/null"
|
||||
}
|
||||
openstack = {
|
||||
source = "terraform-provider-openstack/openstack"
|
||||
}
|
||||
}
|
||||
required_version = ">= 0.12.26"
|
||||
}
|
||||
8
contrib/terraform/openstack/modules/network/versions.tf
Normal file
8
contrib/terraform/openstack/modules/network/versions.tf
Normal file
@@ -0,0 +1,8 @@
|
||||
terraform {
|
||||
required_providers {
|
||||
openstack = {
|
||||
source = "terraform-provider-openstack/openstack"
|
||||
}
|
||||
}
|
||||
required_version = ">= 0.12.26"
|
||||
}
|
||||
@@ -152,7 +152,13 @@ variable "subnet_cidr" {
|
||||
|
||||
variable "dns_nameservers" {
|
||||
description = "An array of DNS name server names used by hosts in this subnet."
|
||||
type = list
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "k8s_master_fips" {
|
||||
description = "specific pre-existing floating IPs to use for master nodes"
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
||||
@@ -205,13 +211,13 @@ variable "k8s_allowed_egress_ips" {
|
||||
}
|
||||
|
||||
variable "master_allowed_ports" {
|
||||
type = list
|
||||
type = list(any)
|
||||
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "worker_allowed_ports" {
|
||||
type = list
|
||||
type = list(any)
|
||||
|
||||
default = [
|
||||
{
|
||||
@@ -236,7 +242,19 @@ variable "router_id" {
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "router_internal_port_id" {
|
||||
description = "uuid of the port connection our router to our network"
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "k8s_nodes" {
|
||||
default = {}
|
||||
}
|
||||
|
||||
variable "extra_sec_groups" {
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "extra_sec_groups_name" {
|
||||
default = "custom"
|
||||
}
|
||||
|
||||
9
contrib/terraform/openstack/versions.tf
Normal file
9
contrib/terraform/openstack/versions.tf
Normal file
@@ -0,0 +1,9 @@
|
||||
terraform {
|
||||
required_providers {
|
||||
openstack = {
|
||||
source = "terraform-provider-openstack/openstack"
|
||||
version = "~> 1.17"
|
||||
}
|
||||
}
|
||||
required_version = ">= 0.12.26"
|
||||
}
|
||||
@@ -8,6 +8,7 @@ Provision a Kubernetes cluster with [Terraform](https://www.terraform.io) on
|
||||
This will install a Kubernetes cluster on Packet bare metal. It should work in all locations and on most server types.
|
||||
|
||||
## Approach
|
||||
|
||||
The terraform configuration inspects variables found in
|
||||
[variables.tf](variables.tf) to create resources in your Packet project.
|
||||
There is a [python script](../terraform.py) that reads the generated`.tfstate`
|
||||
@@ -15,8 +16,10 @@ file to generate a dynamic inventory that is consumed by [cluster.yml](../../../
|
||||
to actually install Kubernetes with Kubespray.
|
||||
|
||||
### Kubernetes Nodes
|
||||
|
||||
You can create many different kubernetes topologies by setting the number of
|
||||
different classes of hosts.
|
||||
|
||||
- Master nodes with etcd: `number_of_k8s_masters` variable
|
||||
- Master nodes without etcd: `number_of_k8s_masters_no_etcd` variable
|
||||
- Standalone etcd hosts: `number_of_etcd` variable
|
||||
@@ -47,6 +50,7 @@ ssh-keygen -f ~/.ssh/id_rsa
|
||||
```
|
||||
|
||||
## Terraform
|
||||
|
||||
Terraform will be used to provision all of the Packet resources with base software as appropriate.
|
||||
|
||||
### Configuration
|
||||
@@ -56,9 +60,9 @@ Terraform will be used to provision all of the Packet resources with base softwa
|
||||
Create an inventory directory for your cluster by copying the existing sample and linking the `hosts` script (used to build the inventory based on Terraform state):
|
||||
|
||||
```ShellSession
|
||||
$ cp -LRp contrib/terraform/packet/sample-inventory inventory/$CLUSTER
|
||||
$ cd inventory/$CLUSTER
|
||||
$ ln -s ../../contrib/terraform/packet/hosts
|
||||
cp -LRp contrib/terraform/packet/sample-inventory inventory/$CLUSTER
|
||||
cd inventory/$CLUSTER
|
||||
ln -s ../../contrib/terraform/packet/hosts
|
||||
```
|
||||
|
||||
This will be the base for subsequent Terraform commands.
|
||||
@@ -69,22 +73,23 @@ Your Packet API key must be available in the `PACKET_AUTH_TOKEN` environment var
|
||||
This key is typically stored outside of the code repo since it is considered secret.
|
||||
If someone gets this key, they can startup/shutdown hosts in your project!
|
||||
|
||||
For more information on how to generate an API key or find your project ID, please see:
|
||||
https://support.packet.com/kb/articles/api-integrations
|
||||
For more information on how to generate an API key or find your project ID, please see
|
||||
[API Integrations](https://support.packet.com/kb/articles/api-integrations)
|
||||
|
||||
The Packet Project ID associated with the key will be set later in cluster.tfvars.
|
||||
|
||||
For more information about the API, please see:
|
||||
https://www.packet.com/developers/api/
|
||||
For more information about the API, please see [Packet API](https://www.packet.com/developers/api/)
|
||||
|
||||
Example:
|
||||
|
||||
```ShellSession
|
||||
$ export PACKET_AUTH_TOKEN="Example-API-Token"
|
||||
export PACKET_AUTH_TOKEN="Example-API-Token"
|
||||
```
|
||||
|
||||
Note that to deploy several clusters within the same project you need to use [terraform workspace](https://www.terraform.io/docs/state/workspaces.html#using-workspaces).
|
||||
|
||||
#### Cluster variables
|
||||
|
||||
The construction of the cluster is driven by values found in
|
||||
[variables.tf](variables.tf).
|
||||
|
||||
@@ -95,10 +100,11 @@ This helps when identifying which hosts are associated with each cluster.
|
||||
|
||||
While the defaults in variables.tf will successfully deploy a cluster, it is recommended to set the following values:
|
||||
|
||||
* cluster_name = the name of the inventory directory created above as $CLUSTER
|
||||
* packet_project_id = the Packet Project ID associated with the Packet API token above
|
||||
- cluster_name = the name of the inventory directory created above as $CLUSTER
|
||||
- packet_project_id = the Packet Project ID associated with the Packet API token above
|
||||
|
||||
#### Enable localhost access
|
||||
|
||||
Kubespray will pull down a Kubernetes configuration file to access this cluster by enabling the
|
||||
`kubeconfig_localhost: true` in the Kubespray configuration.
|
||||
|
||||
@@ -115,10 +121,10 @@ In the cluster's inventory folder, the following files might be created (either
|
||||
or manually), to prevent you from pushing them accidentally they are in a
|
||||
`.gitignore` file in the `terraform/packet` directory :
|
||||
|
||||
* `.terraform`
|
||||
* `.tfvars`
|
||||
* `.tfstate`
|
||||
* `.tfstate.backup`
|
||||
- `.terraform`
|
||||
- `.tfvars`
|
||||
- `.tfstate`
|
||||
- `.tfstate.backup`
|
||||
|
||||
You can still add them manually if you want to.
|
||||
|
||||
@@ -128,34 +134,38 @@ Before Terraform can operate on your cluster you need to install the required
|
||||
plugins. This is accomplished as follows:
|
||||
|
||||
```ShellSession
|
||||
$ cd inventory/$CLUSTER
|
||||
$ terraform init ../../contrib/terraform/packet
|
||||
cd inventory/$CLUSTER
|
||||
terraform init ../../contrib/terraform/packet
|
||||
```
|
||||
|
||||
This should finish fairly quickly telling you Terraform has successfully initialized and loaded necessary modules.
|
||||
|
||||
### Provisioning cluster
|
||||
|
||||
You can apply the Terraform configuration to your cluster with the following command
|
||||
issued from your cluster's inventory directory (`inventory/$CLUSTER`):
|
||||
|
||||
```ShellSession
|
||||
$ terraform apply -var-file=cluster.tfvars ../../contrib/terraform/packet
|
||||
$ export ANSIBLE_HOST_KEY_CHECKING=False
|
||||
$ ansible-playbook -i hosts ../../cluster.yml
|
||||
terraform apply -var-file=cluster.tfvars ../../contrib/terraform/packet
|
||||
export ANSIBLE_HOST_KEY_CHECKING=False
|
||||
ansible-playbook -i hosts ../../cluster.yml
|
||||
```
|
||||
|
||||
### Destroying cluster
|
||||
|
||||
You can destroy your new cluster with the following command issued from the cluster's inventory directory:
|
||||
|
||||
```ShellSession
|
||||
$ terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/packet
|
||||
terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/packet
|
||||
```
|
||||
|
||||
If you've started the Ansible run, it may also be a good idea to do some manual cleanup:
|
||||
|
||||
* remove SSH keys from the destroyed cluster from your `~/.ssh/known_hosts` file
|
||||
* clean up any temporary cache files: `rm /tmp/$CLUSTER-*`
|
||||
- Remove SSH keys from the destroyed cluster from your `~/.ssh/known_hosts` file
|
||||
- Clean up any temporary cache files: `rm /tmp/$CLUSTER-*`
|
||||
|
||||
### Debugging
|
||||
|
||||
You can enable debugging output from Terraform by setting `TF_LOG` to `DEBUG` before running the Terraform command.
|
||||
|
||||
## Ansible
|
||||
@@ -167,9 +177,9 @@ You can enable debugging output from Terraform by setting `TF_LOG` to `DEBUG` be
|
||||
Ensure your local ssh-agent is running and your ssh key has been added. This
|
||||
step is required by the terraform provisioner:
|
||||
|
||||
```
|
||||
$ eval $(ssh-agent -s)
|
||||
$ ssh-add ~/.ssh/id_rsa
|
||||
```ShellSession
|
||||
eval $(ssh-agent -s)
|
||||
ssh-add ~/.ssh/id_rsa
|
||||
```
|
||||
|
||||
If you have deployed and destroyed a previous iteration of your cluster, you will need to clear out any stale keys from your SSH "known hosts" file ( `~/.ssh/known_hosts`).
|
||||
@@ -178,7 +188,7 @@ If you have deployed and destroyed a previous iteration of your cluster, you wil
|
||||
|
||||
Make sure you can connect to the hosts. Note that Flatcar Container Linux by Kinvolk will have a state `FAILED` due to Python not being present. This is okay, because Python will be installed during bootstrapping, so long as the hosts are not `UNREACHABLE`.
|
||||
|
||||
```
|
||||
```ShellSession
|
||||
$ ansible -i inventory/$CLUSTER/hosts -m ping all
|
||||
example-k8s_node-1 | SUCCESS => {
|
||||
"changed": false,
|
||||
@@ -198,8 +208,8 @@ If it fails try to connect manually via SSH. It could be something as simple as
|
||||
|
||||
### Deploy Kubernetes
|
||||
|
||||
```
|
||||
$ ansible-playbook --become -i inventory/$CLUSTER/hosts cluster.yml
|
||||
```ShellSession
|
||||
ansible-playbook --become -i inventory/$CLUSTER/hosts cluster.yml
|
||||
```
|
||||
|
||||
This will take some time as there are many tasks to run.
|
||||
@@ -208,20 +218,22 @@ This will take some time as there are many tasks to run.
|
||||
|
||||
### Set up kubectl
|
||||
|
||||
* [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on the localhost.
|
||||
- [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on the localhost.
|
||||
- Verify that Kubectl runs correctly
|
||||
|
||||
* Verify that Kubectl runs correctly
|
||||
```
|
||||
```ShellSession
|
||||
kubectl version
|
||||
```
|
||||
|
||||
* Verify that the Kubernetes configuration file has been copied over
|
||||
```
|
||||
- Verify that the Kubernetes configuration file has been copied over
|
||||
|
||||
```ShellSession
|
||||
cat inventory/alpha/$CLUSTER/admin.conf
|
||||
```
|
||||
|
||||
* Verify that all the nodes are running correctly.
|
||||
```
|
||||
- Verify that all the nodes are running correctly.
|
||||
|
||||
```ShellSession
|
||||
kubectl version
|
||||
kubectl --kubeconfig=inventory/$CLUSTER/artifacts/admin.conf get nodes
|
||||
```
|
||||
|
||||
@@ -1,4 +1,9 @@
|
||||
|
||||
terraform {
|
||||
required_version = ">= 0.12"
|
||||
required_providers {
|
||||
packet = {
|
||||
source = "terraform-providers/packet"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
# /!\ The vault role has been retired from the main playbook.
|
||||
# This role probably requires a LOT of changes in order to work again
|
||||
|
||||
Hashicorp Vault Role
|
||||
====================
|
||||
|
||||
The vault role has been retired from the main playbook.
|
||||
This role probably requires a LOT of changes in order to work again
|
||||
|
||||
Overview
|
||||
--------
|
||||
|
||||
@@ -22,7 +22,7 @@ role can generate certs for itself as well. Eventually, this may be improved
|
||||
to allow alternate backends (such as Consul), but currently the tasks are
|
||||
hardcoded to only create a Vault role for Etcd.
|
||||
|
||||
2. Cluster
|
||||
1. Cluster
|
||||
|
||||
This step is where the long-term Vault cluster is started and configured. Its
|
||||
first task, is to stop any temporary instances of Vault, to free the port for
|
||||
@@ -81,18 +81,18 @@ generated elsewhere, you'll need to copy the certificate and key to the hosts in
|
||||
|
||||
Additional Notes:
|
||||
|
||||
- ``groups.vault|first`` is considered the source of truth for Vault variables
|
||||
- ``vault_leader_url`` is used as pointer for the current running Vault
|
||||
- Each service should have its own role and credentials. Currently those
|
||||
* ``groups.vault|first`` is considered the source of truth for Vault variables
|
||||
* ``vault_leader_url`` is used as pointer for the current running Vault
|
||||
* Each service should have its own role and credentials. Currently those
|
||||
credentials are saved to ``/etc/vault/roles/<role>/``. The service will
|
||||
need to read in those credentials, if they want to interact with Vault.
|
||||
|
||||
Potential Work
|
||||
--------------
|
||||
|
||||
- Change the Vault role to not run certain tasks when ``root_token`` and
|
||||
* Change the Vault role to not run certain tasks when ``root_token`` and
|
||||
``unseal_keys`` are not present. Alternatively, allow user input for these
|
||||
values when missing.
|
||||
- Add the ability to start temp Vault with Host or Docker
|
||||
- Add a dynamic way to change out the backend role creation during Bootstrap,
|
||||
* Add the ability to start temp Vault with Host or Docker
|
||||
* Add a dynamic way to change out the backend role creation during Bootstrap,
|
||||
so other services can be used (such as Consul)
|
||||
|
||||
@@ -12,7 +12,6 @@
|
||||
* [Air-Gap Installation](docs/offline-environment.md)
|
||||
* CNI
|
||||
* [Calico](docs/calico.md)
|
||||
* [Contiv](docs/contiv.md)
|
||||
* [Flannel](docs/flannel.md)
|
||||
* [Kube Router](docs/kube-router.md)
|
||||
* [Weave](docs/weave.md)
|
||||
@@ -30,10 +29,12 @@
|
||||
* [Flatcar Container Linux](docs/flatcar.md)
|
||||
* [Fedora CoreOS](docs/fcos.md)
|
||||
* [OpenSUSE](docs/opensuse.md)
|
||||
* CRI
|
||||
* [Containerd](docs/containerd.md)
|
||||
* [CRI-O](docs/cri-o.md)
|
||||
* Advanced
|
||||
* [Proxy](/docs/proxy.md)
|
||||
* [Downloads](docs/downloads.md)
|
||||
* [CRI-O](docs/cri-o.md)
|
||||
* [Netcheck](docs/netcheck.md)
|
||||
* [DNS Stack](docs/dns-stack.md)
|
||||
* [Kubernetes reliability](docs/kubernetes-reliability.md)
|
||||
|
||||
@@ -12,7 +12,7 @@ URL rewriting, CORS, rate limiting, and automatic metrics collection.
|
||||
* `ingress_ambassador_namespace` (default `ambassador`): namespace for installing Ambassador.
|
||||
* `ingress_ambassador_update_window` (default `0 0 * * SUN`): _crontab_-like expression
|
||||
for specifying when the Operator should try to update the Ambassador API Gateway.
|
||||
* `ingress_ambassador_version` (defaulkt: `*`): SemVer rule for versions allowed for
|
||||
* `ingress_ambassador_version` (default: `*`): SemVer rule for versions allowed for
|
||||
installation/updates.
|
||||
* `ingress_ambassador_secure_port` (default: 443): HTTPS port to listen at.
|
||||
* `ingress_ambassador_insecure_port` (default: 80): HTTP port to listen at.
|
||||
|
||||
@@ -116,7 +116,6 @@ The following tags are defined in playbooks:
|
||||
| facts | Gathering facts and misc check results
|
||||
| flannel | Network plugin flannel
|
||||
| gce | Cloud-provider GCP
|
||||
| hyperkube | Manipulations with K8s hyperkube image
|
||||
| k8s-pre-upgrade | Upgrading K8s cluster
|
||||
| k8s-secrets | Configuring K8s certs/keys
|
||||
| kube-apiserver | Configuring static pod kube-apiserver
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Azure Disk CSI Driver
|
||||
|
||||
The Azure Disk CSI driver allows you to provision volumes for pods with a Kubernetes deployment over Azure Cloud. The CSI driver replaces to volume provioning done by the in-tree azure cloud provider which is deprecated.
|
||||
The Azure Disk CSI driver allows you to provision volumes for pods with a Kubernetes deployment over Azure Cloud. The CSI driver replaces the volume provisioning done by the in-tree azure cloud provider, which is deprecated.
|
||||
|
||||
This documentation is an updated version of the in-tree Azure cloud provider documentation (azure.md).
|
||||
|
||||
|
||||
@@ -36,7 +36,7 @@ The name of the resource group your instances are in, can be retrieved via `az g
|
||||
|
||||
### azure\_vmtype
|
||||
|
||||
The type of the vm. Supported values are `standard` or `vmss`. If vm is type of `Virtal Machines` then value is `standard`. If vm is part of `Virtaul Machine Scale Sets` then value is `vmss`
|
||||
The type of the vm. Supported values are `standard` or `vmss`. If vm is type of `Virtual Machines` then value is `standard`. If vm is part of `Virtual Machine Scale Sets` then value is `vmss`
|
||||
|
||||
### azure\_vnet\_name
|
||||
|
||||
|
||||
@@ -1,18 +1,12 @@
|
||||
# Calico
|
||||
|
||||
N.B. **Version 2.6.5 upgrade to 3.1.1 is upgrading etcd store to etcdv3**
|
||||
|
||||
If you create automated backups of etcdv2 please switch to creating etcdv3 backups, as kubernetes and calico now use etcdv3
|
||||
After migration you can check `/tmp/calico_upgrade/` directory for converted items to etcdv3.
|
||||
**PLEASE TEST upgrade before upgrading production cluster.**
|
||||
|
||||
Check if the calico-node container is running
|
||||
|
||||
```ShellSession
|
||||
docker ps | grep calico
|
||||
```
|
||||
|
||||
The **calicoctl.sh** is wrap script with configured acces credentials for command calicoctl allows to check the status of the network workloads.
|
||||
The **calicoctl.sh** is a wrapper script with configured access credentials for the calicoctl command, which allows you to check the status of the network workloads.
|
||||
|
||||
* Check the status of Calico nodes
|
||||
|
||||
@@ -20,24 +14,12 @@ The **calicoctl.sh** is wrap script with configured acces credentials for comman
|
||||
calicoctl.sh node status
|
||||
```
|
||||
|
||||
or for versions prior to *v1.0.0*:
|
||||
|
||||
```ShellSession
|
||||
calicoctl.sh status
|
||||
```
|
||||
|
||||
* Show the configured network subnet for containers
|
||||
|
||||
```ShellSession
|
||||
calicoctl.sh get ippool -o wide
|
||||
```
|
||||
|
||||
or for versions prior to *v1.0.0*:
|
||||
|
||||
```ShellSession
|
||||
calicoctl.sh pool show
|
||||
```
|
||||
|
||||
* Show the workloads (ip addresses of containers and their location)
|
||||
|
||||
```ShellSession
|
||||
@@ -50,14 +32,22 @@ and
|
||||
calicoctl.sh get hostEndpoint -o wide
|
||||
```
|
||||
|
||||
or for versions prior to *v1.0.0*:
|
||||
|
||||
```ShellSession
|
||||
calicoctl.sh endpoint show --detail
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### Optional : Define datastore type
|
||||
|
||||
The default datastore, Kubernetes API datastore is recommended for on-premises deployments, and supports only Kubernetes workloads; etcd is the best datastore for hybrid deployments.
|
||||
|
||||
Allowed values are `kdd` (default) and `etcd`.
|
||||
|
||||
Note: using kdd and more than 50 nodes, consider using the `typha` daemon to provide scaling.
|
||||
|
||||
To re-define you need to edit the inventory and add a group variable `calico_datastore`
|
||||
|
||||
```yml
|
||||
calico_datastore: kdd
|
||||
```
|
||||
|
||||
### Optional : Define network backend
|
||||
|
||||
In some cases you may want to define Calico network backend. Allowed values are `bird`, `vxlan` or `none`. Bird is a default value.
|
||||
@@ -103,6 +93,15 @@ This can be enabled by setting the following variable as follow in group_vars (k
|
||||
calico_advertise_cluster_ips: true
|
||||
```
|
||||
|
||||
Since calico 3.10, Calico supports advertising Kubernetes service ExternalIPs over BGP in addition to cluster IPs advertising.
|
||||
This can be enabled by setting the following variable in group_vars (k8s-cluster/k8s-net-calico.yml)
|
||||
|
||||
```yml
|
||||
calico_advertise_service_external_ips:
|
||||
- x.x.x.x/24
|
||||
- y.y.y.y/32
|
||||
```
|
||||
|
||||
### Optional : Define global AS number
|
||||
|
||||
Optional parameter `global_as_num` defines Calico global AS number (`/calico/bgp/v1/global/as_num` etcd key).
|
||||
@@ -248,3 +247,15 @@ calico_node_extra_envs:
|
||||
neutron security-group-rule-create --protocol 4 --direction egress k8s-a0tp4t
|
||||
neutron security-group-rule-create --protocol 4 --direction ingress k8s-a0tp4t
|
||||
```
|
||||
|
||||
### Optional : Use Calico CNI host-local IPAM plugin
|
||||
|
||||
Calico currently supports two types of CNI IPAM plugins, `host-local` and `calico-ipam` (default).
|
||||
|
||||
To allow Calico to determine the subnet to use from the Kubernetes API based on the `Node.podCIDR` field, enable the following setting.
|
||||
|
||||
```yml
|
||||
calico_ipam_host_local: true
|
||||
```
|
||||
|
||||
Refer to Project Calico section [Using host-local IPAM](https://docs.projectcalico.org/reference/cni-plugin/configuration#using-host-local-ipam) for further information.
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
# RHEL / CentOS 8
|
||||
# CentOS 8
|
||||
|
||||
RHEL / CentOS 8 ships only with iptables-nft (ie without iptables-legacy)
|
||||
CentOS 8 ships only with iptables-nft (ie without iptables-legacy)
|
||||
The only tested configuration for now is using Calico CNI
|
||||
You need to use K8S 1.17+ and to add `calico_iptables_backend: "NFT"` to your configuration
|
||||
You need to use K8S 1.17+ and to add `calico_iptables_backend: "NFT"` or `calico_iptables_backend: "Auto"` to your configuration
|
||||
|
||||
If you have containers that are using iptables in the host network namespace (`hostNetwork=true`),
|
||||
you need to ensure they are using iptables-nft.
|
||||
|
||||
84
docs/ci.md
84
docs/ci.md
@@ -4,51 +4,51 @@ To generate this Matrix run `./tests/scripts/md-table/main.py`
|
||||
|
||||
## docker
|
||||
|
||||
| OS / CNI | calico | canal | cilium | contiv | flannel | kube-ovn | kube-router | macvlan | ovn4nfv | weave |
|
||||
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
|
||||
amazon | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :white_check_mark: |
|
||||
centos8 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
|
||||
debian10 | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
debian9 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: |
|
||||
fedora31 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
|
||||
fedora32 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
|
||||
opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
oracle7 | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu16 | :x: | :white_check_mark: | :x: | :white_check_mark: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :white_check_mark: |
|
||||
ubuntu18 | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :white_check_mark: |
|
||||
ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
|
||||
| OS / CNI | calico | canal | cilium | flannel | kube-ovn | kube-router | macvlan | ovn4nfv | weave |
|
||||
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- |
|
||||
amazon | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :white_check_mark: |
|
||||
centos8 | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
|
||||
debian10 | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
debian9 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: |
|
||||
fedora32 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
|
||||
fedora33 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
oracle7 | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu16 | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :white_check_mark: |
|
||||
ubuntu18 | :white_check_mark: | :x: | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :white_check_mark: |
|
||||
ubuntu20 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
|
||||
|
||||
## crio
|
||||
|
||||
| OS / CNI | calico | canal | cilium | contiv | flannel | kube-ovn | kube-router | macvlan | ovn4nfv | weave |
|
||||
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
|
||||
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
centos8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
fedora31 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
fedora32 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
oracle7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
| OS / CNI | calico | canal | cilium | flannel | kube-ovn | kube-router | macvlan | ovn4nfv | weave |
|
||||
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- |
|
||||
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
centos8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
fedora32 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
fedora33 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
oracle7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
|
||||
## containerd
|
||||
|
||||
| OS / CNI | calico | canal | cilium | contiv | flannel | kube-ovn | kube-router | macvlan | ovn4nfv | weave |
|
||||
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
|
||||
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
centos7 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
|
||||
centos8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
debian10 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
fedora31 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
fedora32 | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
|
||||
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
oracle7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu18 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
| OS / CNI | calico | canal | cilium | flannel | kube-ovn | kube-router | macvlan | ovn4nfv | weave |
|
||||
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- |
|
||||
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
centos7 | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
|
||||
centos8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
debian10 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
fedora32 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
|
||||
fedora33 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
oracle7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu18 | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
# Comparison
|
||||
|
||||
## Kubespray vs [Kops](https://github.com/kubernetes/kops)
|
||||
## Kubespray vs Kops
|
||||
|
||||
Kubespray runs on bare metal and most clouds, using Ansible as its substrate for
|
||||
provisioning and orchestration. Kops performs the provisioning and orchestration
|
||||
provisioning and orchestration. [Kops](https://github.com/kubernetes/kops) performs the provisioning and orchestration
|
||||
itself, and as such is less flexible in deployment platforms. For people with
|
||||
familiarity with Ansible, existing Ansible deployments or the desire to run a
|
||||
Kubernetes cluster across multiple platforms, Kubespray is a good choice. Kops,
|
||||
@@ -11,9 +11,9 @@ however, is more tightly integrated with the unique features of the clouds it
|
||||
supports so it could be a better choice if you know that you will only be using
|
||||
one platform for the foreseeable future.
|
||||
|
||||
## Kubespray vs [Kubeadm](https://github.com/kubernetes/kubeadm)
|
||||
## Kubespray vs Kubeadm
|
||||
|
||||
Kubeadm provides domain Knowledge of Kubernetes clusters' life cycle
|
||||
[Kubeadm](https://github.com/kubernetes/kubeadm) provides domain knowledge of Kubernetes clusters' life cycle
|
||||
management, including self-hosted layouts, dynamic discovery services and so
|
||||
on. Had it belonged to the new [operators world](https://coreos.com/blog/introducing-operators.html),
|
||||
it may have been named a "Kubernetes cluster operator". Kubespray however,
|
||||
|
||||
37
docs/containerd.md
Normal file
37
docs/containerd.md
Normal file
@@ -0,0 +1,37 @@
|
||||
# containerd
|
||||
|
||||
[containerd] is an industry-standard container runtime with an emphasis on simplicity, robustness and portability.
|
||||
Kubespray supports basic functionality for using containerd as the default container runtime in a cluster.
|
||||
|
||||
_To use the containerd container runtime set the following variables:_
|
||||
|
||||
## k8s-cluster.yml
|
||||
|
||||
```yaml
|
||||
container_manager: containerd
|
||||
```
|
||||
|
||||
## etcd.yml
|
||||
|
||||
```yaml
|
||||
etcd_deployment_type: host
|
||||
```
|
||||
|
||||
## Containerd config
|
||||
|
||||
Example: define registry mirror for docker hub
|
||||
|
||||
```yaml
|
||||
containerd_config:
|
||||
grpc:
|
||||
max_recv_message_size: 16777216
|
||||
max_send_message_size: 16777216
|
||||
debug:
|
||||
level: ""
|
||||
registries:
|
||||
"docker.io":
|
||||
- "https://mirror.gcr.io"
|
||||
- "https://registry-1.docker.io"
|
||||
```
|
||||
|
||||
[containerd]: https://containerd.io/
|
||||
@@ -1,72 +0,0 @@
|
||||
# Contiv
|
||||
|
||||
Here is the [Contiv documentation](https://contiv.github.io/documents/).
|
||||
|
||||
## Administrate Contiv
|
||||
|
||||
There are two ways to manage Contiv:
|
||||
|
||||
* a web UI managed by the api proxy service
|
||||
* a CLI named `netctl`
|
||||
|
||||
### Interfaces
|
||||
|
||||
#### The Web Interface
|
||||
|
||||
This UI is hosted on all kubernetes master nodes. The service is available at `https://<one of your master node>:10000`.
|
||||
|
||||
You can configure the api proxy by overriding the following variables:
|
||||
|
||||
```yaml
|
||||
contiv_enable_api_proxy: true
|
||||
contiv_api_proxy_port: 10000
|
||||
contiv_generate_certificate: true
|
||||
```
|
||||
|
||||
The default credentials to log in are: admin/admin.
|
||||
|
||||
#### The Command Line Interface
|
||||
|
||||
The second way to modify the Contiv configuration is to use the CLI. To do this, you have to connect to the server and export an environment variable to tell netctl how to connect to the cluster:
|
||||
|
||||
```bash
|
||||
export NETMASTER=http://127.0.0.1:9999
|
||||
```
|
||||
|
||||
The port can be changed by overriding the following variable:
|
||||
|
||||
```yaml
|
||||
contiv_netmaster_port: 9999
|
||||
```
|
||||
|
||||
The CLI doesn't use the authentication process needed by the web interface.
|
||||
|
||||
### Network configuration
|
||||
|
||||
The default configuration uses VXLAN to create an overlay. Two networks are created by default:
|
||||
|
||||
* `contivh1`: an infrastructure network. It allows nodes to access the pods IPs. It is mandatory in a Kubernetes environment that uses VXLAN.
|
||||
* `default-net` : the default network that hosts pods.
|
||||
|
||||
You can change the default network configuration by overriding the `contiv_networks` variable.
|
||||
|
||||
The default forward mode is set to routing and the default network mode is vxlan:
|
||||
|
||||
```yaml
|
||||
contiv_fwd_mode: routing
|
||||
contiv_net_mode: vxlan
|
||||
```
|
||||
|
||||
The following is an example of how you can use VLAN instead of VXLAN:
|
||||
|
||||
```yaml
|
||||
contiv_fwd_mode: bridge
|
||||
contiv_net_mode: vlan
|
||||
contiv_vlan_interface: eth0
|
||||
contiv_networks:
|
||||
- name: default-net
|
||||
subnet: "{{ kube_pods_subnet }}"
|
||||
gateway: "{{ kube_pods_subnet|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
|
||||
encap: vlan
|
||||
pkt_tag: 10
|
||||
```
|
||||
@@ -4,29 +4,45 @@
|
||||
Kubespray supports basic functionality for using CRI-O as the default container runtime in a cluster.
|
||||
|
||||
* Kubernetes supports CRI-O on v1.11.1 or later.
|
||||
* Helm and other tools may not function as normal due to dependency on Docker.
|
||||
* `scale.yml` and `upgrade-cluster.yml` are not supported on clusters using CRI-O.
|
||||
* etcd: configure either kubeadm managed etcd or host deployment
|
||||
|
||||
_To use CRI-O instead of Docker, set the following variables:_
|
||||
_To use the CRI-O container runtime set the following variables:_
|
||||
|
||||
## all.yml
|
||||
## all/all.yml
|
||||
|
||||
```yaml
|
||||
download_container: false
|
||||
skip_downloads: false
|
||||
etcd_kubeadm_enabled: true
|
||||
```
|
||||
|
||||
## k8s-cluster.yml
|
||||
## k8s-cluster/k8s-cluster.yml
|
||||
|
||||
```yaml
|
||||
kubelet_deployment_type: host
|
||||
container_manager: crio
|
||||
```
|
||||
|
||||
## etcd.yml
|
||||
|
||||
```yaml
|
||||
etcd_deployment_type: host
|
||||
etcd_deployment_type: host # optionally and mutually exclusive with etcd_kubeadm_enabled
|
||||
```
|
||||
|
||||
## all/crio.yml
|
||||
|
||||
Enable docker hub registry mirrors
|
||||
|
||||
```yaml
|
||||
crio_registries_mirrors:
|
||||
- prefix: docker.io
|
||||
insecure: false
|
||||
blocked: false
|
||||
location: registry-1.docker.io
|
||||
mirrors:
|
||||
- location: 192.168.100.100:5000
|
||||
insecure: true
|
||||
- location: mirror.gcr.io
|
||||
insecure: false
|
||||
```
|
||||
|
||||
[CRI-O]: https://cri-o.io/
|
||||
|
||||
@@ -82,6 +82,10 @@ dns_etchosts: |
|
||||
192.168.0.200 ingress.example.com
|
||||
```
|
||||
|
||||
### enable_coredns_reverse_dns_lookups
|
||||
|
||||
Whether reverse DNS lookups are enabled in the coredns config. Defaults to `true`.
|
||||
|
||||
## DNS modes supported by Kubespray
|
||||
|
||||
You can modify how Kubespray sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``.
|
||||
|
||||
@@ -69,9 +69,9 @@ ansible-playbook -i inventory/mycluster/hosts.yml remove-node.yml -b -v \
|
||||
--extra-vars "node=nodename,nodename2"
|
||||
```
|
||||
|
||||
If a node is completely unreachable by ssh, add `--extra-vars reset_nodes=no`
|
||||
If a node is completely unreachable by ssh, add `--extra-vars reset_nodes=false`
|
||||
to skip the node reset step. If one node is unavailable, but others you wish
|
||||
to remove are able to connect via SSH, you could set reset_nodes=no as a host
|
||||
to remove are able to connect via SSH, you could set `reset_nodes=false` as a host
|
||||
var in inventory.
|
||||
|
||||
## Connecting to Kubernetes
|
||||
@@ -95,11 +95,11 @@ the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-applicati
|
||||
|
||||
Supported version is kubernetes-dashboard v2.0.x :
|
||||
|
||||
- Login options are : token/kubeconfig by default, basic can be enabled with `kube_basic_auth: true` inventory variable - not recommended because this requires ABAC api-server which is not tested by kubespray team
|
||||
- Login option : token/kubeconfig by default
|
||||
- Deployed by default in "kube-system" namespace, can be overridden with `dashboard_namespace: kubernetes-dashboard` in inventory,
|
||||
- Only serves over https
|
||||
|
||||
Access is described in [dashboard docs](https://github.com/kubernetes/dashboard/tree/master/docs/user/accessing-dashboard). With kubespray's default deployment in kube-system namespace, instead of kuberntes-dashboard :
|
||||
Access is described in [dashboard docs](https://github.com/kubernetes/dashboard/tree/master/docs/user/accessing-dashboard). With kubespray's default deployment in kube-system namespace, instead of kubernetes-dashboard :
|
||||
|
||||
- Proxy URL is <http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#/login>
|
||||
- kubectl commands must be run with "-n kube-system"
|
||||
@@ -127,6 +127,8 @@ host and can optionally be configured on your ansible host by setting
|
||||
- If `kubeconfig_localhost` enabled `admin.conf` will appear in the `inventory/mycluster/artifacts/` directory after deployment.
|
||||
- The location where these files are downloaded to can be configured via the `artifacts_dir` variable.
|
||||
|
||||
NOTE: The controller host name in the admin.conf file might be a private IP. If so, change it to use the controller's public IP or the cluster's load balancer.
|
||||
|
||||
You can see a list of nodes by running the following commands:
|
||||
|
||||
```ShellSession
|
||||
|
||||
@@ -43,8 +43,10 @@ attempts to set a status of node.
|
||||
|
||||
At the same time Kubernetes controller manager will try to check
|
||||
`nodeStatusUpdateRetry` times every `--node-monitor-period` of time. After
|
||||
`--node-monitor-grace-period` it will consider node unhealthy. It will remove
|
||||
its pods based on `--pod-eviction-timeout`
|
||||
`--node-monitor-grace-period` it will consider node unhealthy. Pods will then be rescheduled based on the
|
||||
[Taint Based Eviction](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/#taint-based-evictions)
|
||||
timers that you set on them individually, or the API Server's global timers:`--default-not-ready-toleration-seconds` &
|
||||
``--default-unreachable-toleration-seconds``.
|
||||
|
||||
Kube proxy has a watcher over API. Once pods are evicted, Kube proxy will
|
||||
notice and will update iptables of the node. It will remove endpoints from
|
||||
@@ -57,12 +59,14 @@ services so pods from failed node won't be accessible anymore.
|
||||
If `-–node-status-update-frequency` is set to **4s** (10s is default).
|
||||
`--node-monitor-period` to **2s** (5s is default).
|
||||
`--node-monitor-grace-period` to **20s** (40s is default).
|
||||
`--pod-eviction-timeout` is set to **30s** (5m is default)
|
||||
`--default-not-ready-toleration-seconds` and ``--default-unreachable-toleration-seconds`` are set to **30**
|
||||
(300 seconds is default). Note these two values should be integers representing the number of seconds ("s" or "m" for
|
||||
seconds\minutes are not specified).
|
||||
|
||||
In such scenario, pods will be evicted in **50s** because the node will be
|
||||
considered as down after **20s**, and `--pod-eviction-timeout` occurs after
|
||||
**30s** more. However, this scenario creates an overhead on etcd as every node
|
||||
will try to update its status every 2 seconds.
|
||||
considered as down after **20s**, and `--default-not-ready-toleration-seconds` or
|
||||
``--default-unreachable-toleration-seconds`` occur after **30s** more. However, this scenario creates an overhead on
|
||||
etcd as every node will try to update its status every 2 seconds.
|
||||
|
||||
If the environment has 1000 nodes, there will be 15000 node updates per
|
||||
minute which may require large etcd containers or even dedicated nodes for etcd.
|
||||
@@ -75,7 +79,8 @@ minute which may require large etcd containers or even dedicated nodes for etcd.
|
||||
## Medium Update and Average Reaction
|
||||
|
||||
Let's set `-–node-status-update-frequency` to **20s**
|
||||
`--node-monitor-grace-period` to **2m** and `--pod-eviction-timeout` to **1m**.
|
||||
`--node-monitor-grace-period` to **2m** and `--default-not-ready-toleration-seconds` and
|
||||
``--default-unreachable-toleration-seconds`` to **60**.
|
||||
In that case, Kubelet will try to update status every 20s. So, it will be 6 * 5
|
||||
= 30 attempts before Kubernetes controller manager will consider unhealthy
|
||||
status of node. After 1m it will evict all pods. The total time will be 3m
|
||||
@@ -90,9 +95,9 @@ etcd updates per minute.
|
||||
## Low Update and Slow reaction
|
||||
|
||||
Let's set `-–node-status-update-frequency` to **1m**.
|
||||
`--node-monitor-grace-period` will set to **5m** and `--pod-eviction-timeout`
|
||||
to **1m**. In this scenario, every kubelet will try to update the status every
|
||||
minute. There will be 5 * 5 = 25 attempts before unhealthy status. After 5m,
|
||||
`--node-monitor-grace-period` will set to **5m** and `--default-not-ready-toleration-seconds` and
|
||||
``--default-unreachable-toleration-seconds`` to **60**. In this scenario, every kubelet will try to update the status
|
||||
every minute. There will be 5 * 5 = 25 attempts before unhealthy status. After 5m,
|
||||
Kubernetes controller manager will set unhealthy status. This means that pods
|
||||
will be evicted after 1m after being marked unhealthy. (6m in total).
|
||||
|
||||
|
||||
@@ -30,7 +30,8 @@ For a large scaled deployments, consider the following configuration changes:
|
||||
* Tune ``kubelet_status_update_frequency`` to increase reliability of kubelet.
|
||||
``kube_controller_node_monitor_grace_period``,
|
||||
``kube_controller_node_monitor_period``,
|
||||
``kube_controller_pod_eviction_timeout`` for better Kubernetes reliability.
|
||||
``kube_apiserver_pod_eviction_not_ready_timeout_seconds`` &
|
||||
``kube_apiserver_pod_eviction_unreachable_timeout_seconds`` for better Kubernetes reliability.
|
||||
Check out [Kubernetes Reliability](kubernetes-reliability.md)
|
||||
|
||||
* Tune network prefix sizes. Those are ``kube_network_node_prefix``,
|
||||
|
||||
@@ -70,7 +70,7 @@ Before using `--limit` run playbook `facts.yml` without the limit to refresh fac
|
||||
|
||||
With the old node still in the inventory, run `remove-node.yml`. You need to pass `-e node=NODE_NAME` to the playbook to limit the execution to the node being removed.
|
||||
|
||||
If the node you want to remove is not online, you should add `reset_nodes=false` to your extra-vars: `-e node=NODE_NAME reset_nodes=false`.
|
||||
If the node you want to remove is not online, you should add `reset_nodes=false` to your extra-vars: `-e node=NODE_NAME -e reset_nodes=false`.
|
||||
Use this flag even when you remove other types of nodes like a master or etcd nodes.
|
||||
|
||||
### 5) Remove the node from the inventory
|
||||
|
||||
@@ -10,10 +10,11 @@ In case your servers don't have access to internet (for example when deploying o
|
||||
|
||||
## Configure Inventory
|
||||
|
||||
Once all artifacts are accessible from your internal network, **adjust** the following variables in your inventory to match your environment:
|
||||
Once all artifacts are accessible from your internal network, **adjust** the following variables in [your inventory](/inventory/sample/group_vars/k8s-cluster/offline.yml) to match your environment:
|
||||
|
||||
```yaml
|
||||
# Registry overrides
|
||||
kube_image_repo: "{{ registry_host }}"
|
||||
gcr_image_repo: "{{ registry_host }}"
|
||||
docker_image_repo: "{{ registry_host }}"
|
||||
quay_image_repo: "{{ registry_host }}"
|
||||
@@ -33,7 +34,7 @@ calicoctl_download_url: "{{ files_repo }}/kubernetes/calico/{{ calico_ctl_versio
|
||||
docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch"
|
||||
docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
|
||||
## Containerd
|
||||
extras_rh_repo_base_url: "{{ yum_repo }}/centos/$releasever/extras/$basearch"
|
||||
extras_rh_repo_base_url: "{{ yum_repo }}/centos/{{ ansible_distribution_major_version }}/extras/$basearch"
|
||||
extras_rh_repo_gpgkey: "{{ yum_repo }}/containerd/gpg"
|
||||
|
||||
# Fedora
|
||||
@@ -61,9 +62,6 @@ docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg"
|
||||
containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd"
|
||||
containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg"
|
||||
containerd_ubuntu_repo_repokey: 'YOURREPOKEY'
|
||||
|
||||
# If using helm
|
||||
helm_stable_repo_url: "{{ helm_registry }}"
|
||||
```
|
||||
|
||||
For the OS specific settings, just define the one matching your OS.
|
||||
@@ -72,10 +70,16 @@ If you use the settings like the one above, you'll need to define in your invent
|
||||
* `registry_host`: Container image registry. If you _don't_ use the same repository path for the container images that the ones defined in [Download's role defaults](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/download/defaults/main.yml), you need to override the `*_image_repo` for these container images. If you want to make your life easier, use the same repository path, you won't have to override anything else.
|
||||
* `files_repo`: HTTP webserver or reverse proxy that is able to serve the files listed above. Path is not important, you can store them anywhere as long as it's accessible by kubespray. It's recommended to use `*_version` in the path so that you don't need to modify this setting everytime kubespray upgrades one of these components.
|
||||
* `yum_repo`/`debian_repo`/`ubuntu_repo`: OS package repository depending of your OS, should point to your internal repository. Adjust the path accordingly.
|
||||
* `helm_registry`: Helm Registry to use for `stable` Helm Charts if `helm_enabled: true`
|
||||
|
||||
## Install Kubespray Python Packages
|
||||
|
||||
### Recommended way: Kubespray Container Image
|
||||
|
||||
The easiest way is to use [kubespray container image](https://quay.io/kubespray/kubespray) as all the required packages are baked in the image.
|
||||
Just copy the container image in your private container image registry and you are all set!
|
||||
|
||||
### Manual installation
|
||||
|
||||
Look at the `requirements.txt` file and check if your OS provides all packages out-of-the-box (Using the OS package manager). For those missing, you need to either use a proxy that has Internet access (typically from a DMZ) or setup a PyPi server in your network that will host these packages.
|
||||
|
||||
If you're using a HTTP(S) proxy to download your python packages:
|
||||
@@ -102,4 +106,10 @@ Once all artifacts are in place and your inventory properly set up, you can run
|
||||
ansible-playbook -i inventory/my_airgap_cluster/hosts.yaml -b cluster.yml
|
||||
```
|
||||
|
||||
If you use [Kubespray Container Image](#recommended-way:-kubespray-container-image), you can mount your inventory inside the container:
|
||||
|
||||
```bash
|
||||
docker run --rm -it -v path_to_inventory/my_airgap_cluster:inventory/my_airgap_cluster myprivateregisry.com/kubespray/kubespray:v2.14.0 ansible-playbook -i inventory/my_airgap_cluster/hosts.yaml -b cluster.yml
|
||||
```
|
||||
|
||||
## Please Note: Offline installation doesn't support CRI-O container runtime at the moment (see [this issue](https://github.com/kubernetes-sigs/kubespray/issues/6233))
|
||||
|
||||
@@ -40,7 +40,7 @@ Grab the latest version of Terraform and install it.
|
||||
```bash
|
||||
echo "https://releases.hashicorp.com/terraform/$(curl -s https://checkpoint-api.hashicorp.com/v1/check/terraform | jq -r -M '.current_version')/terraform_$(curl -s https://checkpoint-api.hashicorp.com/v1/check/terraform | jq -r -M '.current_version')_linux_amd64.zip"
|
||||
sudo yum install unzip
|
||||
sudo unzip terraform_0.12.24_linux_amd64.zip -d /usr/local/bin/
|
||||
sudo unzip terraform_0.12.29_linux_amd64.zip -d /usr/local/bin/
|
||||
```
|
||||
|
||||
## Download Kubespray
|
||||
|
||||
@@ -13,4 +13,11 @@ If you set http and https proxy, all nodes and loadbalancer will be excluded fro
|
||||
|
||||
## Set additional addresses to default no_proxy (all cluster nodes and loadbalancer)
|
||||
|
||||
`additional_no_proxy: "aditional_host,"`
|
||||
`additional_no_proxy: "aditional_host1,aditional_host2"`
|
||||
|
||||
## Exclude workers from no_proxy
|
||||
|
||||
Since workers are included in the no_proxy variable, by default, docker engine will be restarted on all nodes (all
|
||||
pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the
|
||||
no_proxy variable, set:
|
||||
`no_proxy_exclude_workers: true`
|
||||
|
||||
38
docs/rhel.md
Normal file
38
docs/rhel.md
Normal file
@@ -0,0 +1,38 @@
|
||||
# Red Hat Enterprise Linux (RHEL)
|
||||
|
||||
## RHEL Support Subscription Registration
|
||||
|
||||
In order to install packages via yum or dnf, RHEL 7/8 hosts are required to be registered for a valid Red Hat support subscription.
|
||||
|
||||
You can apply for a 1-year Development support subscription by creating a [Red Hat Developers](https://developers.redhat.com/) account. Be aware though that as the Red Hat Developers subscription is limited to only 1 year, it should not be used to register RHEL 7/8 hosts provisioned in Production environments.
|
||||
|
||||
Once you have a Red Hat support account, simply add the credentials to the Ansible inventory parameters `rh_subscription_username` and `rh_subscription_password` prior to deploying Kubespray. If your company has a Corporate Red Hat support account, then obtain an **Organization ID** and **Activation Key**, and add these to the Ansible inventory parameters `rh_subscription_org_id` and `rh_subscription_activation_key` instead of using your Red Hat support account credentials.
|
||||
|
||||
```ini
|
||||
rh_subscription_username: ""
|
||||
rh_subscription_password: ""
|
||||
# rh_subscription_org_id: ""
|
||||
# rh_subscription_activation_key: ""
|
||||
```
|
||||
|
||||
Either the Red Hat support account username/password, or Organization ID/Activation Key combination must be specified in the Ansible inventory in order for the Red Hat subscription registration to complete successfully during the deployment of Kubespray.
|
||||
|
||||
Update the Ansible inventory parameters `rh_subscription_usage`, `rh_subscription_role` and `rh_subscription_sla` if necessary to suit your specific requirements.
|
||||
|
||||
```ini
|
||||
rh_subscription_usage: "Development"
|
||||
rh_subscription_role: "Red Hat Enterprise Server"
|
||||
rh_subscription_sla: "Self-Support"
|
||||
```
|
||||
|
||||
If the RHEL 7/8 hosts are already registered to a valid Red Hat support subscription via an alternative configuration management approach prior to the deployment of Kubespray, the successful RHEL `subscription-manager` status check will simply result in the RHEL subscription registration tasks being skipped.
|
||||
|
||||
## RHEL 8
|
||||
|
||||
RHEL 8 ships only with iptables-nft (ie without iptables-legacy)
|
||||
The only tested configuration for now is using Calico CNI
|
||||
You need to use K8S 1.17+ and to add `calico_iptables_backend: "NFT"` to your configuration
|
||||
|
||||
If you have containers that are using iptables in the host network namespace (`hostNetwork=true`),
|
||||
you need to ensure they are using iptables-nft.
|
||||
An example how k8s do the autodetection can be found [in this PR](https://github.com/kubernetes/kubernetes/pull/82966)
|
||||
20
docs/vars.md
20
docs/vars.md
@@ -22,7 +22,7 @@ Some variables of note include:
|
||||
* *ipip* - Enables Calico ipip encapsulation by default
|
||||
* *kube_network_plugin* - Sets k8s network plugin (default Calico)
|
||||
* *kube_proxy_mode* - Changes k8s proxy mode to iptables mode
|
||||
* *kube_version* - Specify a given Kubernetes hyperkube version
|
||||
* *kube_version* - Specify a given Kubernetes version
|
||||
* *searchdomains* - Array of DNS domains to search when looking up hostnames
|
||||
* *nameservers* - Array of nameservers to use for DNS lookup
|
||||
* *preinstall_selinux_state* - Set selinux state, permitted values are permissive and disabled.
|
||||
@@ -109,14 +109,9 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m
|
||||
* *docker_plugins* - This list can be used to define [Docker plugins](https://docs.docker.com/engine/extend/) to install.
|
||||
* *containerd_config* - Controls some parameters in containerd configuration file (usually /etc/containerd/config.toml).
|
||||
[Default config](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/container-engine/containerd/defaults/main.yml) can be overriden in inventory vars.
|
||||
* *http_proxy/https_proxy/no_proxy* - Proxy variables for deploying behind a
|
||||
* *http_proxy/https_proxy/no_proxy/no_proxy_exclude_workers/additional_no_proxy* - Proxy variables for deploying behind a
|
||||
proxy. Note that no_proxy defaults to all internal cluster IPs and hostnames
|
||||
that correspond to each node.
|
||||
* *kubelet_deployment_type* - Controls which platform to deploy kubelet on.
|
||||
Available options are ``host`` and ``docker``. ``docker`` mode
|
||||
is unlikely to work on newer releases. Starting with Kubernetes v1.7
|
||||
series, this now defaults to ``host``. Before v1.7, the default was Docker.
|
||||
This is because of cgroup [issues](https://github.com/kubernetes/kubernetes/issues/43704).
|
||||
* *kubelet_cgroup_driver* - Allows manual override of the
|
||||
cgroup-driver option for Kubelet. By default autodetection is used
|
||||
to match Docker configuration.
|
||||
@@ -207,13 +202,4 @@ in the form of dicts of key-value pairs of configuration parameters that will be
|
||||
|
||||
## App variables
|
||||
|
||||
* *helm_version* - Defaults to v3.x, set to a v2 version (e.g. `v2.16.1` ) to install Helm 2.x (will install Tiller!).
|
||||
Picking v3 for an existing cluster running Tiller will leave it alone. In that case you will have to remove Tiller manually afterwards.
|
||||
|
||||
## User accounts
|
||||
|
||||
The variable `kube_basic_auth` is false by default, but if set to true, a user with admin rights is created, named `kube`.
|
||||
The password can be viewed after deployment by looking at the file
|
||||
`{{ credentials_dir }}/kube_user.creds` (`credentials_dir` is set to `{{ inventory_dir }}/credentials` by default). This contains a randomly generated
|
||||
password. If you wish to set your own password, just precreate/modify this
|
||||
file yourself or change `kube_api_pwd` var.
|
||||
* *helm_version* - Only supports v3.x. Existing v2 installs (with Tiller) will not be modified and need to be removed manually.
|
||||
|
||||
@@ -68,6 +68,17 @@ loadbalancer_apiserver_healthcheck_port: 8081
|
||||
## If you need exclude all cluster nodes from proxy and other resources, add other resources here.
|
||||
# additional_no_proxy: ""
|
||||
|
||||
## If you need to disable proxying of os package repositories but are still behind an http_proxy set
|
||||
## skip_http_proxy_on_os_packages to true
|
||||
## This will cause kubespray not to set proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu
|
||||
## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt package will install from your source of wish
|
||||
# skip_http_proxy_on_os_packages: false
|
||||
|
||||
## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all
|
||||
## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the
|
||||
## no_proxy variable, set below to true:
|
||||
no_proxy_exclude_workers: false
|
||||
|
||||
## Certificate Management
|
||||
## This setting determines whether certs are generated via scripts.
|
||||
## Chose 'none' if you provide your own certificates.
|
||||
@@ -88,6 +99,16 @@ loadbalancer_apiserver_healthcheck_port: 8081
|
||||
# Set false if you want to deploy container engine manually.
|
||||
# deploy_container_engine: true
|
||||
|
||||
## Set Pypi repo and cert accordingly
|
||||
# pyrepo_index: https://pypi.example.com/simple
|
||||
# pyrepo_cert: /etc/ssl/certs/ca-certificates.crt
|
||||
## Red Hat Enterprise Linux subscription registration
|
||||
## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination
|
||||
## Update RHEL subscription purpose usage, role and SLA if necessary
|
||||
# rh_subscription_username: ""
|
||||
# rh_subscription_password: ""
|
||||
# rh_subscription_org_id: ""
|
||||
# rh_subscription_activation_key: ""
|
||||
# rh_subscription_usage: "Development"
|
||||
# rh_subscription_role: "Red Hat Enterprise Server"
|
||||
# rh_subscription_sla: "Self-Support"
|
||||
|
||||
## Check if access_ip responds to ping. Set false if your firewall blocks ICMP.
|
||||
# ping_access_ip: true
|
||||
|
||||
@@ -6,3 +6,4 @@
|
||||
# aws_ebs_csi_enable_volume_resizing: false
|
||||
# aws_ebs_csi_controller_replicas: 1
|
||||
# aws_ebs_csi_plugin_image_tag: latest
|
||||
# aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment'
|
||||
|
||||
@@ -30,6 +30,7 @@
|
||||
# azure_csi_subnet_name:
|
||||
# azure_csi_security_group_name:
|
||||
# azure_csi_use_instance_metadata:
|
||||
# azure_csi_tags: "Owner=owner,Team=team,Environment=environment'
|
||||
|
||||
## To enable Azure Disk CSI, uncomment below
|
||||
# azure_csi_enabled: true
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
---
|
||||
# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options
|
||||
|
||||
# Example: define registry mirror for docker hub
|
||||
|
||||
# containerd_config:
|
||||
# grpc:
|
||||
# max_recv_message_size: 16777216
|
||||
@@ -8,7 +10,9 @@
|
||||
# debug:
|
||||
# level: ""
|
||||
# registries:
|
||||
# "docker.io": "https://registry-1.docker.io"
|
||||
# "docker.io":
|
||||
# - "https://mirror.gcr.io"
|
||||
# - "https://registry-1.docker.io"
|
||||
# max_container_log_line_size: -1
|
||||
# metrics:
|
||||
# address: ""
|
||||
|
||||
@@ -20,8 +20,8 @@
|
||||
# external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP"
|
||||
# external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from"
|
||||
# external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from"
|
||||
# external_openstack_lbaas_use_octavia: true
|
||||
# external_openstack_lbaas_method: "ROUND_ROBIN"
|
||||
# external_openstack_lbaas_provider: "octavia"
|
||||
# external_openstack_lbaas_create_monitor: false
|
||||
# external_openstack_lbaas_monitor_delay: "1m"
|
||||
# external_openstack_lbaas_monitor_timeout: "30s"
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than
|
||||
## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check
|
||||
## etcd documentation for more information.
|
||||
# etcd_quota_backend_bytes: "2G"
|
||||
# etcd_quota_backend_bytes: "2147483648"
|
||||
|
||||
### ETCD: disable peer client cert authentication.
|
||||
# This affects ETCD_PEER_CLIENT_CERT_AUTH variable
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
# Kubernetes dashboard
|
||||
# RBAC required. see docs/getting-started.md for access details.
|
||||
dashboard_enabled: true
|
||||
# dashboard_enabled: true
|
||||
|
||||
# Helm deployment
|
||||
helm_enabled: false
|
||||
@@ -33,6 +33,10 @@ local_path_provisioner_enabled: false
|
||||
# Local volume provisioner deployment
|
||||
local_volume_provisioner_enabled: false
|
||||
# local_volume_provisioner_namespace: kube-system
|
||||
# local_volume_provisioner_nodelabels:
|
||||
# - kubernetes.io/hostname
|
||||
# - topology.kubernetes.io/region
|
||||
# - topology.kubernetes.io/zone
|
||||
# local_volume_provisioner_storage_classes:
|
||||
# local-storage:
|
||||
# host_dir: /mnt/disks
|
||||
@@ -95,7 +99,7 @@ ingress_publish_status_address: ""
|
||||
# ingress_nginx_secure_port: 443
|
||||
# ingress_nginx_configmap:
|
||||
# map-hash-bucket-size: "128"
|
||||
# ssl-protocols: "SSLv2"
|
||||
# ssl-protocols: "TLSv1.2 TLSv1.3"
|
||||
# ingress_nginx_configmap_tcp_services:
|
||||
# 9000: "default/example-go:8080"
|
||||
# ingress_nginx_configmap_udp_services:
|
||||
|
||||
@@ -14,16 +14,10 @@ kube_cert_dir: "{{ kube_config_dir }}/ssl"
|
||||
# This is where all of the bearer tokens will be stored
|
||||
kube_token_dir: "{{ kube_config_dir }}/tokens"
|
||||
|
||||
# This is where to save basic auth file
|
||||
kube_users_dir: "{{ kube_config_dir }}/users"
|
||||
|
||||
kube_api_anonymous_auth: true
|
||||
|
||||
## Change this to use another Kubernetes version, e.g. a current beta release
|
||||
kube_version: v1.18.9
|
||||
|
||||
# kubernetes image repo define
|
||||
kube_image_repo: "k8s.gcr.io"
|
||||
kube_version: v1.19.7
|
||||
|
||||
# Where the binaries will be downloaded.
|
||||
# Note: ensure that you've enough disk space (about 1G)
|
||||
@@ -41,19 +35,8 @@ kube_log_level: 2
|
||||
# Directory where credentials will be stored
|
||||
credentials_dir: "{{ inventory_dir }}/credentials"
|
||||
|
||||
# Users to create for basic auth in Kubernetes API via HTTP
|
||||
# Optionally add groups for user
|
||||
kube_api_pwd: "{{ lookup('password', credentials_dir + '/kube_user.creds length=15 chars=ascii_letters,digits') }}"
|
||||
kube_users:
|
||||
kube:
|
||||
pass: "{{kube_api_pwd}}"
|
||||
role: admin
|
||||
groups:
|
||||
- system:masters
|
||||
|
||||
## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
|
||||
## It is possible to activate / deactivate selected authentication methods (oidc, static token auth)
|
||||
# kube_oidc_auth: false
|
||||
# kube_basic_auth: false
|
||||
# kube_token_auth: false
|
||||
|
||||
|
||||
@@ -79,7 +62,7 @@ kube_users:
|
||||
# kube_webhook_authorization_url: https://...
|
||||
# kube_webhook_authorization_url_skip_tls_verify: false
|
||||
|
||||
# Choose network plugin (cilium, calico, contiv, weave or flannel. Use cni for generic cni plugin)
|
||||
# Choose network plugin (cilium, calico, weave or flannel. Use cni for generic cni plugin)
|
||||
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
|
||||
kube_network_plugin: calico
|
||||
|
||||
@@ -95,8 +78,20 @@ kube_service_addresses: 10.233.0.0/18
|
||||
kube_pods_subnet: 10.233.64.0/18
|
||||
|
||||
# internal network node size allocation (optional). This is the size allocated
|
||||
# to each node on your network. With these defaults you should have
|
||||
# room for 4096 nodes with 254 pods per node.
|
||||
# to each node for pod IP address allocation. Note that the number of pods per node is
|
||||
# also limited by the kubelet_max_pods variable which defaults to 110.
|
||||
#
|
||||
# Example:
|
||||
# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node:
|
||||
# - kube_pods_subnet: 10.233.64.0/18
|
||||
# - kube_network_node_prefix: 24
|
||||
# - kubelet_max_pods: 110
|
||||
#
|
||||
# Example:
|
||||
# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node:
|
||||
# - kube_pods_subnet: 10.233.64.0/18
|
||||
# - kube_network_node_prefix: 25
|
||||
# - kubelet_max_pods: 110
|
||||
kube_network_node_prefix: 24
|
||||
|
||||
# The port the API Server will be listening on.
|
||||
@@ -209,12 +204,6 @@ kata_containers_enabled: false
|
||||
# containerd_untrusted_runtime_engine: ''
|
||||
# containerd_untrusted_runtime_root: ''
|
||||
|
||||
## Settings for containerized control plane (kubelet/secrets)
|
||||
kubelet_deployment_type: host
|
||||
helm_deployment_type: host
|
||||
|
||||
# Enable kubeadm experimental control plane
|
||||
kubeadm_control_plane: false
|
||||
kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}"
|
||||
|
||||
# K8s image pull policy (imagePullPolicy)
|
||||
@@ -252,10 +241,10 @@ podsecuritypolicy_enabled: false
|
||||
## Optionally reserve resources for OS system daemons.
|
||||
# system_reserved: true
|
||||
## Uncomment to override default values
|
||||
# system_memory_reserved: 512M
|
||||
# system_memory_reserved: 512Mi
|
||||
# system_cpu_reserved: 500m
|
||||
## Reservation for master hosts
|
||||
# system_master_memory_reserved: 256M
|
||||
# system_master_memory_reserved: 256Mi
|
||||
# system_master_cpu_reserved: 250m
|
||||
|
||||
# An alternative flexvolume plugin directory
|
||||
@@ -321,3 +310,5 @@ persistent_volumes_enabled: false
|
||||
|
||||
## Amount of time to retain events. (default 1h0m0s)
|
||||
event_ttl_duration: "1h0m0s"
|
||||
## Force regeneration of kubernetes control plane certificates without the need of bumping the cluster version
|
||||
force_certificate_regeneration: false
|
||||
|
||||
@@ -8,6 +8,9 @@
|
||||
# Enables Internet connectivity from containers
|
||||
# nat_outgoing: true
|
||||
|
||||
# Enables Calico CNI "host-local" IPAM plugin
|
||||
# calico_ipam_host_local: true
|
||||
|
||||
# add default ippool name
|
||||
# calico_pool_name: "default-pool"
|
||||
|
||||
@@ -20,6 +23,10 @@
|
||||
# Global as_num (/calico/bgp/v1/global/as_num)
|
||||
# global_as_num: "64512"
|
||||
|
||||
# If doing peering with node-assigned asn where the globas does not match your nodes, you want this
|
||||
# to be true. All other cases, false.
|
||||
# calico_no_global_as_num: false
|
||||
|
||||
# You can set MTU value here. If left undefined or empty, it will
|
||||
# not be specified in calico CNI config, so Calico will use built-in
|
||||
# defaults. The value should be a number, not a string.
|
||||
@@ -35,6 +42,11 @@
|
||||
# Advertise Cluster IPs
|
||||
# calico_advertise_cluster_ips: true
|
||||
|
||||
# Advertise Service External IPs
|
||||
# calico_advertise_service_external_ips:
|
||||
# - x.x.x.x/24
|
||||
# - y.y.y.y/32
|
||||
|
||||
# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
|
||||
# calico_datastore: "etcd"
|
||||
|
||||
@@ -65,6 +77,10 @@
|
||||
# set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never"
|
||||
# calico_vxlan_mode: 'Never'
|
||||
|
||||
# set VXLAN port and VNI
|
||||
# calico_vxlan_vni: 4096
|
||||
# calico_vxlan_port: 4789
|
||||
|
||||
# If you want to use non default IP_AUTODETECTION_METHOD for calico node set this option to one of:
|
||||
# * can-reach=DESTINATION
|
||||
# * interface=INTERFACE-REGEX
|
||||
|
||||
@@ -1,20 +0,0 @@
|
||||
# see roles/network_plugin/contiv/defaults/main.yml
|
||||
|
||||
# Forwarding mode: bridge or routing
|
||||
# contiv_fwd_mode: routing
|
||||
|
||||
## With contiv, L3 BGP mode is possible by setting contiv_fwd_mode to "routing".
|
||||
## In this case, you may need to peer with an uplink
|
||||
## NB: The hostvars must contain a key "contiv" of which value is a dict containing "router_ip", "as"(defaults to contiv_global_as), "neighbor_as" (defaults to contiv_global_neighbor_as), "neighbor"
|
||||
# contiv_peer_with_uplink_leaf: false
|
||||
# contiv_global_as: "65002"
|
||||
# contiv_global_neighbor_as: "500"
|
||||
|
||||
# Fabric mode: aci, aci-opflex or default
|
||||
# contiv_fabric_mode: default
|
||||
|
||||
# Default netmode: vxlan or vlan
|
||||
# contiv_net_mode: vxlan
|
||||
|
||||
# Dataplane interface
|
||||
# contiv_vlan_interface: ""
|
||||
@@ -53,6 +53,9 @@
|
||||
# only with Weave IPAM (default).
|
||||
# weave_no_masq_local: true
|
||||
|
||||
# set to nft to use nftables backend for iptables (default is iptables)
|
||||
# weave_iptables_backend: iptables
|
||||
|
||||
# Extra variables that passing to launch.sh, useful for enabling seed mode, see
|
||||
# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/
|
||||
# weave_extra_args: ~
|
||||
|
||||
68
inventory/sample/group_vars/k8s-cluster/offline.yml
Normal file
68
inventory/sample/group_vars/k8s-cluster/offline.yml
Normal file
@@ -0,0 +1,68 @@
|
||||
---
|
||||
## Global Offline settings
|
||||
### Private Container Image Registry
|
||||
# registry_host: "myprivateregisry.com"
|
||||
# files_repo: "http://myprivatehttpd"
|
||||
### If using CentOS, RedHat or Fedora
|
||||
# yum_repo: "http://myinternalyumrepo"
|
||||
### If using Debian
|
||||
# debian_repo: "http://myinternaldebianrepo"
|
||||
### If using Ubuntu
|
||||
# ubuntu_repo: "http://myinternalubunturepo"
|
||||
|
||||
## Container Registry overrides
|
||||
# kube_image_repo: "{{ registry_host }}"
|
||||
# gcr_image_repo: "{{ registry_host }}"
|
||||
# docker_image_repo: "{{ registry_host }}"
|
||||
# quay_image_repo: "{{ registry_host }}"
|
||||
|
||||
## Kubernetes components
|
||||
# kubeadm_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubeadm"
|
||||
# kubectl_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubectl"
|
||||
# kubelet_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubelet"
|
||||
|
||||
## CNI Plugins
|
||||
# cni_download_url: "{{ files_repo }}/kubernetes/cni/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
|
||||
|
||||
## cri-tools
|
||||
# crictl_download_url: "{{ files_repo }}/kubernetes/cri-tools/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
|
||||
|
||||
## [Optional] etcd: only if you **DON'T** use etcd_deployment=host
|
||||
# etcd_download_url: "{{ files_repo }}/kubernetes/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz"
|
||||
|
||||
# [Optional] Calico: If using Calico network plugin
|
||||
# calicoctl_download_url: "{{ files_repo }}/kubernetes/calico/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
|
||||
|
||||
## CentOS/Redhat
|
||||
### Docker
|
||||
# docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch"
|
||||
# docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
|
||||
### Containerd
|
||||
# extras_rh_repo_base_url: "{{ yum_repo }}/centos/$releasever/extras/$basearch"
|
||||
# extras_rh_repo_gpgkey: "{{ yum_repo }}/containerd/gpg"
|
||||
|
||||
## Fedora
|
||||
### Docker
|
||||
# docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}"
|
||||
# docker_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
|
||||
### Containerd
|
||||
# containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd"
|
||||
# containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
|
||||
|
||||
## Debian
|
||||
### Docker
|
||||
# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce"
|
||||
# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg"
|
||||
### Containerd
|
||||
# containerd_debian_repo_base_url: "{{ ubuntu_repo }}/containerd"
|
||||
# containerd_debian_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg"
|
||||
# containerd_debian_repo_repokey: 'YOURREPOKEY'
|
||||
|
||||
## Ubuntu
|
||||
### Docker
|
||||
# docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce"
|
||||
# docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg"
|
||||
### Containerd
|
||||
# containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd"
|
||||
# containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg"
|
||||
# containerd_ubuntu_repo_repokey: 'YOURREPOKEY'
|
||||
@@ -15,6 +15,7 @@
|
||||
[kube-master]
|
||||
# node1
|
||||
# node2
|
||||
# node3
|
||||
|
||||
[etcd]
|
||||
# node1
|
||||
|
||||
@@ -2,21 +2,6 @@
|
||||
- name: Check ansible version
|
||||
import_playbook: ansible_version.yml
|
||||
|
||||
- hosts: all
|
||||
gather_facts: false
|
||||
tags: always
|
||||
tasks:
|
||||
- name: "Set up proxy environment"
|
||||
set_fact:
|
||||
proxy_env:
|
||||
http_proxy: "{{ http_proxy | default ('') }}"
|
||||
HTTP_PROXY: "{{ http_proxy | default ('') }}"
|
||||
https_proxy: "{{ https_proxy | default ('') }}"
|
||||
HTTPS_PROXY: "{{ https_proxy | default ('') }}"
|
||||
no_proxy: "{{ no_proxy | default ('') }}"
|
||||
NO_PROXY: "{{ no_proxy | default ('') }}"
|
||||
no_log: true
|
||||
|
||||
- hosts: "{{ node | default('etcd:k8s-cluster:calico-rr') }}"
|
||||
gather_facts: no
|
||||
vars_prompt:
|
||||
@@ -31,7 +16,7 @@
|
||||
msg: "Delete nodes confirmation failed"
|
||||
when: delete_nodes_confirmation != "yes"
|
||||
|
||||
- hosts: kube-master
|
||||
- hosts: kube-master[0]
|
||||
gather_facts: no
|
||||
roles:
|
||||
- { role: kubespray-defaults }
|
||||
@@ -41,15 +26,15 @@
|
||||
- hosts: "{{ node | default('kube-node') }}"
|
||||
gather_facts: no
|
||||
roles:
|
||||
- { role: kubespray-defaults }
|
||||
- { role: bootstrap-os, tags: bootstrap-os }
|
||||
- { role: remove-node/remove-etcd-node}
|
||||
- { role: reset, tags: reset, when: reset_nodes|default(True) }
|
||||
- { role: kubespray-defaults, when: reset_nodes|default(True)|bool }
|
||||
- { role: bootstrap-os, tags: bootstrap-os, when: reset_nodes|default(True)|bool }
|
||||
- { role: remove-node/remove-etcd-node }
|
||||
- { role: reset, tags: reset, when: reset_nodes|default(True)|bool }
|
||||
|
||||
# Currently cannot remove first master or etcd
|
||||
- hosts: "{{ node | default('kube-master[1:]:etcd[1:]') }}"
|
||||
gather_facts: no
|
||||
roles:
|
||||
- { role: kubespray-defaults }
|
||||
- { role: bootstrap-os, tags: bootstrap-os}
|
||||
- { role: kubespray-defaults, when: reset_nodes|default(True)|bool }
|
||||
- { role: bootstrap-os, tags: bootstrap-os, when: reset_nodes|default(True)|bool }
|
||||
- { role: remove-node/post-remove, tags: post-remove }
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
ansible==2.9.6
|
||||
ansible==2.9.16
|
||||
jinja2==2.11.1
|
||||
netaddr==0.7.19
|
||||
pbr==5.4.4
|
||||
|
||||
15
reset.yml
15
reset.yml
@@ -2,21 +2,6 @@
|
||||
- name: Check ansible version
|
||||
import_playbook: ansible_version.yml
|
||||
|
||||
- hosts: all
|
||||
gather_facts: false
|
||||
tags: always
|
||||
tasks:
|
||||
- name: "Set up proxy environment"
|
||||
set_fact:
|
||||
proxy_env:
|
||||
http_proxy: "{{ http_proxy | default ('') }}"
|
||||
HTTP_PROXY: "{{ http_proxy | default ('') }}"
|
||||
https_proxy: "{{ https_proxy | default ('') }}"
|
||||
HTTPS_PROXY: "{{ https_proxy | default ('') }}"
|
||||
no_proxy: "{{ no_proxy | default ('') }}"
|
||||
NO_PROXY: "{{ no_proxy | default ('') }}"
|
||||
no_log: true
|
||||
|
||||
- hosts: bastion[0]
|
||||
gather_facts: False
|
||||
roles:
|
||||
|
||||
@@ -3,15 +3,16 @@
|
||||
Bootstrap an Ansible host to be able to run Ansible modules.
|
||||
|
||||
This role will:
|
||||
* configure the package manager (if applicable) to be able to fetch packages
|
||||
* install Python
|
||||
* install the necessary packages to use Ansible's package manager modules
|
||||
* set the hostname of the host to `{{ inventory_hostname }}` when requested
|
||||
|
||||
* configure the package manager (if applicable) to be able to fetch packages
|
||||
* install Python
|
||||
* install the necessary packages to use Ansible's package manager modules
|
||||
* set the hostname of the host to `{{ inventory_hostname }}` when requested
|
||||
|
||||
## Requirements
|
||||
|
||||
A host running an operating system that is supported by Kubespray.
|
||||
See https://github.com/kubernetes-sigs/kubespray#supported-linux-distributions for a current list.
|
||||
See [Supported Linux Distributions](https://github.com/kubernetes-sigs/kubespray#supported-linux-distributions) for a current list.
|
||||
|
||||
SSH access to the host.
|
||||
|
||||
@@ -21,10 +22,10 @@ Variables are listed with their default values, if applicable.
|
||||
|
||||
### General variables
|
||||
|
||||
* `http_proxy`/`https_proxy`
|
||||
* `http_proxy`/`https_proxy`
|
||||
The role will configure the package manager (if applicable) to download packages via a proxy.
|
||||
|
||||
* `override_system_hostname: true`
|
||||
* `override_system_hostname: true`
|
||||
The role will set the hostname of the machine to the name it has according to Ansible's inventory (the variable `{{ inventory_hostname }}`).
|
||||
|
||||
### Per distribution variables
|
||||
|
||||
@@ -23,3 +23,5 @@ fedora_coreos_packages:
|
||||
override_system_hostname: true
|
||||
|
||||
is_fedora_coreos: false
|
||||
|
||||
skip_http_proxy_on_os_packages: false
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
set -e
|
||||
|
||||
BINDIR="/opt/bin"
|
||||
PYPY_VERSION=7.3.1
|
||||
PYPY_VERSION=7.3.2
|
||||
PYPI_URL="https://downloads.python.org/pypy/pypy3.6-v${PYPY_VERSION}-linux64.tar.bz2"
|
||||
PYPI_HASH=f67cf1664a336a3e939b58b3cabfe47d893356bdc01f2e17bc912aaa6605db12
|
||||
PYPI_HASH=d7a91f179076aaa28115ffc0a81e46c6a787785b2bc995c926fe3b02f0e9ad83
|
||||
|
||||
mkdir -p $BINDIR
|
||||
|
||||
|
||||
4
roles/bootstrap-os/handlers/main.yml
Normal file
4
roles/bootstrap-os/handlers/main.yml
Normal file
@@ -0,0 +1,4 @@
|
||||
---
|
||||
- name: RHEL auto-attach subscription
|
||||
command: /sbin/subscription-manager attach --auto
|
||||
become: true
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user