Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2025-12-14 13:54:37 +03:00)
Compare commits
151 Commits
SHA1:
10679ebb5d 8775dcf92f bd382a9c39 ffacfe3ede 7dcc22fe8c 47ed2b115d
b9fc4ec43e 7bd757da5f 9dc2092042 c7cfd32c40 a4b0656d9b c33e4d7bb7
24b82917d1 9696936b59 aeca9304f4 8fef156e8f 8497528240 ebd71f6ad7
c677438189 d646053c0e c9a7ae1cae e84c1004df b19b727fe7 0932318b85
e573a2f6d4 52c1826423 e1881fae02 5ed85094c2 bf29ea55cf cafe4f1352
a9ee1c4167 a8c1bccdd5 71cf553aa8 a894a5e29b 9bc7492ff2 77bda0df1c
4c37399c75 cd69283184 cf3b3ca6fd 1955943d4a 3b68d63643 d21bfb84ad
2a7c9d27b2 9c610ee11d 7295d13d60 2fbbb70baa b5ce69cf3c 1c5f657f97
9613ed8782 b142995808 36e5d742dc b9e3861385 f2bb3aba1e 4243003c94
050bd0527f fe32de94b9 d2383d27a9 788190beca 13aa32278a 38ce02c610
9312ae7c6e 1d86919883 78c1775661 5d00b851ce f8b93fa88a 0405af1107
872e173887 b42757d330 a4d8d15a0e f8f197e26b 4f85b75087 8895e38060
9a896957d9 37e004164b 77069354cf 2aafab6c19 35aaf97216 25cb90bc2d
3311e0a296 eb31653d66 180df831ba 2fa64f9fd6 a1521dc16e bf31a3a872
4a8fd94a5f e214bd0e1b 4ad89ef8f1 7a66be8254 db696785d5 dfec133273
41605b4135 475abcc3a8 3a7d84e014 ad3f84df98 79e742c03b d79ada931d
b2f6abe4ab c5dac1cdf6 89a0f515c7 d296adcd65 141064c443 54859cb814
0f0991b145 658d62be16 0139bfdb71 efeac70e40 b4db077e6a 280e4e3b57
a962fa2357 775851b00c f8fadf53cd ce13699dfa fc5937e948 729e2c565b
26ed50f04a 2b80d053f3 f5ee8b71ff 4c76feb574 18d84db41c 08a571b4a1
5ebd305d17 edc73bc3c8 b7fa2d7b87 7771ac6074 f25b6fce1c d7b79395c7
ce18b0f22d 2d8f60000c 0b102287d1 d325fd6af7 e949b8a1e8 ab6e284180
7421b6e180 a2f03c559a 3ced391fab ea7dcd46d7 94e33bdbbf 29f833e9a4
8c32be5feb 0ba2e655f4 78189186e5 96e875cd50 808524bed6 75e00420ec
8be5604da4 02624554ae 9d1e9a6a78 861d5b763d 4013c48acb f264426646
862fd2c5c4
@@ -7,34 +7,32 @@ skip_list:

 # These rules are intentionally skipped:
 #
-# [E204]: "Lines should be no longer than 160 chars"
-# This could be re-enabled with a major rewrite in the future.
-# For now, there's not enough value gain from strictly limiting line length.
-# (Disabled in May 2019)
-- '204'
-
-# [E701]: "meta/main.yml should contain relevant info"
-# Roles in Kubespray are not intended to be used/imported by Ansible Galaxy.
-# While it can be useful to have these metadata available, they are also available in the existing documentation.
-# (Disabled in May 2019)
-- '701'
-
 # [role-name] "meta/main.yml" Role name role-name does not match ``^+$`` pattern
 # Meta roles in Kubespray don't need proper names
 # (Disabled in June 2021)
 - 'role-name'

-- 'experimental'
 # [var-naming] "defaults/main.yml" File defines variable 'apiVersion' that violates variable naming standards
 # In Kubespray we use variables that use camelCase to match their k8s counterparts
 # (Disabled in June 2021)
 - 'var-naming'
-- 'var-spacing'

 # [fqcn-builtins]
 # Roles in kubespray don't need fully qualified collection names
 # (Disabled in Feb 2023)
 - 'fqcn-builtins'
+
+# We use template in names
+- 'name[template]'
+
+# No changed-when on commands
+# (Disabled in June 2023 after ansible upgrade; FIXME)
+- 'no-changed-when'
+
+# Disable run-once check with free strategy
+# (Disabled in June 2023 after ansible upgrade; FIXME)
+- 'run-once[task]'
 exclude_paths:
 # Generated files
 - tests/files/custom_cni/cilium.yaml
+- venv
.ansible-lint-ignore (new file, 8 lines)
@@ -0,0 +1,8 @@
+# This file contains ignores rule violations for ansible-lint
+inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml jinja[spacing]
+roles/kubernetes/control-plane/defaults/main/kube-proxy.yml jinja[spacing]
+roles/kubernetes/control-plane/defaults/main/main.yml jinja[spacing]
+roles/kubernetes/kubeadm/defaults/main.yml jinja[spacing]
+roles/kubernetes/node/defaults/main.yml jinja[spacing]
+roles/kubernetes/preinstall/defaults/main.yml jinja[spacing]
+roles/kubespray-defaults/defaults/main.yaml jinja[spacing]
.gitignore (vendored, 2 lines changed)
@@ -11,7 +11,7 @@ contrib/offline/offline-files.tar.gz
 .cache
 *.bak
 *.tfstate
-*.tfstate.backup
+*.tfstate*backup
 *.lock.hcl
 .terraform/
 contrib/terraform/aws/credentials.tfvars
@@ -9,7 +9,7 @@ stages:
 - deploy-special

 variables:
-KUBESPRAY_VERSION: v2.21.0
+KUBESPRAY_VERSION: v2.22.1
 FAILFASTCI_NAMESPACE: 'kargo-ci'
 GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
 ANSIBLE_FORCE_COLOR: "true"
@@ -33,16 +33,12 @@ variables:
 MITOGEN_ENABLE: "false"
 ANSIBLE_LOG_LEVEL: "-vv"
 RECOVER_CONTROL_PLANE_TEST: "false"
-RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
+RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"
 TERRAFORM_VERSION: 1.3.7
-ANSIBLE_MAJOR_VERSION: "2.11"
 PIPELINE_IMAGE: "$CI_REGISTRY_IMAGE/pipeline:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}"

 before_script:
 - ./tests/scripts/rebase.sh
-- update-alternatives --install /usr/bin/python python /usr/bin/python3 1
-- python -m pip uninstall -y ansible ansible-base ansible-core
-- python -m pip install -r tests/requirements-${ANSIBLE_MAJOR_VERSION}.txt
 - mkdir -p /.ssh

 .job: &job
@@ -57,6 +53,7 @@ before_script:
 .testcases: &testcases
 <<: *job
 retry: 1
+interruptible: true
 before_script:
 - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
 - ./tests/scripts/rebase.sh
@@ -67,10 +67,6 @@ tox-inventory-builder:
 extends: .job
 before_script:
 - ./tests/scripts/rebase.sh
-- apt-get update && apt-get install -y python3-pip
-- update-alternatives --install /usr/bin/python python /usr/bin/python3 10
-- python -m pip uninstall -y ansible ansible-base ansible-core
-- python -m pip install -r tests/requirements.txt
 script:
 - pip3 install tox
 - cd contrib/inventory_builder && tox
@@ -9,10 +9,6 @@
 stage: deploy-part1
 before_script:
 - tests/scripts/rebase.sh
-- apt-get update && apt-get install -y python3-pip
-- update-alternatives --install /usr/bin/python python /usr/bin/python3 10
-- python -m pip uninstall -y ansible ansible-base ansible-core
-- python -m pip install -r tests/requirements.txt
 - ./tests/scripts/vagrant_clean.sh
 script:
 - ./tests/scripts/molecule_run.sh
@@ -58,6 +54,7 @@ molecule_cri-o:
 stage: deploy-part2
 script:
 - ./tests/scripts/molecule_run.sh -i container-engine/cri-o
+allow_failure: true
 when: on_success

 # Stage 3 container engines don't get as much attention so allow them to fail
@@ -23,6 +23,14 @@
 allow_failure: true
 extends: .packet

+packet_cleanup_old:
+stage: deploy-part1
+extends: .packet_periodic
+script:
+- cd tests
+- make cleanup-packet
+after_script: []
+
 # The ubuntu20-calico-aio jobs are meant as early stages to prevent running the full CI if something is horribly broken
 packet_ubuntu20-calico-aio:
 stage: deploy-part1
@@ -31,21 +39,8 @@ packet_ubuntu20-calico-aio:
 variables:
 RESET_CHECK: "true"

-packet_ubuntu20-calico-aio-ansible-2_11:
-stage: deploy-part1
-extends: .packet_periodic
-when: on_success
-variables:
-ANSIBLE_MAJOR_VERSION: "2.11"
-RESET_CHECK: "true"
-
 # ### PR JOBS PART2

-packet_ubuntu18-aio-docker:
-stage: deploy-part2
-extends: .packet_pr
-when: on_success
-
 packet_ubuntu20-aio-docker:
 stage: deploy-part2
 extends: .packet_pr
@@ -56,11 +51,6 @@ packet_ubuntu20-calico-aio-hardening:
 extends: .packet_pr
 when: on_success

-packet_ubuntu18-calico-aio:
-stage: deploy-part2
-extends: .packet_pr
-when: on_success
-
 packet_ubuntu22-aio-docker:
 stage: deploy-part2
 extends: .packet_pr
@@ -80,18 +70,19 @@ packet_almalinux8-crio:
 extends: .packet_pr
 stage: deploy-part2
 when: on_success
+allow_failure: true

-packet_ubuntu18-crio:
+packet_ubuntu20-crio:
 extends: .packet_pr
 stage: deploy-part2
 when: manual

-packet_fedora35-crio:
+packet_fedora37-crio:
 extends: .packet_pr
 stage: deploy-part2
 when: manual

-packet_ubuntu16-flannel-ha:
+packet_ubuntu20-flannel-ha:
 stage: deploy-part2
 extends: .packet_pr
 when: manual
@@ -121,6 +112,21 @@ packet_debian11-docker:
 extends: .packet_pr
 when: on_success

+packet_debian12-calico:
+stage: deploy-part2
+extends: .packet_pr
+when: on_success
+
+packet_debian12-docker:
+stage: deploy-part2
+extends: .packet_pr
+when: on_success
+
+packet_debian12-cilium:
+stage: deploy-part2
+extends: .packet_periodic
+when: on_success
+
 packet_centos7-calico-ha-once-localhost:
 stage: deploy-part2
 extends: .packet_pr
@@ -133,7 +139,7 @@ packet_centos7-calico-ha-once-localhost:

 packet_almalinux8-kube-ovn:
 stage: deploy-part2
-extends: .packet_periodic
+extends: .packet_pr
 when: on_success

 packet_almalinux8-calico:
@@ -163,10 +169,11 @@ packet_almalinux8-docker:
 extends: .packet_pr
 when: on_success

-packet_fedora36-docker-weave:
+packet_fedora38-docker-weave:
 stage: deploy-part2
 extends: .packet_pr
 when: on_success
+allow_failure: true

 packet_opensuse-docker-cilium:
 stage: deploy-part2
@@ -175,22 +182,17 @@ packet_opensuse-docker-cilium:

 # ### MANUAL JOBS

-packet_ubuntu16-docker-weave-sep:
+packet_ubuntu20-docker-weave-sep:
 stage: deploy-part2
 extends: .packet_pr
 when: manual

-packet_ubuntu18-cilium-sep:
+packet_ubuntu20-cilium-sep:
 stage: deploy-special
 extends: .packet_pr
 when: manual

-packet_ubuntu18-flannel-ha:
-stage: deploy-part2
-extends: .packet_pr
-when: manual
-
-packet_ubuntu18-flannel-ha-once:
+packet_ubuntu20-flannel-ha-once:
 stage: deploy-part2
 extends: .packet_pr
 when: manual
@@ -216,19 +218,19 @@ packet_centos7-multus-calico:
 extends: .packet_pr
 when: manual

-packet_fedora36-docker-calico:
+packet_fedora38-docker-calico:
 stage: deploy-part2
 extends: .packet_periodic
 when: on_success
 variables:
 RESET_CHECK: "true"

-packet_fedora35-calico-selinux:
+packet_fedora37-calico-selinux:
 stage: deploy-part2
 extends: .packet_periodic
 when: on_success

-packet_fedora35-calico-swap-selinux:
+packet_fedora37-calico-swap-selinux:
 stage: deploy-part2
 extends: .packet_pr
 when: manual
@@ -243,7 +245,7 @@ packet_almalinux8-calico-nodelocaldns-secondary:
 extends: .packet_pr
 when: manual

-packet_fedora36-kube-ovn:
+packet_fedora38-kube-ovn:
 stage: deploy-part2
 extends: .packet_periodic
 when: on_success
@@ -308,18 +310,18 @@ packet_debian11-calico-upgrade-once:
 variables:
 UPGRADE_TEST: graceful

-packet_ubuntu18-calico-ha-recover:
+packet_ubuntu20-calico-ha-recover:
 stage: deploy-part3
 extends: .packet_periodic
 when: on_success
 variables:
 RECOVER_CONTROL_PLANE_TEST: "true"
-RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
+RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"

-packet_ubuntu18-calico-ha-recover-noquorum:
+packet_ubuntu20-calico-ha-recover-noquorum:
 stage: deploy-part3
 extends: .packet_periodic
 when: on_success
 variables:
 RECOVER_CONTROL_PLANE_TEST: "true"
-RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube_control_plane[1:]"
+RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:]:kube_control_plane[1:]"
@@ -100,21 +100,13 @@ tf-validate-upcloud:
 PROVIDER: upcloud
 CLUSTER: $CI_COMMIT_REF_NAME

-# tf-packet-ubuntu16-default:
-# extends: .terraform_apply
-# variables:
-# TF_VERSION: $TERRAFORM_VERSION
-# PROVIDER: packet
-# CLUSTER: $CI_COMMIT_REF_NAME
-# TF_VAR_number_of_k8s_masters: "1"
-# TF_VAR_number_of_k8s_nodes: "1"
-# TF_VAR_plan_k8s_masters: t1.small.x86
-# TF_VAR_plan_k8s_nodes: t1.small.x86
-# TF_VAR_metro: ny
-# TF_VAR_public_key_path: ""
-# TF_VAR_operating_system: ubuntu_16_04
-#
-# tf-packet-ubuntu18-default:
+tf-validate-nifcloud:
+extends: .terraform_validate
+variables:
+TF_VERSION: $TERRAFORM_VERSION
+PROVIDER: nifcloud
+# tf-packet-ubuntu20-default:
 # extends: .terraform_apply
 # variables:
 # TF_VERSION: $TERRAFORM_VERSION
@@ -126,7 +118,7 @@ tf-validate-upcloud:
 # TF_VAR_plan_k8s_nodes: t1.small.x86
 # TF_VAR_metro: am
 # TF_VAR_public_key_path: ""
-# TF_VAR_operating_system: ubuntu_18_04
+# TF_VAR_operating_system: ubuntu_20_04

 .ovh_variables: &ovh_variables
 OS_AUTH_URL: https://auth.cloud.ovh.net/v3
@@ -164,7 +156,7 @@ tf-elastx_cleanup:
 script:
 - ./scripts/openstack-cleanup/main.py

-tf-elastx_ubuntu18-calico:
+tf-elastx_ubuntu20-calico:
 extends: .terraform_apply
 stage: deploy-part3
 when: on_success
@@ -194,7 +186,7 @@ tf-elastx_ubuntu18-calico:
 TF_VAR_az_list_node: '["sto1"]'
 TF_VAR_flavor_k8s_master: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
 TF_VAR_flavor_k8s_node: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
-TF_VAR_image: ubuntu-18.04-server-latest
+TF_VAR_image: ubuntu-20.04-server-latest
 TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

 # OVH voucher expired, commenting job until things are sorted out
@@ -211,7 +203,7 @@ tf-elastx_ubuntu18-calico:
 # script:
 # - ./scripts/openstack-cleanup/main.py

-# tf-ovh_ubuntu18-calico:
+# tf-ovh_ubuntu20-calico:
 # extends: .terraform_apply
 # when: on_success
 # environment: ovh
@@ -237,5 +229,5 @@ tf-elastx_ubuntu18-calico:
 # TF_VAR_network_name: "Ext-Net"
 # TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
 # TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
-# TF_VAR_image: "Ubuntu 18.04"
+# TF_VAR_image: "Ubuntu 20.04"
 # TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
@@ -13,10 +13,6 @@
 image: $PIPELINE_IMAGE
 services: []
 before_script:
-- apt-get update && apt-get install -y python3-pip
-- update-alternatives --install /usr/bin/python python /usr/bin/python3 10
-- python -m pip uninstall -y ansible ansible-base ansible-core
-- python -m pip install -r tests/requirements.txt
 - ./tests/scripts/vagrant_clean.sh
 script:
 - ./tests/scripts/testcases_run.sh
@@ -24,17 +20,12 @@
 - chronic ./tests/scripts/testcases_cleanup.sh
 allow_failure: true

-vagrant_ubuntu18-calico-dual-stack:
+vagrant_ubuntu20-calico-dual-stack:
 stage: deploy-part2
 extends: .vagrant
 when: on_success

-vagrant_ubuntu18-flannel:
-stage: deploy-part2
-extends: .vagrant
-when: on_success
-
-vagrant_ubuntu18-weave-medium:
+vagrant_ubuntu20-weave-medium:
 stage: deploy-part2
 extends: .vagrant
 when: manual
@@ -50,18 +41,18 @@ vagrant_ubuntu20-flannel-collection:
 extends: .vagrant
 when: on_success

-vagrant_ubuntu16-kube-router-sep:
+vagrant_ubuntu20-kube-router-sep:
 stage: deploy-part2
 extends: .vagrant
 when: manual

 # Service proxy test fails connectivity testing
-vagrant_ubuntu16-kube-router-svc-proxy:
+vagrant_ubuntu20-kube-router-svc-proxy:
 stage: deploy-part2
 extends: .vagrant
 when: manual

-vagrant_fedora35-kube-router:
+vagrant_fedora37-kube-router:
 stage: deploy-part2
 extends: .vagrant
 when: on_success
CHANGELOG.md (new file, 1 line)
@@ -0,0 +1 @@
+# See our release notes on [GitHub](https://github.com/kubernetes-sigs/kubespray/releases)
@@ -12,6 +12,7 @@ To install development dependencies you can set up a python virtual env with the
 virtualenv venv
 source venv/bin/activate
 pip install -r tests/requirements.txt
+ansible-galaxy install -r tests/requirements.yml
 ```

 #### Linting
Dockerfile (11 lines changed)
@@ -2,7 +2,7 @@
 FROM ubuntu:jammy-20230308
 # Some tools like yamllint need this
 # Pip needs this as well at the moment to install ansible
 # (and potentially other packages)
 # See: https://github.com/pypa/pip/issues/10219
 ENV LANG=C.UTF-8 \
 DEBIAN_FRONTEND=noninteractive \
@@ -28,14 +28,15 @@ RUN apt update -q \
 rsync \
 openssh-client \
 && pip install --no-compile --no-cache-dir \
-ansible==5.7.1 \
-ansible-core==2.12.5 \
-cryptography==3.4.8 \
+ansible==7.6.0 \
+ansible-core==2.14.6 \
+cryptography==41.0.1 \
 jinja2==3.1.2 \
 netaddr==0.8.0 \
 jmespath==1.0.1 \
-MarkupSafe==2.1.2 \
+MarkupSafe==2.1.3 \
 ruamel.yaml==0.17.21 \
+passlib==1.7.4 \
 && KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
 && curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \
 && echo $(curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \
@@ -23,6 +23,7 @@ aliases:
 - cyclinder
 - mzaian
 - mrfreezeex
+- erikjiang
 kubespray-emeritus_approvers:
 - riverzhang
 - atoms
README.md (52 lines changed)
@@ -34,7 +34,7 @@ CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inv
 cat inventory/mycluster/group_vars/all/all.yml
 cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml

-# Clean up old Kubernete cluster with Ansible Playbook - run the playbook as root
+# Clean up old Kubernetes cluster with Ansible Playbook - run the playbook as root
 # The option `--become` is required, as for example cleaning up SSL keys in /etc/,
 # uninstalling old packages and interacting with various systemd daemons.
 # Without --become the playbook will fail to run!
@@ -75,11 +75,11 @@ You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mou
 to access the inventory and SSH key in the container, like this:

 ```ShellSession
-git checkout v2.22.0
-docker pull quay.io/kubespray/kubespray:v2.22.0
+git checkout v2.22.1
+docker pull quay.io/kubespray/kubespray:v2.22.1
 docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
 --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
-quay.io/kubespray/kubespray:v2.22.0 bash
+quay.io/kubespray/kubespray:v2.22.1 bash
 # Inside the container you may now run the kubespray playbooks:
 ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
 ```
@@ -142,10 +142,10 @@ vagrant up
 ## Supported Linux Distributions

 - **Flatcar Container Linux by Kinvolk**
-- **Debian** Bullseye, Buster
-- **Ubuntu** 16.04, 18.04, 20.04, 22.04
+- **Debian** Bookworm, Bullseye, Buster
+- **Ubuntu** 20.04, 22.04
 - **CentOS/RHEL** 7, [8, 9](docs/centos.md#centos-8)
-- **Fedora** 35, 36
+- **Fedora** 37, 38
 - **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
 - **openSUSE** Leap 15.x/Tumbleweed
 - **Oracle Linux** 7, [8, 9](docs/centos.md#centos-8)
@@ -161,28 +161,28 @@ Note: Upstart/SysV init based OS types are not supported.
 ## Supported Components

 - Core
-- [kubernetes](https://github.com/kubernetes/kubernetes) v1.26.5
-- [etcd](https://github.com/etcd-io/etcd) v3.5.6
+- [kubernetes](https://github.com/kubernetes/kubernetes) v1.27.7
+- [etcd](https://github.com/etcd-io/etcd) v3.5.9
 - [docker](https://www.docker.com/) v20.10 (see note)
-- [containerd](https://containerd.io/) v1.7.1
-- [cri-o](http://cri-o.io/) v1.24 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
+- [containerd](https://containerd.io/) v1.7.5
+- [cri-o](http://cri-o.io/) v1.27 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
 - Network Plugin
 - [cni-plugins](https://github.com/containernetworking/plugins) v1.2.0
-- [calico](https://github.com/projectcalico/calico) v3.25.1
-- [cilium](https://github.com/cilium/cilium) v1.13.0
-- [flannel](https://github.com/flannel-io/flannel) v0.21.4
-- [kube-ovn](https://github.com/alauda/kube-ovn) v1.10.7
+- [calico](https://github.com/projectcalico/calico) v3.25.2
+- [cilium](https://github.com/cilium/cilium) v1.13.4
+- [flannel](https://github.com/flannel-io/flannel) v0.22.0
+- [kube-ovn](https://github.com/alauda/kube-ovn) v1.11.5
 - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1
 - [multus](https://github.com/k8snetworkplumbingwg/multus-cni) v3.8
 - [weave](https://github.com/weaveworks/weave) v2.8.1
 - [kube-vip](https://github.com/kube-vip/kube-vip) v0.5.12
 - Application
 - [cert-manager](https://github.com/jetstack/cert-manager) v1.11.1
-- [coredns](https://github.com/coredns/coredns) v1.9.3
-- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.7.1
-- [krew](https://github.com/kubernetes-sigs/krew) v0.4.3
-- [argocd](https://argoproj.github.io/) v2.7.2
-- [helm](https://helm.sh/) v3.12.0
+- [coredns](https://github.com/coredns/coredns) v1.10.1
+- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.8.1
+- [krew](https://github.com/kubernetes-sigs/krew) v0.4.4
+- [argocd](https://argoproj.github.io/) v2.8.0
+- [helm](https://helm.sh/) v3.12.3
 - [metallb](https://metallb.universe.tf/) v0.13.9
 - [registry](https://github.com/distribution/distribution) v2.8.1
 - Storage Plugin
@@ -191,19 +191,19 @@ Note: Upstart/SysV init based OS types are not supported.
 - [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) v0.5.0
 - [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0
 - [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.22.0
-- [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.4.0
-- [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.23
+- [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.9.2
+- [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.24
 - [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0

 ## Container Runtime Notes

-- Supported Docker versions are 18.09, 19.03 and 20.10. The *recommended* Docker version is 20.10. `Kubelet` might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. the YUM ``versionlock`` plugin or ``apt pin``).
+- Supported Docker versions are 18.09, 19.03, 20.10, 23.0 and 24.0. The *recommended* Docker version is 20.10 (except on Debian bookworm which without supporting for 20.10 and below any more). `Kubelet` might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. the YUM ``versionlock`` plugin or ``apt pin``).
 - The cri-o version should be aligned with the respective kubernetes version (i.e. kube_version=1.20.x, crio_version=1.20)

 ## Requirements

-- **Minimum required version of Kubernetes is v1.24**
-- **Ansible v2.11+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
+- **Minimum required version of Kubernetes is v1.25**
+- **Ansible v2.14+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
 - The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
 - The target servers are configured to allow **IPv4 forwarding**.
 - If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**.
@@ -227,7 +227,7 @@ You can choose among ten network plugins. (default: `calico`, except Vagrant use

 - [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.

-- [Calico](https://docs.projectcalico.org/latest/introduction/) is a networking and network policy provider. Calico supports a flexible set of networking options
+- [Calico](https://docs.tigera.io/calico/latest/about/) is a networking and network policy provider. Calico supports a flexible set of networking options
 designed to give you the most efficient networking across a range of situations, including non-overlay
 and overlay networks, with or without BGP. Calico uses the same engine to enforce network policy for hosts,
 pods, and (if using Istio and Envoy) applications at the service mesh layer.
Vagrantfile (vendored, 27 lines changed)
@@ -10,7 +10,6 @@ Vagrant.require_version ">= 2.0.0"
 CONFIG = File.join(File.dirname(__FILE__), ENV['KUBESPRAY_VAGRANT_CONFIG'] || 'vagrant/config.rb')

 FLATCAR_URL_TEMPLATE = "https://%s.release.flatcar-linux.net/amd64-usr/current/flatcar_production_vagrant.json"
-FEDORA35_MIRROR = "https://download.fedoraproject.org/pub/fedora/linux/releases/35/Cloud/x86_64/images/Fedora-Cloud-Base-Vagrant-35-1.2.x86_64.vagrant-libvirt.box"

 # Uniq disk UUID for libvirt
 DISK_UUID = Time.now.utc.to_i
@@ -20,9 +19,8 @@ SUPPORTED_OS = {
 "flatcar-beta" => {box: "flatcar-beta", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["beta"]},
 "flatcar-alpha" => {box: "flatcar-alpha", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["alpha"]},
 "flatcar-edge" => {box: "flatcar-edge", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["edge"]},
-"ubuntu1604" => {box: "generic/ubuntu1604", user: "vagrant"},
-"ubuntu1804" => {box: "generic/ubuntu1804", user: "vagrant"},
 "ubuntu2004" => {box: "generic/ubuntu2004", user: "vagrant"},
+"ubuntu2204" => {box: "generic/ubuntu2204", user: "vagrant"},
 "centos" => {box: "centos/7", user: "vagrant"},
 "centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
 "centos8" => {box: "centos/8", user: "vagrant"},
@@ -30,8 +28,8 @@ SUPPORTED_OS = {
 "almalinux8" => {box: "almalinux/8", user: "vagrant"},
 "almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
 "rockylinux8" => {box: "generic/rocky8", user: "vagrant"},
-"fedora35" => {box: "fedora/35-cloud-base", user: "vagrant", box_url: FEDORA35_MIRROR},
-"fedora36" => {box: "fedora/36-cloud-base", user: "vagrant"},
+"fedora37" => {box: "fedora/37-cloud-base", user: "vagrant"},
+"fedora38" => {box: "fedora/38-cloud-base", user: "vagrant"},
 "opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"},
 "opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
 "oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
@@ -54,7 +52,7 @@ $shared_folders ||= {}
 $forwarded_ports ||= {}
 $subnet ||= "172.18.8"
 $subnet_ipv6 ||= "fd3c:b398:0698:0756"
-$os ||= "ubuntu1804"
+$os ||= "ubuntu2004"
 $network_plugin ||= "flannel"
 # Setting multi_networking to true will install Multus: https://github.com/k8snetworkplumbingwg/multus-cni
 $multi_networking ||= "False"
@@ -209,7 +207,8 @@ Vagrant.configure("2") do |config|
 end

 ip = "#{$subnet}.#{i+100}"
-node.vm.network :private_network, ip: ip,
+node.vm.network :private_network,
+:ip => ip,
 :libvirt__guest_ipv6 => 'yes',
 :libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
 :libvirt__ipv6_prefix => "64",
@@ -219,14 +218,22 @@ Vagrant.configure("2") do |config|
 # Disable swap for each vm
 node.vm.provision "shell", inline: "swapoff -a"

-# ubuntu1804 and ubuntu2004 have IPv6 explicitly disabled. This undoes that.
-if ["ubuntu1804", "ubuntu2004"].include? $os
+# ubuntu2004 and ubuntu2204 have IPv6 explicitly disabled. This undoes that.
+if ["ubuntu2004", "ubuntu2204"].include? $os
 node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf"
 node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf"
 end
+# Hack for fedora37/38 to get the IP address of the second interface
+if ["fedora37", "fedora38"].include? $os
+config.vm.provision "shell", inline: <<-SHELL
+nmcli conn modify 'Wired connection 2' ipv4.addresses $(cat /etc/sysconfig/network-scripts/ifcfg-eth1 | grep IPADDR | cut -d "=" -f2)
+nmcli conn modify 'Wired connection 2' ipv4.method manual
+service NetworkManager restart
+SHELL
+end

 # Disable firewalld on oraclelinux/redhat vms
-if ["oraclelinux","oraclelinux8","rhel7","rhel8"].include? $os
+if ["oraclelinux","oraclelinux8","rhel7","rhel8","rockylinux8"].include? $os
 node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld"
 end

@@ -39,7 +39,7 @@ class SearchEC2Tags(object):
 hosts[group] = []
 tag_key = "kubespray-role"
 tag_value = ["*"+group+"*"]
-region = os.environ['REGION']
+region = os.environ['AWS_REGION']

 ec2 = boto3.resource('ec2', region)
 filters = [{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}]
@@ -67,6 +67,11 @@ class SearchEC2Tags(object):
 if node_labels_tag:
 ansible_host['node_labels'] = dict([ label.strip().split('=') for label in node_labels_tag[0]['Value'].split(',') ])

+##Set when instance actually has node_taints
+node_taints_tag = list(filter(lambda t: t['Key'] == 'kubespray-node-taints', instance.tags))
+if node_taints_tag:
+ansible_host['node_taints'] = list([ taint.strip() for taint in node_taints_tag[0]['Value'].split(',') ])
+
 hosts[group].append(dns_name)
 hosts['_meta']['hostvars'][dns_name] = ansible_host

@@ -1,5 +1,6 @@
 ---
-- hosts: localhost
+- name: Generate Azure inventory
+hosts: localhost
 gather_facts: False
 roles:
 - generate-inventory
@@ -1,5 +1,6 @@
 ---
-- hosts: localhost
+- name: Generate Azure inventory
+hosts: localhost
 gather_facts: False
 roles:
 - generate-inventory_2
@@ -1,5 +1,6 @@
 ---
-- hosts: localhost
+- name: Generate Azure templates
+hosts: localhost
 gather_facts: False
 roles:
 - generate-templates
@@ -1,6 +1,6 @@
 ---

-- name: Query Azure VMs # noqa 301
+- name: Query Azure VMs
 command: azure vm list-ip-address --json {{ azure_resource_group }}
 register: vm_list_cmd

@@ -1,14 +1,14 @@
 ---

-- name: Query Azure VMs IPs # noqa 301
+- name: Query Azure VMs IPs
 command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
 register: vm_ip_list_cmd

-- name: Query Azure VMs Roles # noqa 301
+- name: Query Azure VMs Roles
 command: az vm list -o json --resource-group {{ azure_resource_group }}
 register: vm_list_cmd

-- name: Query Azure Load Balancer Public IP # noqa 301
+- name: Query Azure Load Balancer Public IP
 command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
 register: lb_pubip_cmd

@@ -24,14 +24,14 @@ bastionIPAddressName: bastion-pubip

 disablePasswordAuthentication: true

-sshKeyPath: "/home/{{admin_username}}/.ssh/authorized_keys"
+sshKeyPath: "/home/{{ admin_username }}/.ssh/authorized_keys"

 imageReference:
 publisher: "OpenLogic"
 offer: "CentOS"
 sku: "7.5"
 version: "latest"
-imageReferenceJson: "{{imageReference|to_json}}"
+imageReferenceJson: "{{ imageReference | to_json }}"

-storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
+storageAccountName: "sa{{ nameSuffix | replace('-', '') }}"
 storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}"
@@ -1,9 +1,11 @@
 ---
-- hosts: localhost
+- name: Create nodes as docker containers
+hosts: localhost
 gather_facts: False
 roles:
 - { role: dind-host }

-- hosts: containers
+- name: Customize each node containers
+hosts: containers
 roles:
 - { role: dind-cluster }
@@ -1,9 +1,9 @@
 ---
-- name: set_fact distro_setup
+- name: Set_fact distro_setup
 set_fact:
 distro_setup: "{{ distro_settings[node_distro] }}"

-- name: set_fact other distro settings
+- name: Set_fact other distro settings
 set_fact:
 distro_user: "{{ distro_setup['user'] }}"
 distro_ssh_service: "{{ distro_setup['ssh_service'] }}"
@@ -43,7 +43,7 @@
 package:
 name: "{{ item }}"
 state: present
-with_items: "{{ distro_extra_packages + [ 'rsyslog', 'openssh-server' ] }}"
+with_items: "{{ distro_extra_packages + ['rsyslog', 'openssh-server'] }}"

 - name: Start needed services
 service:
@@ -66,8 +66,8 @@
 dest: "/etc/sudoers.d/{{ distro_user }}"
 mode: 0640

-- name: Add my pubkey to "{{ distro_user }}" user authorized keys
-authorized_key:
+- name: "Add my pubkey to {{ distro_user }} user authorized keys"
+ansible.posix.authorized_key:
 user: "{{ distro_user }}"
 state: present
-key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}"
+key: "{{ lookup('file', lookup('env', 'HOME') + '/.ssh/id_rsa.pub') }}"
@@ -1,9 +1,9 @@
 ---
-- name: set_fact distro_setup
+- name: Set_fact distro_setup
 set_fact:
 distro_setup: "{{ distro_settings[node_distro] }}"

-- name: set_fact other distro settings
+- name: Set_fact other distro settings
 set_fact:
 distro_image: "{{ distro_setup['image'] }}"
 distro_init: "{{ distro_setup['init'] }}"
@@ -13,7 +13,7 @@
 distro_agetty_svc: "{{ distro_setup['agetty_svc'] }}"

 - name: Create dind node containers from "containers" inventory section
-docker_container:
+community.docker.docker_container:
 image: "{{ distro_image }}"
 name: "{{ item }}"
 state: started
@@ -53,7 +53,7 @@
 {{ distro_raw_setup_done }} && echo SKIPPED && exit 0
 until [ "$(readlink /proc/1/exe)" = "{{ distro_pid1_exe }}" ] ; do sleep 1; done
 {{ distro_raw_setup }}
-delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
+delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
 with_items: "{{ containers.results }}"
 register: result
 changed_when: result.stdout.find("SKIPPED") < 0
@@ -63,26 +63,25 @@
 until test -S /var/run/dbus/system_bus_socket; do sleep 1; done
 systemctl disable {{ distro_agetty_svc }}
 systemctl stop {{ distro_agetty_svc }}
-delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
+delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
 with_items: "{{ containers.results }}"
 changed_when: false

 # Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
 # handle manually
-- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave) # noqa 301
+- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)
 raw: |
 echo {{ item | hash('sha1') }} > /etc/machine-id.new
 mv -b /etc/machine-id.new /etc/machine-id
 cmp /etc/machine-id /etc/machine-id~ || true
 systemctl daemon-reload
-delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
+delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
 with_items: "{{ containers.results }}"

 - name: Early hack image install to adapt for DIND
-# noqa 302 - this task uses the raw module intentionally
 raw: |
 rm -fv /usr/bin/udevadm /usr/sbin/udevadm
-delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
+delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
 with_items: "{{ containers.results }}"
 register: result
 changed_when: result.stdout.find("removed") >= 0
@@ -1,21 +1,27 @@
|
|||||||
[tox]
|
[tox]
|
||||||
minversion = 1.6
|
minversion = 1.6
|
||||||
skipsdist = True
|
skipsdist = True
|
||||||
envlist = pep8, py33
|
envlist = pep8
|
||||||
|
|
||||||
[testenv]
|
[testenv]
|
||||||
whitelist_externals = py.test
|
allowlist_externals = py.test
|
||||||
usedevelop = True
|
usedevelop = True
|
||||||
deps =
|
deps =
|
||||||
-r{toxinidir}/requirements.txt
|
-r{toxinidir}/requirements.txt
|
||||||
-r{toxinidir}/test-requirements.txt
|
-r{toxinidir}/test-requirements.txt
|
||||||
setenv = VIRTUAL_ENV={envdir}
|
setenv = VIRTUAL_ENV={envdir}
|
||||||
passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
|
passenv =
|
||||||
|
http_proxy
|
||||||
|
HTTP_PROXY
|
||||||
|
https_proxy
|
||||||
|
HTTPS_PROXY
|
||||||
|
no_proxy
|
||||||
|
NO_PROXY
|
||||||
commands = pytest -vv #{posargs:./tests}
|
commands = pytest -vv #{posargs:./tests}
|
||||||
|
|
||||||
[testenv:pep8]
|
[testenv:pep8]
|
||||||
usedevelop = False
|
usedevelop = False
|
||||||
whitelist_externals = bash
|
allowlist_externals = bash
|
||||||
commands =
|
commands =
|
||||||
bash -c "find {toxinidir}/* -type f -name '*.py' -print0 | xargs -0 flake8"
|
bash -c "find {toxinidir}/* -type f -name '*.py' -print0 | xargs -0 flake8"
|
||||||
|
|
||||||
|
|||||||
@@ -1,8 +1,9 @@
|
|||||||
---
|
---
|
||||||
- hosts: localhost
|
- name: Prepare Hypervisor to later install kubespray VMs
|
||||||
|
hosts: localhost
|
||||||
gather_facts: False
|
gather_facts: False
|
||||||
become: yes
|
become: yes
|
||||||
vars:
|
vars:
|
||||||
- bootstrap_os: none
|
bootstrap_os: none
|
||||||
roles:
|
roles:
|
||||||
- kvm-setup
|
- { role: kvm-setup }
|
||||||
|
|||||||
@@ -22,9 +22,9 @@
|
|||||||
- ntp
|
- ntp
|
||||||
when: ansible_os_family == "Debian"
|
when: ansible_os_family == "Debian"
|
||||||
|
|
||||||
# Create deployment user if required
|
- name: Create deployment user if required
|
||||||
- include: user.yml
|
include_tasks: user.yml
|
||||||
when: k8s_deployment_user is defined
|
when: k8s_deployment_user is defined
|
||||||
|
|
||||||
# Set proper sysctl values
|
- name: Set proper sysctl values
|
||||||
- include: sysctl.yml
|
import_tasks: sysctl.yml
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
---
|
---
|
||||||
- name: Load br_netfilter module
|
- name: Load br_netfilter module
|
||||||
modprobe:
|
community.general.modprobe:
|
||||||
name: br_netfilter
|
name: br_netfilter
|
||||||
state: present
|
state: present
|
||||||
register: br_netfilter
|
register: br_netfilter
|
||||||
@@ -25,7 +25,7 @@
|
|||||||
|
|
||||||
|
|
||||||
- name: Enable net.ipv4.ip_forward in sysctl
|
- name: Enable net.ipv4.ip_forward in sysctl
|
||||||
sysctl:
|
ansible.posix.sysctl:
|
||||||
name: net.ipv4.ip_forward
|
name: net.ipv4.ip_forward
|
||||||
value: 1
|
value: 1
|
||||||
sysctl_file: "{{ sysctl_file_path }}"
|
sysctl_file: "{{ sysctl_file_path }}"
|
||||||
@@ -33,7 +33,7 @@
|
|||||||
reload: yes
|
reload: yes
|
||||||
|
|
||||||
- name: Set bridge-nf-call-{arptables,iptables} to 0
|
- name: Set bridge-nf-call-{arptables,iptables} to 0
|
||||||
sysctl:
|
ansible.posix.sysctl:
|
||||||
name: "{{ item }}"
|
name: "{{ item }}"
|
||||||
state: present
|
state: present
|
||||||
value: 0
|
value: 0
|
||||||
|
|||||||
@@ -1,8 +1,9 @@
|
|||||||
---
|
---
|
||||||
- name: Check ansible version
|
- name: Check ansible version
|
||||||
import_playbook: ansible_version.yml
|
import_playbook: kubernetes_sigs.kubespray.ansible_version
|
||||||
|
|
||||||
- hosts: localhost
|
- name: Install mitogen
|
||||||
|
hosts: localhost
|
||||||
strategy: linear
|
strategy: linear
|
||||||
vars:
|
vars:
|
||||||
mitogen_version: 0.3.2
|
mitogen_version: 0.3.2
|
||||||
@@ -19,24 +20,25 @@
|
|||||||
- "{{ playbook_dir }}/plugins/mitogen"
|
- "{{ playbook_dir }}/plugins/mitogen"
|
||||||
- "{{ playbook_dir }}/dist"
|
- "{{ playbook_dir }}/dist"
|
||||||
|
|
||||||
- name: download mitogen release
|
- name: Download mitogen release
|
||||||
get_url:
|
get_url:
|
||||||
url: "{{ mitogen_url }}"
|
url: "{{ mitogen_url }}"
|
||||||
dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
|
dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
|
||||||
validate_certs: true
|
validate_certs: true
|
||||||
|
mode: 0644
|
||||||
|
|
||||||
- name: extract archive
|
- name: Extract archive
|
||||||
unarchive:
|
unarchive:
|
||||||
src: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
|
src: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
|
||||||
dest: "{{ playbook_dir }}/dist/"
|
dest: "{{ playbook_dir }}/dist/"
|
||||||
|
|
||||||
- name: copy plugin
|
- name: Copy plugin
|
||||||
synchronize:
|
ansible.posix.synchronize:
|
||||||
src: "{{ playbook_dir }}/dist/mitogen-{{ mitogen_version }}/"
|
src: "{{ playbook_dir }}/dist/mitogen-{{ mitogen_version }}/"
|
||||||
dest: "{{ playbook_dir }}/plugins/mitogen"
|
dest: "{{ playbook_dir }}/plugins/mitogen"
|
||||||
|
|
||||||
- name: add strategy to ansible.cfg
|
- name: Add strategy to ansible.cfg
|
||||||
ini_file:
|
community.general.ini_file:
|
||||||
path: ansible.cfg
|
path: ansible.cfg
|
||||||
mode: 0644
|
mode: 0644
|
||||||
section: "{{ item.section | d('defaults') }}"
|
section: "{{ item.section | d('defaults') }}"
|
||||||
|
|||||||
@@ -1,24 +1,29 @@
|
|||||||
---
|
---
|
||||||
- hosts: gfs-cluster
|
- name: Bootstrap hosts
|
||||||
|
hosts: gfs-cluster
|
||||||
gather_facts: false
|
gather_facts: false
|
||||||
vars:
|
vars:
|
||||||
ansible_ssh_pipelining: false
|
ansible_ssh_pipelining: false
|
||||||
roles:
|
roles:
|
||||||
- { role: bootstrap-os, tags: bootstrap-os}
|
- { role: bootstrap-os, tags: bootstrap-os}
|
||||||
|
|
||||||
- hosts: all
|
- name: Gather facts
|
||||||
|
hosts: all
|
||||||
gather_facts: true
|
gather_facts: true
|
||||||
|
|
||||||
- hosts: gfs-cluster
|
- name: Install glusterfs server
|
||||||
|
hosts: gfs-cluster
|
||||||
vars:
|
vars:
|
||||||
ansible_ssh_pipelining: true
|
ansible_ssh_pipelining: true
|
||||||
roles:
|
roles:
|
||||||
- { role: glusterfs/server }
|
- { role: glusterfs/server }
|
||||||
|
|
||||||
- hosts: k8s_cluster
|
- name: Install glusterfs servers
|
||||||
|
hosts: k8s_cluster
|
||||||
roles:
|
roles:
|
||||||
- { role: glusterfs/client }
|
- { role: glusterfs/client }
|
||||||
|
|
||||||
- hosts: kube_control_plane[0]
|
- name: Configure Kubernetes to use glusterfs
|
||||||
|
hosts: kube_control_plane[0]
|
||||||
roles:
|
roles:
|
||||||
- { role: kubernetes-pv }
|
- { role: kubernetes-pv }
|
||||||
|
|||||||
@@ -6,12 +6,12 @@ galaxy_info:
|
|||||||
description: GlusterFS installation for Linux.
|
description: GlusterFS installation for Linux.
|
||||||
company: "Midwestern Mac, LLC"
|
company: "Midwestern Mac, LLC"
|
||||||
license: "license (BSD, MIT)"
|
license: "license (BSD, MIT)"
|
||||||
min_ansible_version: 2.0
|
min_ansible_version: "2.0"
|
||||||
platforms:
|
platforms:
|
||||||
- name: EL
|
- name: EL
|
||||||
versions:
|
versions:
|
||||||
- 6
|
- "6"
|
||||||
- 7
|
- "7"
|
||||||
- name: Ubuntu
|
- name: Ubuntu
|
||||||
versions:
|
versions:
|
||||||
- precise
|
- precise
|
||||||
|
|||||||
@@ -3,14 +3,19 @@
|
|||||||
# hyperkube and needs to be installed as part of the system.
|
# hyperkube and needs to be installed as part of the system.
|
||||||
|
|
||||||
# Setup/install tasks.
|
# Setup/install tasks.
|
||||||
- include: setup-RedHat.yml
|
- name: Setup RedHat distros for glusterfs
|
||||||
|
include_tasks: setup-RedHat.yml
|
||||||
when: ansible_os_family == 'RedHat' and groups['gfs-cluster'] is defined
|
when: ansible_os_family == 'RedHat' and groups['gfs-cluster'] is defined
|
||||||
|
|
||||||
- include: setup-Debian.yml
|
- name: Setup Debian distros for glusterfs
|
||||||
|
include_tasks: setup-Debian.yml
|
||||||
when: ansible_os_family == 'Debian' and groups['gfs-cluster'] is defined
|
when: ansible_os_family == 'Debian' and groups['gfs-cluster'] is defined
|
||||||
|
|
||||||
- name: Ensure Gluster mount directories exist.
|
- name: Ensure Gluster mount directories exist.
|
||||||
file: "path={{ item }} state=directory mode=0775"
|
file:
|
||||||
|
path: "{{ item }}"
|
||||||
|
state: directory
|
||||||
|
mode: 0775
|
||||||
with_items:
|
with_items:
|
||||||
- "{{ gluster_mount_dir }}"
|
- "{{ gluster_mount_dir }}"
|
||||||
when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined
|
when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined
|
||||||
|
|||||||
@@ -7,7 +7,7 @@
|
|||||||
register: glusterfs_ppa_added
|
register: glusterfs_ppa_added
|
||||||
when: glusterfs_ppa_use
|
when: glusterfs_ppa_use
|
||||||
|
|
||||||
- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa 503
|
- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa no-handler
|
||||||
apt:
|
apt:
|
||||||
name: "{{ item }}"
|
name: "{{ item }}"
|
||||||
state: absent
|
state: absent
|
||||||
|
|||||||
@@ -1,10 +1,14 @@
|
|||||||
---
|
---
|
||||||
- name: Install Prerequisites
|
- name: Install Prerequisites
|
||||||
package: name={{ item }} state=present
|
package:
|
||||||
|
name: "{{ item }}"
|
||||||
|
state: present
|
||||||
with_items:
|
with_items:
|
||||||
- "centos-release-gluster{{ glusterfs_default_release }}"
|
- "centos-release-gluster{{ glusterfs_default_release }}"
|
||||||
|
|
||||||
- name: Install Packages
|
- name: Install Packages
|
||||||
package: name={{ item }} state=present
|
package:
|
||||||
|
name: "{{ item }}"
|
||||||
|
state: present
|
||||||
with_items:
|
with_items:
|
||||||
- glusterfs-client
|
- glusterfs-client
|
||||||
|
|||||||
@@ -6,12 +6,12 @@ galaxy_info:
|
|||||||
description: GlusterFS installation for Linux.
|
description: GlusterFS installation for Linux.
|
||||||
company: "Midwestern Mac, LLC"
|
company: "Midwestern Mac, LLC"
|
||||||
license: "license (BSD, MIT)"
|
license: "license (BSD, MIT)"
|
||||||
min_ansible_version: 2.0
|
min_ansible_version: "2.0"
|
||||||
platforms:
|
platforms:
|
||||||
- name: EL
|
- name: EL
|
||||||
versions:
|
versions:
|
||||||
- 6
|
- "6"
|
||||||
- 7
|
- "7"
|
||||||
- name: Ubuntu
|
- name: Ubuntu
|
||||||
versions:
|
versions:
|
||||||
- precise
|
- precise
|
||||||
|
|||||||
@@ -4,78 +4,97 @@
|
|||||||
include_vars: "{{ ansible_os_family }}.yml"
|
include_vars: "{{ ansible_os_family }}.yml"
|
||||||
|
|
||||||
# Install xfs package
|
# Install xfs package
|
||||||
- name: install xfs Debian
|
- name: Install xfs Debian
|
||||||
apt: name=xfsprogs state=present
|
apt:
|
||||||
|
name: xfsprogs
|
||||||
|
state: present
|
||||||
when: ansible_os_family == "Debian"
|
when: ansible_os_family == "Debian"
|
||||||
|
|
||||||
- name: install xfs RedHat
|
- name: Install xfs RedHat
|
||||||
package: name=xfsprogs state=present
|
package:
|
||||||
|
name: xfsprogs
|
||||||
|
state: present
|
||||||
when: ansible_os_family == "RedHat"
|
when: ansible_os_family == "RedHat"
|
||||||
|
|
||||||
# Format external volumes in xfs
|
# Format external volumes in xfs
|
||||||
- name: Format volumes in xfs
|
- name: Format volumes in xfs
|
||||||
filesystem: "fstype=xfs dev={{ disk_volume_device_1 }}"
|
community.general.filesystem:
|
||||||
|
fstype: xfs
|
||||||
|
dev: "{{ disk_volume_device_1 }}"
|
||||||
|
|
||||||
# Mount external volumes
|
# Mount external volumes
|
||||||
- name: mounting new xfs filesystem
|
- name: Mounting new xfs filesystem
|
||||||
mount: "name={{ gluster_volume_node_mount_dir }} src={{ disk_volume_device_1 }} fstype=xfs state=mounted"
|
ansible.posix.mount:
|
||||||
|
name: "{{ gluster_volume_node_mount_dir }}"
|
||||||
|
src: "{{ disk_volume_device_1 }}"
|
||||||
|
fstype: xfs
|
||||||
|
state: mounted
|
||||||
|
|
||||||
# Setup/install tasks.
|
# Setup/install tasks.
|
||||||
- include: setup-RedHat.yml
|
- name: Setup RedHat distros for glusterfs
|
||||||
|
include_tasks: setup-RedHat.yml
|
||||||
when: ansible_os_family == 'RedHat'
|
when: ansible_os_family == 'RedHat'
|
||||||
|
|
||||||
- include: setup-Debian.yml
|
- name: Setup Debian distros for glusterfs
|
||||||
|
include_tasks: setup-Debian.yml
|
||||||
when: ansible_os_family == 'Debian'
|
when: ansible_os_family == 'Debian'
|
||||||
|
|
||||||
- name: Ensure GlusterFS is started and enabled at boot.
|
- name: Ensure GlusterFS is started and enabled at boot.
|
||||||
service: "name={{ glusterfs_daemon }} state=started enabled=yes"
|
service:
|
||||||
|
name: "{{ glusterfs_daemon }}"
|
||||||
|
state: started
|
||||||
|
enabled: yes
|
||||||
|
|
||||||
- name: Ensure Gluster brick and mount directories exist.
|
- name: Ensure Gluster brick and mount directories exist.
|
||||||
file: "path={{ item }} state=directory mode=0775"
|
file:
|
||||||
|
path: "{{ item }}"
|
||||||
|
state: directory
|
||||||
|
mode: 0775
|
||||||
with_items:
|
with_items:
|
||||||
- "{{ gluster_brick_dir }}"
|
- "{{ gluster_brick_dir }}"
|
||||||
- "{{ gluster_mount_dir }}"
|
- "{{ gluster_mount_dir }}"
|
||||||
|
|
||||||
- name: Configure Gluster volume with replicas
|
- name: Configure Gluster volume with replicas
|
||||||
gluster_volume:
|
gluster.gluster.gluster_volume:
|
||||||
state: present
|
state: present
|
||||||
name: "{{ gluster_brick_name }}"
|
name: "{{ gluster_brick_name }}"
|
||||||
brick: "{{ gluster_brick_dir }}"
|
brick: "{{ gluster_brick_dir }}"
|
||||||
replicas: "{{ groups['gfs-cluster'] | length }}"
|
replicas: "{{ groups['gfs-cluster'] | length }}"
|
||||||
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
|
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
|
||||||
host: "{{ inventory_hostname }}"
|
host: "{{ inventory_hostname }}"
|
||||||
force: yes
|
force: yes
|
||||||
run_once: true
|
run_once: true
|
||||||
when: groups['gfs-cluster']|length > 1
|
when: groups['gfs-cluster'] | length > 1
|
||||||
|
|
||||||
- name: Configure Gluster volume without replicas
|
- name: Configure Gluster volume without replicas
|
||||||
gluster_volume:
|
gluster.gluster.gluster_volume:
|
||||||
state: present
|
state: present
|
||||||
name: "{{ gluster_brick_name }}"
|
name: "{{ gluster_brick_name }}"
|
||||||
brick: "{{ gluster_brick_dir }}"
|
brick: "{{ gluster_brick_dir }}"
|
||||||
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
|
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
|
||||||
host: "{{ inventory_hostname }}"
|
host: "{{ inventory_hostname }}"
|
||||||
force: yes
|
force: yes
|
||||||
run_once: true
|
run_once: true
|
||||||
when: groups['gfs-cluster']|length <= 1
|
when: groups['gfs-cluster'] | length <= 1
|
||||||
|
|
||||||
- name: Mount glusterfs to retrieve disk size
|
- name: Mount glusterfs to retrieve disk size
|
||||||
mount:
|
ansible.posix.mount:
|
||||||
name: "{{ gluster_mount_dir }}"
|
name: "{{ gluster_mount_dir }}"
|
||||||
src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
|
src: "{{ ip | default(ansible_default_ipv4['address']) }}:/gluster"
|
||||||
fstype: glusterfs
|
fstype: glusterfs
|
||||||
opts: "defaults,_netdev"
|
opts: "defaults,_netdev"
|
||||||
state: mounted
|
state: mounted
|
||||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||||
|
|
||||||
- name: Get Gluster disk size
|
- name: Get Gluster disk size
|
||||||
setup: filter=ansible_mounts
|
setup:
|
||||||
|
filter: ansible_mounts
|
||||||
register: mounts_data
|
register: mounts_data
|
||||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||||
|
|
||||||
- name: Set Gluster disk size to variable
|
- name: Set Gluster disk size to variable
|
||||||
set_fact:
|
set_fact:
|
||||||
gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024*1024*1024)) | int }}"
|
gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024 * 1024 * 1024)) | int }}"
|
||||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||||
|
|
||||||
- name: Create file on GlusterFS
|
- name: Create file on GlusterFS
|
||||||
@@ -86,9 +105,9 @@
|
|||||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||||
|
|
||||||
- name: Unmount glusterfs
|
- name: Unmount glusterfs
|
||||||
mount:
|
ansible.posix.mount:
|
||||||
name: "{{ gluster_mount_dir }}"
|
name: "{{ gluster_mount_dir }}"
|
||||||
fstype: glusterfs
|
fstype: glusterfs
|
||||||
src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
|
src: "{{ ip | default(ansible_default_ipv4['address']) }}:/gluster"
|
||||||
state: unmounted
|
state: unmounted
|
||||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||||
|
|||||||
@@ -7,7 +7,7 @@
|
|||||||
register: glusterfs_ppa_added
|
register: glusterfs_ppa_added
|
||||||
when: glusterfs_ppa_use
|
when: glusterfs_ppa_use
|
||||||
|
|
||||||
- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa 503
|
- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa no-handler
|
||||||
apt:
|
apt:
|
||||||
name: "{{ item }}"
|
name: "{{ item }}"
|
||||||
state: absent
|
state: absent
|
||||||
|
|||||||
@@ -1,11 +1,15 @@
|
|||||||
---
|
---
|
||||||
- name: Install Prerequisites
|
- name: Install Prerequisites
|
||||||
package: name={{ item }} state=present
|
package:
|
||||||
|
name: "{{ item }}"
|
||||||
|
state: present
|
||||||
with_items:
|
with_items:
|
||||||
- "centos-release-gluster{{ glusterfs_default_release }}"
|
- "centos-release-gluster{{ glusterfs_default_release }}"
|
||||||
|
|
||||||
- name: Install Packages
|
- name: Install Packages
|
||||||
package: name={{ item }} state=present
|
package:
|
||||||
|
name: "{{ item }}"
|
||||||
|
state: present
|
||||||
with_items:
|
with_items:
|
||||||
- glusterfs-server
|
- glusterfs-server
|
||||||
- glusterfs-client
|
- glusterfs-client
|
||||||
|
|||||||
@@ -18,6 +18,6 @@
|
|||||||
kubectl: "{{ bin_dir }}/kubectl"
|
kubectl: "{{ bin_dir }}/kubectl"
|
||||||
resource: "{{ item.item.type }}"
|
resource: "{{ item.item.type }}"
|
||||||
filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
|
filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
|
||||||
state: "{{ item.changed | ternary('latest','present') }}"
|
state: "{{ item.changed | ternary('latest', 'present') }}"
|
||||||
with_items: "{{ gluster_pv.results }}"
|
with_items: "{{ gluster_pv.results }}"
|
||||||
when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined
|
when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined
|
||||||
|
|||||||
@@ -1,9 +1,11 @@
|
|||||||
---
|
---
|
||||||
- hosts: kube_control_plane[0]
|
- name: Tear down heketi
|
||||||
|
hosts: kube_control_plane[0]
|
||||||
roles:
|
roles:
|
||||||
- { role: tear-down }
|
- { role: tear-down }
|
||||||
|
|
||||||
- hosts: heketi-node
|
- name: Teardown disks in heketi
|
||||||
|
hosts: heketi-node
|
||||||
become: yes
|
become: yes
|
||||||
roles:
|
roles:
|
||||||
- { role: tear-down-disks }
|
- { role: tear-down-disks }
|
||||||
|
|||||||
@@ -1,9 +1,11 @@
|
|||||||
---
|
---
|
||||||
- hosts: heketi-node
|
- name: Prepare heketi install
|
||||||
|
hosts: heketi-node
|
||||||
roles:
|
roles:
|
||||||
- { role: prepare }
|
- { role: prepare }
|
||||||
|
|
||||||
- hosts: kube_control_plane[0]
|
- name: Provision heketi
|
||||||
|
hosts: kube_control_plane[0]
|
||||||
tags:
|
tags:
|
||||||
- "provision"
|
- "provision"
|
||||||
roles:
|
roles:
|
||||||
|
|||||||
@@ -5,7 +5,7 @@
|
|||||||
- "dm_snapshot"
|
- "dm_snapshot"
|
||||||
- "dm_mirror"
|
- "dm_mirror"
|
||||||
- "dm_thin_pool"
|
- "dm_thin_pool"
|
||||||
modprobe:
|
community.general.modprobe:
|
||||||
name: "{{ item }}"
|
name: "{{ item }}"
|
||||||
state: "present"
|
state: "present"
|
||||||
|
|
||||||
|
|||||||
@@ -1,3 +1,3 @@
|
|||||||
---
|
---
|
||||||
- name: "stop port forwarding"
|
- name: "Stop port forwarding"
|
||||||
command: "killall "
|
command: "killall "
|
||||||
|
|||||||
@@ -7,9 +7,9 @@
|
|||||||
|
|
||||||
- name: "Bootstrap heketi."
|
- name: "Bootstrap heketi."
|
||||||
when:
|
when:
|
||||||
- "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Service']\"))|length == 0"
|
- "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Service']\")) | length == 0"
|
||||||
- "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Deployment']\"))|length == 0"
|
- "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Deployment']\")) | length == 0"
|
||||||
- "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod']\"))|length == 0"
|
- "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Pod']\")) | length == 0"
|
||||||
include_tasks: "bootstrap/deploy.yml"
|
include_tasks: "bootstrap/deploy.yml"
|
||||||
|
|
||||||
# Prepare heketi topology
|
# Prepare heketi topology
|
||||||
@@ -20,11 +20,11 @@
|
|||||||
|
|
||||||
- name: "Ensure heketi bootstrap pod is up."
|
- name: "Ensure heketi bootstrap pod is up."
|
||||||
assert:
|
assert:
|
||||||
that: "(initial_heketi_pod.stdout|from_json|json_query('items[*]'))|length == 1"
|
that: "(initial_heketi_pod.stdout | from_json | json_query('items[*]')) | length == 1"
|
||||||
|
|
||||||
- name: Store the initial heketi pod name
|
- name: Store the initial heketi pod name
|
||||||
set_fact:
|
set_fact:
|
||||||
initial_heketi_pod_name: "{{ initial_heketi_pod.stdout|from_json|json_query(\"items[*].metadata.name|[0]\") }}"
|
initial_heketi_pod_name: "{{ initial_heketi_pod.stdout | from_json | json_query(\"items[*].metadata.name | [0]\") }}"
|
||||||
|
|
||||||
- name: "Test heketi topology."
|
- name: "Test heketi topology."
|
||||||
changed_when: false
|
changed_when: false
|
||||||
@@ -32,7 +32,7 @@
|
|||||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
||||||
|
|
||||||
- name: "Load heketi topology."
|
- name: "Load heketi topology."
|
||||||
when: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*]\")|flatten|length == 0"
|
when: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*]\") | flatten | length == 0"
|
||||||
include_tasks: "bootstrap/topology.yml"
|
include_tasks: "bootstrap/topology.yml"
|
||||||
|
|
||||||
# Provision heketi database volume
|
# Provision heketi database volume
|
||||||
@@ -58,7 +58,7 @@
|
|||||||
service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
|
service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
|
||||||
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
|
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
|
||||||
when:
|
when:
|
||||||
- "heketi_storage_state.stdout|from_json|json_query(secret_query)|length == 0"
|
- "heketi_storage_state.stdout | from_json | json_query(secret_query) | length == 0"
|
||||||
- "heketi_storage_state.stdout|from_json|json_query(endpoints_query)|length == 0"
|
- "heketi_storage_state.stdout | from_json | json_query(endpoints_query) | length == 0"
|
||||||
- "heketi_storage_state.stdout|from_json|json_query(service_query)|length == 0"
|
- "heketi_storage_state.stdout | from_json | json_query(service_query) | length == 0"
|
||||||
- "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 0"
|
- "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 0"
|
||||||
|
|||||||
@@ -17,11 +17,11 @@
|
|||||||
register: "initial_heketi_state"
|
register: "initial_heketi_state"
|
||||||
vars:
|
vars:
|
||||||
initial_heketi_state: { stdout: "{}" }
|
initial_heketi_state: { stdout: "{}" }
|
||||||
pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]"
|
pods_query: "items[?kind=='Pod'].status.conditions | [0][?type=='Ready'].status | [0]"
|
||||||
deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
|
deployments_query: "items[?kind=='Deployment'].status.conditions | [0][?type=='Available'].status | [0]"
|
||||||
command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
|
command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
|
||||||
until:
|
until:
|
||||||
- "initial_heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
|
- "initial_heketi_state.stdout | from_json | json_query(pods_query) == 'True'"
|
||||||
- "initial_heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
|
- "initial_heketi_state.stdout | from_json | json_query(deployments_query) == 'True'"
|
||||||
retries: 60
|
retries: 60
|
||||||
delay: 5
|
delay: 5
|
||||||
|
|||||||
@@ -15,10 +15,10 @@
|
|||||||
service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
|
service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
|
||||||
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
|
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
|
||||||
when:
|
when:
|
||||||
- "heketi_storage_state.stdout|from_json|json_query(secret_query)|length == 0"
|
- "heketi_storage_state.stdout | from_json | json_query(secret_query) | length == 0"
|
||||||
- "heketi_storage_state.stdout|from_json|json_query(endpoints_query)|length == 0"
|
- "heketi_storage_state.stdout | from_json | json_query(endpoints_query) | length == 0"
|
||||||
- "heketi_storage_state.stdout|from_json|json_query(service_query)|length == 0"
|
- "heketi_storage_state.stdout | from_json | json_query(service_query) | length == 0"
|
||||||
- "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 0"
|
- "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 0"
|
||||||
register: "heketi_storage_result"
|
register: "heketi_storage_result"
|
||||||
- name: "Get state of heketi database copy job."
|
- name: "Get state of heketi database copy job."
|
||||||
command: "{{ bin_dir }}/kubectl get jobs --output=json"
|
command: "{{ bin_dir }}/kubectl get jobs --output=json"
|
||||||
@@ -28,6 +28,6 @@
|
|||||||
heketi_storage_state: { stdout: "{}" }
|
heketi_storage_state: { stdout: "{}" }
|
||||||
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job' && status.succeeded==1]"
|
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job' && status.succeeded==1]"
|
||||||
until:
|
until:
|
||||||
- "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 1"
|
- "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 1"
|
||||||
retries: 60
|
retries: 60
|
||||||
delay: 5
|
delay: 5
|
||||||
|
|||||||
@@ -5,10 +5,10 @@
|
|||||||
changed_when: false
|
changed_when: false
|
||||||
- name: "Delete bootstrap Heketi."
|
- name: "Delete bootstrap Heketi."
|
||||||
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
|
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
|
||||||
when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0"
|
when: "heketi_resources.stdout | from_json | json_query('items[*]') | length > 0"
|
||||||
- name: "Ensure there is nothing left over." # noqa 301
|
- name: "Ensure there is nothing left over."
|
||||||
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
|
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
|
||||||
register: "heketi_result"
|
register: "heketi_result"
|
||||||
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
|
until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
|
||||||
retries: 60
|
retries: 60
|
||||||
delay: 5
|
delay: 5
|
||||||
|
|||||||
@@ -14,7 +14,7 @@
|
|||||||
- name: "Copy topology configuration into container."
|
- name: "Copy topology configuration into container."
|
||||||
changed_when: false
|
changed_when: false
|
||||||
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
|
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
|
||||||
- name: "Load heketi topology." # noqa 503
|
- name: "Load heketi topology." # noqa no-handler
|
||||||
when: "render.changed"
|
when: "render.changed"
|
||||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
|
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
|
||||||
register: "load_heketi"
|
register: "load_heketi"
|
||||||
@@ -22,6 +22,6 @@
|
|||||||
changed_when: false
|
changed_when: false
|
||||||
register: "heketi_topology"
|
register: "heketi_topology"
|
||||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
||||||
until: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\")|flatten|length == groups['heketi-node']|length"
|
until: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\") | flatten | length == groups['heketi-node'] | length"
|
||||||
retries: 60
|
retries: 60
|
||||||
delay: 5
|
delay: 5
|
||||||
|
|||||||
@@ -6,19 +6,19 @@
|
|||||||
- name: "Get heketi volumes."
|
- name: "Get heketi volumes."
|
||||||
changed_when: false
|
changed_when: false
|
||||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
|
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
|
||||||
with_items: "{{ heketi_volumes.stdout|from_json|json_query(\"volumes[*]\") }}"
|
with_items: "{{ heketi_volumes.stdout | from_json | json_query(\"volumes[*]\") }}"
|
||||||
loop_control: { loop_var: "volume_id" }
|
loop_control: { loop_var: "volume_id" }
|
||||||
register: "volumes_information"
|
register: "volumes_information"
|
||||||
- name: "Test heketi database volume."
|
- name: "Test heketi database volume."
|
||||||
set_fact: { heketi_database_volume_exists: true }
|
set_fact: { heketi_database_volume_exists: true }
|
||||||
with_items: "{{ volumes_information.results }}"
|
with_items: "{{ volumes_information.results }}"
|
||||||
loop_control: { loop_var: "volume_information" }
|
loop_control: { loop_var: "volume_information" }
|
||||||
vars: { volume: "{{ volume_information.stdout|from_json }}" }
|
vars: { volume: "{{ volume_information.stdout | from_json }}" }
|
||||||
when: "volume.name == 'heketidbstorage'"
|
when: "volume.name == 'heketidbstorage'"
|
||||||
- name: "Provision database volume."
|
- name: "Provision database volume."
|
||||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
|
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
|
||||||
when: "heketi_database_volume_exists is undefined"
|
when: "heketi_database_volume_exists is undefined"
|
||||||
- name: "Copy configuration from pod." # noqa 301
|
- name: "Copy configuration from pod."
|
||||||
become: true
|
become: true
|
||||||
command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"
|
command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"
|
||||||
- name: "Get heketi volume ids."
|
- name: "Get heketi volume ids."
|
||||||
@@ -28,14 +28,14 @@
|
|||||||
- name: "Get heketi volumes."
|
- name: "Get heketi volumes."
|
||||||
changed_when: false
|
changed_when: false
|
||||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
|
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
|
||||||
with_items: "{{ heketi_volumes.stdout|from_json|json_query(\"volumes[*]\") }}"
|
with_items: "{{ heketi_volumes.stdout | from_json | json_query(\"volumes[*]\") }}"
|
||||||
loop_control: { loop_var: "volume_id" }
|
loop_control: { loop_var: "volume_id" }
|
||||||
register: "volumes_information"
|
register: "volumes_information"
|
||||||
- name: "Test heketi database volume."
|
- name: "Test heketi database volume."
|
||||||
set_fact: { heketi_database_volume_created: true }
|
set_fact: { heketi_database_volume_created: true }
|
||||||
with_items: "{{ volumes_information.results }}"
|
with_items: "{{ volumes_information.results }}"
|
||||||
loop_control: { loop_var: "volume_information" }
|
loop_control: { loop_var: "volume_information" }
|
||||||
vars: { volume: "{{ volume_information.stdout|from_json }}" }
|
vars: { volume: "{{ volume_information.stdout | from_json }}" }
|
||||||
when: "volume.name == 'heketidbstorage'"
|
when: "volume.name == 'heketidbstorage'"
|
||||||
- name: "Ensure heketi database volume exists."
|
- name: "Ensure heketi database volume exists."
|
||||||
assert: { that: "heketi_database_volume_created is defined", msg: "Heketi database volume does not exist." }
|
assert: { that: "heketi_database_volume_created is defined", msg: "Heketi database volume does not exist." }
|
||||||
|
|||||||
@@ -23,8 +23,8 @@
|
|||||||
changed_when: false
|
changed_when: false
|
||||||
vars:
|
vars:
|
||||||
daemonset_state: { stdout: "{}" }
|
daemonset_state: { stdout: "{}" }
|
||||||
ready: "{{ daemonset_state.stdout|from_json|json_query(\"status.numberReady\") }}"
|
ready: "{{ daemonset_state.stdout | from_json | json_query(\"status.numberReady\") }}"
|
||||||
desired: "{{ daemonset_state.stdout|from_json|json_query(\"status.desiredNumberScheduled\") }}"
|
desired: "{{ daemonset_state.stdout | from_json | json_query(\"status.desiredNumberScheduled\") }}"
|
||||||
until: "ready | int >= 3"
|
until: "ready | int >= 3"
|
||||||
retries: 60
|
retries: 60
|
||||||
delay: 5
|
delay: 5
|
||||||
|
|||||||
@@ -5,7 +5,7 @@
|
|||||||
changed_when: false
|
changed_when: false
|
||||||
|
|
||||||
- name: "Assign storage label"
|
- name: "Assign storage label"
|
||||||
when: "label_present.stdout_lines|length == 0"
|
when: "label_present.stdout_lines | length == 0"
|
||||||
command: "{{ bin_dir }}/kubectl label node {{ node }} storagenode=glusterfs"
|
command: "{{ bin_dir }}/kubectl label node {{ node }} storagenode=glusterfs"
|
||||||
|
|
||||||
- name: Get storage nodes again
|
- name: Get storage nodes again
|
||||||
@@ -15,5 +15,5 @@
|
|||||||
|
|
||||||
- name: Ensure the label has been set
|
- name: Ensure the label has been set
|
||||||
assert:
|
assert:
|
||||||
that: "label_present|length > 0"
|
that: "label_present | length > 0"
|
||||||
msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs."
|
msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs."
|
||||||
|
|||||||
@@ -24,11 +24,11 @@
|
|||||||
deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
|
deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
|
||||||
command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json"
|
command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json"
|
||||||
until:
|
until:
|
||||||
- "heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
|
- "heketi_state.stdout | from_json | json_query(pods_query) == 'True'"
|
||||||
- "heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
|
- "heketi_state.stdout | from_json | json_query(deployments_query) == 'True'"
|
||||||
retries: 60
|
retries: 60
|
||||||
delay: 5
|
delay: 5
|
||||||
|
|
||||||
- name: Set the Heketi pod name
|
- name: Set the Heketi pod name
|
||||||
set_fact:
|
set_fact:
|
||||||
heketi_pod_name: "{{ heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"
|
heketi_pod_name: "{{ heketi_state.stdout | from_json | json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"
|
||||||
|
|||||||
@@ -12,7 +12,7 @@
|
|||||||
- name: "Render storage class configuration."
|
- name: "Render storage class configuration."
|
||||||
become: true
|
become: true
|
||||||
vars:
|
vars:
|
||||||
endpoint_address: "{{ (heketi_service.stdout|from_json).spec.clusterIP }}"
|
endpoint_address: "{{ (heketi_service.stdout | from_json).spec.clusterIP }}"
|
||||||
template:
|
template:
|
||||||
src: "storageclass.yml.j2"
|
src: "storageclass.yml.j2"
|
||||||
dest: "{{ kube_config_dir }}/storageclass.yml"
|
dest: "{{ kube_config_dir }}/storageclass.yml"
|
||||||
|
|||||||
@@ -11,16 +11,16 @@
|
|||||||
src: "topology.json.j2"
|
src: "topology.json.j2"
|
||||||
dest: "{{ kube_config_dir }}/topology.json"
|
dest: "{{ kube_config_dir }}/topology.json"
|
||||||
mode: 0644
|
mode: 0644
|
||||||
- name: "Copy topology configuration into container." # noqa 503
|
- name: "Copy topology configuration into container." # noqa no-handler
|
||||||
when: "rendering.changed"
|
when: "rendering.changed"
|
||||||
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
|
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
|
||||||
- name: "Load heketi topology." # noqa 503
|
- name: "Load heketi topology." # noqa no-handler
|
||||||
when: "rendering.changed"
|
when: "rendering.changed"
|
||||||
command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
|
command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
|
||||||
- name: "Get heketi topology."
|
- name: "Get heketi topology."
|
||||||
register: "heketi_topology"
|
register: "heketi_topology"
|
||||||
changed_when: false
|
changed_when: false
|
||||||
command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
||||||
until: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\")|flatten|length == groups['heketi-node']|length"
|
until: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\") | flatten | length == groups['heketi-node'] | length"
|
||||||
retries: 60
|
retries: 60
|
||||||
delay: 5
|
delay: 5
|
||||||
|
|||||||
@@ -22,7 +22,7 @@
|
|||||||
ignore_errors: true # noqa ignore-errors
|
ignore_errors: true # noqa ignore-errors
|
||||||
changed_when: false
|
changed_when: false
|
||||||
|
|
||||||
- name: "Remove volume groups." # noqa 301
|
- name: "Remove volume groups."
|
||||||
environment:
|
environment:
|
||||||
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
|
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
|
||||||
become: true
|
become: true
|
||||||
@@ -30,7 +30,7 @@
|
|||||||
with_items: "{{ volume_groups.stdout_lines }}"
|
with_items: "{{ volume_groups.stdout_lines }}"
|
||||||
loop_control: { loop_var: "volume_group" }
|
loop_control: { loop_var: "volume_group" }
|
||||||
|
|
||||||
- name: "Remove physical volume from cluster disks." # noqa 301
|
- name: "Remove physical volume from cluster disks."
|
||||||
environment:
|
environment:
|
||||||
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
|
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
|
||||||
become: true
|
become: true
|
||||||
|
|||||||
@@ -1,43 +1,43 @@
|
|||||||
---
|
---
|
||||||
- name: Remove storage class. # noqa 301
|
- name: Remove storage class.
|
||||||
command: "{{ bin_dir }}/kubectl delete storageclass gluster"
|
command: "{{ bin_dir }}/kubectl delete storageclass gluster"
|
||||||
ignore_errors: true # noqa ignore-errors
|
ignore_errors: true # noqa ignore-errors
|
||||||
- name: Tear down heketi. # noqa 301
|
- name: Tear down heketi.
|
||||||
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
|
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
|
||||||
ignore_errors: true # noqa ignore-errors
|
ignore_errors: true # noqa ignore-errors
|
||||||
- name: Tear down heketi. # noqa 301
|
- name: Tear down heketi.
|
||||||
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
|
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
|
||||||
ignore_errors: true # noqa ignore-errors
|
ignore_errors: true # noqa ignore-errors
|
||||||
- name: Tear down bootstrap.
|
- name: Tear down bootstrap.
|
||||||
include_tasks: "../../provision/tasks/bootstrap/tear-down.yml"
|
include_tasks: "../../provision/tasks/bootstrap/tear-down.yml"
|
||||||
- name: Ensure there is nothing left over. # noqa 301
|
- name: Ensure there is nothing left over.
|
||||||
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
|
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
|
||||||
register: "heketi_result"
|
register: "heketi_result"
|
||||||
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
|
until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
|
||||||
retries: 60
|
retries: 60
|
||||||
delay: 5
|
delay: 5
|
||||||
- name: Ensure there is nothing left over. # noqa 301
|
- name: Ensure there is nothing left over.
|
||||||
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
|
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
|
||||||
register: "heketi_result"
|
register: "heketi_result"
|
||||||
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
|
until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
|
||||||
retries: 60
|
retries: 60
|
||||||
delay: 5
|
delay: 5
|
||||||
- name: Tear down glusterfs. # noqa 301
|
- name: Tear down glusterfs.
|
||||||
command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
|
command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
|
||||||
ignore_errors: true # noqa ignore-errors
|
ignore_errors: true # noqa ignore-errors
|
||||||
- name: Remove heketi storage service. # noqa 301
|
- name: Remove heketi storage service.
|
||||||
command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
|
command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
|
||||||
ignore_errors: true # noqa ignore-errors
|
ignore_errors: true # noqa ignore-errors
|
||||||
- name: Remove heketi gluster role binding # noqa 301
|
- name: Remove heketi gluster role binding
|
||||||
command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
|
command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
|
||||||
ignore_errors: true # noqa ignore-errors
|
ignore_errors: true # noqa ignore-errors
|
||||||
- name: Remove heketi config secret # noqa 301
|
- name: Remove heketi config secret
|
||||||
command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
|
command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
|
||||||
ignore_errors: true # noqa ignore-errors
|
ignore_errors: true # noqa ignore-errors
|
||||||
- name: Remove heketi db backup # noqa 301
|
- name: Remove heketi db backup
|
||||||
command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
|
command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
|
||||||
ignore_errors: true # noqa ignore-errors
|
ignore_errors: true # noqa ignore-errors
|
||||||
- name: Remove heketi service account # noqa 301
|
- name: Remove heketi service account
|
||||||
command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
|
command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
|
||||||
ignore_errors: true # noqa ignore-errors
|
ignore_errors: true # noqa ignore-errors
|
||||||
- name: Get secrets
|
- name: Get secrets
|
||||||
@@ -46,6 +46,6 @@
|
|||||||
changed_when: false
|
changed_when: false
|
||||||
- name: Remove heketi storage secret
|
- name: Remove heketi storage secret
|
||||||
vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" }
|
vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" }
|
||||||
command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout|from_json|json_query(storage_query) }}"
|
command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout | from_json | json_query(storage_query) }}"
|
||||||
when: "storage_query is defined"
|
when: "storage_query is defined"
|
||||||
ignore_errors: true # noqa ignore-errors
|
ignore_errors: true # noqa ignore-errors
|
||||||
|
|||||||
@@ -27,7 +27,7 @@ manage-offline-container-images.sh register
|
|||||||
|
|
||||||
## generate_list.sh
|
## generate_list.sh
|
||||||
|
|
||||||
This script generates the list of downloaded files and the list of container images from the `roles/download/defaults/main.yml` file.
|
This script generates the list of downloaded files and the list of container images from the `roles/download/defaults/main/main.yml` file.
|
||||||
|
|
||||||
Running this script will execute the `generate_list.yml` playbook in the kubespray root directory and generate four files,
|
Running this script will execute the `generate_list.yml` playbook in the kubespray root directory and generate four files,
|
||||||
the URLs of all downloaded files in files.list, all container images in images.list, and jinja2 templates in *.template.
|
the URLs of all downloaded files in files.list, all container images in images.list, and jinja2 templates in *.template.
|
||||||
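A minimal usage sketch (assuming the script is invoked from `contrib/offline/` on a host with Ansible installed; the output paths follow the temp/ directory the script creates):

```bash
cd contrib/offline
bash generate_list.sh
# the generated lists land under contrib/offline/temp/
cat temp/files.list temp/images.list
```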
|
|||||||
@@ -5,7 +5,7 @@ CURRENT_DIR=$(cd $(dirname $0); pwd)
|
|||||||
TEMP_DIR="${CURRENT_DIR}/temp"
|
TEMP_DIR="${CURRENT_DIR}/temp"
|
||||||
REPO_ROOT_DIR="${CURRENT_DIR%/contrib/offline}"
|
REPO_ROOT_DIR="${CURRENT_DIR%/contrib/offline}"
|
||||||
|
|
||||||
: ${DOWNLOAD_YML:="roles/download/defaults/main.yml"}
|
: ${DOWNLOAD_YML:="roles/download/defaults/main/main.yml"}
|
||||||
|
|
||||||
mkdir -p ${TEMP_DIR}
|
mkdir -p ${TEMP_DIR}
|
||||||
|
|
||||||
@@ -19,7 +19,7 @@ sed -n '/^downloads:/,/download_defaults:/p' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
|
|||||||
| sed 'N;s#\n# #g' | tr ' ' ':' | sed 's/\"//g' > ${TEMP_DIR}/images.list.template
|
| sed 'N;s#\n# #g' | tr ' ' ':' | sed 's/\"//g' > ${TEMP_DIR}/images.list.template
|
||||||
|
|
||||||
# add kube-* images to images list template
|
# add kube-* images to images list template
|
||||||
# Those container images are downloaded by kubeadm, so roles/download/defaults/main.yml
|
# Those container images are downloaded by kubeadm, so roles/download/defaults/main/main.yml
|
||||||
# doesn't contain those images. That is why those images need to be added to the
|
# doesn't contain those images. That is why those images need to be added to the
|
||||||
# list separately.
|
# list separately.
|
||||||
KUBE_IMAGES="kube-apiserver kube-controller-manager kube-scheduler kube-proxy"
|
KUBE_IMAGES="kube-apiserver kube-controller-manager kube-scheduler kube-proxy"
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
---
|
---
|
||||||
- hosts: localhost
|
- name: Collect container images for offline deployment
|
||||||
|
hosts: localhost
|
||||||
become: no
|
become: no
|
||||||
|
|
||||||
roles:
|
roles:
|
||||||
@@ -11,9 +12,11 @@
|
|||||||
|
|
||||||
tasks:
|
tasks:
|
||||||
# Generate files.list and images.list files from templates.
|
# Generate files.list and images.list files from templates.
|
||||||
- template:
|
- name: Collect container images for offline deployment
|
||||||
|
template:
|
||||||
src: ./contrib/offline/temp/{{ item }}.list.template
|
src: ./contrib/offline/temp/{{ item }}.list.template
|
||||||
dest: ./contrib/offline/temp/{{ item }}.list
|
dest: ./contrib/offline/temp/{{ item }}.list
|
||||||
|
mode: 0644
|
||||||
with_items:
|
with_items:
|
||||||
- files
|
- files
|
||||||
- images
|
- images
|
||||||
|
|||||||
@@ -39,6 +39,6 @@ if [ $? -ne 0 ]; then
|
|||||||
sudo "${runtime}" run \
|
sudo "${runtime}" run \
|
||||||
--restart=always -d -p ${NGINX_PORT}:80 \
|
--restart=always -d -p ${NGINX_PORT}:80 \
|
||||||
--volume "${OFFLINE_FILES_DIR}:/usr/share/nginx/html/download" \
|
--volume "${OFFLINE_FILES_DIR}:/usr/share/nginx/html/download" \
|
||||||
--volume "$(pwd)"/nginx.conf:/etc/nginx/nginx.conf \
|
--volume "${CURRENT_DIR}"/nginx.conf:/etc/nginx/nginx.conf \
|
||||||
--name nginx nginx:alpine
|
--name nginx nginx:alpine
|
||||||
fi
|
fi
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
---
|
---
|
||||||
- hosts: all
|
- name: Disable firewalld/ufw
|
||||||
|
hosts: all
|
||||||
roles:
|
roles:
|
||||||
- { role: prepare }
|
- { role: prepare }
|
||||||
|
|||||||
@@ -1,5 +1,8 @@
|
|||||||
---
|
---
|
||||||
- block:
|
- name: Disable firewalld and ufw
|
||||||
|
when:
|
||||||
|
- disable_service_firewall is defined and disable_service_firewall
|
||||||
|
block:
|
||||||
- name: List services
|
- name: List services
|
||||||
service_facts:
|
service_facts:
|
||||||
|
|
||||||
@@ -9,7 +12,7 @@
|
|||||||
state: stopped
|
state: stopped
|
||||||
enabled: no
|
enabled: no
|
||||||
when:
|
when:
|
||||||
"'firewalld.service' in services"
|
"'firewalld.service' in services and services['firewalld.service'].status != 'not-found'"
|
||||||
|
|
||||||
- name: Disable service ufw
|
- name: Disable service ufw
|
||||||
systemd:
|
systemd:
|
||||||
@@ -17,7 +20,4 @@
|
|||||||
state: stopped
|
state: stopped
|
||||||
enabled: no
|
enabled: no
|
||||||
when:
|
when:
|
||||||
"'ufw.service' in services"
|
"'ufw.service' in services and services['ufw.service'].status != 'not-found'"
|
||||||
|
|
||||||
when:
|
|
||||||
- disable_service_firewall is defined and disable_service_firewall
|
|
||||||
|
|||||||
contrib/terraform/nifcloud/.gitignore (new vendored file, 5 lines)
@@ -0,0 +1,5 @@
*.tfstate*
.terraform.lock.hcl
.terraform

sample-inventory/inventory.ini
contrib/terraform/nifcloud/README.md (new file, 137 lines)
@@ -0,0 +1,137 @@
|
|||||||
|
# Kubernetes on NIFCLOUD with Terraform
|
||||||
|
|
||||||
|
Provision a Kubernetes cluster on [NIFCLOUD](https://pfs.nifcloud.com/) using Terraform and Kubespray
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The setup looks like the following:
|
||||||
|
|
||||||
|
```text
                          Kubernetes cluster
                   +-----------------------------+
+---------------+  |   +----------------------+  |
|               |  |   | Control Plane / etcd |  |
| API server LB +----->| node(s)              |  |
|               |  |   +----------------------+  |
+---------------+  |              ^              |
                   |              |              |
                   |              v              |
                   |   +----------------------+  |
                   |   | Worker               |  |
                   |   | node(s)              |  |
                   |   +----------------------+  |
                   +-----------------------------+
```
|
||||||
|
|
||||||
|
## Requirements
|
||||||
|
|
||||||
|
* Terraform 1.3.7
|
||||||
|
|
||||||
|
## Quickstart
|
||||||
|
|
||||||
|
### Export Variables
|
||||||
|
|
||||||
|
* Your NIFCLOUD credentials:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export NIFCLOUD_ACCESS_KEY_ID=<YOUR ACCESS KEY>
|
||||||
|
export NIFCLOUD_SECRET_ACCESS_KEY=<YOUR SECRET ACCESS KEY>
|
||||||
|
```
|
||||||
|
|
||||||
|
* The SSH KEY used to connect to the instance:
|
||||||
|
* FYI: [Cloud Help(SSH Key)](https://pfs.nifcloud.com/help/ssh.htm)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export TF_VAR_SSHKEY_NAME=<YOUR SSHKEY NAME>
|
||||||
|
```
|
||||||
|
|
||||||
|
* The IP address to connect to bastion server:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export TF_VAR_working_instance_ip=$(curl ifconfig.me)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Create The Infrastructure
|
||||||
|
|
||||||
|
* Run terraform:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
terraform init
|
||||||
|
terraform apply -var-file ./sample-inventory/cluster.tfvars
|
||||||
|
```
|
||||||
|
|
||||||
|
### Set Up Kubernetes
|
||||||
|
|
||||||
|
* Generate cluster configuration file:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
./generate-inventory.sh > sample-inventory/inventory.ini
```
|
||||||
|
|
||||||
|
* Export Variables:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
BASTION_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.bastion_info | to_entries[].value.public_ip')
|
||||||
|
API_LB_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.control_plane_lb')
|
||||||
|
CP01_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.control_plane_info | to_entries[0].value.private_ip')
|
||||||
|
export ANSIBLE_SSH_ARGS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ProxyCommand=\"ssh root@${BASTION_IP} -W %h:%p\""
|
||||||
|
```
|
||||||
|
|
||||||
|
* Set up ssh-agent:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
eval `ssh-agent`
|
||||||
|
ssh-add <THE PATH TO YOUR SSH KEY>
|
||||||
|
```
|
||||||
|
|
||||||
|
* Run cluster.yml playbook:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd ./../../../
|
||||||
|
ansible-playbook -i contrib/terraform/nifcloud/inventory/inventory.ini cluster.yml
|
||||||
|
```
|
||||||
|
|
||||||
|
### Connecting to Kubernetes
|
||||||
|
|
||||||
|
* [Install kubectl](https://kubernetes.io/docs/tasks/tools/) on the localhost
|
||||||
|
* Fetching kubeconfig file:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
mkdir -p ~/.kube
|
||||||
|
scp -o ProxyCommand="ssh root@${BASTION_IP} -W %h:%p" root@${CP01_IP}:/etc/kubernetes/admin.conf ~/.kube/config
|
||||||
|
```
|
||||||
|
|
||||||
|
* Rewrite /etc/hosts
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo echo "${API_LB_IP} lb-apiserver.kubernetes.local" >> /etc/hosts
|
||||||
|
```
|
||||||
|
|
||||||
|
* Run kubectl
|
||||||
|
|
||||||
|
```bash
|
||||||
|
kubectl get node
|
||||||
|
```
|
||||||
|
|
||||||
|
## Variables
|
||||||
|
|
||||||
|
* `region`: Region where to run the cluster
|
||||||
|
* `az`: Availability zone where to run the cluster
|
||||||
|
* `private_ip_bn`: Private ip address of bastion server
|
||||||
|
* `private_network_cidr`: Subnet of private network
|
||||||
|
* `instances_cp`: Machine to provision as Control Plane. Key of this object will be used as part of the machine' name
|
||||||
|
* `private_ip`: private ip address of machine
|
||||||
|
* `instances_wk`: Machine to provision as Worker Node. Key of this object will be used as part of the machine' name
|
||||||
|
* `private_ip`: private ip address of machine
|
||||||
|
* `instance_key_name`: The key name of the Key Pair to use for the instance
|
||||||
|
* `instance_type_bn`: The instance type of bastion server
|
||||||
|
* `instance_type_wk`: The instance type of worker node
|
||||||
|
* `instance_type_cp`: The instance type of control plane
|
||||||
|
* `image_name`: OS image used for the instance
|
||||||
|
* `working_instance_ip`: The IP address to connect to bastion server
|
||||||
|
* `accounting_type`: Accounting type. (1: monthly, 2: pay per use)
|
||||||
contrib/terraform/nifcloud/generate-inventory.sh (new executable file, 64 lines)
@@ -0,0 +1,64 @@
#!/bin/bash

#
# Generates an inventory file based on the terraform output.
# After provisioning a cluster, simply run this command and supply the terraform state file.
# The default state file is terraform.tfstate.
#

set -e

TF_OUT=$(terraform output -json)

CONTROL_PLANES=$(jq -r '.kubernetes_cluster.value.control_plane_info | to_entries[]' <(echo "${TF_OUT}"))
WORKERS=$(jq -r '.kubernetes_cluster.value.worker_info | to_entries[]' <(echo "${TF_OUT}"))
mapfile -t CONTROL_PLANE_NAMES < <(jq -r '.key' <(echo "${CONTROL_PLANES}"))
mapfile -t WORKER_NAMES < <(jq -r '.key' <(echo "${WORKERS}"))

API_LB=$(jq -r '.kubernetes_cluster.value.control_plane_lb' <(echo "${TF_OUT}"))

echo "[all]"
# Generate control plane hosts
i=1
for name in "${CONTROL_PLANE_NAMES[@]}"; do
  private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${CONTROL_PLANES}"))
  echo "${name} ansible_user=root ansible_host=${private_ip} access_ip=${private_ip} ip=${private_ip} etcd_member_name=etcd${i}"
  i=$(( i + 1 ))
done

# Generate worker hosts
for name in "${WORKER_NAMES[@]}"; do
  private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${WORKERS}"))
  echo "${name} ansible_user=root ansible_host=${private_ip} access_ip=${private_ip} ip=${private_ip}"
done

API_LB=$(jq -r '.kubernetes_cluster.value.control_plane_lb' <(echo "${TF_OUT}"))

echo ""
echo "[all:vars]"
echo "upstream_dns_servers=['8.8.8.8','8.8.4.4']"
echo "loadbalancer_apiserver={'address':'${API_LB}','port':'6443'}"

echo ""
echo "[kube_control_plane]"
for name in "${CONTROL_PLANE_NAMES[@]}"; do
  echo "${name}"
done

echo ""
echo "[etcd]"
for name in "${CONTROL_PLANE_NAMES[@]}"; do
  echo "${name}"
done

echo ""
echo "[kube_node]"
for name in "${WORKER_NAMES[@]}"; do
  echo "${name}"
done

echo ""
echo "[k8s_cluster:children]"
echo "kube_control_plane"
echo "kube_node"
contrib/terraform/nifcloud/main.tf (new file, 36 lines)
@@ -0,0 +1,36 @@
provider "nifcloud" {
  region = var.region
}

module "kubernetes_cluster" {
  source = "./modules/kubernetes-cluster"

  availability_zone = var.az
  prefix            = "dev"

  private_network_cidr = var.private_network_cidr

  instance_key_name = var.instance_key_name
  instances_cp      = var.instances_cp
  instances_wk      = var.instances_wk
  image_name        = var.image_name

  instance_type_bn = var.instance_type_bn
  instance_type_cp = var.instance_type_cp
  instance_type_wk = var.instance_type_wk

  private_ip_bn = var.private_ip_bn

  additional_lb_filter = [var.working_instance_ip]
}

resource "nifcloud_security_group_rule" "ssh_from_bastion" {
  security_group_names = [
    module.kubernetes_cluster.security_group_name.bastion
  ]
  type      = "IN"
  from_port = 22
  to_port   = 22
  protocol  = "TCP"
  cidr_ip   = var.working_instance_ip
}
contrib/terraform/nifcloud/modules/kubernetes-cluster/main.tf (new file, 301 lines)
@@ -0,0 +1,301 @@
#################################################
##
## Local variables
##
locals {
  # e.g. east-11 is 11
  az_num = reverse(split("-", var.availability_zone))[0]
  # e.g. east-11 is e11
  az_short_name = "${substr(reverse(split("-", var.availability_zone))[1], 0, 1)}${local.az_num}"

  # Port used by the protocol
  port_ssh     = 22
  port_kubectl = 6443
  port_kubelet = 10250

  # calico: https://docs.tigera.io/calico/latest/getting-started/kubernetes/requirements#network-requirements
  port_bgp   = 179
  port_vxlan = 4789
  port_etcd  = 2379
}

#################################################
##
## General
##

# data
data "nifcloud_image" "this" {
  image_name = var.image_name
}

# private lan
resource "nifcloud_private_lan" "this" {
  private_lan_name  = "${var.prefix}lan"
  availability_zone = var.availability_zone
  cidr_block        = var.private_network_cidr
  accounting_type   = var.accounting_type
}

#################################################
##
## Bastion
##
resource "nifcloud_security_group" "bn" {
  group_name        = "${var.prefix}bn"
  description       = "${var.prefix} bastion"
  availability_zone = var.availability_zone
}

resource "nifcloud_instance" "bn" {

  instance_id    = "${local.az_short_name}${var.prefix}bn01"
  security_group = nifcloud_security_group.bn.group_name
  instance_type  = var.instance_type_bn

  user_data = templatefile("${path.module}/templates/userdata.tftpl", {
    private_ip_address = var.private_ip_bn
    ssh_port           = local.port_ssh
    hostname           = "${local.az_short_name}${var.prefix}bn01"
  })

  availability_zone = var.availability_zone
  accounting_type   = var.accounting_type
  image_id          = data.nifcloud_image.this.image_id
  key_name          = var.instance_key_name

  network_interface {
    network_id = "net-COMMON_GLOBAL"
  }
  network_interface {
    network_id = nifcloud_private_lan.this.network_id
    ip_address = "static"
  }

  # The image_id changes when the OS image type is demoted from standard to public.
  lifecycle {
    ignore_changes = [
      image_id,
      user_data,
    ]
  }
}

#################################################
##
## Control Plane
##
resource "nifcloud_security_group" "cp" {
  group_name        = "${var.prefix}cp"
  description       = "${var.prefix} control plane"
  availability_zone = var.availability_zone
}

resource "nifcloud_instance" "cp" {
  for_each = var.instances_cp

  instance_id    = "${local.az_short_name}${var.prefix}${each.key}"
  security_group = nifcloud_security_group.cp.group_name
  instance_type  = var.instance_type_cp
  user_data = templatefile("${path.module}/templates/userdata.tftpl", {
    private_ip_address = each.value.private_ip
    ssh_port           = local.port_ssh
    hostname           = "${local.az_short_name}${var.prefix}${each.key}"
  })

  availability_zone = var.availability_zone
  accounting_type   = var.accounting_type
  image_id          = data.nifcloud_image.this.image_id
  key_name          = var.instance_key_name

  network_interface {
    network_id = "net-COMMON_GLOBAL"
  }
  network_interface {
    network_id = nifcloud_private_lan.this.network_id
    ip_address = "static"
  }

  # The image_id changes when the OS image type is demoted from standard to public.
  lifecycle {
    ignore_changes = [
      image_id,
      user_data,
    ]
  }
}

resource "nifcloud_load_balancer" "this" {
  load_balancer_name = "${local.az_short_name}${var.prefix}cp"
  accounting_type    = var.accounting_type
  balancing_type     = 1 // Round-Robin
  load_balancer_port = local.port_kubectl
  instance_port      = local.port_kubectl
  instances          = [for v in nifcloud_instance.cp : v.instance_id]
  filter = concat(
    [for k, v in nifcloud_instance.cp : v.public_ip],
    [for k, v in nifcloud_instance.wk : v.public_ip],
    var.additional_lb_filter,
  )
  filter_type = 1 // Allow
}

#################################################
##
## Worker
##
resource "nifcloud_security_group" "wk" {
  group_name        = "${var.prefix}wk"
  description       = "${var.prefix} worker"
  availability_zone = var.availability_zone
}

resource "nifcloud_instance" "wk" {
  for_each = var.instances_wk

  instance_id    = "${local.az_short_name}${var.prefix}${each.key}"
  security_group = nifcloud_security_group.wk.group_name
  instance_type  = var.instance_type_wk
  user_data = templatefile("${path.module}/templates/userdata.tftpl", {
    private_ip_address = each.value.private_ip
    ssh_port           = local.port_ssh
    hostname           = "${local.az_short_name}${var.prefix}${each.key}"
  })

  availability_zone = var.availability_zone
  accounting_type   = var.accounting_type
  image_id          = data.nifcloud_image.this.image_id
  key_name          = var.instance_key_name

  network_interface {
    network_id = "net-COMMON_GLOBAL"
  }
  network_interface {
    network_id = nifcloud_private_lan.this.network_id
    ip_address = "static"
  }

  # The image_id changes when the OS image type is demoted from standard to public.
  lifecycle {
    ignore_changes = [
      image_id,
      user_data,
    ]
  }
}

#################################################
##
## Security Group Rule: Kubernetes
##

# ssh
resource "nifcloud_security_group_rule" "ssh_from_bastion" {
  security_group_names = [
    nifcloud_security_group.wk.group_name,
    nifcloud_security_group.cp.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_ssh
  to_port                    = local.port_ssh
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.bn.group_name
}

# kubectl
resource "nifcloud_security_group_rule" "kubectl_from_worker" {
  security_group_names = [
    nifcloud_security_group.cp.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_kubectl
  to_port                    = local.port_kubectl
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.wk.group_name
}

# kubelet
resource "nifcloud_security_group_rule" "kubelet_from_worker" {
  security_group_names = [
    nifcloud_security_group.cp.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_kubelet
  to_port                    = local.port_kubelet
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.wk.group_name
}

resource "nifcloud_security_group_rule" "kubelet_from_control_plane" {
  security_group_names = [
    nifcloud_security_group.wk.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_kubelet
  to_port                    = local.port_kubelet
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.cp.group_name
}

#################################################
##
## Security Group Rule: calico
##

# vxlan
resource "nifcloud_security_group_rule" "vxlan_from_control_plane" {
  security_group_names = [
    nifcloud_security_group.wk.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_vxlan
  to_port                    = local.port_vxlan
  protocol                   = "UDP"
  source_security_group_name = nifcloud_security_group.cp.group_name
}

resource "nifcloud_security_group_rule" "vxlan_from_worker" {
  security_group_names = [
    nifcloud_security_group.cp.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_vxlan
  to_port                    = local.port_vxlan
  protocol                   = "UDP"
  source_security_group_name = nifcloud_security_group.wk.group_name
}

# bgp
resource "nifcloud_security_group_rule" "bgp_from_control_plane" {
  security_group_names = [
    nifcloud_security_group.wk.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_bgp
  to_port                    = local.port_bgp
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.cp.group_name
}

resource "nifcloud_security_group_rule" "bgp_from_worker" {
  security_group_names = [
    nifcloud_security_group.cp.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_bgp
  to_port                    = local.port_bgp
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.wk.group_name
}

# etcd
resource "nifcloud_security_group_rule" "etcd_from_worker" {
  security_group_names = [
    nifcloud_security_group.cp.group_name,
  ]
  type                       = "IN"
  from_port                  = local.port_etcd
  to_port                    = local.port_etcd
  protocol                   = "TCP"
  source_security_group_name = nifcloud_security_group.wk.group_name
}
contrib/terraform/nifcloud/modules/kubernetes-cluster/outputs.tf (new file, 48 lines)
@@ -0,0 +1,48 @@
output "control_plane_lb" {
  description = "The DNS name of LB for control plane"
  value       = nifcloud_load_balancer.this.dns_name
}

output "security_group_name" {
  description = "The security group used in the cluster"
  value = {
    bastion       = nifcloud_security_group.bn.group_name,
    control_plane = nifcloud_security_group.cp.group_name,
    worker        = nifcloud_security_group.wk.group_name,
  }
}

output "private_network_id" {
  description = "The private network used in the cluster"
  value       = nifcloud_private_lan.this.id
}

output "bastion_info" {
  description = "The bastion information in the cluster"
  value = { (nifcloud_instance.bn.instance_id) : {
    instance_id = nifcloud_instance.bn.instance_id,
    unique_id   = nifcloud_instance.bn.unique_id,
    private_ip  = nifcloud_instance.bn.private_ip,
    public_ip   = nifcloud_instance.bn.public_ip,
  } }
}

output "worker_info" {
  description = "The worker information in the cluster"
  value = { for v in nifcloud_instance.wk : v.instance_id => {
    instance_id = v.instance_id,
    unique_id   = v.unique_id,
    private_ip  = v.private_ip,
    public_ip   = v.public_ip,
  } }
}

output "control_plane_info" {
  description = "The control plane information in the cluster"
  value = { for v in nifcloud_instance.cp : v.instance_id => {
    instance_id = v.instance_id,
    unique_id   = v.unique_id,
    private_ip  = v.private_ip,
    public_ip   = v.public_ip,
  } }
}
contrib/terraform/nifcloud/modules/kubernetes-cluster/templates/userdata.tftpl (new file, 45 lines)
@@ -0,0 +1,45 @@
#!/bin/bash

#################################################
##
## IP Address
##
configure_private_ip_address () {
  cat << EOS > /etc/netplan/01-netcfg.yaml
network:
  version: 2
  renderer: networkd
  ethernets:
    ens192:
      dhcp4: yes
      dhcp6: yes
      dhcp-identifier: mac
    ens224:
      dhcp4: no
      dhcp6: no
      addresses: [${private_ip_address}]
EOS
  netplan apply
}
configure_private_ip_address

#################################################
##
## SSH
##
configure_ssh_port () {
  sed -i 's/^#*Port [0-9]*/Port ${ssh_port}/' /etc/ssh/sshd_config
}
configure_ssh_port

#################################################
##
## Hostname
##
hostnamectl set-hostname ${hostname}

#################################################
##
## Disable swap files generated by systemd-gpt-auto-generator
##
systemctl mask "dev-sda3.swap"
contrib/terraform/nifcloud/modules/kubernetes-cluster/terraform.tf (new file, 9 lines)
@@ -0,0 +1,9 @@
terraform {
  required_version = ">=1.3.7"
  required_providers {
    nifcloud = {
      source  = "nifcloud/nifcloud"
      version = ">= 1.8.0, < 2.0.0"
    }
  }
}
contrib/terraform/nifcloud/modules/kubernetes-cluster/variables.tf (new file, 81 lines)
@@ -0,0 +1,81 @@
variable "availability_zone" {
  description = "The availability zone"
  type        = string
}

variable "prefix" {
  description = "The prefix for the entire cluster"
  type        = string
  validation {
    condition     = length(var.prefix) <= 5
    error_message = "Must be 5 characters or less."
  }
}

variable "private_network_cidr" {
  description = "The subnet of private network"
  type        = string
  validation {
    condition     = can(cidrnetmask(var.private_network_cidr))
    error_message = "Must be a valid IPv4 CIDR block address."
  }
}

variable "private_ip_bn" {
  description = "Private IP of bastion server"
  type        = string
}

variable "instances_cp" {
  type = map(object({
    private_ip = string
  }))
}

variable "instances_wk" {
  type = map(object({
    private_ip = string
  }))
}

variable "instance_key_name" {
  description = "The key name of the Key Pair to use for the instance"
  type        = string
}

variable "instance_type_bn" {
  description = "The instance type of bastion server"
  type        = string
}

variable "instance_type_wk" {
  description = "The instance type of worker"
  type        = string
}

variable "instance_type_cp" {
  description = "The instance type of control plane"
  type        = string
}

variable "image_name" {
  description = "The name of image"
  type        = string
}

variable "additional_lb_filter" {
  description = "Additional LB filter"
  type        = list(string)
}

variable "accounting_type" {
  type    = string
  default = "1"
  validation {
    condition = anytrue([
      var.accounting_type == "1", // Monthly
      var.accounting_type == "2", // Pay per use
    ])
    error_message = "Must be 1 or 2."
  }
}
contrib/terraform/nifcloud/output.tf (new file, 3 lines)
@@ -0,0 +1,3 @@
output "kubernetes_cluster" {
  value = module.kubernetes_cluster
}
contrib/terraform/nifcloud/sample-inventory/cluster.tfvars (new file, 22 lines)
@@ -0,0 +1,22 @@
region = "jp-west-1"
az     = "west-11"

instance_key_name = "deployerkey"

instance_type_bn = "e-medium"
instance_type_cp = "e-medium"
instance_type_wk = "e-medium"

private_network_cidr = "192.168.30.0/24"
instances_cp = {
  "cp01" : { private_ip : "192.168.30.11/24" }
  "cp02" : { private_ip : "192.168.30.12/24" }
  "cp03" : { private_ip : "192.168.30.13/24" }
}
instances_wk = {
  "wk01" : { private_ip : "192.168.30.21/24" }
  "wk02" : { private_ip : "192.168.30.22/24" }
}
private_ip_bn = "192.168.30.10/24"

image_name = "Ubuntu Server 22.04 LTS"
contrib/terraform/nifcloud/sample-inventory/group_vars (new symbolic link)
@@ -0,0 +1 @@
../../../../inventory/sample/group_vars
contrib/terraform/nifcloud/terraform.tf (new file, 9 lines)
@@ -0,0 +1,9 @@
terraform {
  required_version = ">=1.3.7"
  required_providers {
    nifcloud = {
      source  = "nifcloud/nifcloud"
      version = "1.8.0"
    }
  }
}
contrib/terraform/nifcloud/variables.tf (new file, 77 lines)
@@ -0,0 +1,77 @@
variable "region" {
  description = "The region"
  type        = string
}

variable "az" {
  description = "The availability zone"
  type        = string
}

variable "private_ip_bn" {
  description = "Private IP of bastion server"
  type        = string
}

variable "private_network_cidr" {
  description = "The subnet of private network"
  type        = string
  validation {
    condition     = can(cidrnetmask(var.private_network_cidr))
    error_message = "Must be a valid IPv4 CIDR block address."
  }
}

variable "instances_cp" {
  type = map(object({
    private_ip = string
  }))
}

variable "instances_wk" {
  type = map(object({
    private_ip = string
  }))
}

variable "instance_key_name" {
  description = "The key name of the Key Pair to use for the instance"
  type        = string
}

variable "instance_type_bn" {
  description = "The instance type of bastion server"
  type        = string
}

variable "instance_type_wk" {
  description = "The instance type of worker"
  type        = string
}

variable "instance_type_cp" {
  description = "The instance type of control plane"
  type        = string
}

variable "image_name" {
  description = "The name of image"
  type        = string
}

variable "working_instance_ip" {
  description = "The IP address to connect to bastion server."
  type        = string
}

variable "accounting_type" {
  type    = string
  default = "2"
  validation {
    condition = anytrue([
      var.accounting_type == "1", // Monthly
      var.accounting_type == "2", // Pay per use
    ])
    error_message = "Must be 1 or 2."
  }
}
@@ -10,21 +10,29 @@ It is recommended to deploy the ansible version used by kubespray into a python
 ```ShellSession
 VENVDIR=kubespray-venv
 KUBESPRAYDIR=kubespray
-ANSIBLE_VERSION=2.12
-virtualenv --python=$(which python3) $VENVDIR
+python3 -m venv $VENVDIR
 source $VENVDIR/bin/activate
 cd $KUBESPRAYDIR
-pip install -U -r requirements-$ANSIBLE_VERSION.txt
+pip install -U -r requirements.txt
 ```
+
+In case you have a similar message when installing the requirements:
+
+```ShellSession
+ERROR: Could not find a version that satisfies the requirement ansible==7.6.0 (from -r requirements.txt (line 1)) (from versions: [...], 6.7.0)
+ERROR: No matching distribution found for ansible==7.6.0 (from -r requirements.txt (line 1))
+```
+
+It means that the version of Python you are running is not compatible with the version of Ansible that Kubespray supports.
+If the latest version supported according to pip is 6.7.0 it means you are running Python 3.8 or lower while you need at least Python 3.9 (see the table below).
+
 ### Ansible Python Compatibility
 
 Based on the table below and the available python version for your ansible host you should choose the appropriate ansible version to use with kubespray.
 
 | Ansible Version | Python Version |
 |-----------------|----------------|
-| 2.11            | 2.7,3.5-3.9    |
-| 2.12            | 3.8-3.10       |
+| 2.14            | 3.9-3.11       |
 
 ## Inventory
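A quick way to confirm which Ansible/requirements combination will work on your control host is to check the interpreter version first. This is only an illustrative check, not a step documented in the hunk above:

```ShellSession
python3 --version   # 3.9, 3.10 or 3.11 is required for Ansible 2.14
```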
@@ -15,7 +15,7 @@ Kubespray can be installed as an [Ansible collection](https://docs.ansible.com/a
 collections:
   - name: https://github.com/kubernetes-sigs/kubespray
     type: git
-    version: v2.21.0
+    version: v2.22.1
 ```
 
 2. Install your collection
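Assuming the `requirements.yml` shown above, the collection can then be installed with the standard Ansible Galaxy command (a sketch of the step this hunk refers to):

```ShellSession
ansible-galaxy collection install -r requirements.yml
```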
docs/aws.md (14 changes)
@@ -58,11 +58,23 @@ Guide:
 ```ShellSession
 export AWS_ACCESS_KEY_ID="xxxxx"
 export AWS_SECRET_ACCESS_KEY="yyyyy"
-export REGION="us-east-2"
+export AWS_REGION="us-east-2"
 ```
 
 - We will now create our cluster. There will be either one or two small changes. The first is that we will specify `-i inventory/kubespray-aws-inventory.py` as our inventory script. The other is conditional. If your AWS instances are public facing, you can set the `VPC_VISIBILITY` variable to `public` and that will result in public IP and DNS names being passed into the inventory. This causes your cluster.yml command to look like `VPC_VISIBILITY="public" ansible-playbook ... cluster.yml`
+
+**Optional** Using labels and taints
+
+To add labels to your kubernetes node, add the following tag to your instance:
+
+- Key: `kubespray-node-labels`
+- Value: `node-role.kubernetes.io/ingress=`
+
+To add taints to your kubernetes node, add the following tag to your instance:
+
+- Key: `kubespray-node-taints`
+- Value: `node-role.kubernetes.io/ingress=:NoSchedule`
+
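As a sketch of how such a tag could be applied with the AWS CLI (the instance ID `i-0123456789abcdef0` is a placeholder, and the CLI itself is not part of the documented flow):

```ShellSession
aws ec2 create-tags \
  --resources i-0123456789abcdef0 \
  --tags '[{"Key":"kubespray-node-labels","Value":"node-role.kubernetes.io/ingress="}]'
```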
 ## Kubespray configuration
 
 Declare the cloud config variables for the `aws` provider as follows. Setting these variables are optional and depend on your use case.
@@ -80,10 +80,15 @@ docker_registry_mirrors:
 containerd_grpc_max_recv_message_size: 16777216
 containerd_grpc_max_send_message_size: 16777216
 
-containerd_registries:
-  "docker.io":
-    - "https://mirror.gcr.io"
-    - "https://registry-1.docker.io"
+containerd_registries_mirrors:
+  - prefix: docker.io
+    mirrors:
+      - host: https://mirror.gcr.io
+        capabilities: ["pull", "resolve"]
+        skip_verify: false
+      - host: https://registry-1.docker.io
+        capabilities: ["pull", "resolve"]
+        skip_verify: false
 
 containerd_max_container_log_line_size: -1
docs/ci.md (27 changes)
@@ -11,14 +11,13 @@ amazon | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 centos7 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: |
 debian10 | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: |
 debian11 | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
-fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: |
+debian12 | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
-fedora36 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
+fedora37 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: |
+fedora38 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
 opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux9 | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
-ubuntu16 | :x: | :x: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: |
+ubuntu20 | :white_check_mark: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: |
-ubuntu18 | :white_check_mark: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: |
-ubuntu20 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
 ubuntu22 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 
 ## crio
@@ -30,14 +29,13 @@ amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 centos7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 debian11 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
-fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+debian12 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
-fedora36 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+fedora37 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+fedora38 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
-ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
-ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
-ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 ubuntu22 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 
 ## docker
@@ -49,12 +47,11 @@ amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 debian10 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 debian11 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
-fedora35 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+debian12 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
-fedora36 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
+fedora37 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+fedora38 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
 opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
-ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
+ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
-ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
-ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 ubuntu22 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
@@ -99,4 +99,4 @@ For the moment, only Cinder v3 is supported by the CSI Driver.
 
 ## More info
 
-For further information about the Cinder CSI Driver, you can refer to this page: [Cloud Provider OpenStack](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/using-cinder-csi-plugin.md).
+For further information about the Cinder CSI Driver, you can refer to this page: [Cloud Provider OpenStack](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md).
@@ -24,15 +24,20 @@ etcd_deployment_type: host
 Example: define registry mirror for docker hub
 
 ```yaml
-containerd_registries:
-  "docker.io":
-    - "https://mirror.gcr.io"
-    - "https://registry-1.docker.io"
+containerd_registries_mirrors:
+  - prefix: docker.io
+    mirrors:
+      - host: https://mirror.gcr.io
+        capabilities: ["pull", "resolve"]
+        skip_verify: false
+      - host: https://registry-1.docker.io
+        capabilities: ["pull", "resolve"]
+        skip_verify: false
 ```
 
-`containerd_registries` is ignored for pulling images when `image_command_tool=nerdctl`
+`containerd_registries_mirrors` is ignored for pulling images when `image_command_tool=nerdctl`
 (the default for `container_manager=containerd`). Use `crictl` instead, it supports
-`containerd_registries` but lacks proper multi-arch support (see
+`containerd_registries_mirrors` but lacks proper multi-arch support (see
 [#8375](https://github.com/kubernetes-sigs/kubespray/issues/8375)):
 
 ```yaml
@@ -103,13 +108,35 @@ containerd_runc_runtime:
 Config insecure-registry access to self hosted registries.
 
 ```yaml
-containerd_insecure_registries:
-  "test.registry.io": "http://test.registry.io"
-  "172.19.16.11:5000": "http://172.19.16.11:5000"
-  "repo:5000": "http://repo:5000"
+containerd_registries_mirrors:
+  - prefix: test.registry.io
+    mirrors:
+      - host: http://test.registry.io
+        capabilities: ["pull", "resolve"]
+        skip_verify: true
+  - prefix: 172.19.16.11:5000
+    mirrors:
+      - host: http://172.19.16.11:5000
+        capabilities: ["pull", "resolve"]
+        skip_verify: true
+  - prefix: repo:5000
+    mirrors:
+      - host: http://repo:5000
+        capabilities: ["pull", "resolve"]
+        skip_verify: true
 ```
 
 [containerd]: https://containerd.io/
 [RuntimeClass]: https://kubernetes.io/docs/concepts/containers/runtime-class/
 [runtime classes in containerd]: https://github.com/containerd/containerd/blob/main/docs/cri/config.md#runtime-classes
 [runtime-spec]: https://github.com/opencontainers/runtime-spec
+
+### Optional : NRI
+
+[Node Resource Interface](https://github.com/containerd/nri) (NRI) is disabled by default for containerd. If you
+are using containerd version v1.7.0 or above, then you can enable it with the
+following configuration:
+
+```yaml
+nri_enabled: true
+```
@@ -62,3 +62,13 @@ The `allowed_annotations` configures `crio.conf` accordingly.
 
 The `crio_remap_enable` configures the `/etc/subuid` and `/etc/subgid` files to add an entry for the **containers** user.
 By default, 16M uids and gids are reserved for user namespaces (256 pods * 65536 uids/gids) at the end of the uid/gid space.
+
+## Optional : NRI
+
+[Node Resource Interface](https://github.com/containerd/nri) (NRI) is disabled by default for CRI-O. If you
+are using CRI-O version v1.26.0 or above, then you can enable it with the
+following configuration:
+
+```yaml
+nri_enabled: true
+```
@@ -1,6 +1,6 @@
 # K8s DNS stack by Kubespray
 
-For K8s cluster nodes, Kubespray configures a [Kubernetes DNS](https://kubernetes.io/docs/admin/dns/)
+For K8s cluster nodes, Kubespray configures a [Kubernetes DNS](https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/)
 [cluster add-on](https://releases.k8s.io/master/cluster/addons/README.md)
 to serve as an authoritative DNS server for a given ``dns_domain`` and its
 ``svc, default.svc`` default subdomains (a total of ``ndots: 5`` max levels).
@@ -143,6 +143,11 @@ coredns_default_zone_cache_block: |
   }
 ```
+
+### systemd_resolved_disable_stub_listener
+
+Whether or not to set `DNSStubListener=no` when using systemd-resolved. Defaults to `true` on Flatcar.
+You might need to set it to `true` if CoreDNS fails to start with `address already in use` errors.
+
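A minimal group_vars override for this flag could look like the sketch below; whether you actually need it depends on whether systemd-resolved's stub listener conflicts with CoreDNS on your hosts:

```yaml
systemd_resolved_disable_stub_listener: true
```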
 ## DNS modes supported by Kubespray
 
 You can modify how Kubespray sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``.
@@ -44,3 +44,9 @@ kubeEtcd:
   service:
     enabled: false
 ```
+
+To fully override metrics exposition urls, define it in the inventory with:
+
+```yaml
+etcd_listen_metrics_urls: "http://0.0.0.0:2381"
+```
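Once the override is applied, the endpoint can be verified from any host that can reach an etcd member; `10.0.0.11` below is only a placeholder for one of your etcd node addresses:

```ShellSession
curl -s http://10.0.0.11:2381/metrics | head
```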
@@ -24,9 +24,10 @@ configured by the variable `loadbalancer_apiserver_localhost` (defaults to
 `True`. Or `False`, if there is an external `loadbalancer_apiserver` defined).
 You may also define the port the local internal loadbalancer uses by changing,
 `loadbalancer_apiserver_port`. This defaults to the value of
 `kube_apiserver_port`. It is also important to note that Kubespray will only
 configure kubelet and kube-proxy on non-master nodes to use the local internal
-loadbalancer.
+loadbalancer. If you wish to control the name of the loadbalancer container,
+you can set the variable `loadbalancer_apiserver_pod_name`.
 
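For example, a group_vars sketch combining these variables might look like the following; the values shown are illustrative assumptions, not defaults taken from the hunk above:

```yaml
loadbalancer_apiserver_localhost: true
loadbalancer_apiserver_port: 6443
loadbalancer_apiserver_pod_name: nginx-proxy
```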
 If you choose to NOT use the local internal loadbalancer, you will need to
 use the [kube-vip](kube-vip.md) ansible role or configure your own loadbalancer to achieve HA. By default, it only configures a non-HA endpoint, which points to the
@@ -118,7 +118,7 @@ Let's take a deep look to the resultant **kubernetes** configuration:
 * The `enable-admission-plugins` has not the `PodSecurityPolicy` admission plugin. This because it is going to be definitely removed from **kubernetes** `v1.25`. For this reason we decided to set the newest `PodSecurity` (for more details, please take a look here: <https://kubernetes.io/docs/concepts/security/pod-security-admission/>). Then, we set the `EventRateLimit` plugin, providing additional configuration files (that are automatically created under the hood and mounted inside the `kube-apiserver` container) to make it work.
 * The `encryption-provider-config` provide encryption at rest. This means that the `kube-apiserver` encrypt data that is going to be stored before they reach `etcd`. So the data is completely unreadable from `etcd` (in case an attacker is able to exploit this).
 * The `rotateCertificates` in `KubeletConfiguration` is set to `true` along with `serverTLSBootstrap`. This could be used in alternative to `tlsCertFile` and `tlsPrivateKeyFile` parameters. Additionally it automatically generates certificates by itself. By default the CSRs are approved automatically via [kubelet-csr-approver](https://github.com/postfinance/kubelet-csr-approver). You can customize approval configuration by modifying Helm values via `kubelet_csr_approver_values`.
-See <https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/> for more information on the subject.
+See <https://kubernetes.io/docs/reference/access-authn-authz/kubelet-tls-bootstrapping/> for more information on the subject.
 * If you are installing **kubernetes** in an AppArmor-based OS (eg. Debian/Ubuntu) you can enable the `AppArmor` feature gate uncommenting the lines with the comment `# AppArmor-based OS` on top.
 * The `kubelet_systemd_hardening`, both with `kubelet_secure_addresses` setup a minimal firewall on the system. To better understand how these variables work, here's an explanatory image:
-    ![kubelet hardening](img/kubelet-hardening.png)
+    ![kubelet hardening](../img/kubelet-hardening.png)
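As an illustration of `kubelet_systemd_hardening` and `kubelet_secure_addresses` from the bullet above, a hardening inventory could carry something like the following; the addresses are placeholders for your control plane and loadbalancer IPs, not values taken from the source:

```yaml
kubelet_systemd_hardening: true
kubelet_secure_addresses: "192.168.10.110 192.168.10.111 192.168.10.112"
```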
@@ -2,7 +2,7 @@
 
 **NOTE:** The current image version is `v1.1.6`. Please file any issues you find and note the version used.
 
-The AWS ALB Ingress Controller satisfies Kubernetes [ingress resources](https://kubernetes.io/docs/user-guide/ingress) by provisioning [Application Load Balancers](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/introduction.html).
+The AWS ALB Ingress Controller satisfies Kubernetes [ingress resources](https://kubernetes.io/docs/concepts/services-networking/ingress/) by provisioning [Application Load Balancers](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/introduction.html).
 
 This project was originated by [Ticketmaster](https://github.com/ticketmaster) and [CoreOS](https://github.com/coreos) as part of Ticketmaster's move to AWS and CoreOS Tectonic. Learn more about Ticketmaster's Kubernetes initiative from Justin Dean's video at [Tectonic Summit](https://www.youtube.com/watch?v=wqXVKneP0Hg).
 
@@ -10,20 +10,20 @@ This project was donated to Kubernetes SIG-AWS to allow AWS, CoreOS, Ticketmaste
 
 ## Documentation
 
-Checkout our [Live Docs](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/)!
+Checkout our [Live Docs](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v1.1/#aws-alb-ingress-controller)!
 
 ## Getting started
 
-To get started with the controller, see our [walkthrough](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/walkthrough/echoserver/).
+To get started with the controller, see our [walkthrough](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v1.1/guide/walkthrough/echoserver/).
 
 ## Setup
 
-- See [controller setup](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/controller/setup/) on how to install ALB ingress controller
-- See [external-dns setup](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/external-dns/setup/) for how to setup the external-dns to manage route 53 records.
+- See [controller setup](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v1.1/guide/controller/setup/) on how to install ALB ingress controller
+- See [external-dns setup](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v1.1/guide/external-dns/setup/) for how to setup the external-dns to manage route 53 records.
 
 ## Building
 
-For details on building this project, see our [building guide](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/BUILDING/).
+For details on building this project, see our [building guide](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v1.1/BUILDING/).
 
 ## Community, discussion, contribution, and support
 
@@ -113,7 +113,7 @@ kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/mast
 
 This example creates an ELB with just two listeners, one in port 80 and another in port 443
 
-![Listeners](../../docs/img/listener.png)
+![Listeners](img/listener.png)
 
 ##### ELB Idle Timeouts
 
@@ -5,7 +5,7 @@ You can use it quickly & easily deploy ceph RBD storage that works almost
 anywhere.
 
 It works just like in-tree dynamic provisioner. For more information on how
-dynamic provisioning works, see [the docs](http://kubernetes.io/docs/user-guide/persistent-volumes/)
+dynamic provisioning works, see [the docs](https://kubernetes.io/docs/concepts/storage/persistent-volumes/)
 or [this blog post](http://blog.kubernetes.io/2016/10/dynamic-provisioning-and-storage-in-kubernetes.html).
 
 ## Development
@@ -2,7 +2,7 @@
 
 Distributed system such as Kubernetes are designed to be resilient to the
 failures. More details about Kubernetes High-Availability (HA) may be found at
-[Building High-Availability Clusters](https://kubernetes.io/docs/admin/high-availability/)
+[Building High-Availability Clusters](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/)
 
 To have a simple view the most of the parts of HA will be skipped to describe
 Kubelet<->Controller Manager communication only.
@@ -90,6 +90,9 @@ In all hosts, restart nginx-proxy pod. This pod is a local proxy for the apiserv
 ```sh
 # run in every host
 docker ps | grep k8s_nginx-proxy_nginx-proxy | awk '{print $1}' | xargs docker restart
+
+# or with containerd
+crictl ps | grep nginx-proxy | awk '{print $1}' | xargs crictl stop
 ```
 
 ### 3) Remove old control plane nodes
@@ -33,6 +33,7 @@ kube_image_repo: "{{ registry_host }}"
gcr_image_repo: "{{ registry_host }}"
docker_image_repo: "{{ registry_host }}"
quay_image_repo: "{{ registry_host }}"
+github_image_repo: "{{ registry_host }}"

kubeadm_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubeadm"
kubectl_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubectl"
@@ -50,8 +51,12 @@ containerd_download_url: "{{ files_repo }}/containerd-{{ containerd_version }}-l
runc_download_url: "{{ files_repo }}/runc.{{ image_arch }}"
nerdctl_download_url: "{{ files_repo }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
# Insecure registries for containerd
-containerd_insecure_registries:
-  "{{ registry_addr }}":"{{ registry_host }}"
+containerd_registries_mirrors:
+  - prefix: "{{ registry_addr }}"
+    mirrors:
+      - host: "{{ registry_host }}"
+        capabilities: ["pull", "resolve"]
+        skip_verify: true

# CentOS/Redhat/AlmaLinux/Rocky Linux
## Docker / Containerd
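For a concrete illustration of the new mirror format, a minimal sketch with made-up values might look like this in your inventory group_vars; the registry host and port (`registry.example.local:5000`) are placeholders, not values used by Kubespray.

```yaml
# Illustrative values only; replace registry.example.local:5000 with your registry.
containerd_registries_mirrors:
  - prefix: "registry.example.local:5000"            # what image references start with
    mirrors:
      - host: "http://registry.example.local:5000"   # endpoint containerd actually pulls from
        capabilities: ["pull", "resolve"]
        skip_verify: true                            # do not verify TLS for this insecure registry
```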
@@ -90,7 +95,7 @@ If you use the settings like the one above, you'll need to define in your invent

* `registry_host`: Container image registry. If you _don't_ use the same repository path for the container images that
the ones defined
-in [Download's role defaults](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/download/defaults/main.yml)
+in [Download's role defaults](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/download/defaults/main/main.yml)
, you need to override the `*_image_repo` for these container images. If you want to make your life easier, use the
same repository path, you won't have to override anything else.
* `registry_addr`: Container image registry, but only have [domain or ip]:[port].
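For orientation, a minimal sketch of these inventory variables using assumed endpoints (every hostname below is a placeholder) could be:

```yaml
# Placeholder endpoints; substitute your own registry and file mirror.
registry_host: "registry.example.local:5000/library"   # repository path consumed by the *_image_repo variables
registry_addr: "registry.example.local:5000"           # [domain or ip]:[port] only
files_repo: "http://files.example.local:8080"          # HTTP server hosting the kubeadm/kubectl/etc. downloads
```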
@@ -7,6 +7,12 @@ If you set http and https proxy, all nodes and loadbalancer will be excluded fro
`http_proxy:"http://example.proxy.tld:port"`
`https_proxy:"http://example.proxy.tld:port"`

+## Set custom CA
+
+CA must be already on each target nodes
+
+`https_proxy_cert_file: /path/to/host/custom/ca.crt`

## Set default no_proxy (this will override default no_proxy generation)

`no_proxy: "node1,node1_ip,node2,node2_ip...additional_host"`
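Putting the variables from this page together, a hedged sketch of a proxy block in `group_vars/all/all.yml` could look like the following; the proxy address, CA path, and no_proxy entries are illustrative assumptions.

```yaml
# Example values only; adjust proxy address, CA path and no_proxy entries to your environment.
http_proxy: "http://proxy.example.local:3128"
https_proxy: "http://proxy.example.local:3128"
https_proxy_cert_file: /etc/ssl/certs/corp-proxy-ca.crt   # the CA file must already be present on every target node
no_proxy: "node1,10.0.0.11,node2,10.0.0.12,internal.example.local"
```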
@@ -403,3 +403,16 @@ Please note that **migrating container engines is not officially supported by Ku
As of Kubespray 2.18.0, containerd is already the default container engine. If you have the chance, it is advisable and safer to reset and redeploy the entire cluster with a new container engine.

* [Migrating from Docker to Containerd](upgrades/migrate_docker2containerd.md)

+## System upgrade
+
+If you want to upgrade the APT or YUM packages while the nodes are cordoned, you can use:
+
+```ShellSession
+ansible-playbook upgrade-cluster.yml -b -i inventory/sample/hosts.ini -e system_upgrade=true
+```
+
+Nodes will be rebooted when there are package upgrades (`system_upgrade_reboot: on-upgrade`).
+This can be changed to `always` or `never`.
+
+Note: Downloads will happen twice unless `system_upgrade_reboot` is `never`.
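If you prefer to keep these switches in the inventory instead of passing `-e` on the command line, a minimal sketch (variable names taken from the text above, values purely illustrative) would be:

```yaml
# group_vars sketch: enable OS package upgrades during upgrade-cluster.yml
# and never reboot automatically, so downloads only happen once.
system_upgrade: true
system_upgrade_reboot: never
```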
@@ -120,7 +120,7 @@ following default cluster parameters:
alpha/experimental Kubeadm features. (defaults is `[]`)

* *authorization_modes* - A list of [authorization mode](
-https://kubernetes.io/docs/admin/authorization/#using-flags-for-your-authorization-module)
+https://kubernetes.io/docs/reference/access-authn-authz/authorization/#using-flags-for-your-authorization-module)
that the cluster should be configured for. Defaults to `['Node', 'RBAC']`
(Node and RBAC authorizers).
Note: `Node` and `RBAC` are enabled by default. Previously deployed clusters can be
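For reference, overriding this list in group_vars is a one-liner; the value shown is simply the documented default spelled out explicitly.

```yaml
# Pin the authorizers the API server is configured with (matches the documented default).
authorization_modes: ['Node', 'RBAC']
```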
@@ -216,6 +216,12 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m

* *kubelet_make_iptables_util_chains* - If `true`, causes the kubelet ensures a set of `iptables` rules are present on host.

+* *kubelet_cpu_manager_policy* - If set to `static`, allows pods with certain resource characteristics to be granted increased CPU affinity and exclusivity on the node. And it should be set with `kube_reserved` or `system-reserved`, enable this with the following guide:[Control CPU Management Policies on the Node](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/)
+
+* *kubelet_topology_manager_policy* - Control the behavior of the allocation of CPU and Memory from different [NUMA](https://en.wikipedia.org/wiki/Non-uniform_memory_access) Nodes. Enable this with the following guide: [Control Topology Management Policies on a node](https://kubernetes.io/docs/tasks/administer-cluster/topology-manager).
+
+* *kubelet_topology_manager_scope* - The Topology Manager can deal with the alignment of resources in a couple of distinct scopes: `container` and `pod`. See [Topology Manager Scopes](https://kubernetes.io/docs/tasks/administer-cluster/topology-manager/#topology-manager-scopes).

* *kubelet_systemd_hardening* - If `true`, provides kubelet systemd service with security features for isolation.

**N.B.** To enable this feature, ensure you are using the **`cgroup v2`** on your system. Check it out with command: `sudo ls -l /sys/fs/cgroup/*.slice`. If directory does not exist, enable this with the following guide: [enable cgroup v2](https://rootlesscontaine.rs/getting-started/common/cgroup2/#enabling-cgroup-v2).
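As a sketch of how these three kubelet knobs fit together in group_vars: the policy and scope values below are one plausible combination, not a recommendation, and the reserved-CPU settings mentioned above are deliberately omitted here.

```yaml
# Pin CPUs for Guaranteed pods and require pod-scoped NUMA alignment.
# Tune (and add kube/system reserved resources) before using on real workloads.
kubelet_cpu_manager_policy: static
kubelet_topology_manager_policy: single-numa-node
kubelet_topology_manager_scope: pod
```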
@@ -1,5 +1,6 @@
---
-- hosts: kube_node:kube_control_plane
+- name: Remove old cloud provider config
+  hosts: kube_node:kube_control_plane
  tasks:
    - name: Remove old cloud provider config
      file:
@@ -7,7 +8,8 @@
        state: absent
      with_items:
        - /etc/kubernetes/cloud_config
-- hosts: kube_control_plane[0]
+- name: Migrate intree Cinder PV
+  hosts: kube_control_plane[0]
  tasks:
    - name: Include kubespray-default variables
      include_vars: ../roles/kubespray-defaults/defaults/main.yaml
@@ -16,13 +18,13 @@
        src: get_cinder_pvs.sh
        dest: /tmp
        mode: u+rwx
-    - name: Get PVs provisioned by in-tree cloud provider # noqa 301
+    - name: Get PVs provisioned by in-tree cloud provider
      command: /tmp/get_cinder_pvs.sh
      register: pvs
    - name: Remove get_cinder_pvs.sh
      file:
        path: /tmp/get_cinder_pvs.sh
        state: absent
-    - name: Rewrite the "pv.kubernetes.io/provisioned-by" annotation # noqa 301
+    - name: Rewrite the "pv.kubernetes.io/provisioned-by" annotation
      command: "{{ bin_dir }}/kubectl annotate --overwrite pv {{ item }} pv.kubernetes.io/provisioned-by=cinder.csi.openstack.org"
      loop: "{{ pvs.stdout_lines | list }}"
@@ -10,13 +10,15 @@
### In most cases, you probably want to use upgrade-cluster.yml playbook and
### not this one.

-- hosts: localhost
+- name: Setup ssh config to use the bastion
+  hosts: localhost
  gather_facts: False
  roles:
    - { role: kubespray-defaults}
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}

-- hosts: k8s_cluster:etcd:calico_rr
+- name: Bootstrap hosts OS for Ansible
+  hosts: k8s_cluster:etcd:calico_rr
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  gather_facts: false
  vars:
@@ -27,7 +29,8 @@
    - { role: kubespray-defaults}
    - { role: bootstrap-os, tags: bootstrap-os}

-- hosts: k8s_cluster:etcd:calico_rr
+- name: Preinstall
+  hosts: k8s_cluster:etcd:calico_rr
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults}