Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2025-12-13 21:34:40 +03:00)

Compare commits: release-2. ... release-2. (165 commits)
| SHA1 |
|---|
| dd7681c272 |
| e5fc8a933f |
| 07e19e546b |
| 3f6567bba0 |
| e09a7c02a6 |
| 48b3d9c56d |
| ca271b8a65 |
| c264ae3016 |
| 1bcd7395fa |
| 3d76c30354 |
| 20a9e20c5a |
| e4be213cf7 |
| 0107dbc29c |
| 72da838519 |
| 10679ebb5d |
| 8775dcf92f |
| bd382a9c39 |
| ffacfe3ede |
| 7dcc22fe8c |
| 47ed2b115d |
| b9fc4ec43e |
| 7bd757da5f |
| 9dc2092042 |
| c7cfd32c40 |
| a4b0656d9b |
| c33e4d7bb7 |
| 24b82917d1 |
| 9696936b59 |
| aeca9304f4 |
| 8fef156e8f |
| 8497528240 |
| ebd71f6ad7 |
| c677438189 |
| d646053c0e |
| c9a7ae1cae |
| e84c1004df |
| b19b727fe7 |
| 0932318b85 |
| e573a2f6d4 |
| 52c1826423 |
| e1881fae02 |
| 5ed85094c2 |
| bf29ea55cf |
| cafe4f1352 |
| a9ee1c4167 |
| a8c1bccdd5 |
| 71cf553aa8 |
| a894a5e29b |
| 9bc7492ff2 |
| 77bda0df1c |
| 4c37399c75 |
| cd69283184 |
| cf3b3ca6fd |
| 1955943d4a |
| 3b68d63643 |
| d21bfb84ad |
| 2a7c9d27b2 |
| 9c610ee11d |
| 7295d13d60 |
| 2fbbb70baa |
| b5ce69cf3c |
| 1c5f657f97 |
| 9613ed8782 |
| b142995808 |
| 36e5d742dc |
| b9e3861385 |
| f2bb3aba1e |
| 4243003c94 |
| 050bd0527f |
| fe32de94b9 |
| d2383d27a9 |
| 788190beca |
| 13aa32278a |
| 38ce02c610 |
| 9312ae7c6e |
| 1d86919883 |
| 78c1775661 |
| 5d00b851ce |
| f8b93fa88a |
| 0405af1107 |
| 872e173887 |
| b42757d330 |
| a4d8d15a0e |
| f8f197e26b |
| 4f85b75087 |
| 8895e38060 |
| 9a896957d9 |
| 37e004164b |
| 77069354cf |
| 2aafab6c19 |
| 35aaf97216 |
| 25cb90bc2d |
| 3311e0a296 |
| eb31653d66 |
| 180df831ba |
| 2fa64f9fd6 |
| a1521dc16e |
| bf31a3a872 |
| 4a8fd94a5f |
| e214bd0e1b |
| 4ad89ef8f1 |
| 7a66be8254 |
| db696785d5 |
| dfec133273 |
| 41605b4135 |
| 475abcc3a8 |
| 3a7d84e014 |
| ad3f84df98 |
| 79e742c03b |
| d79ada931d |
| b2f6abe4ab |
| c5dac1cdf6 |
| 89a0f515c7 |
| d296adcd65 |
| 141064c443 |
| 54859cb814 |
| 0f0991b145 |
| 658d62be16 |
| 0139bfdb71 |
| efeac70e40 |
| b4db077e6a |
| 280e4e3b57 |
| a962fa2357 |
| 775851b00c |
| f8fadf53cd |
| ce13699dfa |
| fc5937e948 |
| 729e2c565b |
| 26ed50f04a |
| 2b80d053f3 |
| f5ee8b71ff |
| 4c76feb574 |
| 18d84db41c |
| 08a571b4a1 |
| 5ebd305d17 |
| edc73bc3c8 |
| b7fa2d7b87 |
| 7771ac6074 |
| f25b6fce1c |
| d7b79395c7 |
| ce18b0f22d |
| 2d8f60000c |
| 0b102287d1 |
| d325fd6af7 |
| e949b8a1e8 |
| ab6e284180 |
| 7421b6e180 |
| a2f03c559a |
| 3ced391fab |
| ea7dcd46d7 |
| 94e33bdbbf |
| 29f833e9a4 |
| 8c32be5feb |
| 0ba2e655f4 |
| 78189186e5 |
| 96e875cd50 |
| 808524bed6 |
| 75e00420ec |
| 8be5604da4 |
| 02624554ae |
| 9d1e9a6a78 |
| 861d5b763d |
| 4013c48acb |
| f264426646 |
| 862fd2c5c4 |
@@ -7,34 +7,32 @@ skip_list:

  # These rules are intentionally skipped:
  #
  # [E204]: "Lines should be no longer than 160 chars"
  # This could be re-enabled with a major rewrite in the future.
  # For now, there's not enough value gain from strictly limiting line length.
  # (Disabled in May 2019)
  - '204'

  # [E701]: "meta/main.yml should contain relevant info"
  # Roles in Kubespray are not intended to be used/imported by Ansible Galaxy.
  # While it can be useful to have these metadata available, they are also available in the existing documentation.
  # (Disabled in May 2019)
  - '701'

  # [role-name] "meta/main.yml" Role name role-name does not match ``^+$`` pattern
  # Meta roles in Kubespray don't need proper names
  # (Disabled in June 2021)
  - 'role-name'

  - 'experimental'
  # [var-naming] "defaults/main.yml" File defines variable 'apiVersion' that violates variable naming standards
  # In Kubespray we use variables that use camelCase to match their k8s counterparts
  # (Disabled in June 2021)
  - 'var-naming'
  - 'var-spacing'

  # [fqcn-builtins]
  # Roles in kubespray don't need fully qualified collection names
  # (Disabled in Feb 2023)
  - 'fqcn-builtins'

  # We use template in names
  - 'name[template]'

  # No changed-when on commands
  # (Disabled in June 2023 after ansible upgrade; FIXME)
  - 'no-changed-when'

  # Disable run-once check with free strategy
  # (Disabled in June 2023 after ansible upgrade; FIXME)
  - 'run-once[task]'
exclude_paths:
  # Generated files
  - tests/files/custom_cni/cilium.yaml
  - venv
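For readers less familiar with ansible-lint, here is a minimal, hypothetical sketch (invented task names and values, not taken from the repository) of the task patterns that the skip_list above tolerates:

```yaml
# Hypothetical tasks for illustration only.
- name: Check status of {{ service_name }} unit      # name[template]: Jinja templating inside a task name
  command: systemctl is-active {{ service_name }}    # no-changed-when: command task without a changed_when
  register: unit_state

- name: Install admin kubeconfig
  copy:                                               # fqcn-builtins: short module name instead of ansible.builtin.copy
    src: admin.conf
    dest: /etc/kubernetes/admin.conf
    mode: 0600
```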
.ansible-lint-ignore (new file, 8 lines)
@@ -0,0 +1,8 @@
+# This file contains ignores rule violations for ansible-lint
+inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml jinja[spacing]
+roles/kubernetes/control-plane/defaults/main/kube-proxy.yml jinja[spacing]
+roles/kubernetes/control-plane/defaults/main/main.yml jinja[spacing]
+roles/kubernetes/kubeadm/defaults/main.yml jinja[spacing]
+roles/kubernetes/node/defaults/main.yml jinja[spacing]
+roles/kubernetes/preinstall/defaults/main.yml jinja[spacing]
+roles/kubespray-defaults/defaults/main.yaml jinja[spacing]
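The jinja[spacing] violations ignored above come from ansible-lint's rule that Jinja expressions should have spaces around filters and operators; a minimal before/after sketch using a hypothetical variable:

```yaml
# Before — flagged by jinja[spacing] (no spaces around the filter pipe); hypothetical variable:
listen_port: "{{ loadbalancer_port|default(6443) }}"
---
# After — the spacing ansible-lint expects:
listen_port: "{{ loadbalancer_port | default(6443) }}"
```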
.gitignore (vendored, 2 changes)
@@ -11,7 +11,7 @@ contrib/offline/offline-files.tar.gz
 .cache
 *.bak
 *.tfstate
-*.tfstate.backup
+*.tfstate*backup
 *.lock.hcl
 .terraform/
 contrib/terraform/aws/credentials.tfvars
@@ -9,7 +9,7 @@ stages:
   - deploy-special

 variables:
-  KUBESPRAY_VERSION: v2.21.0
+  KUBESPRAY_VERSION: v2.22.1
   FAILFASTCI_NAMESPACE: 'kargo-ci'
   GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
   ANSIBLE_FORCE_COLOR: "true"
@@ -33,16 +33,12 @@ variables:
   MITOGEN_ENABLE: "false"
   ANSIBLE_LOG_LEVEL: "-vv"
   RECOVER_CONTROL_PLANE_TEST: "false"
-  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
+  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"
   TERRAFORM_VERSION: 1.3.7
   ANSIBLE_MAJOR_VERSION: "2.11"
   PIPELINE_IMAGE: "$CI_REGISTRY_IMAGE/pipeline:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}"

 before_script:
   - ./tests/scripts/rebase.sh
   - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
   - python -m pip uninstall -y ansible ansible-base ansible-core
   - python -m pip install -r tests/requirements-${ANSIBLE_MAJOR_VERSION}.txt
   - mkdir -p /.ssh

 .job: &job
@@ -57,6 +53,7 @@ before_script:
 .testcases: &testcases
   <<: *job
   retry: 1
   interruptible: true
   before_script:
+    - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
     - ./tests/scripts/rebase.sh
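The RECOVER_CONTROL_PLANE_TEST_GROUPS value switches from a comma- to a colon-separated list, presumably so it can be passed as a single Ansible host pattern (a colon joins patterns into a union). A hypothetical play, not from the repository, showing how that pattern is interpreted:

```yaml
# Hypothetical play; the hosts pattern is the union of etcd[2:]
# (third etcd host onward) and kube_control_plane[1:] (second control-plane host onward).
- name: Show which hosts a recovery run would target
  hosts: "etcd[2:]:kube_control_plane[1:]"
  gather_facts: false
  tasks:
    - name: Print targeted host
      ansible.builtin.debug:
        var: inventory_hostname
```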
@@ -14,7 +14,7 @@ vagrant-validate:
   stage: unit-tests
   tags: [light]
   variables:
-    VAGRANT_VERSION: 2.3.4
+    VAGRANT_VERSION: 2.3.7
   script:
     - ./tests/scripts/vagrant-validate.sh
   except: ['triggers', 'master']
@@ -67,10 +67,6 @@ tox-inventory-builder:
   extends: .job
   before_script:
     - ./tests/scripts/rebase.sh
-    - apt-get update && apt-get install -y python3-pip
-    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
-    - python -m pip uninstall -y ansible ansible-base ansible-core
-    - python -m pip install -r tests/requirements.txt
   script:
     - pip3 install tox
     - cd contrib/inventory_builder && tox
@@ -9,10 +9,6 @@
   stage: deploy-part1
   before_script:
     - tests/scripts/rebase.sh
-    - apt-get update && apt-get install -y python3-pip
-    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
-    - python -m pip uninstall -y ansible ansible-base ansible-core
-    - python -m pip install -r tests/requirements.txt
     - ./tests/scripts/vagrant_clean.sh
   script:
     - ./tests/scripts/molecule_run.sh
@@ -58,6 +54,7 @@ molecule_cri-o:
   stage: deploy-part2
   script:
     - ./tests/scripts/molecule_run.sh -i container-engine/cri-o
   allow_failure: true
   when: on_success

 # Stage 3 container engines don't get as much attention so allow them to fail
@@ -23,50 +23,45 @@
|
||||
allow_failure: true
|
||||
extends: .packet
|
||||
|
||||
# The ubuntu20-calico-aio jobs are meant as early stages to prevent running the full CI if something is horribly broken
|
||||
packet_ubuntu20-calico-aio:
|
||||
packet_cleanup_old:
|
||||
stage: deploy-part1
|
||||
extends: .packet_periodic
|
||||
script:
|
||||
- cd tests
|
||||
- make cleanup-packet
|
||||
after_script: []
|
||||
|
||||
# The ubuntu20-calico-all-in-one jobs are meant as early stages to prevent running the full CI if something is horribly broken
|
||||
packet_ubuntu20-calico-all-in-one:
|
||||
stage: deploy-part1
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
variables:
|
||||
RESET_CHECK: "true"
|
||||
|
||||
packet_ubuntu20-calico-aio-ansible-2_11:
|
||||
stage: deploy-part1
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
variables:
|
||||
ANSIBLE_MAJOR_VERSION: "2.11"
|
||||
RESET_CHECK: "true"
|
||||
|
||||
# ### PR JOBS PART2
|
||||
|
||||
packet_ubuntu18-aio-docker:
|
||||
packet_ubuntu20-all-in-one-docker:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
packet_ubuntu20-aio-docker:
|
||||
packet_ubuntu20-calico-all-in-one-hardening:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
packet_ubuntu20-calico-aio-hardening:
|
||||
packet_ubuntu22-all-in-one-docker:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
packet_ubuntu18-calico-aio:
|
||||
packet_ubuntu22-calico-all-in-one:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
packet_ubuntu22-aio-docker:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
packet_ubuntu22-calico-aio:
|
||||
packet_ubuntu22-calico-etcd-datastore:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
@@ -80,18 +75,19 @@ packet_almalinux8-crio:
|
||||
extends: .packet_pr
|
||||
stage: deploy-part2
|
||||
when: on_success
|
||||
allow_failure: true
|
||||
|
||||
packet_ubuntu18-crio:
|
||||
packet_ubuntu20-crio:
|
||||
extends: .packet_pr
|
||||
stage: deploy-part2
|
||||
when: manual
|
||||
|
||||
packet_fedora35-crio:
|
||||
packet_fedora37-crio:
|
||||
extends: .packet_pr
|
||||
stage: deploy-part2
|
||||
when: manual
|
||||
|
||||
packet_ubuntu16-flannel-ha:
|
||||
packet_ubuntu20-flannel-ha:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
@@ -121,6 +117,21 @@ packet_debian11-docker:
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
packet_debian12-calico:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
packet_debian12-docker:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
packet_debian12-cilium:
|
||||
stage: deploy-part2
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
|
||||
packet_centos7-calico-ha-once-localhost:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
@@ -133,7 +144,7 @@ packet_centos7-calico-ha-once-localhost:
|
||||
|
||||
packet_almalinux8-kube-ovn:
|
||||
stage: deploy-part2
|
||||
extends: .packet_periodic
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
packet_almalinux8-calico:
|
||||
@@ -163,10 +174,11 @@ packet_almalinux8-docker:
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
packet_fedora36-docker-weave:
|
||||
packet_fedora38-docker-weave:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
allow_failure: true
|
||||
|
||||
packet_opensuse-docker-cilium:
|
||||
stage: deploy-part2
|
||||
@@ -175,22 +187,17 @@ packet_opensuse-docker-cilium:
|
||||
|
||||
# ### MANUAL JOBS
|
||||
|
||||
packet_ubuntu16-docker-weave-sep:
|
||||
packet_ubuntu20-docker-weave-sep:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_ubuntu18-cilium-sep:
|
||||
packet_ubuntu20-cilium-sep:
|
||||
stage: deploy-special
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_ubuntu18-flannel-ha:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_ubuntu18-flannel-ha-once:
|
||||
packet_ubuntu20-flannel-ha-once:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
@@ -216,24 +223,24 @@ packet_centos7-multus-calico:
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_fedora36-docker-calico:
|
||||
packet_fedora38-docker-calico:
|
||||
stage: deploy-part2
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
variables:
|
||||
RESET_CHECK: "true"
|
||||
|
||||
packet_fedora35-calico-selinux:
|
||||
packet_fedora37-calico-selinux:
|
||||
stage: deploy-part2
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
|
||||
packet_fedora35-calico-swap-selinux:
|
||||
packet_fedora37-calico-swap-selinux:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_amazon-linux-2-aio:
|
||||
packet_amazon-linux-2-all-in-one:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
@@ -243,7 +250,7 @@ packet_almalinux8-calico-nodelocaldns-secondary:
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_fedora36-kube-ovn:
|
||||
packet_fedora38-kube-ovn:
|
||||
stage: deploy-part2
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
@@ -308,18 +315,18 @@ packet_debian11-calico-upgrade-once:
|
||||
variables:
|
||||
UPGRADE_TEST: graceful
|
||||
|
||||
packet_ubuntu18-calico-ha-recover:
|
||||
packet_ubuntu20-calico-ha-recover:
|
||||
stage: deploy-part3
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
variables:
|
||||
RECOVER_CONTROL_PLANE_TEST: "true"
|
||||
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
|
||||
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"
|
||||
|
||||
packet_ubuntu18-calico-ha-recover-noquorum:
|
||||
packet_ubuntu20-calico-ha-recover-noquorum:
|
||||
stage: deploy-part3
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
variables:
|
||||
RECOVER_CONTROL_PLANE_TEST: "true"
|
||||
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube_control_plane[1:]"
|
||||
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:]:kube_control_plane[1:]"
|
||||
|
||||
@@ -100,21 +100,13 @@ tf-validate-upcloud:
|
||||
PROVIDER: upcloud
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
|
||||
# tf-packet-ubuntu16-default:
|
||||
# extends: .terraform_apply
|
||||
# variables:
|
||||
# TF_VERSION: $TERRAFORM_VERSION
|
||||
# PROVIDER: packet
|
||||
# CLUSTER: $CI_COMMIT_REF_NAME
|
||||
# TF_VAR_number_of_k8s_masters: "1"
|
||||
# TF_VAR_number_of_k8s_nodes: "1"
|
||||
# TF_VAR_plan_k8s_masters: t1.small.x86
|
||||
# TF_VAR_plan_k8s_nodes: t1.small.x86
|
||||
# TF_VAR_metro: ny
|
||||
# TF_VAR_public_key_path: ""
|
||||
# TF_VAR_operating_system: ubuntu_16_04
|
||||
#
|
||||
# tf-packet-ubuntu18-default:
|
||||
tf-validate-nifcloud:
|
||||
extends: .terraform_validate
|
||||
variables:
|
||||
TF_VERSION: $TERRAFORM_VERSION
|
||||
PROVIDER: nifcloud
|
||||
|
||||
# tf-packet-ubuntu20-default:
|
||||
# extends: .terraform_apply
|
||||
# variables:
|
||||
# TF_VERSION: $TERRAFORM_VERSION
|
||||
@@ -126,7 +118,7 @@ tf-validate-upcloud:
|
||||
# TF_VAR_plan_k8s_nodes: t1.small.x86
|
||||
# TF_VAR_metro: am
|
||||
# TF_VAR_public_key_path: ""
|
||||
# TF_VAR_operating_system: ubuntu_18_04
|
||||
# TF_VAR_operating_system: ubuntu_20_04
|
||||
|
||||
.ovh_variables: &ovh_variables
|
||||
OS_AUTH_URL: https://auth.cloud.ovh.net/v3
|
||||
@@ -164,7 +156,7 @@ tf-elastx_cleanup:
|
||||
script:
|
||||
- ./scripts/openstack-cleanup/main.py
|
||||
|
||||
tf-elastx_ubuntu18-calico:
|
||||
tf-elastx_ubuntu20-calico:
|
||||
extends: .terraform_apply
|
||||
stage: deploy-part3
|
||||
when: on_success
|
||||
@@ -194,7 +186,7 @@ tf-elastx_ubuntu18-calico:
|
||||
TF_VAR_az_list_node: '["sto1"]'
|
||||
TF_VAR_flavor_k8s_master: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
|
||||
TF_VAR_flavor_k8s_node: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
|
||||
TF_VAR_image: ubuntu-18.04-server-latest
|
||||
TF_VAR_image: ubuntu-20.04-server-latest
|
||||
TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
|
||||
|
||||
# OVH voucher expired, commenting job until things are sorted out
|
||||
@@ -211,7 +203,7 @@ tf-elastx_ubuntu18-calico:
|
||||
# script:
|
||||
# - ./scripts/openstack-cleanup/main.py
|
||||
|
||||
# tf-ovh_ubuntu18-calico:
|
||||
# tf-ovh_ubuntu20-calico:
|
||||
# extends: .terraform_apply
|
||||
# when: on_success
|
||||
# environment: ovh
|
||||
@@ -237,5 +229,5 @@ tf-elastx_ubuntu18-calico:
|
||||
# TF_VAR_network_name: "Ext-Net"
|
||||
# TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
|
||||
# TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
|
||||
# TF_VAR_image: "Ubuntu 18.04"
|
||||
# TF_VAR_image: "Ubuntu 20.04"
|
||||
# TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
|
||||
|
||||
@@ -13,10 +13,6 @@
|
||||
image: $PIPELINE_IMAGE
|
||||
services: []
|
||||
before_script:
|
||||
- apt-get update && apt-get install -y python3-pip
|
||||
- update-alternatives --install /usr/bin/python python /usr/bin/python3 10
|
||||
- python -m pip uninstall -y ansible ansible-base ansible-core
|
||||
- python -m pip install -r tests/requirements.txt
|
||||
- ./tests/scripts/vagrant_clean.sh
|
||||
script:
|
||||
- ./tests/scripts/testcases_run.sh
|
||||
@@ -24,17 +20,12 @@
|
||||
- chronic ./tests/scripts/testcases_cleanup.sh
|
||||
allow_failure: true
|
||||
|
||||
vagrant_ubuntu18-calico-dual-stack:
|
||||
vagrant_ubuntu20-calico-dual-stack:
|
||||
stage: deploy-part2
|
||||
extends: .vagrant
|
||||
when: on_success
|
||||
|
||||
vagrant_ubuntu18-flannel:
|
||||
stage: deploy-part2
|
||||
extends: .vagrant
|
||||
when: on_success
|
||||
|
||||
vagrant_ubuntu18-weave-medium:
|
||||
vagrant_ubuntu20-weave-medium:
|
||||
stage: deploy-part2
|
||||
extends: .vagrant
|
||||
when: manual
|
||||
@@ -50,18 +41,18 @@ vagrant_ubuntu20-flannel-collection:
|
||||
extends: .vagrant
|
||||
when: on_success
|
||||
|
||||
vagrant_ubuntu16-kube-router-sep:
|
||||
vagrant_ubuntu20-kube-router-sep:
|
||||
stage: deploy-part2
|
||||
extends: .vagrant
|
||||
when: manual
|
||||
|
||||
# Service proxy test fails connectivity testing
|
||||
vagrant_ubuntu16-kube-router-svc-proxy:
|
||||
vagrant_ubuntu20-kube-router-svc-proxy:
|
||||
stage: deploy-part2
|
||||
extends: .vagrant
|
||||
when: manual
|
||||
|
||||
vagrant_fedora35-kube-router:
|
||||
vagrant_fedora37-kube-router:
|
||||
stage: deploy-part2
|
||||
extends: .vagrant
|
||||
when: on_success
|
||||
|
||||
CHANGELOG.md (new file, 1 line)
@@ -0,0 +1 @@
+# See our release notes on [GitHub](https://github.com/kubernetes-sigs/kubespray/releases)

@@ -12,6 +12,7 @@ To install development dependencies you can set up a python virtual env with the
 virtualenv venv
 source venv/bin/activate
 pip install -r tests/requirements.txt
+ansible-galaxy install -r tests/requirements.yml
 ```

 #### Linting
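The added `ansible-galaxy install -r tests/requirements.yml` step pulls in the collections that tasks now reference by fully qualified name (for example ansible.posix and community.general, which appear throughout the task changes below). The real file's contents are not shown in this diff; a galaxy requirements file generally has this shape:

```yaml
# Illustrative shape of an ansible-galaxy requirements file — not the actual
# contents of tests/requirements.yml, which this diff does not show.
collections:
  - name: ansible.posix
  - name: community.general
```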
Dockerfile (18 changes)
@@ -2,18 +2,21 @@
 FROM ubuntu:jammy-20230308
 # Some tools like yamllint need this
 # Pip needs this as well at the moment to install ansible
 # (and potentially other packages)
 # See: https://github.com/pypa/pip/issues/10219
 ENV LANG=C.UTF-8 \
     DEBIAN_FRONTEND=noninteractive \
     PYTHONDONTWRITEBYTECODE=1
 WORKDIR /kubespray
-COPY *yml .
+COPY *.yml ./
+COPY *.cfg ./
 COPY roles ./roles
 COPY contrib ./contrib
 COPY inventory ./inventory
 COPY library ./library
 COPY extra_playbooks ./extra_playbooks
+COPY playbooks ./playbooks
+COPY plugins ./plugins

 RUN apt update -q \
     && apt install -yq --no-install-recommends \
@@ -25,17 +28,18 @@ RUN apt update -q \
     rsync \
     openssh-client \
     && pip install --no-compile --no-cache-dir \
-    ansible==5.7.1 \
-    ansible-core==2.12.5 \
-    cryptography==3.4.8 \
+    ansible==7.6.0 \
+    ansible-core==2.14.6 \
+    cryptography==41.0.1 \
     jinja2==3.1.2 \
     netaddr==0.8.0 \
     jmespath==1.0.1 \
-    MarkupSafe==2.1.2 \
+    MarkupSafe==2.1.3 \
     ruamel.yaml==0.17.21 \
     passlib==1.7.4 \
     && KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
     && curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \
     && echo $(curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \
     && chmod a+x /usr/local/bin/kubectl \
     && rm -rf /var/lib/apt/lists/* /var/log/* \
-    && find / -type d -name '*__pycache__' -prune -exec rm -rf {} \;
+    && find /usr -type d -name '*__pycache__' -prune -exec rm -rf {} \;
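The kubectl download step above greps `kube_version` out of the kubespray-defaults role; as a sketch, it expects a top-level line of this shape (the value here matches the Kubernetes version listed in the README section below):

```yaml
# roles/kubespray-defaults/defaults/main.yaml — shape of the line the Dockerfile's
# `sed -n 's/^kube_version: //p'` extracts; surrounding contents are not shown in this diff.
kube_version: v1.27.10
```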
@@ -23,6 +23,7 @@ aliases:
     - cyclinder
     - mzaian
     - mrfreezeex
+    - erikjiang
   kubespray-emeritus_approvers:
     - riverzhang
     - atoms
README.md (52 changes)
@@ -34,7 +34,7 @@ CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inv
 cat inventory/mycluster/group_vars/all/all.yml
 cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml

-# Clean up old Kubernete cluster with Ansible Playbook - run the playbook as root
+# Clean up old Kubernetes cluster with Ansible Playbook - run the playbook as root
 # The option `--become` is required, as for example cleaning up SSL keys in /etc/,
 # uninstalling old packages and interacting with various systemd daemons.
 # Without --become the playbook will fail to run!
@@ -75,11 +75,11 @@ You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mou
 to access the inventory and SSH key in the container, like this:

 ```ShellSession
-git checkout v2.21.0
-docker pull quay.io/kubespray/kubespray:v2.21.0
+git checkout v2.23.3
+docker pull quay.io/kubespray/kubespray:v2.23.3
 docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
   --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
-  quay.io/kubespray/kubespray:v2.21.0 bash
+  quay.io/kubespray/kubespray:v2.23.3 bash
 # Inside the container you may now run the kubespray playbooks:
 ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
 ```
@@ -142,10 +142,10 @@ vagrant up
 ## Supported Linux Distributions

 - **Flatcar Container Linux by Kinvolk**
-- **Debian** Bullseye, Buster
-- **Ubuntu** 16.04, 18.04, 20.04, 22.04
+- **Debian** Bookworm, Bullseye, Buster
+- **Ubuntu** 20.04, 22.04
 - **CentOS/RHEL** 7, [8, 9](docs/centos.md#centos-8)
-- **Fedora** 35, 36
+- **Fedora** 37, 38
 - **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
 - **openSUSE** Leap 15.x/Tumbleweed
 - **Oracle Linux** 7, [8, 9](docs/centos.md#centos-8)
@@ -161,28 +161,28 @@ Note: Upstart/SysV init based OS types are not supported.
 ## Supported Components

 - Core
-  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.26.5
-  - [etcd](https://github.com/etcd-io/etcd) v3.5.6
+  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.27.10
+  - [etcd](https://github.com/etcd-io/etcd) v3.5.10
   - [docker](https://www.docker.com/) v20.10 (see note)
-  - [containerd](https://containerd.io/) v1.7.1
-  - [cri-o](http://cri-o.io/) v1.24 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
+  - [containerd](https://containerd.io/) v1.7.13
+  - [cri-o](http://cri-o.io/) v1.27 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
 - Network Plugin
   - [cni-plugins](https://github.com/containernetworking/plugins) v1.2.0
-  - [calico](https://github.com/projectcalico/calico) v3.25.1
-  - [cilium](https://github.com/cilium/cilium) v1.13.0
-  - [flannel](https://github.com/flannel-io/flannel) v0.21.4
-  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.10.7
+  - [calico](https://github.com/projectcalico/calico) v3.25.2
+  - [cilium](https://github.com/cilium/cilium) v1.13.4
+  - [flannel](https://github.com/flannel-io/flannel) v0.22.0
+  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.11.5
   - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1
   - [multus](https://github.com/k8snetworkplumbingwg/multus-cni) v3.8
   - [weave](https://github.com/weaveworks/weave) v2.8.1
   - [kube-vip](https://github.com/kube-vip/kube-vip) v0.5.12
 - Application
   - [cert-manager](https://github.com/jetstack/cert-manager) v1.11.1
-  - [coredns](https://github.com/coredns/coredns) v1.9.3
-  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.7.1
-  - [krew](https://github.com/kubernetes-sigs/krew) v0.4.3
-  - [argocd](https://argoproj.github.io/) v2.7.2
-  - [helm](https://helm.sh/) v3.12.0
+  - [coredns](https://github.com/coredns/coredns) v1.10.1
+  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.8.1
+  - [krew](https://github.com/kubernetes-sigs/krew) v0.4.4
+  - [argocd](https://argoproj.github.io/) v2.8.0
+  - [helm](https://helm.sh/) v3.12.3
   - [metallb](https://metallb.universe.tf/) v0.13.9
   - [registry](https://github.com/distribution/distribution) v2.8.1
 - Storage Plugin
@@ -191,19 +191,19 @@ Note: Upstart/SysV init based OS types are not supported.
   - [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) v0.5.0
   - [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0
   - [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.22.0
-  - [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.4.0
-  - [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.23
+  - [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.9.2
+  - [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.24
   - [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0

 ## Container Runtime Notes

-- Supported Docker versions are 18.09, 19.03 and 20.10. The *recommended* Docker version is 20.10. `Kubelet` might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. the YUM ``versionlock`` plugin or ``apt pin``).
+- Supported Docker versions are 18.09, 19.03, 20.10, 23.0 and 24.0. The *recommended* Docker version is 20.10 (except on Debian bookworm which without supporting for 20.10 and below any more). `Kubelet` might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. the YUM ``versionlock`` plugin or ``apt pin``).
 - The cri-o version should be aligned with the respective kubernetes version (i.e. kube_version=1.20.x, crio_version=1.20)

 ## Requirements

-- **Minimum required version of Kubernetes is v1.24**
-- **Ansible v2.11+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
+- **Minimum required version of Kubernetes is v1.25**
+- **Ansible v2.14+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
 - The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
 - The target servers are configured to allow **IPv4 forwarding**.
 - If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**.
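As a standalone sketch of the IPv4-forwarding requirement above, in the same style as the ansible.posix.sysctl tasks used elsewhere in this changeset (illustration only, not a task from the repository):

```yaml
# Minimal sketch of satisfying the IPv4 forwarding requirement with Ansible.
- name: Enable net.ipv4.ip_forward
  ansible.posix.sysctl:
    name: net.ipv4.ip_forward
    value: "1"
    state: present
    reload: yes
```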
@@ -227,7 +227,7 @@ You can choose among ten network plugins. (default: `calico`, except Vagrant use

 - [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.

-- [Calico](https://docs.projectcalico.org/latest/introduction/) is a networking and network policy provider. Calico supports a flexible set of networking options
+- [Calico](https://docs.tigera.io/calico/latest/about/) is a networking and network policy provider. Calico supports a flexible set of networking options
   designed to give you the most efficient networking across a range of situations, including non-overlay
   and overlay networks, with or without BGP. Calico uses the same engine to enforce network policy for hosts,
   pods, and (if using Istio and Envoy) applications at the service mesh layer.
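As a usage sketch, the network plugin is selected with the `kube_network_plugin` variable in the cluster group vars (the path below mirrors the sample inventory referenced earlier on this page; treat it as illustrative):

```yaml
# inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml (illustrative path):
# choose one of the supported network plugins; calico is the default.
kube_network_plugin: calico
```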
28
Vagrantfile
vendored
28
Vagrantfile
vendored
@@ -10,7 +10,6 @@ Vagrant.require_version ">= 2.0.0"
|
||||
CONFIG = File.join(File.dirname(__FILE__), ENV['KUBESPRAY_VAGRANT_CONFIG'] || 'vagrant/config.rb')
|
||||
|
||||
FLATCAR_URL_TEMPLATE = "https://%s.release.flatcar-linux.net/amd64-usr/current/flatcar_production_vagrant.json"
|
||||
FEDORA35_MIRROR = "https://download.fedoraproject.org/pub/fedora/linux/releases/35/Cloud/x86_64/images/Fedora-Cloud-Base-Vagrant-35-1.2.x86_64.vagrant-libvirt.box"
|
||||
|
||||
# Uniq disk UUID for libvirt
|
||||
DISK_UUID = Time.now.utc.to_i
|
||||
@@ -20,9 +19,8 @@ SUPPORTED_OS = {
|
||||
"flatcar-beta" => {box: "flatcar-beta", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["beta"]},
|
||||
"flatcar-alpha" => {box: "flatcar-alpha", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["alpha"]},
|
||||
"flatcar-edge" => {box: "flatcar-edge", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["edge"]},
|
||||
"ubuntu1604" => {box: "generic/ubuntu1604", user: "vagrant"},
|
||||
"ubuntu1804" => {box: "generic/ubuntu1804", user: "vagrant"},
|
||||
"ubuntu2004" => {box: "generic/ubuntu2004", user: "vagrant"},
|
||||
"ubuntu2204" => {box: "generic/ubuntu2204", user: "vagrant"},
|
||||
"centos" => {box: "centos/7", user: "vagrant"},
|
||||
"centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
|
||||
"centos8" => {box: "centos/8", user: "vagrant"},
|
||||
@@ -30,8 +28,8 @@ SUPPORTED_OS = {
|
||||
"almalinux8" => {box: "almalinux/8", user: "vagrant"},
|
||||
"almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
|
||||
"rockylinux8" => {box: "generic/rocky8", user: "vagrant"},
|
||||
"fedora35" => {box: "fedora/35-cloud-base", user: "vagrant", box_url: FEDORA35_MIRROR},
|
||||
"fedora36" => {box: "fedora/36-cloud-base", user: "vagrant"},
|
||||
"fedora37" => {box: "fedora/37-cloud-base", user: "vagrant"},
|
||||
"fedora38" => {box: "fedora/38-cloud-base", user: "vagrant"},
|
||||
"opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"},
|
||||
"opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
|
||||
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
|
||||
@@ -54,7 +52,7 @@ $shared_folders ||= {}
|
||||
$forwarded_ports ||= {}
|
||||
$subnet ||= "172.18.8"
|
||||
$subnet_ipv6 ||= "fd3c:b398:0698:0756"
|
||||
$os ||= "ubuntu1804"
|
||||
$os ||= "ubuntu2004"
|
||||
$network_plugin ||= "flannel"
|
||||
# Setting multi_networking to true will install Multus: https://github.com/k8snetworkplumbingwg/multus-cni
|
||||
$multi_networking ||= "False"
|
||||
@@ -209,7 +207,8 @@ Vagrant.configure("2") do |config|
|
||||
end
|
||||
|
||||
ip = "#{$subnet}.#{i+100}"
|
||||
node.vm.network :private_network, ip: ip,
|
||||
node.vm.network :private_network,
|
||||
:ip => ip,
|
||||
:libvirt__guest_ipv6 => 'yes',
|
||||
:libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
|
||||
:libvirt__ipv6_prefix => "64",
|
||||
@@ -219,14 +218,22 @@ Vagrant.configure("2") do |config|
|
||||
# Disable swap for each vm
|
||||
node.vm.provision "shell", inline: "swapoff -a"
|
||||
|
||||
# ubuntu1804 and ubuntu2004 have IPv6 explicitly disabled. This undoes that.
|
||||
if ["ubuntu1804", "ubuntu2004"].include? $os
|
||||
# ubuntu2004 and ubuntu2204 have IPv6 explicitly disabled. This undoes that.
|
||||
if ["ubuntu2004", "ubuntu2204"].include? $os
|
||||
node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf"
|
||||
node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf"
|
||||
end
|
||||
# Hack for fedora37/38 to get the IP address of the second interface
|
||||
if ["fedora37", "fedora38"].include? $os
|
||||
config.vm.provision "shell", inline: <<-SHELL
|
||||
nmcli conn modify 'Wired connection 2' ipv4.addresses $(cat /etc/sysconfig/network-scripts/ifcfg-eth1 | grep IPADDR | cut -d "=" -f2)
|
||||
nmcli conn modify 'Wired connection 2' ipv4.method manual
|
||||
service NetworkManager restart
|
||||
SHELL
|
||||
end
|
||||
|
||||
# Disable firewalld on oraclelinux/redhat vms
|
||||
if ["oraclelinux","oraclelinux8","rhel7","rhel8"].include? $os
|
||||
if ["oraclelinux","oraclelinux8","rhel7","rhel8","rockylinux8"].include? $os
|
||||
node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld"
|
||||
end
|
||||
|
||||
@@ -256,6 +263,7 @@ Vagrant.configure("2") do |config|
|
||||
if i == $num_instances
|
||||
node.vm.provision "ansible" do |ansible|
|
||||
ansible.playbook = $playbook
|
||||
ansible.compatibility_mode = "2.0"
|
||||
ansible.verbose = $ansible_verbosity
|
||||
$ansible_inventory_path = File.join( $inventory, "hosts.ini")
|
||||
if File.exist?($ansible_inventory_path)
|
||||
|
||||
@@ -39,7 +39,7 @@ class SearchEC2Tags(object):
|
||||
hosts[group] = []
|
||||
tag_key = "kubespray-role"
|
||||
tag_value = ["*"+group+"*"]
|
||||
region = os.environ['REGION']
|
||||
region = os.environ['AWS_REGION']
|
||||
|
||||
ec2 = boto3.resource('ec2', region)
|
||||
filters = [{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}]
|
||||
@@ -67,6 +67,11 @@ class SearchEC2Tags(object):
|
||||
if node_labels_tag:
|
||||
ansible_host['node_labels'] = dict([ label.strip().split('=') for label in node_labels_tag[0]['Value'].split(',') ])
|
||||
|
||||
##Set when instance actually has node_taints
|
||||
node_taints_tag = list(filter(lambda t: t['Key'] == 'kubespray-node-taints', instance.tags))
|
||||
if node_taints_tag:
|
||||
ansible_host['node_taints'] = list([ taint.strip() for taint in node_taints_tag[0]['Value'].split(',') ])
|
||||
|
||||
hosts[group].append(dns_name)
|
||||
hosts['_meta']['hostvars'][dns_name] = ansible_host
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
---
|
||||
- hosts: localhost
|
||||
- name: Generate Azure inventory
|
||||
hosts: localhost
|
||||
gather_facts: False
|
||||
roles:
|
||||
- generate-inventory
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
---
|
||||
- hosts: localhost
|
||||
- name: Generate Azure inventory
|
||||
hosts: localhost
|
||||
gather_facts: False
|
||||
roles:
|
||||
- generate-inventory_2
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
---
|
||||
- hosts: localhost
|
||||
- name: Generate Azure templates
|
||||
hosts: localhost
|
||||
gather_facts: False
|
||||
roles:
|
||||
- generate-templates
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
|
||||
- name: Query Azure VMs # noqa 301
|
||||
- name: Query Azure VMs
|
||||
command: azure vm list-ip-address --json {{ azure_resource_group }}
|
||||
register: vm_list_cmd
|
||||
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
---
|
||||
|
||||
- name: Query Azure VMs IPs # noqa 301
|
||||
- name: Query Azure VMs IPs
|
||||
command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
|
||||
register: vm_ip_list_cmd
|
||||
|
||||
- name: Query Azure VMs Roles # noqa 301
|
||||
- name: Query Azure VMs Roles
|
||||
command: az vm list -o json --resource-group {{ azure_resource_group }}
|
||||
register: vm_list_cmd
|
||||
|
||||
- name: Query Azure Load Balancer Public IP # noqa 301
|
||||
- name: Query Azure Load Balancer Public IP
|
||||
command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
|
||||
register: lb_pubip_cmd
|
||||
|
||||
|
||||
@@ -24,14 +24,14 @@ bastionIPAddressName: bastion-pubip
|
||||
|
||||
disablePasswordAuthentication: true
|
||||
|
||||
sshKeyPath: "/home/{{admin_username}}/.ssh/authorized_keys"
|
||||
sshKeyPath: "/home/{{ admin_username }}/.ssh/authorized_keys"
|
||||
|
||||
imageReference:
|
||||
publisher: "OpenLogic"
|
||||
offer: "CentOS"
|
||||
sku: "7.5"
|
||||
version: "latest"
|
||||
imageReferenceJson: "{{imageReference|to_json}}"
|
||||
imageReferenceJson: "{{ imageReference | to_json }}"
|
||||
|
||||
storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
|
||||
storageAccountName: "sa{{ nameSuffix | replace('-', '') }}"
|
||||
storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}"
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
---
|
||||
- hosts: localhost
|
||||
- name: Create nodes as docker containers
|
||||
hosts: localhost
|
||||
gather_facts: False
|
||||
roles:
|
||||
- { role: dind-host }
|
||||
|
||||
- hosts: containers
|
||||
- name: Customize each node containers
|
||||
hosts: containers
|
||||
roles:
|
||||
- { role: dind-cluster }
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
---
|
||||
- name: set_fact distro_setup
|
||||
- name: Set_fact distro_setup
|
||||
set_fact:
|
||||
distro_setup: "{{ distro_settings[node_distro] }}"
|
||||
|
||||
- name: set_fact other distro settings
|
||||
- name: Set_fact other distro settings
|
||||
set_fact:
|
||||
distro_user: "{{ distro_setup['user'] }}"
|
||||
distro_ssh_service: "{{ distro_setup['ssh_service'] }}"
|
||||
@@ -43,7 +43,7 @@
|
||||
package:
|
||||
name: "{{ item }}"
|
||||
state: present
|
||||
with_items: "{{ distro_extra_packages + [ 'rsyslog', 'openssh-server' ] }}"
|
||||
with_items: "{{ distro_extra_packages + ['rsyslog', 'openssh-server'] }}"
|
||||
|
||||
- name: Start needed services
|
||||
service:
|
||||
@@ -66,8 +66,8 @@
|
||||
dest: "/etc/sudoers.d/{{ distro_user }}"
|
||||
mode: 0640
|
||||
|
||||
- name: Add my pubkey to "{{ distro_user }}" user authorized keys
|
||||
authorized_key:
|
||||
- name: "Add my pubkey to {{ distro_user }} user authorized keys"
|
||||
ansible.posix.authorized_key:
|
||||
user: "{{ distro_user }}"
|
||||
state: present
|
||||
key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}"
|
||||
key: "{{ lookup('file', lookup('env', 'HOME') + '/.ssh/id_rsa.pub') }}"
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
---
|
||||
- name: set_fact distro_setup
|
||||
- name: Set_fact distro_setup
|
||||
set_fact:
|
||||
distro_setup: "{{ distro_settings[node_distro] }}"
|
||||
|
||||
- name: set_fact other distro settings
|
||||
- name: Set_fact other distro settings
|
||||
set_fact:
|
||||
distro_image: "{{ distro_setup['image'] }}"
|
||||
distro_init: "{{ distro_setup['init'] }}"
|
||||
@@ -13,7 +13,7 @@
|
||||
distro_agetty_svc: "{{ distro_setup['agetty_svc'] }}"
|
||||
|
||||
- name: Create dind node containers from "containers" inventory section
|
||||
docker_container:
|
||||
community.docker.docker_container:
|
||||
image: "{{ distro_image }}"
|
||||
name: "{{ item }}"
|
||||
state: started
|
||||
@@ -53,7 +53,7 @@
|
||||
{{ distro_raw_setup_done }} && echo SKIPPED && exit 0
|
||||
until [ "$(readlink /proc/1/exe)" = "{{ distro_pid1_exe }}" ] ; do sleep 1; done
|
||||
{{ distro_raw_setup }}
|
||||
delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
|
||||
delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
|
||||
with_items: "{{ containers.results }}"
|
||||
register: result
|
||||
changed_when: result.stdout.find("SKIPPED") < 0
|
||||
@@ -63,26 +63,25 @@
|
||||
until test -S /var/run/dbus/system_bus_socket; do sleep 1; done
|
||||
systemctl disable {{ distro_agetty_svc }}
|
||||
systemctl stop {{ distro_agetty_svc }}
|
||||
delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
|
||||
delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
|
||||
with_items: "{{ containers.results }}"
|
||||
changed_when: false
|
||||
|
||||
# Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
|
||||
# handle manually
|
||||
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave) # noqa 301
|
||||
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)
|
||||
raw: |
|
||||
echo {{ item | hash('sha1') }} > /etc/machine-id.new
|
||||
mv -b /etc/machine-id.new /etc/machine-id
|
||||
cmp /etc/machine-id /etc/machine-id~ || true
|
||||
systemctl daemon-reload
|
||||
delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
|
||||
delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
|
||||
with_items: "{{ containers.results }}"
|
||||
|
||||
- name: Early hack image install to adapt for DIND
|
||||
# noqa 302 - this task uses the raw module intentionally
|
||||
raw: |
|
||||
rm -fv /usr/bin/udevadm /usr/sbin/udevadm
|
||||
delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
|
||||
delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
|
||||
with_items: "{{ containers.results }}"
|
||||
register: result
|
||||
changed_when: result.stdout.find("removed") >= 0
|
||||
|
||||
@@ -1,21 +1,27 @@
|
||||
[tox]
|
||||
minversion = 1.6
|
||||
skipsdist = True
|
||||
envlist = pep8, py33
|
||||
envlist = pep8
|
||||
|
||||
[testenv]
|
||||
whitelist_externals = py.test
|
||||
allowlist_externals = py.test
|
||||
usedevelop = True
|
||||
deps =
|
||||
-r{toxinidir}/requirements.txt
|
||||
-r{toxinidir}/test-requirements.txt
|
||||
setenv = VIRTUAL_ENV={envdir}
|
||||
passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
|
||||
passenv =
|
||||
http_proxy
|
||||
HTTP_PROXY
|
||||
https_proxy
|
||||
HTTPS_PROXY
|
||||
no_proxy
|
||||
NO_PROXY
|
||||
commands = pytest -vv #{posargs:./tests}
|
||||
|
||||
[testenv:pep8]
|
||||
usedevelop = False
|
||||
whitelist_externals = bash
|
||||
allowlist_externals = bash
|
||||
commands =
|
||||
bash -c "find {toxinidir}/* -type f -name '*.py' -print0 | xargs -0 flake8"
|
||||
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
---
|
||||
- hosts: localhost
|
||||
- name: Prepare Hypervisor to later install kubespray VMs
|
||||
hosts: localhost
|
||||
gather_facts: False
|
||||
become: yes
|
||||
vars:
|
||||
- bootstrap_os: none
|
||||
bootstrap_os: none
|
||||
roles:
|
||||
- kvm-setup
|
||||
- { role: kvm-setup }
|
||||
|
||||
@@ -22,9 +22,9 @@
|
||||
- ntp
|
||||
when: ansible_os_family == "Debian"
|
||||
|
||||
# Create deployment user if required
|
||||
- include: user.yml
|
||||
- name: Create deployment user if required
|
||||
include_tasks: user.yml
|
||||
when: k8s_deployment_user is defined
|
||||
|
||||
# Set proper sysctl values
|
||||
- include: sysctl.yml
|
||||
- name: Set proper sysctl values
|
||||
import_tasks: sysctl.yml
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
- name: Load br_netfilter module
|
||||
modprobe:
|
||||
community.general.modprobe:
|
||||
name: br_netfilter
|
||||
state: present
|
||||
register: br_netfilter
|
||||
@@ -25,7 +25,7 @@
|
||||
|
||||
|
||||
- name: Enable net.ipv4.ip_forward in sysctl
|
||||
sysctl:
|
||||
ansible.posix.sysctl:
|
||||
name: net.ipv4.ip_forward
|
||||
value: 1
|
||||
sysctl_file: "{{ sysctl_file_path }}"
|
||||
@@ -33,7 +33,7 @@
|
||||
reload: yes
|
||||
|
||||
- name: Set bridge-nf-call-{arptables,iptables} to 0
|
||||
sysctl:
|
||||
ansible.posix.sysctl:
|
||||
name: "{{ item }}"
|
||||
state: present
|
||||
value: 0
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
---
|
||||
- name: Check ansible version
|
||||
import_playbook: ansible_version.yml
|
||||
import_playbook: kubernetes_sigs.kubespray.ansible_version
|
||||
|
||||
- hosts: localhost
|
||||
- name: Install mitogen
|
||||
hosts: localhost
|
||||
strategy: linear
|
||||
vars:
|
||||
mitogen_version: 0.3.2
|
||||
@@ -19,24 +20,25 @@
|
||||
- "{{ playbook_dir }}/plugins/mitogen"
|
||||
- "{{ playbook_dir }}/dist"
|
||||
|
||||
- name: download mitogen release
|
||||
- name: Download mitogen release
|
||||
get_url:
|
||||
url: "{{ mitogen_url }}"
|
||||
dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
|
||||
validate_certs: true
|
||||
mode: 0644
|
||||
|
||||
- name: extract archive
|
||||
- name: Extract archive
|
||||
unarchive:
|
||||
src: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
|
||||
dest: "{{ playbook_dir }}/dist/"
|
||||
|
||||
- name: copy plugin
|
||||
synchronize:
|
||||
- name: Copy plugin
|
||||
ansible.posix.synchronize:
|
||||
src: "{{ playbook_dir }}/dist/mitogen-{{ mitogen_version }}/"
|
||||
dest: "{{ playbook_dir }}/plugins/mitogen"
|
||||
|
||||
- name: add strategy to ansible.cfg
|
||||
ini_file:
|
||||
- name: Add strategy to ansible.cfg
|
||||
community.general.ini_file:
|
||||
path: ansible.cfg
|
||||
mode: 0644
|
||||
section: "{{ item.section | d('defaults') }}"
|
||||
|
||||
@@ -1,24 +1,29 @@
|
||||
---
|
||||
- hosts: gfs-cluster
|
||||
- name: Bootstrap hosts
|
||||
hosts: gfs-cluster
|
||||
gather_facts: false
|
||||
vars:
|
||||
ansible_ssh_pipelining: false
|
||||
roles:
|
||||
- { role: bootstrap-os, tags: bootstrap-os}
|
||||
|
||||
- hosts: all
|
||||
- name: Gather facts
|
||||
hosts: all
|
||||
gather_facts: true
|
||||
|
||||
- hosts: gfs-cluster
|
||||
- name: Install glusterfs server
|
||||
hosts: gfs-cluster
|
||||
vars:
|
||||
ansible_ssh_pipelining: true
|
||||
roles:
|
||||
- { role: glusterfs/server }
|
||||
|
||||
- hosts: k8s_cluster
|
||||
- name: Install glusterfs servers
|
||||
hosts: k8s_cluster
|
||||
roles:
|
||||
- { role: glusterfs/client }
|
||||
|
||||
- hosts: kube_control_plane[0]
|
||||
- name: Configure Kubernetes to use glusterfs
|
||||
hosts: kube_control_plane[0]
|
||||
roles:
|
||||
- { role: kubernetes-pv }
|
||||
|
||||
@@ -6,12 +6,12 @@ galaxy_info:
|
||||
description: GlusterFS installation for Linux.
|
||||
company: "Midwestern Mac, LLC"
|
||||
license: "license (BSD, MIT)"
|
||||
min_ansible_version: 2.0
|
||||
min_ansible_version: "2.0"
|
||||
platforms:
|
||||
- name: EL
|
||||
versions:
|
||||
- 6
|
||||
- 7
|
||||
- "6"
|
||||
- "7"
|
||||
- name: Ubuntu
|
||||
versions:
|
||||
- precise
|
||||
|
||||
@@ -3,14 +3,19 @@
|
||||
# hyperkube and needs to be installed as part of the system.
|
||||
|
||||
# Setup/install tasks.
|
||||
- include: setup-RedHat.yml
|
||||
- name: Setup RedHat distros for glusterfs
|
||||
include_tasks: setup-RedHat.yml
|
||||
when: ansible_os_family == 'RedHat' and groups['gfs-cluster'] is defined
|
||||
|
||||
- include: setup-Debian.yml
|
||||
- name: Setup Debian distros for glusterfs
|
||||
include_tasks: setup-Debian.yml
|
||||
when: ansible_os_family == 'Debian' and groups['gfs-cluster'] is defined
|
||||
|
||||
- name: Ensure Gluster mount directories exist.
|
||||
file: "path={{ item }} state=directory mode=0775"
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: directory
|
||||
mode: 0775
|
||||
with_items:
|
||||
- "{{ gluster_mount_dir }}"
|
||||
when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
register: glusterfs_ppa_added
|
||||
when: glusterfs_ppa_use
|
||||
|
||||
- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa 503
|
||||
- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa no-handler
|
||||
apt:
|
||||
name: "{{ item }}"
|
||||
state: absent
|
||||
|
||||
@@ -1,10 +1,14 @@
|
||||
---
|
||||
- name: Install Prerequisites
|
||||
package: name={{ item }} state=present
|
||||
package:
|
||||
name: "{{ item }}"
|
||||
state: present
|
||||
with_items:
|
||||
- "centos-release-gluster{{ glusterfs_default_release }}"
|
||||
|
||||
- name: Install Packages
|
||||
package: name={{ item }} state=present
|
||||
package:
|
||||
name: "{{ item }}"
|
||||
state: present
|
||||
with_items:
|
||||
- glusterfs-client
|
||||
|
||||
@@ -6,12 +6,12 @@ galaxy_info:
|
||||
description: GlusterFS installation for Linux.
|
||||
company: "Midwestern Mac, LLC"
|
||||
license: "license (BSD, MIT)"
|
||||
min_ansible_version: 2.0
|
||||
min_ansible_version: "2.0"
|
||||
platforms:
|
||||
- name: EL
|
||||
versions:
|
||||
- 6
|
||||
- 7
|
||||
- "6"
|
||||
- "7"
|
||||
- name: Ubuntu
|
||||
versions:
|
||||
- precise
|
||||
|
||||
@@ -4,78 +4,97 @@
|
||||
include_vars: "{{ ansible_os_family }}.yml"
|
||||
|
||||
# Install xfs package
|
||||
- name: install xfs Debian
|
||||
apt: name=xfsprogs state=present
|
||||
- name: Install xfs Debian
|
||||
apt:
|
||||
name: xfsprogs
|
||||
state: present
|
||||
when: ansible_os_family == "Debian"
|
||||
|
||||
- name: install xfs RedHat
|
||||
package: name=xfsprogs state=present
|
||||
- name: Install xfs RedHat
|
||||
package:
|
||||
name: xfsprogs
|
||||
state: present
|
||||
when: ansible_os_family == "RedHat"
|
||||
|
||||
# Format external volumes in xfs
|
||||
- name: Format volumes in xfs
|
||||
filesystem: "fstype=xfs dev={{ disk_volume_device_1 }}"
|
||||
community.general.filesystem:
|
||||
fstype: xfs
|
||||
dev: "{{ disk_volume_device_1 }}"
|
||||
|
||||
# Mount external volumes
|
||||
- name: mounting new xfs filesystem
|
||||
mount: "name={{ gluster_volume_node_mount_dir }} src={{ disk_volume_device_1 }} fstype=xfs state=mounted"
|
||||
- name: Mounting new xfs filesystem
|
||||
ansible.posix.mount:
|
||||
name: "{{ gluster_volume_node_mount_dir }}"
|
||||
src: "{{ disk_volume_device_1 }}"
|
||||
fstype: xfs
|
||||
state: mounted
|
||||
|
||||
# Setup/install tasks.
|
||||
- include: setup-RedHat.yml
|
||||
- name: Setup RedHat distros for glusterfs
|
||||
include_tasks: setup-RedHat.yml
|
||||
when: ansible_os_family == 'RedHat'
|
||||
|
||||
- include: setup-Debian.yml
|
||||
- name: Setup Debian distros for glusterfs
|
||||
include_tasks: setup-Debian.yml
|
||||
when: ansible_os_family == 'Debian'
|
||||
|
||||
- name: Ensure GlusterFS is started and enabled at boot.
|
||||
service: "name={{ glusterfs_daemon }} state=started enabled=yes"
|
||||
service:
|
||||
name: "{{ glusterfs_daemon }}"
|
||||
state: started
|
||||
enabled: yes
|
||||
|
||||
- name: Ensure Gluster brick and mount directories exist.
|
||||
file: "path={{ item }} state=directory mode=0775"
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: directory
|
||||
mode: 0775
|
||||
with_items:
|
||||
- "{{ gluster_brick_dir }}"
|
||||
- "{{ gluster_mount_dir }}"
|
||||
|
||||
- name: Configure Gluster volume with replicas
|
||||
gluster_volume:
|
||||
gluster.gluster.gluster_volume:
|
||||
state: present
|
||||
name: "{{ gluster_brick_name }}"
|
||||
brick: "{{ gluster_brick_dir }}"
|
||||
replicas: "{{ groups['gfs-cluster'] | length }}"
|
||||
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
|
||||
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
|
||||
host: "{{ inventory_hostname }}"
|
||||
force: yes
|
||||
run_once: true
|
||||
when: groups['gfs-cluster']|length > 1
|
||||
when: groups['gfs-cluster'] | length > 1
|
||||
|
||||
- name: Configure Gluster volume without replicas
|
||||
gluster_volume:
|
||||
gluster.gluster.gluster_volume:
|
||||
state: present
|
||||
name: "{{ gluster_brick_name }}"
|
||||
brick: "{{ gluster_brick_dir }}"
|
||||
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
|
||||
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
|
||||
host: "{{ inventory_hostname }}"
|
||||
force: yes
|
||||
run_once: true
|
||||
when: groups['gfs-cluster']|length <= 1
|
||||
when: groups['gfs-cluster'] | length <= 1
|
||||
|
||||
- name: Mount glusterfs to retrieve disk size
|
||||
mount:
|
||||
ansible.posix.mount:
|
||||
name: "{{ gluster_mount_dir }}"
|
||||
src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
|
||||
src: "{{ ip | default(ansible_default_ipv4['address']) }}:/gluster"
|
||||
fstype: glusterfs
|
||||
opts: "defaults,_netdev"
|
||||
state: mounted
|
||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||
|
||||
- name: Get Gluster disk size
|
||||
setup: filter=ansible_mounts
|
||||
setup:
|
||||
filter: ansible_mounts
|
||||
register: mounts_data
|
||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||
|
||||
- name: Set Gluster disk size to variable
|
||||
set_fact:
|
||||
gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024*1024*1024)) | int }}"
|
||||
gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024 * 1024 * 1024)) | int }}"
|
||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||
|
||||
- name: Create file on GlusterFS
|
||||
@@ -86,9 +105,9 @@
|
||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||
|
||||
- name: Unmount glusterfs
|
||||
mount:
|
||||
ansible.posix.mount:
|
||||
name: "{{ gluster_mount_dir }}"
|
||||
fstype: glusterfs
|
||||
src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
|
||||
src: "{{ ip | default(ansible_default_ipv4['address']) }}:/gluster"
|
||||
state: unmounted
|
||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
register: glusterfs_ppa_added
|
||||
when: glusterfs_ppa_use
|
||||
|
||||
- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa 503
|
||||
- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa no-handler
|
||||
apt:
|
||||
name: "{{ item }}"
|
||||
state: absent
|
||||
|
||||
@@ -1,11 +1,15 @@
|
||||
---
|
||||
- name: Install Prerequisites
|
||||
package: name={{ item }} state=present
|
||||
package:
|
||||
name: "{{ item }}"
|
||||
state: present
|
||||
with_items:
|
||||
- "centos-release-gluster{{ glusterfs_default_release }}"
|
||||
|
||||
- name: Install Packages
|
||||
package: name={{ item }} state=present
|
||||
package:
|
||||
name: "{{ item }}"
|
||||
state: present
|
||||
with_items:
|
||||
- glusterfs-server
|
||||
- glusterfs-client
|
||||
|
||||
@@ -18,6 +18,6 @@
|
||||
kubectl: "{{ bin_dir }}/kubectl"
|
||||
resource: "{{ item.item.type }}"
|
||||
filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
|
||||
state: "{{ item.changed | ternary('latest','present') }}"
|
||||
state: "{{ item.changed | ternary('latest', 'present') }}"
|
||||
with_items: "{{ gluster_pv.results }}"
|
||||
when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
---
|
||||
- hosts: kube_control_plane[0]
|
||||
- name: Tear down heketi
|
||||
hosts: kube_control_plane[0]
|
||||
roles:
|
||||
- { role: tear-down }
|
||||
|
||||
- hosts: heketi-node
|
||||
- name: Teardown disks in heketi
|
||||
hosts: heketi-node
|
||||
become: yes
|
||||
roles:
|
||||
- { role: tear-down-disks }
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
---
|
||||
- hosts: heketi-node
|
||||
- name: Prepare heketi install
|
||||
hosts: heketi-node
|
||||
roles:
|
||||
- { role: prepare }
|
||||
|
||||
- hosts: kube_control_plane[0]
|
||||
- name: Provision heketi
|
||||
hosts: kube_control_plane[0]
|
||||
tags:
|
||||
- "provision"
|
||||
roles:
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
- "dm_snapshot"
|
||||
- "dm_mirror"
|
||||
- "dm_thin_pool"
|
||||
modprobe:
|
||||
community.general.modprobe:
|
||||
name: "{{ item }}"
|
||||
state: "present"
|
||||
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
---
|
||||
- name: "stop port forwarding"
|
||||
- name: "Stop port forwarding"
|
||||
command: "killall "
|
||||
|
||||
@@ -7,9 +7,9 @@
|
||||
|
||||
- name: "Bootstrap heketi."
|
||||
when:
|
||||
- "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Service']\"))|length == 0"
|
||||
- "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Deployment']\"))|length == 0"
|
||||
- "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod']\"))|length == 0"
|
||||
- "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Service']\")) | length == 0"
|
||||
- "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Deployment']\")) | length == 0"
|
||||
- "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Pod']\")) | length == 0"
|
||||
include_tasks: "bootstrap/deploy.yml"
|
||||
|
||||
# Prepare heketi topology
|
||||
@@ -20,11 +20,11 @@
|
||||
|
||||
- name: "Ensure heketi bootstrap pod is up."
|
||||
assert:
|
||||
that: "(initial_heketi_pod.stdout|from_json|json_query('items[*]'))|length == 1"
|
||||
that: "(initial_heketi_pod.stdout | from_json | json_query('items[*]')) | length == 1"
|
||||
|
||||
- name: Store the initial heketi pod name
|
||||
set_fact:
|
||||
initial_heketi_pod_name: "{{ initial_heketi_pod.stdout|from_json|json_query(\"items[*].metadata.name|[0]\") }}"
|
||||
initial_heketi_pod_name: "{{ initial_heketi_pod.stdout | from_json | json_query(\"items[*].metadata.name | [0]\") }}"
|
||||
|
||||
- name: "Test heketi topology."
|
||||
changed_when: false
|
||||
@@ -32,7 +32,7 @@
|
||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
||||
|
||||
- name: "Load heketi topology."
|
||||
when: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*]\")|flatten|length == 0"
|
||||
when: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*]\") | flatten | length == 0"
|
||||
include_tasks: "bootstrap/topology.yml"
|
||||
|
||||
# Provision heketi database volume
|
||||
@@ -58,7 +58,7 @@
|
||||
service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
|
||||
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
|
||||
when:
|
||||
- "heketi_storage_state.stdout|from_json|json_query(secret_query)|length == 0"
|
||||
- "heketi_storage_state.stdout|from_json|json_query(endpoints_query)|length == 0"
|
||||
- "heketi_storage_state.stdout|from_json|json_query(service_query)|length == 0"
|
||||
- "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 0"
|
||||
- "heketi_storage_state.stdout | from_json | json_query(secret_query) | length == 0"
|
||||
- "heketi_storage_state.stdout | from_json | json_query(endpoints_query) | length == 0"
|
||||
- "heketi_storage_state.stdout | from_json | json_query(service_query) | length == 0"
|
||||
- "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 0"
|
||||
|
||||
@@ -17,11 +17,11 @@
|
||||
register: "initial_heketi_state"
|
||||
vars:
|
||||
initial_heketi_state: { stdout: "{}" }
|
||||
pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]"
|
||||
deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
|
||||
pods_query: "items[?kind=='Pod'].status.conditions | [0][?type=='Ready'].status | [0]"
|
||||
deployments_query: "items[?kind=='Deployment'].status.conditions | [0][?type=='Available'].status | [0]"
|
||||
command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
|
||||
until:
|
||||
- "initial_heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
|
||||
- "initial_heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
|
||||
- "initial_heketi_state.stdout | from_json | json_query(pods_query) == 'True'"
|
||||
- "initial_heketi_state.stdout | from_json | json_query(deployments_query) == 'True'"
|
||||
retries: 60
|
||||
delay: 5
|
||||
|
||||
@@ -15,10 +15,10 @@
|
||||
service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
|
||||
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
|
||||
when:
|
||||
- "heketi_storage_state.stdout|from_json|json_query(secret_query)|length == 0"
|
||||
- "heketi_storage_state.stdout|from_json|json_query(endpoints_query)|length == 0"
|
||||
- "heketi_storage_state.stdout|from_json|json_query(service_query)|length == 0"
|
||||
- "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 0"
|
||||
- "heketi_storage_state.stdout | from_json | json_query(secret_query) | length == 0"
|
||||
- "heketi_storage_state.stdout | from_json | json_query(endpoints_query) | length == 0"
|
||||
- "heketi_storage_state.stdout | from_json | json_query(service_query) | length == 0"
|
||||
- "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 0"
|
||||
register: "heketi_storage_result"
|
||||
- name: "Get state of heketi database copy job."
|
||||
command: "{{ bin_dir }}/kubectl get jobs --output=json"
|
||||
@@ -28,6 +28,6 @@
|
||||
heketi_storage_state: { stdout: "{}" }
|
||||
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job' && status.succeeded==1]"
|
||||
until:
|
||||
- "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 1"
|
||||
- "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 1"
|
||||
retries: 60
|
||||
delay: 5
|
||||
|
||||
@@ -5,10 +5,10 @@
|
||||
changed_when: false
|
||||
- name: "Delete bootstrap Heketi."
|
||||
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
|
||||
when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0"
|
||||
- name: "Ensure there is nothing left over." # noqa 301
|
||||
when: "heketi_resources.stdout | from_json | json_query('items[*]') | length > 0"
|
||||
- name: "Ensure there is nothing left over."
|
||||
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
|
||||
register: "heketi_result"
|
||||
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
|
||||
until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
|
||||
retries: 60
|
||||
delay: 5
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
- name: "Copy topology configuration into container."
|
||||
changed_when: false
|
||||
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
|
||||
- name: "Load heketi topology." # noqa 503
|
||||
- name: "Load heketi topology." # noqa no-handler
|
||||
when: "render.changed"
|
||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
|
||||
register: "load_heketi"
|
||||
@@ -22,6 +22,6 @@
|
||||
changed_when: false
|
||||
register: "heketi_topology"
|
||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
||||
until: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\")|flatten|length == groups['heketi-node']|length"
|
||||
until: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\") | flatten | length == groups['heketi-node'] | length"
|
||||
retries: 60
|
||||
delay: 5
|
||||
|
||||
@@ -6,19 +6,19 @@
|
||||
- name: "Get heketi volumes."
|
||||
changed_when: false
|
||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
|
||||
with_items: "{{ heketi_volumes.stdout|from_json|json_query(\"volumes[*]\") }}"
|
||||
with_items: "{{ heketi_volumes.stdout | from_json | json_query(\"volumes[*]\") }}"
|
||||
loop_control: { loop_var: "volume_id" }
|
||||
register: "volumes_information"
|
||||
- name: "Test heketi database volume."
|
||||
set_fact: { heketi_database_volume_exists: true }
|
||||
with_items: "{{ volumes_information.results }}"
|
||||
loop_control: { loop_var: "volume_information" }
|
||||
vars: { volume: "{{ volume_information.stdout|from_json }}" }
|
||||
vars: { volume: "{{ volume_information.stdout | from_json }}" }
|
||||
when: "volume.name == 'heketidbstorage'"
|
||||
- name: "Provision database volume."
|
||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
|
||||
when: "heketi_database_volume_exists is undefined"
|
||||
- name: "Copy configuration from pod." # noqa 301
|
||||
- name: "Copy configuration from pod."
|
||||
become: true
|
||||
command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"
|
||||
- name: "Get heketi volume ids."
|
||||
@@ -28,14 +28,14 @@
|
||||
- name: "Get heketi volumes."
|
||||
changed_when: false
|
||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
|
||||
with_items: "{{ heketi_volumes.stdout|from_json|json_query(\"volumes[*]\") }}"
|
||||
with_items: "{{ heketi_volumes.stdout | from_json | json_query(\"volumes[*]\") }}"
|
||||
loop_control: { loop_var: "volume_id" }
|
||||
register: "volumes_information"
|
||||
- name: "Test heketi database volume."
|
||||
set_fact: { heketi_database_volume_created: true }
|
||||
with_items: "{{ volumes_information.results }}"
|
||||
loop_control: { loop_var: "volume_information" }
|
||||
vars: { volume: "{{ volume_information.stdout|from_json }}" }
|
||||
vars: { volume: "{{ volume_information.stdout | from_json }}" }
|
||||
when: "volume.name == 'heketidbstorage'"
|
||||
- name: "Ensure heketi database volume exists."
|
||||
assert: { that: "heketi_database_volume_created is defined", msg: "Heketi database volume does not exist." }
|
||||
|
||||
@@ -23,8 +23,8 @@
|
||||
changed_when: false
|
||||
vars:
|
||||
daemonset_state: { stdout: "{}" }
|
||||
ready: "{{ daemonset_state.stdout|from_json|json_query(\"status.numberReady\") }}"
|
||||
desired: "{{ daemonset_state.stdout|from_json|json_query(\"status.desiredNumberScheduled\") }}"
|
||||
ready: "{{ daemonset_state.stdout | from_json | json_query(\"status.numberReady\") }}"
|
||||
desired: "{{ daemonset_state.stdout | from_json | json_query(\"status.desiredNumberScheduled\") }}"
|
||||
until: "ready | int >= 3"
|
||||
retries: 60
|
||||
delay: 5
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
changed_when: false
|
||||
|
||||
- name: "Assign storage label"
|
||||
when: "label_present.stdout_lines|length == 0"
|
||||
when: "label_present.stdout_lines | length == 0"
|
||||
command: "{{ bin_dir }}/kubectl label node {{ node }} storagenode=glusterfs"
|
||||
|
||||
- name: Get storage nodes again
|
||||
@@ -15,5 +15,5 @@
|
||||
|
||||
- name: Ensure the label has been set
|
||||
assert:
|
||||
that: "label_present|length > 0"
|
||||
that: "label_present | length > 0"
|
||||
msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs."
|
||||
|
||||
@@ -24,11 +24,11 @@
|
||||
deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
|
||||
command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json"
|
||||
until:
|
||||
- "heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
|
||||
- "heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
|
||||
- "heketi_state.stdout | from_json | json_query(pods_query) == 'True'"
|
||||
- "heketi_state.stdout | from_json | json_query(deployments_query) == 'True'"
|
||||
retries: 60
|
||||
delay: 5
|
||||
|
||||
- name: Set the Heketi pod name
|
||||
set_fact:
|
||||
heketi_pod_name: "{{ heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"
|
||||
heketi_pod_name: "{{ heketi_state.stdout | from_json | json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
- name: "Render storage class configuration."
|
||||
become: true
|
||||
vars:
|
||||
endpoint_address: "{{ (heketi_service.stdout|from_json).spec.clusterIP }}"
|
||||
endpoint_address: "{{ (heketi_service.stdout | from_json).spec.clusterIP }}"
|
||||
template:
|
||||
src: "storageclass.yml.j2"
|
||||
dest: "{{ kube_config_dir }}/storageclass.yml"
|
||||
|
||||
@@ -11,16 +11,16 @@
|
||||
src: "topology.json.j2"
|
||||
dest: "{{ kube_config_dir }}/topology.json"
|
||||
mode: 0644
|
||||
- name: "Copy topology configuration into container." # noqa 503
|
||||
- name: "Copy topology configuration into container." # noqa no-handler
|
||||
when: "rendering.changed"
|
||||
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
|
||||
- name: "Load heketi topology." # noqa 503
|
||||
- name: "Load heketi topology." # noqa no-handler
|
||||
when: "rendering.changed"
|
||||
command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
|
||||
- name: "Get heketi topology."
|
||||
register: "heketi_topology"
|
||||
changed_when: false
|
||||
command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
||||
until: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\")|flatten|length == groups['heketi-node']|length"
|
||||
until: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\") | flatten | length == groups['heketi-node'] | length"
|
||||
retries: 60
|
||||
delay: 5
|
||||
|
||||
@@ -22,7 +22,7 @@
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
changed_when: false
|
||||
|
||||
- name: "Remove volume groups." # noqa 301
|
||||
- name: "Remove volume groups."
|
||||
environment:
|
||||
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
|
||||
become: true
|
||||
@@ -30,7 +30,7 @@
|
||||
with_items: "{{ volume_groups.stdout_lines }}"
|
||||
loop_control: { loop_var: "volume_group" }
|
||||
|
||||
- name: "Remove physical volume from cluster disks." # noqa 301
|
||||
- name: "Remove physical volume from cluster disks."
|
||||
environment:
|
||||
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
|
||||
become: true
|
||||
|
||||
@@ -1,43 +1,43 @@
|
||||
---
|
||||
- name: Remove storage class. # noqa 301
|
||||
- name: Remove storage class.
|
||||
command: "{{ bin_dir }}/kubectl delete storageclass gluster"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Tear down heketi. # noqa 301
|
||||
- name: Tear down heketi.
|
||||
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Tear down heketi. # noqa 301
|
||||
- name: Tear down heketi.
|
||||
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Tear down bootstrap.
|
||||
include_tasks: "../../provision/tasks/bootstrap/tear-down.yml"
|
||||
- name: Ensure there is nothing left over. # noqa 301
|
||||
- name: Ensure there is nothing left over.
|
||||
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
|
||||
register: "heketi_result"
|
||||
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
|
||||
until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
|
||||
retries: 60
|
||||
delay: 5
|
||||
- name: Ensure there is nothing left over. # noqa 301
|
||||
- name: Ensure there is nothing left over.
|
||||
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
|
||||
register: "heketi_result"
|
||||
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
|
||||
until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
|
||||
retries: 60
|
||||
delay: 5
|
||||
- name: Tear down glusterfs. # noqa 301
|
||||
- name: Tear down glusterfs.
|
||||
command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Remove heketi storage service. # noqa 301
|
||||
- name: Remove heketi storage service.
|
||||
command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Remove heketi gluster role binding # noqa 301
|
||||
- name: Remove heketi gluster role binding
|
||||
command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Remove heketi config secret # noqa 301
|
||||
- name: Remove heketi config secret
|
||||
command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Remove heketi db backup # noqa 301
|
||||
- name: Remove heketi db backup
|
||||
command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Remove heketi service account # noqa 301
|
||||
- name: Remove heketi service account
|
||||
command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Get secrets
|
||||
@@ -46,6 +46,6 @@
|
||||
changed_when: false
|
||||
- name: Remove heketi storage secret
|
||||
vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" }
|
||||
command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout|from_json|json_query(storage_query) }}"
|
||||
command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout | from_json | json_query(storage_query) }}"
|
||||
when: "storage_query is defined"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
|
||||
@@ -27,7 +27,7 @@ manage-offline-container-images.sh register

## generate_list.sh

This script generates the list of downloaded files and the list of container images from the `roles/download/defaults/main.yml` file.
This script generates the list of downloaded files and the list of container images from the `roles/download/defaults/main/main.yml` file.

Running this script will execute the `generate_list.yml` playbook in the kubespray root directory and generate four files:
all downloaded file URLs in files.list, all container images in images.list, and Jinja2 templates in *.template.

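For illustration, a typical run might look like the sketch below; the script path and output locations are assumptions inferred from the `CURRENT_DIR`/`TEMP_DIR` variables in the hunk that follows, not guaranteed defaults.

```bash
# Sketch: generate the lists and peek at the results.
# Paths are assumptions (contrib/offline/generate_list.sh writing into contrib/offline/temp/).
cd kubespray
bash contrib/offline/generate_list.sh
head contrib/offline/temp/files.list contrib/offline/temp/images.list
```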
@@ -5,7 +5,7 @@ CURRENT_DIR=$(cd $(dirname $0); pwd)
TEMP_DIR="${CURRENT_DIR}/temp"
REPO_ROOT_DIR="${CURRENT_DIR%/contrib/offline}"

: ${DOWNLOAD_YML:="roles/download/defaults/main.yml"}
: ${DOWNLOAD_YML:="roles/download/defaults/main/main.yml"}

mkdir -p ${TEMP_DIR}

@@ -19,7 +19,7 @@ sed -n '/^downloads:/,/download_defaults:/p' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
  | sed 'N;s#\n# #g' | tr ' ' ':' | sed 's/\"//g' > ${TEMP_DIR}/images.list.template

# add kube-* images to images list template
# Those container images are downloaded by kubeadm, then roles/download/defaults/main.yml
# Those container images are downloaded by kubeadm, then roles/download/defaults/main/main.yml
# doesn't contain those images. That is the reason why those images need to be put into the
# list separately.
KUBE_IMAGES="kube-apiserver kube-controller-manager kube-scheduler kube-proxy"

@@ -1,5 +1,6 @@
|
||||
---
|
||||
- hosts: localhost
|
||||
- name: Collect container images for offline deployment
|
||||
hosts: localhost
|
||||
become: no
|
||||
|
||||
roles:
|
||||
@@ -11,9 +12,11 @@
|
||||
|
||||
tasks:
|
||||
# Generate files.list and images.list files from templates.
|
||||
- template:
|
||||
- name: Collect container images for offline deployment
|
||||
template:
|
||||
src: ./contrib/offline/temp/{{ item }}.list.template
|
||||
dest: ./contrib/offline/temp/{{ item }}.list
|
||||
mode: 0644
|
||||
with_items:
|
||||
- files
|
||||
- images
|
||||
|
||||
@@ -39,6 +39,6 @@ if [ $? -ne 0 ]; then
|
||||
sudo "${runtime}" run \
|
||||
--restart=always -d -p ${NGINX_PORT}:80 \
|
||||
--volume "${OFFLINE_FILES_DIR}:/usr/share/nginx/html/download" \
|
||||
--volume "$(pwd)"/nginx.conf:/etc/nginx/nginx.conf \
|
||||
--volume "${CURRENT_DIR}"/nginx.conf:/etc/nginx/nginx.conf \
|
||||
--name nginx nginx:alpine
|
||||
fi
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
---
|
||||
- hosts: all
|
||||
- name: Disable firewalld/ufw
|
||||
hosts: all
|
||||
roles:
|
||||
- { role: prepare }
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
---
|
||||
- block:
|
||||
- name: Disable firewalld and ufw
|
||||
when:
|
||||
- disable_service_firewall is defined and disable_service_firewall
|
||||
block:
|
||||
- name: List services
|
||||
service_facts:
|
||||
|
||||
@@ -9,7 +12,7 @@
|
||||
state: stopped
|
||||
enabled: no
|
||||
when:
|
||||
"'firewalld.service' in services"
|
||||
"'firewalld.service' in services and services['firewalld.service'].status != 'not-found'"
|
||||
|
||||
- name: Disable service ufw
|
||||
systemd:
|
||||
@@ -17,7 +20,4 @@
|
||||
state: stopped
|
||||
enabled: no
|
||||
when:
|
||||
"'ufw.service' in services"
|
||||
|
||||
when:
|
||||
- disable_service_firewall is defined and disable_service_firewall
|
||||
"'ufw.service' in services and services['ufw.service'].status != 'not-found'"
|
||||
|
||||
@@ -12,7 +12,7 @@ ssh_public_keys = [
|
||||
machines = {
|
||||
"master-0" : {
|
||||
"node_type" : "master",
|
||||
"size" : "Medium",
|
||||
"size" : "standard.medium",
|
||||
"boot_disk" : {
|
||||
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
|
||||
"root_partition_size" : 50,
|
||||
@@ -22,7 +22,7 @@ machines = {
|
||||
},
|
||||
"worker-0" : {
|
||||
"node_type" : "worker",
|
||||
"size" : "Large",
|
||||
"size" : "standard.large",
|
||||
"boot_disk" : {
|
||||
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
|
||||
"root_partition_size" : 50,
|
||||
@@ -32,7 +32,7 @@ machines = {
|
||||
},
|
||||
"worker-1" : {
|
||||
"node_type" : "worker",
|
||||
"size" : "Large",
|
||||
"size" : "standard.large",
|
||||
"boot_disk" : {
|
||||
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
|
||||
"root_partition_size" : 50,
|
||||
@@ -42,7 +42,7 @@ machines = {
|
||||
},
|
||||
"worker-2" : {
|
||||
"node_type" : "worker",
|
||||
"size" : "Large",
|
||||
"size" : "standard.large",
|
||||
"boot_disk" : {
|
||||
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
|
||||
"root_partition_size" : 50,
|
||||
|
||||
@@ -1,29 +1,25 @@
|
||||
data "exoscale_compute_template" "os_image" {
|
||||
data "exoscale_template" "os_image" {
|
||||
for_each = var.machines
|
||||
|
||||
zone = var.zone
|
||||
name = each.value.boot_disk.image_name
|
||||
}
|
||||
|
||||
data "exoscale_compute" "master_nodes" {
|
||||
for_each = exoscale_compute.master
|
||||
data "exoscale_compute_instance" "master_nodes" {
|
||||
for_each = exoscale_compute_instance.master
|
||||
|
||||
id = each.value.id
|
||||
|
||||
# Since private IP address is not assigned until the nics are created we need this
|
||||
depends_on = [exoscale_nic.master_private_network_nic]
|
||||
id = each.value.id
|
||||
zone = var.zone
|
||||
}
|
||||
|
||||
data "exoscale_compute" "worker_nodes" {
|
||||
for_each = exoscale_compute.worker
|
||||
data "exoscale_compute_instance" "worker_nodes" {
|
||||
for_each = exoscale_compute_instance.worker
|
||||
|
||||
id = each.value.id
|
||||
|
||||
# Since private IP address is not assigned until the nics are created we need this
|
||||
depends_on = [exoscale_nic.worker_private_network_nic]
|
||||
id = each.value.id
|
||||
zone = var.zone
|
||||
}
|
||||
|
||||
resource "exoscale_network" "private_network" {
|
||||
resource "exoscale_private_network" "private_network" {
|
||||
zone = var.zone
|
||||
name = "${var.prefix}-network"
|
||||
|
||||
@@ -34,25 +30,29 @@ resource "exoscale_network" "private_network" {
|
||||
netmask = cidrnetmask(var.private_network_cidr)
|
||||
}
|
||||
|
||||
resource "exoscale_compute" "master" {
|
||||
resource "exoscale_compute_instance" "master" {
|
||||
for_each = {
|
||||
for name, machine in var.machines :
|
||||
name => machine
|
||||
if machine.node_type == "master"
|
||||
}
|
||||
|
||||
display_name = "${var.prefix}-${each.key}"
|
||||
template_id = data.exoscale_compute_template.os_image[each.key].id
|
||||
size = each.value.size
|
||||
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
|
||||
state = "Running"
|
||||
zone = var.zone
|
||||
security_groups = [exoscale_security_group.master_sg.name]
|
||||
name = "${var.prefix}-${each.key}"
|
||||
template_id = data.exoscale_template.os_image[each.key].id
|
||||
type = each.value.size
|
||||
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
|
||||
state = "Running"
|
||||
zone = var.zone
|
||||
security_group_ids = [exoscale_security_group.master_sg.id]
|
||||
network_interface {
|
||||
network_id = exoscale_private_network.private_network.id
|
||||
}
|
||||
elastic_ip_ids = [exoscale_elastic_ip.control_plane_lb.id]
|
||||
|
||||
user_data = templatefile(
|
||||
"${path.module}/templates/cloud-init.tmpl",
|
||||
{
|
||||
eip_ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
|
||||
eip_ip_address = exoscale_elastic_ip.ingress_controller_lb.ip_address
|
||||
node_local_partition_size = each.value.boot_disk.node_local_partition_size
|
||||
ceph_partition_size = each.value.boot_disk.ceph_partition_size
|
||||
root_partition_size = each.value.boot_disk.root_partition_size
|
||||
@@ -62,25 +62,29 @@ resource "exoscale_compute" "master" {
|
||||
)
|
||||
}
|
||||
|
||||
resource "exoscale_compute" "worker" {
|
||||
resource "exoscale_compute_instance" "worker" {
|
||||
for_each = {
|
||||
for name, machine in var.machines :
|
||||
name => machine
|
||||
if machine.node_type == "worker"
|
||||
}
|
||||
|
||||
display_name = "${var.prefix}-${each.key}"
|
||||
template_id = data.exoscale_compute_template.os_image[each.key].id
|
||||
size = each.value.size
|
||||
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
|
||||
state = "Running"
|
||||
zone = var.zone
|
||||
security_groups = [exoscale_security_group.worker_sg.name]
|
||||
name = "${var.prefix}-${each.key}"
|
||||
template_id = data.exoscale_template.os_image[each.key].id
|
||||
type = each.value.size
|
||||
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
|
||||
state = "Running"
|
||||
zone = var.zone
|
||||
security_group_ids = [exoscale_security_group.worker_sg.id]
|
||||
network_interface {
|
||||
network_id = exoscale_private_network.private_network.id
|
||||
}
|
||||
elastic_ip_ids = [exoscale_elastic_ip.ingress_controller_lb.id]
|
||||
|
||||
user_data = templatefile(
|
||||
"${path.module}/templates/cloud-init.tmpl",
|
||||
{
|
||||
eip_ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
|
||||
eip_ip_address = exoscale_elastic_ip.ingress_controller_lb.ip_address
|
||||
node_local_partition_size = each.value.boot_disk.node_local_partition_size
|
||||
ceph_partition_size = each.value.boot_disk.ceph_partition_size
|
||||
root_partition_size = each.value.boot_disk.root_partition_size
|
||||
@@ -90,41 +94,33 @@ resource "exoscale_compute" "worker" {
|
||||
)
|
||||
}
|
||||
|
||||
resource "exoscale_nic" "master_private_network_nic" {
|
||||
for_each = exoscale_compute.master
|
||||
|
||||
compute_id = each.value.id
|
||||
network_id = exoscale_network.private_network.id
|
||||
}
|
||||
|
||||
resource "exoscale_nic" "worker_private_network_nic" {
|
||||
for_each = exoscale_compute.worker
|
||||
|
||||
compute_id = each.value.id
|
||||
network_id = exoscale_network.private_network.id
|
||||
}
|
||||
|
||||
resource "exoscale_security_group" "master_sg" {
|
||||
name = "${var.prefix}-master-sg"
|
||||
description = "Security group for Kubernetes masters"
|
||||
}
|
||||
|
||||
resource "exoscale_security_group_rules" "master_sg_rules" {
|
||||
resource "exoscale_security_group_rule" "master_sg_rule_ssh" {
|
||||
security_group_id = exoscale_security_group.master_sg.id
|
||||
|
||||
for_each = toset(var.ssh_whitelist)
|
||||
# SSH
|
||||
ingress {
|
||||
protocol = "TCP"
|
||||
cidr_list = var.ssh_whitelist
|
||||
ports = ["22"]
|
||||
}
|
||||
type = "INGRESS"
|
||||
start_port = 22
|
||||
end_port = 22
|
||||
protocol = "TCP"
|
||||
cidr = each.value
|
||||
}
|
||||
|
||||
resource "exoscale_security_group_rule" "master_sg_rule_k8s_api" {
|
||||
security_group_id = exoscale_security_group.master_sg.id
|
||||
|
||||
for_each = toset(var.api_server_whitelist)
|
||||
# Kubernetes API
|
||||
ingress {
|
||||
protocol = "TCP"
|
||||
cidr_list = var.api_server_whitelist
|
||||
ports = ["6443"]
|
||||
}
|
||||
type = "INGRESS"
|
||||
start_port = 6443
|
||||
end_port = 6443
|
||||
protocol = "TCP"
|
||||
cidr = each.value
|
||||
}
|
||||
|
||||
resource "exoscale_security_group" "worker_sg" {
|
||||
@@ -132,62 +128,64 @@ resource "exoscale_security_group" "worker_sg" {
|
||||
description = "security group for kubernetes worker nodes"
|
||||
}
|
||||
|
||||
resource "exoscale_security_group_rules" "worker_sg_rules" {
|
||||
resource "exoscale_security_group_rule" "worker_sg_rule_ssh" {
|
||||
security_group_id = exoscale_security_group.worker_sg.id
|
||||
|
||||
# SSH
|
||||
ingress {
|
||||
protocol = "TCP"
|
||||
cidr_list = var.ssh_whitelist
|
||||
ports = ["22"]
|
||||
}
|
||||
for_each = toset(var.ssh_whitelist)
|
||||
type = "INGRESS"
|
||||
start_port = 22
|
||||
end_port = 22
|
||||
protocol = "TCP"
|
||||
cidr = each.value
|
||||
}
|
||||
|
||||
resource "exoscale_security_group_rule" "worker_sg_rule_http" {
|
||||
security_group_id = exoscale_security_group.worker_sg.id
|
||||
|
||||
# HTTP(S)
|
||||
ingress {
|
||||
protocol = "TCP"
|
||||
cidr_list = ["0.0.0.0/0"]
|
||||
ports = ["80", "443"]
|
||||
}
|
||||
for_each = toset(["80", "443"])
|
||||
type = "INGRESS"
|
||||
start_port = each.value
|
||||
end_port = each.value
|
||||
protocol = "TCP"
|
||||
cidr = "0.0.0.0/0"
|
||||
}
|
||||
|
||||
# Kubernetes Nodeport
|
||||
ingress {
|
||||
protocol = "TCP"
|
||||
cidr_list = var.nodeport_whitelist
|
||||
ports = ["30000-32767"]
|
||||
|
||||
resource "exoscale_security_group_rule" "worker_sg_rule_nodeport" {
|
||||
security_group_id = exoscale_security_group.worker_sg.id
|
||||
|
||||
# Kubernetes Nodeport
|
||||
for_each = toset(var.nodeport_whitelist)
|
||||
type = "INGRESS"
|
||||
start_port = 30000
|
||||
end_port = 32767
|
||||
protocol = "TCP"
|
||||
cidr = each.value
|
||||
}
|
||||
|
||||
resource "exoscale_elastic_ip" "ingress_controller_lb" {
|
||||
zone = var.zone
|
||||
healthcheck {
|
||||
mode = "http"
|
||||
port = 80
|
||||
uri = "/healthz"
|
||||
interval = 10
|
||||
timeout = 2
|
||||
strikes_ok = 2
|
||||
strikes_fail = 3
|
||||
}
|
||||
}
|
||||
|
||||
resource "exoscale_ipaddress" "ingress_controller_lb" {
|
||||
zone = var.zone
|
||||
healthcheck_mode = "http"
|
||||
healthcheck_port = 80
|
||||
healthcheck_path = "/healthz"
|
||||
healthcheck_interval = 10
|
||||
healthcheck_timeout = 2
|
||||
healthcheck_strikes_ok = 2
|
||||
healthcheck_strikes_fail = 3
|
||||
}
|
||||
|
||||
resource "exoscale_secondary_ipaddress" "ingress_controller_lb" {
|
||||
for_each = exoscale_compute.worker
|
||||
|
||||
compute_id = each.value.id
|
||||
ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
|
||||
}
|
||||
|
||||
resource "exoscale_ipaddress" "control_plane_lb" {
|
||||
zone = var.zone
|
||||
healthcheck_mode = "tcp"
|
||||
healthcheck_port = 6443
|
||||
healthcheck_interval = 10
|
||||
healthcheck_timeout = 2
|
||||
healthcheck_strikes_ok = 2
|
||||
healthcheck_strikes_fail = 3
|
||||
}
|
||||
|
||||
resource "exoscale_secondary_ipaddress" "control_plane_lb" {
|
||||
for_each = exoscale_compute.master
|
||||
|
||||
compute_id = each.value.id
|
||||
ip_address = exoscale_ipaddress.control_plane_lb.ip_address
|
||||
resource "exoscale_elastic_ip" "control_plane_lb" {
|
||||
zone = var.zone
|
||||
healthcheck {
|
||||
mode = "tcp"
|
||||
port = 6443
|
||||
interval = 10
|
||||
timeout = 2
|
||||
strikes_ok = 2
|
||||
strikes_fail = 3
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,19 +1,19 @@
|
||||
output "master_ip_addresses" {
|
||||
value = {
|
||||
for key, instance in exoscale_compute.master :
|
||||
for key, instance in exoscale_compute_instance.master :
|
||||
instance.name => {
|
||||
"private_ip" = contains(keys(data.exoscale_compute.master_nodes), key) ? data.exoscale_compute.master_nodes[key].private_network_ip_addresses[0] : ""
|
||||
"public_ip" = exoscale_compute.master[key].ip_address
|
||||
"private_ip" = contains(keys(data.exoscale_compute_instance.master_nodes), key) ? data.exoscale_compute_instance.master_nodes[key].private_network_ip_addresses[0] : ""
|
||||
"public_ip" = exoscale_compute_instance.master[key].ip_address
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
output "worker_ip_addresses" {
|
||||
value = {
|
||||
for key, instance in exoscale_compute.worker :
|
||||
for key, instance in exoscale_compute_instance.worker :
|
||||
instance.name => {
|
||||
"private_ip" = contains(keys(data.exoscale_compute.worker_nodes), key) ? data.exoscale_compute.worker_nodes[key].private_network_ip_addresses[0] : ""
|
||||
"public_ip" = exoscale_compute.worker[key].ip_address
|
||||
"private_ip" = contains(keys(data.exoscale_compute_instance.worker_nodes), key) ? data.exoscale_compute_instance.worker_nodes[key].private_network_ip_addresses[0] : ""
|
||||
"public_ip" = exoscale_compute_instance.worker[key].ip_address
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -23,9 +23,9 @@ output "cluster_private_network_cidr" {
|
||||
}
|
||||
|
||||
output "ingress_controller_lb_ip_address" {
|
||||
value = exoscale_ipaddress.ingress_controller_lb.ip_address
|
||||
value = exoscale_elastic_ip.ingress_controller_lb.ip_address
|
||||
}
|
||||
|
||||
output "control_plane_lb_ip_address" {
|
||||
value = exoscale_ipaddress.control_plane_lb.ip_address
|
||||
value = exoscale_elastic_ip.control_plane_lb.ip_address
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
terraform {
|
||||
required_providers {
|
||||
exoscale = {
|
||||
source = "exoscale/exoscale"
|
||||
source = "exoscale/exoscale"
|
||||
version = ">= 0.21"
|
||||
}
|
||||
}
|
||||
|
||||
contrib/terraform/nifcloud/.gitignore (new vendored file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
*.tfstate*
|
||||
.terraform.lock.hcl
|
||||
.terraform
|
||||
|
||||
sample-inventory/inventory.ini
|
||||
contrib/terraform/nifcloud/README.md (new file, 137 lines)
@@ -0,0 +1,137 @@
# Kubernetes on NIFCLOUD with Terraform

Provision a Kubernetes cluster on [NIFCLOUD](https://pfs.nifcloud.com/) using Terraform and Kubespray

## Overview

The setup looks like the following:

```text
   Kubernetes cluster
                         +----------------------------+
 +---------------+       |   +--------------------+   |
 |               |       |   | +--------------------+ |
 | API server LB +---------> | |                    | |
 |               |       |   | | Control Plane/etcd | |
 +---------------+       |   | | node(s)            | |
                         |   +-+                    | |
                         |     +--------------------+ |
                         |              ^             |
                         |              |             |
                         |              v             |
                         |   +--------------------+   |
                         |   | +--------------------+ |
                         |   | |                    | |
                         |   | | Worker             | |
                         |   | | node(s)            | |
                         |   +-+                    | |
                         |     +--------------------+ |
                         +----------------------------+
```

## Requirements

* Terraform 1.3.7

## Quickstart

### Export Variables

* Your NIFCLOUD credentials:

```bash
export NIFCLOUD_ACCESS_KEY_ID=<YOUR ACCESS KEY>
export NIFCLOUD_SECRET_ACCESS_KEY=<YOUR SECRET ACCESS KEY>
```

* The SSH key used to connect to the instance:
  * FYI: [Cloud Help(SSH Key)](https://pfs.nifcloud.com/help/ssh.htm)

```bash
export TF_VAR_SSHKEY_NAME=<YOUR SSHKEY NAME>
```

* The IP address to connect to the bastion server:

```bash
export TF_VAR_working_instance_ip=$(curl ifconfig.me)
```

### Create The Infrastructure

* Run terraform:

```bash
terraform init
terraform apply -var-file ./sample-inventory/cluster.tfvars
```

### Setup The Kubernetes

* Generate the cluster configuration file:

```bash
./generate-inventory.sh > sample-inventory/inventory.ini
```

* Export Variables:

```bash
BASTION_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.bastion_info | to_entries[].value.public_ip')
API_LB_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.control_plane_lb')
CP01_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.control_plane_info | to_entries[0].value.private_ip')
export ANSIBLE_SSH_ARGS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ProxyCommand=\"ssh root@${BASTION_IP} -W %h:%p\""
```

* Set up ssh-agent:

```bash
eval `ssh-agent`
ssh-add <THE PATH TO YOUR SSH KEY>
```

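Before moving on, it can be worth a quick check that SSH through the bastion actually works; the snippet below is only a sketch reusing the ProxyCommand pattern from this README with the `${BASTION_IP}` and `${CP01_IP}` values exported above.

```bash
# Optional sanity check (sketch): reach the first control plane node via the bastion.
ssh -o StrictHostKeyChecking=no \
    -o ProxyCommand="ssh root@${BASTION_IP} -W %h:%p" \
    root@${CP01_IP} hostname
```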
* Run the cluster.yml playbook:

```bash
cd ./../../../
ansible-playbook -i contrib/terraform/nifcloud/inventory/inventory.ini cluster.yml
```

### Connecting to Kubernetes

* [Install kubectl](https://kubernetes.io/docs/tasks/tools/) on the localhost
* Fetch the kubeconfig file:

```bash
mkdir -p ~/.kube
scp -o ProxyCommand="ssh root@${BASTION_IP} -W %h:%p" root@${CP01_IP}:/etc/kubernetes/admin.conf ~/.kube/config
```

* Rewrite /etc/hosts:

```bash
echo "${API_LB_IP} lb-apiserver.kubernetes.local" | sudo tee -a /etc/hosts
```

* Run kubectl:

```bash
kubectl get node
```

## Variables

* `region`: Region where to run the cluster
* `az`: Availability zone where to run the cluster
* `private_ip_bn`: Private IP address of the bastion server
* `private_network_cidr`: Subnet of the private network
* `instances_cp`: Machines to provision as control plane nodes. The key of each entry is used as part of the machine's name
  * `private_ip`: private IP address of the machine
* `instances_wk`: Machines to provision as worker nodes. The key of each entry is used as part of the machine's name
  * `private_ip`: private IP address of the machine
* `instance_key_name`: The key name of the Key Pair to use for the instance
* `instance_type_bn`: The instance type of the bastion server
* `instance_type_wk`: The instance type of worker nodes
* `instance_type_cp`: The instance type of control plane nodes
* `image_name`: OS image used for the instance
* `working_instance_ip`: The IP address allowed to connect to the bastion server
* `accounting_type`: Accounting type (1: monthly, 2: pay per use)

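For orientation only, a minimal `sample-inventory/cluster.tfvars` could be sketched as below. The variable names come from the list above and `variables.tf`; every value is a placeholder invented for illustration, so replace them with settings valid for your NIFCLOUD account.

```bash
# Sketch only: all values below are placeholders, not shipped defaults.
cat > sample-inventory/cluster.tfvars <<'EOF'
region               = "jp-west-1"
az                   = "west-11"
private_network_cidr = "192.168.30.0/24"
private_ip_bn        = "192.168.30.10/24"
instance_key_name    = "deployerkey"
image_name           = "Ubuntu Server 22.04 LTS"
instance_type_bn     = "e-small"
instance_type_cp     = "e-medium"
instance_type_wk     = "e-medium"
instances_cp         = { "cp01" = { private_ip = "192.168.30.11/24" } }
instances_wk         = { "wk01" = { private_ip = "192.168.30.21/24" } }
EOF
```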
contrib/terraform/nifcloud/generate-inventory.sh (new executable file, 64 lines)
@@ -0,0 +1,64 @@
|
||||
#!/bin/bash
|
||||
|
||||
#
|
||||
# Generates an inventory file based on the terraform output.
|
||||
# After provisioning a cluster, simply run this command and supply the terraform state file
|
||||
# Default state file is terraform.tfstate
|
||||
#
|
||||
|
||||
set -e
|
||||
|
||||
TF_OUT=$(terraform output -json)
|
||||
|
||||
CONTROL_PLANES=$(jq -r '.kubernetes_cluster.value.control_plane_info | to_entries[]' <(echo "${TF_OUT}"))
|
||||
WORKERS=$(jq -r '.kubernetes_cluster.value.worker_info | to_entries[]' <(echo "${TF_OUT}"))
|
||||
mapfile -t CONTROL_PLANE_NAMES < <(jq -r '.key' <(echo "${CONTROL_PLANES}"))
|
||||
mapfile -t WORKER_NAMES < <(jq -r '.key' <(echo "${WORKERS}"))
|
||||
|
||||
API_LB=$(jq -r '.kubernetes_cluster.value.control_plane_lb' <(echo "${TF_OUT}"))
|
||||
|
||||
echo "[all]"
|
||||
# Generate control plane hosts
|
||||
i=1
|
||||
for name in "${CONTROL_PLANE_NAMES[@]}"; do
|
||||
private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${CONTROL_PLANES}"))
|
||||
echo "${name} ansible_user=root ansible_host=${private_ip} access_ip=${private_ip} ip=${private_ip} etcd_member_name=etcd${i}"
|
||||
i=$(( i + 1 ))
|
||||
done
|
||||
|
||||
# Generate worker hosts
|
||||
for name in "${WORKER_NAMES[@]}"; do
|
||||
private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${WORKERS}"))
|
||||
echo "${name} ansible_user=root ansible_host=${private_ip} access_ip=${private_ip} ip=${private_ip}"
|
||||
done
|
||||
|
||||
API_LB=$(jq -r '.kubernetes_cluster.value.control_plane_lb' <(echo "${TF_OUT}"))
|
||||
|
||||
echo ""
|
||||
echo "[all:vars]"
|
||||
echo "upstream_dns_servers=['8.8.8.8','8.8.4.4']"
|
||||
echo "loadbalancer_apiserver={'address':'${API_LB}','port':'6443'}"
|
||||
|
||||
|
||||
echo ""
|
||||
echo "[kube_control_plane]"
|
||||
for name in "${CONTROL_PLANE_NAMES[@]}"; do
|
||||
echo "${name}"
|
||||
done
|
||||
|
||||
echo ""
|
||||
echo "[etcd]"
|
||||
for name in "${CONTROL_PLANE_NAMES[@]}"; do
|
||||
echo "${name}"
|
||||
done
|
||||
|
||||
echo ""
|
||||
echo "[kube_node]"
|
||||
for name in "${WORKER_NAMES[@]}"; do
|
||||
echo "${name}"
|
||||
done
|
||||
|
||||
echo ""
|
||||
echo "[k8s_cluster:children]"
|
||||
echo "kube_control_plane"
|
||||
echo "kube_node"
|
||||
contrib/terraform/nifcloud/main.tf (new file, 36 lines)
@@ -0,0 +1,36 @@
|
||||
provider "nifcloud" {
|
||||
region = var.region
|
||||
}
|
||||
|
||||
module "kubernetes_cluster" {
|
||||
source = "./modules/kubernetes-cluster"
|
||||
|
||||
availability_zone = var.az
|
||||
prefix = "dev"
|
||||
|
||||
private_network_cidr = var.private_network_cidr
|
||||
|
||||
instance_key_name = var.instance_key_name
|
||||
instances_cp = var.instances_cp
|
||||
instances_wk = var.instances_wk
|
||||
image_name = var.image_name
|
||||
|
||||
instance_type_bn = var.instance_type_bn
|
||||
instance_type_cp = var.instance_type_cp
|
||||
instance_type_wk = var.instance_type_wk
|
||||
|
||||
private_ip_bn = var.private_ip_bn
|
||||
|
||||
additional_lb_filter = [var.working_instance_ip]
|
||||
}
|
||||
|
||||
resource "nifcloud_security_group_rule" "ssh_from_bastion" {
|
||||
security_group_names = [
|
||||
module.kubernetes_cluster.security_group_name.bastion
|
||||
]
|
||||
type = "IN"
|
||||
from_port = 22
|
||||
to_port = 22
|
||||
protocol = "TCP"
|
||||
cidr_ip = var.working_instance_ip
|
||||
}
|
||||
contrib/terraform/nifcloud/modules/kubernetes-cluster/main.tf (new file, 301 lines)
@@ -0,0 +1,301 @@
|
||||
#################################################
|
||||
##
|
||||
## Local variables
|
||||
##
|
||||
locals {
|
||||
# e.g. east-11 is 11
|
||||
az_num = reverse(split("-", var.availability_zone))[0]
|
||||
# e.g. east-11 is e11
|
||||
az_short_name = "${substr(reverse(split("-", var.availability_zone))[1], 0, 1)}${local.az_num}"
|
||||
|
||||
# Port used by the protocol
|
||||
port_ssh = 22
|
||||
port_kubectl = 6443
|
||||
port_kubelet = 10250
|
||||
|
||||
# calico: https://docs.tigera.io/calico/latest/getting-started/kubernetes/requirements#network-requirements
|
||||
port_bgp = 179
|
||||
port_vxlan = 4789
|
||||
port_etcd = 2379
|
||||
}
|
||||
|
||||
#################################################
|
||||
##
|
||||
## General
|
||||
##
|
||||
|
||||
# data
|
||||
data "nifcloud_image" "this" {
|
||||
image_name = var.image_name
|
||||
}
|
||||
|
||||
# private lan
|
||||
resource "nifcloud_private_lan" "this" {
|
||||
private_lan_name = "${var.prefix}lan"
|
||||
availability_zone = var.availability_zone
|
||||
cidr_block = var.private_network_cidr
|
||||
accounting_type = var.accounting_type
|
||||
}
|
||||
|
||||
#################################################
|
||||
##
|
||||
## Bastion
|
||||
##
|
||||
resource "nifcloud_security_group" "bn" {
|
||||
group_name = "${var.prefix}bn"
|
||||
description = "${var.prefix} bastion"
|
||||
availability_zone = var.availability_zone
|
||||
}
|
||||
|
||||
resource "nifcloud_instance" "bn" {
|
||||
|
||||
instance_id = "${local.az_short_name}${var.prefix}bn01"
|
||||
security_group = nifcloud_security_group.bn.group_name
|
||||
instance_type = var.instance_type_bn
|
||||
|
||||
user_data = templatefile("${path.module}/templates/userdata.tftpl", {
|
||||
private_ip_address = var.private_ip_bn
|
||||
ssh_port = local.port_ssh
|
||||
hostname = "${local.az_short_name}${var.prefix}bn01"
|
||||
})
|
||||
|
||||
availability_zone = var.availability_zone
|
||||
accounting_type = var.accounting_type
|
||||
image_id = data.nifcloud_image.this.image_id
|
||||
key_name = var.instance_key_name
|
||||
|
||||
network_interface {
|
||||
network_id = "net-COMMON_GLOBAL"
|
||||
}
|
||||
network_interface {
|
||||
network_id = nifcloud_private_lan.this.network_id
|
||||
ip_address = "static"
|
||||
}
|
||||
|
||||
# The image_id changes when the OS image type is demoted from standard to public.
|
||||
lifecycle {
|
||||
ignore_changes = [
|
||||
image_id,
|
||||
user_data,
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
#################################################
|
||||
##
|
||||
## Control Plane
|
||||
##
|
||||
resource "nifcloud_security_group" "cp" {
|
||||
group_name = "${var.prefix}cp"
|
||||
description = "${var.prefix} control plane"
|
||||
availability_zone = var.availability_zone
|
||||
}
|
||||
|
||||
resource "nifcloud_instance" "cp" {
|
||||
for_each = var.instances_cp
|
||||
|
||||
instance_id = "${local.az_short_name}${var.prefix}${each.key}"
|
||||
security_group = nifcloud_security_group.cp.group_name
|
||||
instance_type = var.instance_type_cp
|
||||
user_data = templatefile("${path.module}/templates/userdata.tftpl", {
|
||||
private_ip_address = each.value.private_ip
|
||||
ssh_port = local.port_ssh
|
||||
hostname = "${local.az_short_name}${var.prefix}${each.key}"
|
||||
})
|
||||
|
||||
availability_zone = var.availability_zone
|
||||
accounting_type = var.accounting_type
|
||||
image_id = data.nifcloud_image.this.image_id
|
||||
key_name = var.instance_key_name
|
||||
|
||||
network_interface {
|
||||
network_id = "net-COMMON_GLOBAL"
|
||||
}
|
||||
network_interface {
|
||||
network_id = nifcloud_private_lan.this.network_id
|
||||
ip_address = "static"
|
||||
}
|
||||
|
||||
# The image_id changes when the OS image type is demoted from standard to public.
|
||||
lifecycle {
|
||||
ignore_changes = [
|
||||
image_id,
|
||||
user_data,
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
resource "nifcloud_load_balancer" "this" {
|
||||
load_balancer_name = "${local.az_short_name}${var.prefix}cp"
|
||||
accounting_type = var.accounting_type
|
||||
balancing_type = 1 // Round-Robin
|
||||
load_balancer_port = local.port_kubectl
|
||||
instance_port = local.port_kubectl
|
||||
instances = [for v in nifcloud_instance.cp : v.instance_id]
|
||||
filter = concat(
|
||||
[for k, v in nifcloud_instance.cp : v.public_ip],
|
||||
[for k, v in nifcloud_instance.wk : v.public_ip],
|
||||
var.additional_lb_filter,
|
||||
)
|
||||
filter_type = 1 // Allow
|
||||
}
|
||||
|
||||
#################################################
|
||||
##
|
||||
## Worker
|
||||
##
|
||||
resource "nifcloud_security_group" "wk" {
|
||||
group_name = "${var.prefix}wk"
|
||||
description = "${var.prefix} worker"
|
||||
availability_zone = var.availability_zone
|
||||
}
|
||||
|
||||
resource "nifcloud_instance" "wk" {
|
||||
for_each = var.instances_wk
|
||||
|
||||
instance_id = "${local.az_short_name}${var.prefix}${each.key}"
|
||||
security_group = nifcloud_security_group.wk.group_name
|
||||
instance_type = var.instance_type_wk
|
||||
user_data = templatefile("${path.module}/templates/userdata.tftpl", {
|
||||
private_ip_address = each.value.private_ip
|
||||
ssh_port = local.port_ssh
|
||||
hostname = "${local.az_short_name}${var.prefix}${each.key}"
|
||||
})
|
||||
|
||||
availability_zone = var.availability_zone
|
||||
accounting_type = var.accounting_type
|
||||
image_id = data.nifcloud_image.this.image_id
|
||||
key_name = var.instance_key_name
|
||||
|
||||
network_interface {
|
||||
network_id = "net-COMMON_GLOBAL"
|
||||
}
|
||||
network_interface {
|
||||
network_id = nifcloud_private_lan.this.network_id
|
||||
ip_address = "static"
|
||||
}
|
||||
|
||||
# The image_id changes when the OS image type is demoted from standard to public.
|
||||
lifecycle {
|
||||
ignore_changes = [
|
||||
image_id,
|
||||
user_data,
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
#################################################
|
||||
##
|
||||
## Security Group Rule: Kubernetes
|
||||
##
|
||||
|
||||
# ssh
|
||||
resource "nifcloud_security_group_rule" "ssh_from_bastion" {
|
||||
security_group_names = [
|
||||
nifcloud_security_group.wk.group_name,
|
||||
nifcloud_security_group.cp.group_name,
|
||||
]
|
||||
type = "IN"
|
||||
from_port = local.port_ssh
|
||||
to_port = local.port_ssh
|
||||
protocol = "TCP"
|
||||
source_security_group_name = nifcloud_security_group.bn.group_name
|
||||
}
|
||||
|
||||
# kubectl
|
||||
resource "nifcloud_security_group_rule" "kubectl_from_worker" {
|
||||
security_group_names = [
|
||||
nifcloud_security_group.cp.group_name,
|
||||
]
|
||||
type = "IN"
|
||||
from_port = local.port_kubectl
|
||||
to_port = local.port_kubectl
|
||||
protocol = "TCP"
|
||||
source_security_group_name = nifcloud_security_group.wk.group_name
|
||||
}
|
||||
|
||||
# kubelet
|
||||
resource "nifcloud_security_group_rule" "kubelet_from_worker" {
|
||||
security_group_names = [
|
||||
nifcloud_security_group.cp.group_name,
|
||||
]
|
||||
type = "IN"
|
||||
from_port = local.port_kubelet
|
||||
to_port = local.port_kubelet
|
||||
protocol = "TCP"
|
||||
source_security_group_name = nifcloud_security_group.wk.group_name
|
||||
}
|
||||
|
||||
resource "nifcloud_security_group_rule" "kubelet_from_control_plane" {
|
||||
security_group_names = [
|
||||
nifcloud_security_group.wk.group_name,
|
||||
]
|
||||
type = "IN"
|
||||
from_port = local.port_kubelet
|
||||
to_port = local.port_kubelet
|
||||
protocol = "TCP"
|
||||
source_security_group_name = nifcloud_security_group.cp.group_name
|
||||
}
|
||||
|
||||
#################################################
|
||||
##
|
||||
## Security Group Rule: calico
|
||||
##
|
||||
|
||||
# vxlan
|
||||
resource "nifcloud_security_group_rule" "vxlan_from_control_plane" {
|
||||
security_group_names = [
|
||||
nifcloud_security_group.wk.group_name,
|
||||
]
|
||||
type = "IN"
|
||||
from_port = local.port_vxlan
|
||||
to_port = local.port_vxlan
|
||||
protocol = "UDP"
|
||||
source_security_group_name = nifcloud_security_group.cp.group_name
|
||||
}
|
||||
|
||||
resource "nifcloud_security_group_rule" "vxlan_from_worker" {
|
||||
security_group_names = [
|
||||
nifcloud_security_group.cp.group_name,
|
||||
]
|
||||
type = "IN"
|
||||
from_port = local.port_vxlan
|
||||
to_port = local.port_vxlan
|
||||
protocol = "UDP"
|
||||
source_security_group_name = nifcloud_security_group.wk.group_name
|
||||
}
|
||||
|
||||
# bgp
|
||||
resource "nifcloud_security_group_rule" "bgp_from_control_plane" {
|
||||
security_group_names = [
|
||||
nifcloud_security_group.wk.group_name,
|
||||
]
|
||||
type = "IN"
|
||||
from_port = local.port_bgp
|
||||
to_port = local.port_bgp
|
||||
protocol = "TCP"
|
||||
source_security_group_name = nifcloud_security_group.cp.group_name
|
||||
}
|
||||
|
||||
resource "nifcloud_security_group_rule" "bgp_from_worker" {
|
||||
security_group_names = [
|
||||
nifcloud_security_group.cp.group_name,
|
||||
]
|
||||
type = "IN"
|
||||
from_port = local.port_bgp
|
||||
to_port = local.port_bgp
|
||||
protocol = "TCP"
|
||||
source_security_group_name = nifcloud_security_group.wk.group_name
|
||||
}
|
||||
|
||||
# etcd
|
||||
resource "nifcloud_security_group_rule" "etcd_from_worker" {
|
||||
security_group_names = [
|
||||
nifcloud_security_group.cp.group_name,
|
||||
]
|
||||
type = "IN"
|
||||
from_port = local.port_etcd
|
||||
to_port = local.port_etcd
|
||||
protocol = "TCP"
|
||||
source_security_group_name = nifcloud_security_group.wk.group_name
|
||||
}
|
||||
@@ -0,0 +1,48 @@
|
||||
output "control_plane_lb" {
|
||||
description = "The DNS name of LB for control plane"
|
||||
value = nifcloud_load_balancer.this.dns_name
|
||||
}
|
||||
|
||||
output "security_group_name" {
|
||||
description = "The security group used in the cluster"
|
||||
value = {
|
||||
bastion = nifcloud_security_group.bn.group_name,
|
||||
control_plane = nifcloud_security_group.cp.group_name,
|
||||
worker = nifcloud_security_group.wk.group_name,
|
||||
}
|
||||
}
|
||||
|
||||
output "private_network_id" {
|
||||
description = "The private network used in the cluster"
|
||||
value = nifcloud_private_lan.this.id
|
||||
}
|
||||
|
||||
output "bastion_info" {
|
||||
description = "The bastion information in cluster"
|
||||
value = { (nifcloud_instance.bn.instance_id) : {
|
||||
instance_id = nifcloud_instance.bn.instance_id,
|
||||
unique_id = nifcloud_instance.bn.unique_id,
|
||||
private_ip = nifcloud_instance.bn.private_ip,
|
||||
public_ip = nifcloud_instance.bn.public_ip,
|
||||
} }
|
||||
}
|
||||
|
||||
output "worker_info" {
|
||||
description = "The worker information in cluster"
|
||||
value = { for v in nifcloud_instance.wk : v.instance_id => {
|
||||
instance_id = v.instance_id,
|
||||
unique_id = v.unique_id,
|
||||
private_ip = v.private_ip,
|
||||
public_ip = v.public_ip,
|
||||
} }
|
||||
}
|
||||
|
||||
output "control_plane_info" {
|
||||
description = "The control plane information in cluster"
|
||||
value = { for v in nifcloud_instance.cp : v.instance_id => {
|
||||
instance_id = v.instance_id,
|
||||
unique_id = v.unique_id,
|
||||
private_ip = v.private_ip,
|
||||
public_ip = v.public_ip,
|
||||
} }
|
||||
}
|
||||
@@ -0,0 +1,45 @@
#!/bin/bash

#################################################
##
## IP Address
##
configure_private_ip_address () {
  cat << EOS > /etc/netplan/01-netcfg.yaml
network:
  version: 2
  renderer: networkd
  ethernets:
    ens192:
      dhcp4: yes
      dhcp6: yes
      dhcp-identifier: mac
    ens224:
      dhcp4: no
      dhcp6: no
      addresses: [${private_ip_address}]
EOS
  netplan apply
}
configure_private_ip_address

#################################################
##
## SSH
##
configure_ssh_port () {
  sed -i 's/^#*Port [0-9]*/Port ${ssh_port}/' /etc/ssh/sshd_config
}
configure_ssh_port

#################################################
##
## Hostname
##
hostnamectl set-hostname ${hostname}

#################################################
##
## Disable swap files generated by systemd-gpt-auto-generator
##
systemctl mask "dev-sda3.swap"
@@ -0,0 +1,9 @@
terraform {
  required_version = ">=1.3.7"
  required_providers {
    nifcloud = {
      source  = "nifcloud/nifcloud"
      version = ">= 1.8.0, < 2.0.0"
    }
  }
}
@@ -0,0 +1,81 @@
|
||||
variable "availability_zone" {
|
||||
description = "The availability zone"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "prefix" {
|
||||
description = "The prefix for the entire cluster"
|
||||
type = string
|
||||
validation {
|
||||
condition = length(var.prefix) <= 5
|
||||
error_message = "Must be a less than 5 character long."
|
||||
}
|
||||
}
|
||||
|
||||
variable "private_network_cidr" {
|
||||
description = "The subnet of private network"
|
||||
type = string
|
||||
validation {
|
||||
condition = can(cidrnetmask(var.private_network_cidr))
|
||||
error_message = "Must be a valid IPv4 CIDR block address."
|
||||
}
|
||||
}
|
||||
|
||||
variable "private_ip_bn" {
|
||||
description = "Private IP of bastion server"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "instances_cp" {
|
||||
type = map(object({
|
||||
private_ip = string
|
||||
}))
|
||||
}
|
||||
|
||||
variable "instances_wk" {
|
||||
type = map(object({
|
||||
private_ip = string
|
||||
}))
|
||||
}
|
||||
|
||||
variable "instance_key_name" {
|
||||
description = "The key name of the Key Pair to use for the instance"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "instance_type_bn" {
|
||||
description = "The instance type of bastion server"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "instance_type_wk" {
|
||||
description = "The instance type of worker"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "instance_type_cp" {
|
||||
description = "The instance type of control plane"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "image_name" {
|
||||
description = "The name of image"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "additional_lb_filter" {
|
||||
description = "Additional LB filter"
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "accounting_type" {
|
||||
type = string
|
||||
default = "1"
|
||||
validation {
|
||||
condition = anytrue([
|
||||
var.accounting_type == "1", // Monthly
|
||||
var.accounting_type == "2", // Pay per use
|
||||
])
|
||||
error_message = "Must be a 1 or 2."
|
||||
}
|
||||
}
|
||||
3
contrib/terraform/nifcloud/output.tf
Normal file
@@ -0,0 +1,3 @@
output "kubernetes_cluster" {
  value = module.kubernetes_cluster
}
22
contrib/terraform/nifcloud/sample-inventory/cluster.tfvars
Normal file
@@ -0,0 +1,22 @@
region = "jp-west-1"
az     = "west-11"

instance_key_name = "deployerkey"

instance_type_bn = "e-medium"
instance_type_cp = "e-medium"
instance_type_wk = "e-medium"

private_network_cidr = "192.168.30.0/24"
instances_cp = {
  "cp01" : { private_ip : "192.168.30.11/24" }
  "cp02" : { private_ip : "192.168.30.12/24" }
  "cp03" : { private_ip : "192.168.30.13/24" }
}
instances_wk = {
  "wk01" : { private_ip : "192.168.30.21/24" }
  "wk02" : { private_ip : "192.168.30.22/24" }
}
private_ip_bn = "192.168.30.10/24"

image_name = "Ubuntu Server 22.04 LTS"
1
contrib/terraform/nifcloud/sample-inventory/group_vars
Symbolic link
@@ -0,0 +1 @@
../../../../inventory/sample/group_vars
9
contrib/terraform/nifcloud/terraform.tf
Normal file
@@ -0,0 +1,9 @@
terraform {
  required_version = ">=1.3.7"
  required_providers {
    nifcloud = {
      source  = "nifcloud/nifcloud"
      version = "1.8.0"
    }
  }
}
77
contrib/terraform/nifcloud/variables.tf
Normal file
@@ -0,0 +1,77 @@
|
||||
variable "region" {
|
||||
description = "The region"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "az" {
|
||||
description = "The availability zone"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "private_ip_bn" {
|
||||
description = "Private IP of bastion server"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "private_network_cidr" {
|
||||
description = "The subnet of private network"
|
||||
type = string
|
||||
validation {
|
||||
condition = can(cidrnetmask(var.private_network_cidr))
|
||||
error_message = "Must be a valid IPv4 CIDR block address."
|
||||
}
|
||||
}
|
||||
|
||||
variable "instances_cp" {
|
||||
type = map(object({
|
||||
private_ip = string
|
||||
}))
|
||||
}
|
||||
|
||||
variable "instances_wk" {
|
||||
type = map(object({
|
||||
private_ip = string
|
||||
}))
|
||||
}
|
||||
|
||||
variable "instance_key_name" {
|
||||
description = "The key name of the Key Pair to use for the instance"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "instance_type_bn" {
|
||||
description = "The instance type of bastion server"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "instance_type_wk" {
|
||||
description = "The instance type of worker"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "instance_type_cp" {
|
||||
description = "The instance type of control plane"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "image_name" {
|
||||
description = "The name of image"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "working_instance_ip" {
|
||||
description = "The IP address to connect to bastion server."
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "accounting_type" {
|
||||
type = string
|
||||
default = "2"
|
||||
validation {
|
||||
condition = anytrue([
|
||||
var.accounting_type == "1", // Monthly
|
||||
var.accounting_type == "2", // Pay per use
|
||||
])
|
||||
error_message = "Must be a 1 or 2."
|
||||
}
|
||||
}
|
||||
@@ -10,21 +10,29 @@ It is recommended to deploy the ansible version used by kubespray into a python
|
||||
```ShellSession
|
||||
VENVDIR=kubespray-venv
|
||||
KUBESPRAYDIR=kubespray
|
||||
ANSIBLE_VERSION=2.12
|
||||
virtualenv --python=$(which python3) $VENVDIR
|
||||
python3 -m venv $VENVDIR
|
||||
source $VENVDIR/bin/activate
|
||||
cd $KUBESPRAYDIR
|
||||
pip install -U -r requirements-$ANSIBLE_VERSION.txt
|
||||
pip install -U -r requirements.txt
|
||||
```
|
||||
|
||||
In case you have a similar message when installing the requirements:
|
||||
|
||||
```ShellSession
|
||||
ERROR: Could not find a version that satisfies the requirement ansible==7.6.0 (from -r requirements.txt (line 1)) (from versions: [...], 6.7.0)
|
||||
ERROR: No matching distribution found for ansible==7.6.0 (from -r requirements.txt (line 1))
|
||||
```
|
||||
|
||||
It means that the version of Python you are running is not compatible with the version of Ansible that Kubespray supports.
|
||||
If the latest version supported according to pip is 6.7.0 it means you are running Python 3.8 or lower while you need at least Python 3.9 (see the table below).
|
||||
|
||||
### Ansible Python Compatibility
|
||||
|
||||
Based on the table below and the Python version available on your Ansible host, choose the appropriate Ansible version to use with Kubespray.
|
||||
|
||||
| Ansible Version | Python Version |
|
||||
|-----------------|----------------|
|
||||
| 2.11 | 2.7,3.5-3.9 |
|
||||
| 2.12 | 3.8-3.10 |
|
||||
| 2.14 | 3.9-3.11 |
|
||||
|
||||
## Inventory
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@ Kubespray can be installed as an [Ansible collection](https://docs.ansible.com/a
|
||||
collections:
|
||||
- name: https://github.com/kubernetes-sigs/kubespray
|
||||
type: git
|
||||
version: v2.21.0
|
||||
version: master # use the appropriate tag or branch for the version you need
|
||||
```
|
||||
|
||||
2. Install your collection
|
||||
|
||||
14
docs/aws.md
@@ -58,11 +58,23 @@ Guide:
|
||||
```ShellSession
|
||||
export AWS_ACCESS_KEY_ID="xxxxx"
|
||||
export AWS_SECRET_ACCESS_KEY="yyyyy"
|
||||
export REGION="us-east-2"
|
||||
export AWS_REGION="us-east-2"
|
||||
```
|
||||
|
||||
- We will now create our cluster. There will be either one or two small changes. The first is that we will specify `-i inventory/kubespray-aws-inventory.py` as our inventory script. The other is conditional. If your AWS instances are public facing, you can set the `VPC_VISIBILITY` variable to `public` and that will result in public IP and DNS names being passed into the inventory. This causes your cluster.yml command to look like `VPC_VISIBILITY="public" ansible-playbook ... cluster.yml`
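For reference, a full invocation could look like the sketch below. The inventory path comes from the bullet above; the `-b` (become) flag is an assumption that matches common Kubespray usage and may not be needed in your setup:

```ShellSession
# Sketch only: add or drop ansible-playbook options to match your environment.
VPC_VISIBILITY="public" ansible-playbook -i ./inventory/kubespray-aws-inventory.py -b cluster.yml
```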
|
||||
|
||||
**Optional** Using labels and taints
|
||||
|
||||
To add labels to your kubernetes node, add the following tag to your instance:
|
||||
|
||||
- Key: `kubespray-node-labels`
|
||||
- Value: `node-role.kubernetes.io/ingress=`
|
||||
|
||||
To add taints to your Kubernetes node, add the following tag to your instance (see the example after this list):
|
||||
|
||||
- Key: `kubespray-node-taints`
|
||||
- Value: `node-role.kubernetes.io/ingress=:NoSchedule`
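As an illustration only (not part of the original guide), both tags could be attached with the AWS CLI; the instance ID below is a placeholder:

```ShellSession
# Hypothetical instance ID; replace with the ID of your node.
aws ec2 create-tags --resources i-0123456789abcdef0 \
  --tags 'Key=kubespray-node-labels,Value=node-role.kubernetes.io/ingress=' \
         'Key=kubespray-node-taints,Value=node-role.kubernetes.io/ingress=:NoSchedule'
```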
|
||||
|
||||
## Kubespray configuration
|
||||
|
||||
Declare the cloud config variables for the `aws` provider as follows. Setting these variables is optional and depends on your use case.
|
||||
|
||||
@@ -80,10 +80,15 @@ docker_registry_mirrors:
|
||||
containerd_grpc_max_recv_message_size: 16777216
|
||||
containerd_grpc_max_send_message_size: 16777216
|
||||
|
||||
containerd_registries:
|
||||
"docker.io":
|
||||
- "https://mirror.gcr.io"
|
||||
- "https://registry-1.docker.io"
|
||||
containerd_registries_mirrors:
|
||||
- prefix: docker.io
|
||||
mirrors:
|
||||
- host: https://mirror.gcr.io
|
||||
capabilities: ["pull", "resolve"]
|
||||
skip_verify: false
|
||||
- host: https://registry-1.docker.io
|
||||
capabilities: ["pull", "resolve"]
|
||||
skip_verify: false
|
||||
|
||||
containerd_max_container_log_line_size: -1
|
||||
|
||||
|
||||
27
docs/ci.md
@@ -11,14 +11,13 @@ amazon | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
centos7 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: |
|
||||
debian10 | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: |
|
||||
debian11 | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
|
||||
fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: |
|
||||
fedora36 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
|
||||
debian12 | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
fedora37 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: |
|
||||
fedora38 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
|
||||
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
rockylinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
rockylinux9 | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu16 | :x: | :x: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: |
|
||||
ubuntu18 | :white_check_mark: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: |
|
||||
ubuntu20 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
|
||||
ubuntu20 | :white_check_mark: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: |
|
||||
ubuntu22 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
|
||||
## crio
|
||||
@@ -30,14 +29,13 @@ amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
centos7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
debian11 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
fedora36 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
debian12 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
fedora37 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
fedora38 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu22 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
|
||||
## docker
|
||||
@@ -49,12 +47,11 @@ amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
debian10 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
debian11 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
fedora35 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
fedora36 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
|
||||
debian12 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
fedora37 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
fedora38 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
|
||||
opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
|
||||
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
|
||||
ubuntu22 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
|
||||
@@ -99,4 +99,4 @@ For the moment, only Cinder v3 is supported by the CSI Driver.
|
||||
|
||||
## More info
|
||||
|
||||
For further information about the Cinder CSI Driver, you can refer to this page: [Cloud Provider OpenStack](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/using-cinder-csi-plugin.md).
|
||||
For further information about the Cinder CSI Driver, you can refer to this page: [Cloud Provider OpenStack](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md).
|
||||
|
||||
@@ -24,15 +24,20 @@ etcd_deployment_type: host
|
||||
Example: define registry mirror for docker hub
|
||||
|
||||
```yaml
|
||||
containerd_registries:
|
||||
"docker.io":
|
||||
- "https://mirror.gcr.io"
|
||||
- "https://registry-1.docker.io"
|
||||
containerd_registries_mirrors:
|
||||
- prefix: docker.io
|
||||
mirrors:
|
||||
- host: https://mirror.gcr.io
|
||||
capabilities: ["pull", "resolve"]
|
||||
skip_verify: false
|
||||
- host: https://registry-1.docker.io
|
||||
capabilities: ["pull", "resolve"]
|
||||
skip_verify: false
|
||||
```
|
||||
|
||||
`containerd_registries` is ignored for pulling images when `image_command_tool=nerdctl`
|
||||
`containerd_registries_mirrors` is ignored for pulling images when `image_command_tool=nerdctl`
|
||||
(the default for `container_manager=containerd`). Use `crictl` instead; it supports
|
||||
`containerd_registries` but lacks proper multi-arch support (see
|
||||
`containerd_registries_mirrors` but lacks proper multi-arch support (see
|
||||
[#8375](https://github.com/kubernetes-sigs/kubespray/issues/8375)):
|
||||
|
||||
```yaml
|
||||
@@ -103,13 +108,35 @@ containerd_runc_runtime:
|
||||
Config insecure-registry access to self hosted registries.
|
||||
|
||||
```yaml
|
||||
containerd_insecure_registries:
|
||||
"test.registry.io": "http://test.registry.io"
|
||||
"172.19.16.11:5000": "http://172.19.16.11:5000"
|
||||
"repo:5000": "http://repo:5000"
|
||||
containerd_registries_mirrors:
|
||||
- prefix: test.registry.io
|
||||
mirrors:
|
||||
- host: http://test.registry.io
|
||||
capabilities: ["pull", "resolve"]
|
||||
skip_verify: true
|
||||
- prefix: 172.19.16.11:5000
|
||||
mirrors:
|
||||
- host: http://172.19.16.11:5000
|
||||
capabilities: ["pull", "resolve"]
|
||||
skip_verify: true
|
||||
- prefix: repo:5000
|
||||
mirrors:
|
||||
- host: http://repo:5000
|
||||
capabilities: ["pull", "resolve"]
|
||||
skip_verify: true
|
||||
```
|
||||
|
||||
[containerd]: https://containerd.io/
|
||||
[RuntimeClass]: https://kubernetes.io/docs/concepts/containers/runtime-class/
|
||||
[runtime classes in containerd]: https://github.com/containerd/containerd/blob/main/docs/cri/config.md#runtime-classes
|
||||
[runtime-spec]: https://github.com/opencontainers/runtime-spec
|
||||
|
||||
### Optional : NRI
|
||||
|
||||
[Node Resource Interface](https://github.com/containerd/nri) (NRI) is disabled by default for containerd. If you
are using containerd version v1.7.0 or above, you can enable it with the
|
||||
following configuration:
|
||||
|
||||
```yaml
|
||||
nri_enabled: true
|
||||
```
|
||||
|
||||
@@ -62,3 +62,13 @@ The `allowed_annotations` configures `crio.conf` accordingly.
|
||||
|
||||
The `crio_remap_enable` configures the `/etc/subuid` and `/etc/subgid` files to add an entry for the **containers** user.
|
||||
By default, 16M uids and gids are reserved for user namespaces (256 pods * 65536 uids/gids) at the end of the uid/gid space.
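A minimal sketch of enabling this in your inventory group_vars (assuming only the variable described above):

```yaml
crio_remap_enable: true
```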
|
||||
|
||||
## Optional : NRI
|
||||
|
||||
[Node Resource Interface](https://github.com/containerd/nri) (NRI) is disabled by default for CRI-O. If you
are using CRI-O version v1.26.0 or above, you can enable it with the
|
||||
following configuration:
|
||||
|
||||
```yaml
|
||||
nri_enabled: true
|
||||
```
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# K8s DNS stack by Kubespray
|
||||
|
||||
For K8s cluster nodes, Kubespray configures a [Kubernetes DNS](https://kubernetes.io/docs/admin/dns/)
|
||||
For K8s cluster nodes, Kubespray configures a [Kubernetes DNS](https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/)
|
||||
[cluster add-on](https://releases.k8s.io/master/cluster/addons/README.md)
|
||||
to serve as an authoritative DNS server for a given ``dns_domain`` and its
|
||||
``svc, default.svc`` default subdomains (a total of ``ndots: 5`` max levels).
|
||||
@@ -143,6 +143,11 @@ coredns_default_zone_cache_block: |
|
||||
}
|
||||
```
|
||||
|
||||
### systemd_resolved_disable_stub_listener
|
||||
|
||||
Whether or not to set `DNSStubListener=no` when using systemd-resolved. Defaults to `true` on Flatcar.
|
||||
You might need to set it to `true` if CoreDNS fails to start with `address already in use` errors.
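For example, a minimal sketch for an inventory where the stub listener conflicts with CoreDNS:

```yaml
systemd_resolved_disable_stub_listener: true
```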
|
||||
|
||||
## DNS modes supported by Kubespray
|
||||
|
||||
You can modify how Kubespray sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``.
|
||||
|
||||
@@ -44,3 +44,9 @@ kubeEtcd:
|
||||
service:
|
||||
enabled: false
|
||||
```
|
||||
|
||||
To fully override the metrics exposition URLs, define them in the inventory with:
|
||||
|
||||
```yaml
|
||||
etcd_listen_metrics_urls: "http://0.0.0.0:2381"
|
||||
```
|
||||
|
||||
@@ -24,9 +24,10 @@ configured by the variable `loadbalancer_apiserver_localhost` (defaults to
|
||||
`True`. Or `False`, if there is an external `loadbalancer_apiserver` defined).
|
||||
You may also define the port the local internal loadbalancer uses by changing,
|
||||
`loadbalancer_apiserver_port`. This defaults to the value of
|
||||
`kube_apiserver_port`. It is also important to note that Kubespray will only
|
||||
`kube_apiserver_port`. It is also important to note that Kubespray will only
|
||||
configure kubelet and kube-proxy on non-master nodes to use the local internal
|
||||
loadbalancer.
|
||||
loadbalancer. If you wish to control the name of the loadbalancer container,
|
||||
you can set the variable `loadbalancer_apiserver_pod_name`.
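A minimal sketch of these variables in group_vars; the port and pod name shown are illustrative rather than required values:

```yaml
loadbalancer_apiserver_localhost: true
# Defaults to kube_apiserver_port; shown here only for illustration.
loadbalancer_apiserver_port: 6443
# Illustrative name for the local proxy pod.
loadbalancer_apiserver_pod_name: nginx-proxy
```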
|
||||
|
||||
If you choose to NOT use the local internal loadbalancer, you will need to
|
||||
use the [kube-vip](kube-vip.md) ansible role or configure your own loadbalancer to achieve HA. By default, it only configures a non-HA endpoint, which points to the
|
||||
|
||||
@@ -118,7 +118,7 @@ Let's take a deep look to the resultant **kubernetes** configuration:
|
||||
* The `enable-admission-plugins` list does not include the `PodSecurityPolicy` admission plugin, because it is removed in **kubernetes** `v1.25`. For this reason we use the newer `PodSecurity` admission plugin instead (for more details, please take a look here: <https://kubernetes.io/docs/concepts/security/pod-security-admission/>). We also enable the `EventRateLimit` plugin, providing additional configuration files (automatically created under the hood and mounted inside the `kube-apiserver` container) to make it work.
|
||||
* The `encryption-provider-config` provides encryption at rest: the `kube-apiserver` encrypts data before it is stored in `etcd`, so the data held in `etcd` is unreadable on its own (for example, if an attacker manages to access `etcd` directly).
|
||||
* The `rotateCertificates` option in `KubeletConfiguration` is set to `true` along with `serverTLSBootstrap`. This can be used as an alternative to the `tlsCertFile` and `tlsPrivateKeyFile` parameters, and the kubelet then obtains and rotates its certificates automatically. By default the CSRs are approved automatically via [kubelet-csr-approver](https://github.com/postfinance/kubelet-csr-approver). You can customize the approval configuration by modifying the Helm values via `kubelet_csr_approver_values`.
|
||||
See <https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/> for more information on the subject.
|
||||
See <https://kubernetes.io/docs/reference/access-authn-authz/kubelet-tls-bootstrapping/> for more information on the subject.
|
||||
* If you are installing **kubernetes** on an AppArmor-based OS (e.g. Debian/Ubuntu) you can enable the `AppArmor` feature gate by uncommenting the lines marked with the comment `# AppArmor-based OS` on top.
|
||||
* The `kubelet_systemd_hardening` option, together with `kubelet_secure_addresses`, sets up a minimal firewall on the system. To better understand how these variables work, here's an explanatory image, followed by a small example:
|
||||

|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
**NOTE:** The current image version is `v1.1.6`. Please file any issues you find and note the version used.
|
||||
|
||||
The AWS ALB Ingress Controller satisfies Kubernetes [ingress resources](https://kubernetes.io/docs/user-guide/ingress) by provisioning [Application Load Balancers](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/introduction.html).
|
||||
The AWS ALB Ingress Controller satisfies Kubernetes [ingress resources](https://kubernetes.io/docs/concepts/services-networking/ingress/) by provisioning [Application Load Balancers](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/introduction.html).
|
||||
|
||||
This project was originated by [Ticketmaster](https://github.com/ticketmaster) and [CoreOS](https://github.com/coreos) as part of Ticketmaster's move to AWS and CoreOS Tectonic. Learn more about Ticketmaster's Kubernetes initiative from Justin Dean's video at [Tectonic Summit](https://www.youtube.com/watch?v=wqXVKneP0Hg).
|
||||
|
||||
@@ -10,20 +10,20 @@ This project was donated to Kubernetes SIG-AWS to allow AWS, CoreOS, Ticketmaste
|
||||
|
||||
## Documentation
|
||||
|
||||
Checkout our [Live Docs](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/)!
|
||||
Checkout our [Live Docs](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v1.1/#aws-alb-ingress-controller)!
|
||||
|
||||
## Getting started
|
||||
|
||||
To get started with the controller, see our [walkthrough](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/walkthrough/echoserver/).
|
||||
To get started with the controller, see our [walkthrough](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v1.1/guide/walkthrough/echoserver/).
|
||||
|
||||
## Setup
|
||||
|
||||
- See [controller setup](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/controller/setup/) on how to install ALB ingress controller
|
||||
- See [external-dns setup](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/external-dns/setup/) for how to setup the external-dns to manage route 53 records.
|
||||
- See [controller setup](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v1.1/guide/controller/setup/) on how to install ALB ingress controller
|
||||
- See [external-dns setup](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v1.1/guide/external-dns/setup/) for how to setup the external-dns to manage route 53 records.
|
||||
|
||||
## Building
|
||||
|
||||
For details on building this project, see our [building guide](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/BUILDING/).
|
||||
For details on building this project, see our [building guide](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v1.1/BUILDING/).
|
||||
|
||||
## Community, discussion, contribution, and support
|
||||
|
||||
|
||||
@@ -113,7 +113,7 @@ kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/mast
|
||||
|
||||
This example creates an ELB with just two listeners, one in port 80 and another in port 443
|
||||
|
||||

|
||||

|
||||
|
||||
##### ELB Idle Timeouts
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@ You can use it quickly & easily deploy ceph RBD storage that works almost
|
||||
anywhere.
|
||||
|
||||
It works just like the in-tree dynamic provisioner. For more information on how
|
||||
dynamic provisioning works, see [the docs](http://kubernetes.io/docs/user-guide/persistent-volumes/)
|
||||
dynamic provisioning works, see [the docs](https://kubernetes.io/docs/concepts/storage/persistent-volumes/)
|
||||
or [this blog post](http://blog.kubernetes.io/2016/10/dynamic-provisioning-and-storage-in-kubernetes.html).
|
||||
|
||||
## Development
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
Distributed systems such as Kubernetes are designed to be resilient to
failures. More details about Kubernetes High-Availability (HA) may be found at
|
||||
[Building High-Availability Clusters](https://kubernetes.io/docs/admin/high-availability/)
|
||||
[Building High-Availability Clusters](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/)
|
||||
|
||||
To keep the overview simple, most parts of HA are skipped here in order to describe
the Kubelet <-> Controller Manager communication only.
|
||||
|
||||
@@ -26,15 +26,39 @@ By default only the MetalLB BGP speaker is allowed to run on control plane nodes
|
||||
```yaml
|
||||
metallb_config:
|
||||
controller:
|
||||
nodeselector:
|
||||
kubernetes.io/os: linux
|
||||
tolerations:
|
||||
- key: "node-role.kubernetes.io/master"
|
||||
operator: "Equal"
|
||||
value: ""
|
||||
effect: "NoSchedule"
|
||||
- key: "node-role.kubernetes.io/control-plane"
|
||||
operator: "Equal"
|
||||
value: ""
|
||||
effect: "NoSchedule"
|
||||
- key: "node-role.kubernetes.io/master"
|
||||
operator: "Equal"
|
||||
value: ""
|
||||
effect: "NoSchedule"
|
||||
- key: "node-role.kubernetes.io/control-plane"
|
||||
operator: "Equal"
|
||||
value: ""
|
||||
effect: "NoSchedule"
|
||||
```
|
||||
|
||||
If you'd like to set additional nodeSelector and tolerations values, you can do so in the following fashion:
|
||||
|
||||
```yaml
|
||||
metallb_config:
|
||||
controller:
|
||||
nodeselector:
|
||||
kubernetes.io/os: linux
|
||||
tolerations:
|
||||
- key: "node-role.kubernetes.io/control-plane"
|
||||
operator: "Equal"
|
||||
value: ""
|
||||
effect: "NoSchedule"
|
||||
speaker:
|
||||
nodeselector:
|
||||
kubernetes.io/os: linux
|
||||
tolerations:
|
||||
- key: "node-role.kubernetes.io/control-plane"
|
||||
operator: "Equal"
|
||||
value: ""
|
||||
effect: "NoSchedule"
|
||||
```
|
||||
|
||||
## Pools
|
||||
@@ -137,7 +161,6 @@ In this scenario you should disable the MetalLB speaker and configure the `calic
|
||||
|
||||
```yaml
|
||||
metallb_speaker_enabled: false
|
||||
metallb_avoid_buggy_ips: true
|
||||
metallb_config:
|
||||
address_pools:
|
||||
primary:
|
||||
@@ -177,22 +200,22 @@ metallb_config:
|
||||
vpn-only: "1234:1"
|
||||
NO_ADVERTISE: "65535:65282"
|
||||
metallb_peers:
|
||||
peer1:
|
||||
peer_address: 10.6.0.1
|
||||
peer_asn: 64512
|
||||
my_asn: 4200000000
|
||||
communities:
|
||||
- vpn-only
|
||||
address_pool:
|
||||
- pool1
|
||||
peer2:
|
||||
peer_address: 10.10.0.1
|
||||
peer_asn: 64513
|
||||
my_asn: 4200000000
|
||||
communities:
|
||||
- NO_ADVERTISE
|
||||
address_pool:
|
||||
- pool2
|
||||
peer1:
|
||||
peer_address: 10.6.0.1
|
||||
peer_asn: 64512
|
||||
my_asn: 4200000000
|
||||
communities:
|
||||
- vpn-only
|
||||
address_pool:
|
||||
- pool1
|
||||
peer2:
|
||||
peer_address: 10.10.0.1
|
||||
peer_asn: 64513
|
||||
my_asn: 4200000000
|
||||
communities:
|
||||
- NO_ADVERTISE
|
||||
address_pool:
|
||||
- pool2
|
||||
calico_advertise_service_loadbalancer_ips:
|
||||
- 10.5.0.0/16
|
||||
- 10.6.0.0/16
|
||||
|
||||
@@ -90,6 +90,9 @@ In all hosts, restart nginx-proxy pod. This pod is a local proxy for the apiserv
|
||||
```sh
|
||||
# run in every host
|
||||
docker ps | grep k8s_nginx-proxy_nginx-proxy | awk '{print $1}' | xargs docker restart
|
||||
|
||||
# or with containerd
|
||||
crictl ps | grep nginx-proxy | awk '{print $1}' | xargs crictl stop
|
||||
```
|
||||
|
||||
### 3) Remove old control plane nodes
|
||||
|
||||
@@ -33,6 +33,7 @@ kube_image_repo: "{{ registry_host }}"
|
||||
gcr_image_repo: "{{ registry_host }}"
|
||||
docker_image_repo: "{{ registry_host }}"
|
||||
quay_image_repo: "{{ registry_host }}"
|
||||
github_image_repo: "{{ registry_host }}"
|
||||
|
||||
kubeadm_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubeadm"
|
||||
kubectl_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubectl"
|
||||
@@ -50,8 +51,12 @@ containerd_download_url: "{{ files_repo }}/containerd-{{ containerd_version }}-l
|
||||
runc_download_url: "{{ files_repo }}/runc.{{ image_arch }}"
|
||||
nerdctl_download_url: "{{ files_repo }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
|
||||
# Insecure registries for containerd
|
||||
containerd_insecure_registries:
|
||||
"{{ registry_addr }}":"{{ registry_host }}"
|
||||
containerd_registries_mirrors:
|
||||
- prefix: "{{ registry_addr }}"
|
||||
mirrors:
|
||||
- host: "{{ registry_host }}"
|
||||
capabilities: ["pull", "resolve"]
|
||||
skip_verify: true
|
||||
|
||||
# CentOS/Redhat/AlmaLinux/Rocky Linux
|
||||
## Docker / Containerd
|
||||
@@ -90,7 +95,7 @@ If you use the settings like the one above, you'll need to define in your invent
|
||||
|
||||
* `registry_host`: Container image registry. If you _don't_ use the same repository path for the container images that
|
||||
the ones defined
|
||||
in [Download's role defaults](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/download/defaults/main.yml)
|
||||
in [Download's role defaults](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/download/defaults/main/main.yml)
|
||||
, you need to override the `*_image_repo` for these container images. If you want to make your life easier, use the
|
||||
same repository path, and you won't have to override anything else.
|
||||
* `registry_addr`: Container image registry, but containing only [domain or ip]:[port].
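For illustration only (hypothetical registry host), the two variables could then look like this:

```yaml
registry_host: "myregistry.example.com:5000/kubespray"
registry_addr: "myregistry.example.com:5000"
```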
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.