Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2025-12-14 13:54:37 +03:00)

Compare commits: v2.23.3 ... pre-commit (6 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 2187882ee0 | |
| | 4a994c82d1 | |
| | b074b91ee9 | |
| | b3f7be7135 | |
| | d4082da97f | |
| | faecc7420d | |
.ansible-lint

@@ -7,6 +7,18 @@ skip_list:

  # These rules are intentionally skipped:
  #
+ # [E204]: "Lines should be no longer than 160 chars"
+ # This could be re-enabled with a major rewrite in the future.
+ # For now, there's not enough value gain from strictly limiting line length.
+ # (Disabled in May 2019)
+ - '204'
+
+ # [E701]: "meta/main.yml should contain relevant info"
+ # Roles in Kubespray are not intended to be used/imported by Ansible Galaxy.
+ # While it can be useful to have these metadata available, they are also available in the existing documentation.
+ # (Disabled in May 2019)
+ - '701'
+
  # [role-name] "meta/main.yml" Role name role-name does not match ``^+$`` pattern
  # Meta roles in Kubespray don't need proper names
  # (Disabled in June 2021)

@@ -16,23 +28,3 @@ skip_list:
  # In Kubespray we use variables that use camelCase to match their k8s counterparts
  # (Disabled in June 2021)
  - 'var-naming'

- # [fqcn-builtins]
- # Roles in kubespray don't need fully qualified collection names
- # (Disabled in Feb 2023)
- - 'fqcn-builtins'
-
- # We use template in names
- - 'name[template]'
-
- # No changed-when on commands
- # (Disabled in June 2023 after ansible upgrade; FIXME)
- - 'no-changed-when'
-
- # Disable run-once check with free strategy
- # (Disabled in June 2023 after ansible upgrade; FIXME)
- - 'run-once[task]'
- exclude_paths:
- # Generated files
- - tests/files/custom_cni/cilium.yaml
- - venv
.ansible-lint-ignore (deleted)

@@ -1,8 +0,0 @@
- # This file contains ignores rule violations for ansible-lint
- inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml jinja[spacing]
- roles/kubernetes/control-plane/defaults/main/kube-proxy.yml jinja[spacing]
- roles/kubernetes/control-plane/defaults/main/main.yml jinja[spacing]
- roles/kubernetes/kubeadm/defaults/main.yml jinja[spacing]
- roles/kubernetes/node/defaults/main.yml jinja[spacing]
- roles/kubernetes/preinstall/defaults/main.yml jinja[spacing]
- roles/kubespray-defaults/defaults/main.yaml jinja[spacing]
.gitignore (vendored, 7 changed lines)

@@ -11,8 +11,7 @@ contrib/offline/offline-files.tar.gz
  .cache
  *.bak
  *.tfstate
- *.tfstate*backup
+ *.tfstate.backup
- *.lock.hcl
  .terraform/
  contrib/terraform/aws/credentials.tfvars
  .terraform.lock.hcl

@@ -114,7 +113,3 @@ roles/**/molecule/**/__pycache__/
  # Temp location used by our scripts
  scripts/tmp/
  tmp.md
-
- # Ansible collection files
- kubernetes_sigs-kubespray*tar.gz
- ansible_collections
.gitlab-ci.yml

@@ -1,6 +1,5 @@
  ---
  stages:
- - build
  - unit-tests
  - deploy-part1
  - moderator

@@ -9,12 +8,12 @@ stages:
  - deploy-special

  variables:
- KUBESPRAY_VERSION: v2.22.1
+ KUBESPRAY_VERSION: v2.19.0
  FAILFASTCI_NAMESPACE: 'kargo-ci'
  GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
  ANSIBLE_FORCE_COLOR: "true"
  MAGIC: "ci check this"
- TEST_ID: "$CI_PIPELINE_ID-$CI_JOB_ID"
+ TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
  CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
  CI_TEST_REGISTRY_MIRROR: "./tests/common/_docker_hub_registry_mirror.yml"
  CI_TEST_SETTING: "./tests/common/_kubespray_test_settings.yml"

@@ -33,18 +32,21 @@ variables:
  MITOGEN_ENABLE: "false"
  ANSIBLE_LOG_LEVEL: "-vv"
  RECOVER_CONTROL_PLANE_TEST: "false"
- RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"
+ RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
- TERRAFORM_VERSION: 1.3.7
+ TERRAFORM_VERSION: 1.0.8
- PIPELINE_IMAGE: "$CI_REGISTRY_IMAGE/pipeline:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}"
+ ANSIBLE_MAJOR_VERSION: "2.11"

  before_script:
  - ./tests/scripts/rebase.sh
+ - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
+ - python -m pip uninstall -y ansible ansible-base ansible-core
+ - python -m pip install -r tests/requirements-${ANSIBLE_MAJOR_VERSION}.txt
  - mkdir -p /.ssh

  .job: &job
  tags:
  - packet
- image: $PIPELINE_IMAGE
+ image: quay.io/kubespray/kubespray:$KUBESPRAY_VERSION
  artifacts:
  when: always
  paths:

@@ -53,7 +55,6 @@ before_script:
  .testcases: &testcases
  <<: *job
  retry: 1
- interruptible: true
  before_script:
  - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
  - ./tests/scripts/rebase.sh

@@ -75,7 +76,6 @@ ci-authorized:
  only: []

  include:
- - .gitlab-ci/build.yml
  - .gitlab-ci/lint.yml
  - .gitlab-ci/shellcheck.yml
  - .gitlab-ci/terraform.yml
.gitlab-ci/build.yml (deleted)

@@ -1,40 +0,0 @@
- ---
- .build:
- stage: build
- image:
- name: moby/buildkit:rootless
- entrypoint: [""]
- variables:
- BUILDKITD_FLAGS: --oci-worker-no-process-sandbox
- before_script:
- - mkdir ~/.docker
- - echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > ~/.docker/config.json
-
- pipeline image:
- extends: .build
- script:
- - |
- buildctl-daemonless.sh build \
- --frontend=dockerfile.v0 \
- --local context=. \
- --local dockerfile=. \
- --opt filename=./pipeline.Dockerfile \
- --output type=image,name=$PIPELINE_IMAGE,push=true \
- --import-cache type=registry,ref=$CI_REGISTRY_IMAGE/pipeline:cache
- rules:
- - if: '$CI_COMMIT_REF_NAME != $CI_DEFAULT_BRANCH'
-
- pipeline image and build cache:
- extends: .build
- script:
- - |
- buildctl-daemonless.sh build \
- --frontend=dockerfile.v0 \
- --local context=. \
- --local dockerfile=. \
- --opt filename=./pipeline.Dockerfile \
- --output type=image,name=$PIPELINE_IMAGE,push=true \
- --import-cache type=registry,ref=$CI_REGISTRY_IMAGE/pipeline:cache \
- --export-cache type=registry,ref=$CI_REGISTRY_IMAGE/pipeline:cache,mode=max
- rules:
- - if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH'
.gitlab-ci/lint.yml

@@ -14,7 +14,7 @@ vagrant-validate:
  stage: unit-tests
  tags: [light]
  variables:
- VAGRANT_VERSION: 2.3.7
+ VAGRANT_VERSION: 2.2.19
  script:
  - ./tests/scripts/vagrant-validate.sh
  except: ['triggers', 'master']

@@ -39,34 +39,21 @@ syntax-check:
  ANSIBLE_VERBOSITY: "3"
  script:
  - ansible-playbook --syntax-check cluster.yml
- - ansible-playbook --syntax-check playbooks/cluster.yml
  - ansible-playbook --syntax-check upgrade-cluster.yml
- - ansible-playbook --syntax-check playbooks/upgrade_cluster.yml
  - ansible-playbook --syntax-check reset.yml
- - ansible-playbook --syntax-check playbooks/reset.yml
  - ansible-playbook --syntax-check extra_playbooks/upgrade-only-k8s.yml
  except: ['triggers', 'master']

- collection-build-install-sanity-check:
- extends: .job
- stage: unit-tests
- tags: [light]
- variables:
- ANSIBLE_COLLECTIONS_PATH: "./ansible_collections"
- script:
- - ansible-galaxy collection build
- - ansible-galaxy collection install kubernetes_sigs-kubespray-$(grep "^version:" galaxy.yml | awk '{print $2}').tar.gz
- - ansible-galaxy collection list $(egrep -i '(name:\s+|namespace:\s+)' galaxy.yml | awk '{print $2}' | tr '\n' '.' | sed 's|\.$||g') | grep "^kubernetes_sigs.kubespray"
- - test -f ansible_collections/kubernetes_sigs/kubespray/playbooks/cluster.yml
- - test -f ansible_collections/kubernetes_sigs/kubespray/playbooks/reset.yml
- except: ['triggers', 'master']

  tox-inventory-builder:
  stage: unit-tests
  tags: [light]
  extends: .job
  before_script:
  - ./tests/scripts/rebase.sh
+ - apt-get update && apt-get install -y python3-pip
+ - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
+ - python -m pip uninstall -y ansible ansible-base ansible-core
+ - python -m pip install -r tests/requirements.txt
  script:
  - pip3 install tox
  - cd contrib/inventory_builder && tox

@@ -88,20 +75,6 @@ check-readme-versions:
  script:
  - tests/scripts/check_readme_versions.sh

- check-galaxy-version:
- stage: unit-tests
- tags: [light]
- image: python:3
- script:
- - tests/scripts/check_galaxy_version.sh
-
- check-typo:
- stage: unit-tests
- tags: [light]
- image: python:3
- script:
- - tests/scripts/check_typo.sh

  ci-matrix:
  stage: unit-tests
  tags: [light]
.gitlab-ci/molecule.yml

@@ -4,11 +4,15 @@
  tags: [c3.small.x86]
  only: [/^pr-.*$/]
  except: ['triggers']
- image: $PIPELINE_IMAGE
+ image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
  services: []
  stage: deploy-part1
  before_script:
  - tests/scripts/rebase.sh
+ - apt-get update && apt-get install -y python3-pip
+ - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
+ - python -m pip uninstall -y ansible ansible-base ansible-core
+ - python -m pip install -r tests/requirements.txt
  - ./tests/scripts/vagrant_clean.sh
  script:
  - ./tests/scripts/molecule_run.sh

@@ -54,7 +58,6 @@ molecule_cri-o:
  stage: deploy-part2
  script:
  - ./tests/scripts/molecule_run.sh -i container-engine/cri-o
- allow_failure: true
  when: on_success

  # Stage 3 container engines don't get as much attention so allow them to fail
.gitlab-ci/packet.yml

@@ -23,45 +23,45 @@
  allow_failure: true
  extends: .packet

- packet_cleanup_old:
- stage: deploy-part1
- extends: .packet_periodic
- script:
- - cd tests
- - make cleanup-packet
- after_script: []
-
- # The ubuntu20-calico-all-in-one jobs are meant as early stages to prevent running the full CI if something is horribly broken
- packet_ubuntu20-calico-all-in-one:
+ # The ubuntu20-calico-aio jobs are meant as early stages to prevent running the full CI if something is horribly broken
+ packet_ubuntu20-calico-aio:
  stage: deploy-part1
  extends: .packet_pr
  when: on_success
  variables:
  RESET_CHECK: "true"

+ packet_ubuntu20-calico-aio-ansible-2_11:
+ stage: deploy-part1
+ extends: .packet_periodic
+ when: on_success
+ variables:
+ ANSIBLE_MAJOR_VERSION: "2.11"
+ RESET_CHECK: "true"

  # ### PR JOBS PART2

- packet_ubuntu20-all-in-one-docker:
+ packet_ubuntu18-aio-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

- packet_ubuntu20-calico-all-in-one-hardening:
+ packet_ubuntu20-aio-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

- packet_ubuntu22-all-in-one-docker:
+ packet_ubuntu18-calico-aio:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

- packet_ubuntu22-calico-all-in-one:
+ packet_ubuntu22-aio-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

- packet_ubuntu22-calico-etcd-datastore:
+ packet_ubuntu22-calico-aio:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

@@ -75,19 +75,28 @@ packet_almalinux8-crio:
  extends: .packet_pr
  stage: deploy-part2
  when: on_success
- allow_failure: true

- packet_ubuntu20-crio:
+ packet_ubuntu18-crio:
  extends: .packet_pr
  stage: deploy-part2
  when: manual

- packet_fedora37-crio:
+ packet_fedora35-crio:
  extends: .packet_pr
  stage: deploy-part2
  when: manual

- packet_ubuntu20-flannel-ha:
+ packet_ubuntu16-canal-ha:
+ stage: deploy-part2
+ extends: .packet_periodic
+ when: on_success
+
+ packet_ubuntu16-canal-sep:
+ stage: deploy-special
+ extends: .packet_pr
+ when: manual
+
+ packet_ubuntu16-flannel-ha:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

@@ -117,21 +126,6 @@ packet_debian11-docker:
  extends: .packet_pr
  when: on_success

- packet_debian12-calico:
- stage: deploy-part2
- extends: .packet_pr
- when: on_success
-
- packet_debian12-docker:
- stage: deploy-part2
- extends: .packet_pr
- when: on_success
-
- packet_debian12-cilium:
- stage: deploy-part2
- extends: .packet_periodic
- when: on_success

  packet_centos7-calico-ha-once-localhost:
  stage: deploy-part2
  extends: .packet_pr

@@ -144,7 +138,7 @@ packet_centos7-calico-ha-once-localhost:

  packet_almalinux8-kube-ovn:
  stage: deploy-part2
- extends: .packet_pr
+ extends: .packet_periodic
  when: on_success

  packet_almalinux8-calico:

@@ -157,28 +151,20 @@ packet_rockylinux8-calico:
  extends: .packet_pr
  when: on_success

- packet_rockylinux9-calico:
- stage: deploy-part2
- extends: .packet_pr
- when: on_success
-
- packet_rockylinux9-cilium:
- stage: deploy-part2
- extends: .packet_pr
- when: on_success
- variables:
- RESET_CHECK: "true"

  packet_almalinux8-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

- packet_fedora38-docker-weave:
+ packet_fedora36-docker-weave:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success
- allow_failure: true
+
+ packet_opensuse-canal:
+ stage: deploy-part2
+ extends: .packet_periodic
+ when: on_success

  packet_opensuse-docker-cilium:
  stage: deploy-part2

@@ -187,17 +173,22 @@ packet_opensuse-docker-cilium:

  # ### MANUAL JOBS

- packet_ubuntu20-docker-weave-sep:
+ packet_ubuntu16-docker-weave-sep:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

- packet_ubuntu20-cilium-sep:
+ packet_ubuntu18-cilium-sep:
  stage: deploy-special
  extends: .packet_pr
  when: manual

- packet_ubuntu20-flannel-ha-once:
+ packet_ubuntu18-flannel-ha:
+ stage: deploy-part2
+ extends: .packet_pr
+ when: manual
+
+ packet_ubuntu18-flannel-ha-once:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

@@ -208,7 +199,7 @@ packet_almalinux8-calico-ha-ebpf:
  extends: .packet_pr
  when: manual

- packet_debian10-macvlan:
+ packet_debian9-macvlan:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

@@ -223,24 +214,29 @@ packet_centos7-multus-calico:
  extends: .packet_pr
  when: manual

- packet_fedora38-docker-calico:
+ packet_centos7-canal-ha:
+ stage: deploy-part2
+ extends: .packet_pr
+ when: manual
+
+ packet_fedora36-docker-calico:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success
  variables:
  RESET_CHECK: "true"

- packet_fedora37-calico-selinux:
+ packet_fedora35-calico-selinux:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success

- packet_fedora37-calico-swap-selinux:
+ packet_fedora35-calico-swap-selinux:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

- packet_amazon-linux-2-all-in-one:
+ packet_amazon-linux-2-aio:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

@@ -250,21 +246,11 @@ packet_almalinux8-calico-nodelocaldns-secondary:
  extends: .packet_pr
  when: manual

- packet_fedora38-kube-ovn:
+ packet_fedora36-kube-ovn:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success

- packet_debian11-custom-cni:
- stage: deploy-part2
- extends: .packet_pr
- when: manual
-
- packet_debian11-kubelet-csr-approver:
- stage: deploy-part2
- extends: .packet_pr
- when: manual

  # ### PR JOBS PART3
  # Long jobs (45min+)

@@ -315,18 +301,18 @@ packet_debian11-calico-upgrade-once:
  variables:
  UPGRADE_TEST: graceful

- packet_ubuntu20-calico-ha-recover:
+ packet_ubuntu18-calico-ha-recover:
  stage: deploy-part3
  extends: .packet_periodic
  when: on_success
  variables:
  RECOVER_CONTROL_PLANE_TEST: "true"
- RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"
+ RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"

- packet_ubuntu20-calico-ha-recover-noquorum:
+ packet_ubuntu18-calico-ha-recover-noquorum:
  stage: deploy-part3
  extends: .packet_periodic
  when: on_success
  variables:
  RECOVER_CONTROL_PLANE_TEST: "true"
- RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:]:kube_control_plane[1:]"
+ RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube_control_plane[1:]"
.gitlab-ci/terraform.yml

@@ -60,11 +60,11 @@ tf-validate-openstack:
  PROVIDER: openstack
  CLUSTER: $CI_COMMIT_REF_NAME

- tf-validate-equinix:
+ tf-validate-metal:
  extends: .terraform_validate
  variables:
  TF_VERSION: $TERRAFORM_VERSION
- PROVIDER: equinix
+ PROVIDER: metal
  CLUSTER: $CI_COMMIT_REF_NAME

  tf-validate-aws:

@@ -80,12 +80,6 @@ tf-validate-exoscale:
  TF_VERSION: $TERRAFORM_VERSION
  PROVIDER: exoscale

- tf-validate-hetzner:
- extends: .terraform_validate
- variables:
- TF_VERSION: $TERRAFORM_VERSION
- PROVIDER: hetzner

  tf-validate-vsphere:
  extends: .terraform_validate
  variables:

@@ -100,13 +94,7 @@ tf-validate-upcloud:
  PROVIDER: upcloud
  CLUSTER: $CI_COMMIT_REF_NAME

- tf-validate-nifcloud:
- extends: .terraform_validate
- variables:
- TF_VERSION: $TERRAFORM_VERSION
- PROVIDER: nifcloud
-
- # tf-packet-ubuntu20-default:
+ # tf-packet-ubuntu16-default:
  # extends: .terraform_apply
  # variables:
  # TF_VERSION: $TERRAFORM_VERSION

@@ -116,9 +104,23 @@ tf-validate-nifcloud:
  # TF_VAR_number_of_k8s_nodes: "1"
  # TF_VAR_plan_k8s_masters: t1.small.x86
  # TF_VAR_plan_k8s_nodes: t1.small.x86
- # TF_VAR_metro: am
+ # TF_VAR_facility: ewr1
  # TF_VAR_public_key_path: ""
- # TF_VAR_operating_system: ubuntu_20_04
+ # TF_VAR_operating_system: ubuntu_16_04
+ #
+ # tf-packet-ubuntu18-default:
+ # extends: .terraform_apply
+ # variables:
+ # TF_VERSION: $TERRAFORM_VERSION
+ # PROVIDER: packet
+ # CLUSTER: $CI_COMMIT_REF_NAME
+ # TF_VAR_number_of_k8s_masters: "1"
+ # TF_VAR_number_of_k8s_nodes: "1"
+ # TF_VAR_plan_k8s_masters: t1.small.x86
+ # TF_VAR_plan_k8s_nodes: t1.small.x86
+ # TF_VAR_facility: ams1
+ # TF_VAR_public_key_path: ""
+ # TF_VAR_operating_system: ubuntu_18_04

  .ovh_variables: &ovh_variables
  OS_AUTH_URL: https://auth.cloud.ovh.net/v3

@@ -156,7 +158,7 @@ tf-elastx_cleanup:
  script:
  - ./scripts/openstack-cleanup/main.py

- tf-elastx_ubuntu20-calico:
+ tf-elastx_ubuntu18-calico:
  extends: .terraform_apply
  stage: deploy-part3
  when: on_success

@@ -186,7 +188,7 @@ tf-elastx_ubuntu20-calico:
  TF_VAR_az_list_node: '["sto1"]'
  TF_VAR_flavor_k8s_master: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
  TF_VAR_flavor_k8s_node: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
- TF_VAR_image: ubuntu-20.04-server-latest
+ TF_VAR_image: ubuntu-18.04-server-latest
  TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

  # OVH voucher expired, commenting job until things are sorted out

@@ -203,7 +205,7 @@ tf-elastx_ubuntu20-calico:
  # script:
  # - ./scripts/openstack-cleanup/main.py

- # tf-ovh_ubuntu20-calico:
+ # tf-ovh_ubuntu18-calico:
  # extends: .terraform_apply
  # when: on_success
  # environment: ovh

@@ -229,5 +231,5 @@ tf-elastx_ubuntu20-calico:
  # TF_VAR_network_name: "Ext-Net"
  # TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
  # TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
- # TF_VAR_image: "Ubuntu 20.04"
+ # TF_VAR_image: "Ubuntu 18.04"
  # TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
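For orientation, a `.terraform_validate`-style job of the kind referenced above generally amounts to initializing the relevant provider directory without a backend and validating it. A minimal sketch under that assumption (the `contrib/terraform/${PROVIDER}` layout comes from this repository; the exact CI wrapper script is not shown in this diff):

```ShellSession
PROVIDER=openstack                  # one of the providers named in the jobs above
cd "contrib/terraform/${PROVIDER}"
terraform init -backend=false       # fetch providers only, no remote state
terraform validate                  # check the configuration for errors
```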
.gitlab-ci/vagrant.yml

@@ -10,9 +10,13 @@
  tags: [c3.small.x86]
  only: [/^pr-.*$/]
  except: ['triggers']
- image: $PIPELINE_IMAGE
+ image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
  services: []
  before_script:
+ - apt-get update && apt-get install -y python3-pip
+ - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
+ - python -m pip uninstall -y ansible ansible-base ansible-core
+ - python -m pip install -r tests/requirements.txt
  - ./tests/scripts/vagrant_clean.sh
  script:
  - ./tests/scripts/testcases_run.sh

@@ -20,12 +24,17 @@
  - chronic ./tests/scripts/testcases_cleanup.sh
  allow_failure: true

- vagrant_ubuntu20-calico-dual-stack:
+ vagrant_ubuntu18-calico-dual-stack:
  stage: deploy-part2
  extends: .vagrant
  when: on_success

- vagrant_ubuntu20-weave-medium:
+ vagrant_ubuntu18-flannel:
+ stage: deploy-part2
+ extends: .vagrant
+ when: on_success
+
+ vagrant_ubuntu18-weave-medium:
  stage: deploy-part2
  extends: .vagrant
  when: manual

@@ -34,25 +43,19 @@ vagrant_ubuntu20-flannel:
  stage: deploy-part2
  extends: .vagrant
  when: on_success
- allow_failure: false

- vagrant_ubuntu20-flannel-collection:
- stage: deploy-part2
- extends: .vagrant
- when: on_success
-
- vagrant_ubuntu20-kube-router-sep:
+ vagrant_ubuntu16-kube-router-sep:
  stage: deploy-part2
  extends: .vagrant
  when: manual

  # Service proxy test fails connectivity testing
- vagrant_ubuntu20-kube-router-svc-proxy:
+ vagrant_ubuntu16-kube-router-svc-proxy:
  stage: deploy-part2
  extends: .vagrant
  when: manual

- vagrant_fedora37-kube-router:
+ vagrant_fedora35-kube-router:
  stage: deploy-part2
  extends: .vagrant
  when: on_success
.pre-commit-config.yaml

@@ -1,20 +1,5 @@
  ---
  repos:

- - repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v3.4.0
- hooks:
- - id: check-added-large-files
- - id: check-case-conflict
- - id: check-executables-have-shebangs
- - id: check-xml
- - id: check-merge-conflict
- - id: detect-private-key
- - id: end-of-file-fixer
- - id: forbid-new-submodules
- - id: requirements-txt-fixer
- - id: trailing-whitespace
-
  - repo: https://github.com/adrienverge/yamllint.git
  rev: v1.27.1
  hooks:

@@ -28,14 +13,6 @@ repos:
  args: [ -r, "~MD013,~MD029" ]
  exclude: "^.git"

- - repo: https://github.com/jumanjihouse/pre-commit-hooks
- rev: 3.0.0
- hooks:
- - id: shellcheck
- args: [ --severity, "error" ]
- exclude: "^.git"
- files: "\\.sh$"
-
  - repo: local
  hooks:
  - id: ansible-lint
.yamllint

@@ -3,8 +3,6 @@ extends: default

  ignore: |
  .git/
- # Generated file
- tests/files/custom_cni/cilium.yaml

  rules:
  braces:
(file name not captured)

@@ -1 +0,0 @@
- # See our release notes on [GitHub](https://github.com/kubernetes-sigs/kubespray/releases)
CONTRIBUTING.md

@@ -12,7 +12,6 @@ To install development dependencies you can set up a python virtual env with the
  virtualenv venv
  source venv/bin/activate
  pip install -r tests/requirements.txt
- ansible-galaxy install -r tests/requirements.yml
  ```

  #### Linting

@@ -39,7 +38,7 @@ Vagrant with VirtualBox or libvirt driver helps you to quickly spin test clusters
  1. Submit an issue describing your proposed change to the repo in question.
  2. The [repo owners](OWNERS) will respond to your issue promptly.
  3. Fork the desired repo, develop and test your code changes.
- 4. Install [pre-commit](https://pre-commit.com) and install it in your development repo.
+ 4. Install [pre-commit](https://pre-commit.com) and install it in your development repo).
  5. Addess any pre-commit validation failures.
  6. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
  7. Submit a pull request.
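Step 4 of the contribution workflow quoted above asks for pre-commit to be installed and registered in the clone. A minimal sketch of that setup with the standard pre-commit commands, assuming the repository's own .pre-commit-config.yaml shown earlier in this diff:

```ShellSession
pip install pre-commit      # or install it from your OS packages
pre-commit install          # register the git hook in your working copy
pre-commit run --all-files  # run every configured hook once, before pushing
```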
Dockerfile (72 changed lines)

@@ -1,45 +1,37 @@
- # Use imutable image tags rather than mutable tags (like ubuntu:22.04)
+ # Use imutable image tags rather than mutable tags (like ubuntu:20.04)
- FROM ubuntu:jammy-20230308
+ FROM ubuntu:focal-20220531

+ ARG ARCH=amd64
+ ARG TZ=Etc/UTC
+ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
+
+ RUN apt update -y \
+ && apt install -y \
+ libssl-dev python3-dev sshpass apt-transport-https jq moreutils \
+ ca-certificates curl gnupg2 software-properties-common python3-pip unzip rsync git \
+ && rm -rf /var/lib/apt/lists/*
+ RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
+ && add-apt-repository \
+ "deb [arch=$ARCH] https://download.docker.com/linux/ubuntu \
+ $(lsb_release -cs) \
+ stable" \
+ && apt update -y && apt-get install --no-install-recommends -y docker-ce \
+ && rm -rf /var/lib/apt/lists/*

  # Some tools like yamllint need this
  # Pip needs this as well at the moment to install ansible
  # (and potentially other packages)
  # See: https://github.com/pypa/pip/issues/10219
- ENV LANG=C.UTF-8 \
- DEBIAN_FRONTEND=noninteractive \
- PYTHONDONTWRITEBYTECODE=1
- WORKDIR /kubespray
- COPY *.yml ./
- COPY *.cfg ./
- COPY roles ./roles
- COPY contrib ./contrib
- COPY inventory ./inventory
- COPY library ./library
- COPY extra_playbooks ./extra_playbooks
- COPY playbooks ./playbooks
- COPY plugins ./plugins
+ ENV LANG=C.UTF-8

- RUN apt update -q \
- && apt install -yq --no-install-recommends \
- curl \
- python3 \
- python3-pip \
- sshpass \
- vim \
- rsync \
- openssh-client \
- && pip install --no-compile --no-cache-dir \
- ansible==7.6.0 \
- ansible-core==2.14.6 \
- cryptography==41.0.1 \
- jinja2==3.1.2 \
- netaddr==0.8.0 \
- jmespath==1.0.1 \
- MarkupSafe==2.1.3 \
- ruamel.yaml==0.17.21 \
- passlib==1.7.4 \
- && KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
- && curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \
- && echo $(curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \
- && chmod a+x /usr/local/bin/kubectl \
- && rm -rf /var/lib/apt/lists/* /var/log/* \
- && find /usr -type d -name '*__pycache__' -prune -exec rm -rf {} \;
+ WORKDIR /kubespray
+ COPY . .
+ RUN /usr/bin/python3 -m pip install --no-cache-dir pip -U \
+ && /usr/bin/python3 -m pip install --no-cache-dir -r tests/requirements.txt \
+ && python3 -m pip install --no-cache-dir -r requirements.txt \
+ && update-alternatives --install /usr/bin/python python /usr/bin/python3 1
+
+ RUN KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
+ && curl -LO https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$ARCH/kubectl \
+ && chmod a+x kubectl \
+ && mv kubectl /usr/local/bin/kubectl
LICENSE (2 changed lines)

@@ -187,7 +187,7 @@
  identification within third-party archives.

  Copyright 2016 Kubespray

  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at
OWNERS (2 changed lines)

@@ -5,4 +5,4 @@ approvers:
  reviewers:
  - kubespray-reviewers
  emeritus_approvers:
  - kubespray-emeritus_approvers
OWNERS_ALIASES

@@ -8,9 +8,6 @@ aliases:
  - floryut
  - oomichi
  - cristicalin
- - liupeng0518
- - yankay
- - mzaian
  kubespray-reviewers:
  - holmsten
  - bozzo

@@ -19,11 +16,6 @@ aliases:
  - jayonlau
  - cristicalin
  - liupeng0518
- - yankay
- - cyclinder
- - mzaian
- - mrfreezeex
- - erikjiang
  kubespray-emeritus_approvers:
  - riverzhang
  - atoms
README.md (131 changed lines)

@@ -13,7 +13,7 @@ You can get your invite [here](http://slack.k8s.io/)

  ## Quick Start

- Below are several ways to use Kubespray to deploy a Kubernetes cluster.
+ To deploy the cluster you can use :

  ### Ansible

@@ -34,13 +34,6 @@ CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inv
  cat inventory/mycluster/group_vars/all/all.yml
  cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml

- # Clean up old Kubernetes cluster with Ansible Playbook - run the playbook as root
- # The option `--become` is required, as for example cleaning up SSL keys in /etc/,
- # uninstalling old packages and interacting with various systemd daemons.
- # Without --become the playbook will fail to run!
- # And be mind it will remove the current kubernetes cluster (if it's running)!
- ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root reset.yml
-
  # Deploy Kubespray with Ansible Playbook - run the playbook as root
  # The option `--become` is required, as for example writing SSL keys in /etc/,
  # installing packages and interacting with various systemd daemons.

@@ -48,50 +41,34 @@ ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root
  ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
  ```

- Note: When Ansible is already installed via system packages on the control node,
- Python packages installed via `sudo pip install -r requirements.txt` will go to
- a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on
- Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on
- Ubuntu). As a consequence, the `ansible-playbook` command will fail with:
+ Note: When Ansible is already installed via system packages on the control machine, other python packages installed via `sudo pip install -r requirements.txt` will go to a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on Ubuntu).
+ As a consequence, `ansible-playbook` command will fail with:

  ```raw
  ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path.
  ```

- This likely indicates that a task depends on a module present in ``requirements.txt``.
+ probably pointing on a task depending on a module present in requirements.txt.

- One way of addressing this is to uninstall the system Ansible package then
- reinstall Ansible via ``pip``, but this not always possible and one must
- take care regarding package versions.
- A workaround consists of setting the `ANSIBLE_LIBRARY`
- and `ANSIBLE_MODULE_UTILS` environment variables respectively to
- the `ansible/modules` and `ansible/module_utils` subdirectories of the ``pip``
- installation location, which is the ``Location`` shown by running
- `pip show [package]` before executing `ansible-playbook`.
+ One way of solving this would be to uninstall the Ansible package and then, to install it via pip but it is not always possible.
+ A workaround consists of setting `ANSIBLE_LIBRARY` and `ANSIBLE_MODULE_UTILS` environment variables respectively to the `ansible/modules` and `ansible/module_utils` subdirectories of pip packages installation location, which can be found in the Location field of the output of `pip show [package]` before executing `ansible-playbook`.

- A simple way to ensure you get all the correct version of Ansible is to use
- the [pre-built docker image from Quay](https://quay.io/repository/kubespray/kubespray?tab=tags).
- You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/)
- to access the inventory and SSH key in the container, like this:
+ A simple way to ensure you get all the correct version of Ansible is to use the [pre-built docker image from Quay](https://quay.io/repository/kubespray/kubespray?tab=tags).
+ You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this:

  ```ShellSession
- git checkout v2.23.2
- docker pull quay.io/kubespray/kubespray:v2.23.2
+ docker pull quay.io/kubespray/kubespray:v2.19.0
  docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
  --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
- quay.io/kubespray/kubespray:v2.23.2 bash
+ quay.io/kubespray/kubespray:v2.19.0 bash
  # Inside the container you may now run the kubespray playbooks:
  ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
  ```

- #### Collection
-
- See [here](docs/ansible_collection.md) if you wish to use this repository as an Ansible collection
-
  ### Vagrant

- For Vagrant we need to install Python dependencies for provisioning tasks.
- Check that ``Python`` and ``pip`` are installed:
+ For Vagrant we need to install python dependencies for provisioning tasks.
+ Check if Python and pip are installed:

  ```ShellSession
  python -V && pip -V
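The ANSIBLE_LIBRARY / ANSIBLE_MODULE_UTILS workaround described in the Quick Start note above can be expressed as a couple of shell commands. A minimal sketch, assuming Ansible was installed with pip and that `pip show` reports its install location; the playbook invocation is illustrative:

```ShellSession
# Point Ansible at the modules shipped with the pip-installed package.
ANSIBLE_LOCATION=$(pip show ansible | sed -n 's/^Location: //p')
export ANSIBLE_LIBRARY="${ANSIBLE_LOCATION}/ansible/modules"
export ANSIBLE_MODULE_UTILS="${ANSIBLE_LOCATION}/ansible/module_utils"
ansible-playbook -i inventory/mycluster/hosts.yaml --become cluster.yml
```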
@@ -136,54 +113,52 @@ vagrant up
|
|||||||
- [Air-Gap installation](docs/offline-environment.md)
|
- [Air-Gap installation](docs/offline-environment.md)
|
||||||
- [NTP](docs/ntp.md)
|
- [NTP](docs/ntp.md)
|
||||||
- [Hardening](docs/hardening.md)
|
- [Hardening](docs/hardening.md)
|
||||||
- [Mirror](docs/mirror.md)
|
|
||||||
- [Roadmap](docs/roadmap.md)
|
- [Roadmap](docs/roadmap.md)
|
||||||
|
|
||||||
## Supported Linux Distributions
|
## Supported Linux Distributions
|
||||||
|
|
||||||
- **Flatcar Container Linux by Kinvolk**
|
- **Flatcar Container Linux by Kinvolk**
|
||||||
- **Debian** Bookworm, Bullseye, Buster
|
- **Debian** Bullseye, Buster, Jessie, Stretch
|
||||||
- **Ubuntu** 20.04, 22.04
|
- **Ubuntu** 16.04, 18.04, 20.04, 22.04
|
||||||
- **CentOS/RHEL** 7, [8, 9](docs/centos.md#centos-8)
|
- **CentOS/RHEL** 7, [8](docs/centos.md#centos-8)
|
||||||
- **Fedora** 37, 38
|
- **Fedora** 35, 36
|
||||||
- **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
|
- **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
|
||||||
- **openSUSE** Leap 15.x/Tumbleweed
|
- **openSUSE** Leap 15.x/Tumbleweed
|
||||||
- **Oracle Linux** 7, [8, 9](docs/centos.md#centos-8)
|
- **Oracle Linux** 7, [8](docs/centos.md#centos-8)
|
||||||
- **Alma Linux** [8, 9](docs/centos.md#centos-8)
|
- **Alma Linux** [8](docs/centos.md#centos-8)
|
||||||
- **Rocky Linux** [8, 9](docs/centos.md#centos-8)
|
- **Rocky Linux** [8](docs/centos.md#centos-8)
|
||||||
- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/kylinlinux.md))
|
- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/kylinlinux.md))
|
||||||
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md))
|
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md))
|
||||||
- **UOS Linux** (experimental: see [uos linux notes](docs/uoslinux.md))
|
|
||||||
- **openEuler** (experimental: see [openEuler notes](docs/openeuler.md))
|
|
||||||
|
|
||||||
Note: Upstart/SysV init based OS types are not supported.
|
Note: Upstart/SysV init based OS types are not supported.
|
||||||
|
|
||||||
## Supported Components
|
## Supported Components
|
||||||
|
|
||||||
- Core
|
- Core
|
||||||
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.27.10
|
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.24.3
|
||||||
- [etcd](https://github.com/etcd-io/etcd) v3.5.10
|
- [etcd](https://github.com/etcd-io/etcd) v3.5.4
|
||||||
- [docker](https://www.docker.com/) v20.10 (see note)
|
- [docker](https://www.docker.com/) v20.10 (see note)
|
||||||
- [containerd](https://containerd.io/) v1.7.13
|
- [containerd](https://containerd.io/) v1.6.6
|
||||||
- [cri-o](http://cri-o.io/) v1.27 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
|
- [cri-o](http://cri-o.io/) v1.24 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
|
||||||
- Network Plugin
|
- Network Plugin
|
||||||
- [cni-plugins](https://github.com/containernetworking/plugins) v1.2.0
|
- [cni-plugins](https://github.com/containernetworking/plugins) v1.1.1
|
||||||
- [calico](https://github.com/projectcalico/calico) v3.25.2
|
- [calico](https://github.com/projectcalico/calico) v3.23.3
|
||||||
- [cilium](https://github.com/cilium/cilium) v1.13.4
|
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
|
||||||
- [flannel](https://github.com/flannel-io/flannel) v0.22.0
|
- [cilium](https://github.com/cilium/cilium) v1.11.7
|
||||||
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.11.5
|
- [flannel](https://github.com/flannel-io/flannel) v0.18.1
|
||||||
|
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.9.7
|
||||||
- [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1
|
- [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1
|
||||||
- [multus](https://github.com/k8snetworkplumbingwg/multus-cni) v3.8
|
- [multus](https://github.com/intel/multus-cni) v3.8
|
||||||
- [weave](https://github.com/weaveworks/weave) v2.8.1
|
- [weave](https://github.com/weaveworks/weave) v2.8.1
|
||||||
- [kube-vip](https://github.com/kube-vip/kube-vip) v0.5.12
|
- [kube-vip](https://github.com/kube-vip/kube-vip) v0.4.2
|
||||||
- Application
|
- Application
|
||||||
- [cert-manager](https://github.com/jetstack/cert-manager) v1.11.1
|
- [cert-manager](https://github.com/jetstack/cert-manager) v1.9.0
|
||||||
- [coredns](https://github.com/coredns/coredns) v1.10.1
|
- [coredns](https://github.com/coredns/coredns) v1.8.6
|
||||||
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.8.1
|
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.3.0
|
||||||
- [krew](https://github.com/kubernetes-sigs/krew) v0.4.4
|
- [krew](https://github.com/kubernetes-sigs/krew) v0.4.3
|
||||||
- [argocd](https://argoproj.github.io/) v2.8.0
|
- [argocd](https://argoproj.github.io/) v2.4.7
|
||||||
- [helm](https://helm.sh/) v3.12.3
|
- [helm](https://helm.sh/) v3.9.2
|
||||||
- [metallb](https://metallb.universe.tf/) v0.13.9
|
- [metallb](https://metallb.universe.tf/) v0.12.1
|
||||||
- [registry](https://github.com/distribution/distribution) v2.8.1
|
- [registry](https://github.com/distribution/distribution) v2.8.1
|
||||||
- Storage Plugin
|
- Storage Plugin
|
||||||
- [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
|
- [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
|
||||||
@@ -191,30 +166,30 @@ Note: Upstart/SysV init based OS types are not supported.
|
|||||||
- [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) v0.5.0
|
- [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) v0.5.0
|
||||||
- [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0
|
- [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0
|
||||||
- [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.22.0
|
- [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.22.0
|
||||||
- [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.9.2
|
- [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.4.0
|
||||||
- [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.24
|
- [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.22
|
||||||
- [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0
|
- [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.4.0
|
||||||
|
|
||||||
## Container Runtime Notes
|
## Container Runtime Notes
|
||||||
|
|
||||||
- Supported Docker versions are 18.09, 19.03, 20.10, 23.0 and 24.0. The *recommended* Docker version is 20.10 (except on Debian bookworm which without supporting for 20.10 and below any more). `Kubelet` might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. the YUM ``versionlock`` plugin or ``apt pin``).
|
- The list of available docker version is 18.09, 19.03 and 20.10. The recommended docker version is 20.10. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
|
||||||
- The cri-o version should be aligned with the respective kubernetes version (i.e. kube_version=1.20.x, crio_version=1.20)
|
- The cri-o version should be aligned with the respective kubernetes version (i.e. kube_version=1.20.x, crio_version=1.20)
|
||||||
|
|
||||||
## Requirements
|
## Requirements
|
||||||
|
|
||||||
- **Minimum required version of Kubernetes is v1.25**
|
- **Minimum required version of Kubernetes is v1.22**
|
||||||
- **Ansible v2.14+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
|
- **Ansible v2.11+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
|
||||||
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
|
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
|
||||||
- The target servers are configured to allow **IPv4 forwarding**.
|
- The target servers are configured to allow **IPv4 forwarding**.
|
||||||
- If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**.
|
- If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**.
|
||||||
- The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
|
- The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
|
||||||
in order to avoid any issues during deployment, you should disable your firewall.
|
in order to avoid any issue during deployment you should disable your firewall.
|
||||||
- If kubespray is run from a non-root user account, the correct privilege escalation method
|
- If kubespray is ran from non-root user account, correct privilege escalation method
|
||||||
should be configured in the target servers. Then the `ansible_become` flag
|
should be configured in the target servers. Then the `ansible_become` flag
|
||||||
or the command parameters `--become` or `-b` should be specified.
|
or command parameters `--become or -b` should be specified.
|
||||||
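
A minimal sketch of what this looks like in practice (the file path and values are placeholders, not taken verbatim from the sample inventory):

```yaml
# inventory/mycluster/group_vars/all/all.yml (illustrative)
# Escalate privileges on the target hosts when connecting as a non-root user.
ansible_become: true
ansible_become_user: root
```

Alternatively, leave these variables unset and pass `--become` (or `-b`) on the `ansible-playbook` command line.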
|
|
||||||
Hardware:
|
Hardware:
|
||||||
These limits are safeguarded by Kubespray. Actual requirements for your workload can differ. For a sizing guide go to the [Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/#size-of-master-and-master-components) guide.
|
These limits are safe guarded by Kubespray. Actual requirements for your workload can differ. For a sizing guide go to the [Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/#size-of-master-and-master-components) guide.
|
||||||
|
|
||||||
- Master
|
- Master
|
||||||
- Memory: 1500 MB
|
- Memory: 1500 MB
|
||||||
@@ -223,15 +198,17 @@ These limits are safeguarded by Kubespray. Actual requirements for your workload
|
|||||||
|
|
||||||
## Network Plugins
|
## Network Plugins
|
||||||
|
|
||||||
You can choose among ten network plugins. (default: `calico`, except Vagrant uses `flannel`)
|
You can choose between 10 network plugins. (default: `calico`, except Vagrant uses `flannel`)
|
||||||
|
|
||||||
- [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.
|
- [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.
|
||||||
|
|
||||||
- [Calico](https://docs.tigera.io/calico/latest/about/) is a networking and network policy provider. Calico supports a flexible set of networking options
|
- [Calico](https://docs.projectcalico.org/latest/introduction/) is a networking and network policy provider. Calico supports a flexible set of networking options
|
||||||
designed to give you the most efficient networking across a range of situations, including non-overlay
|
designed to give you the most efficient networking across a range of situations, including non-overlay
|
||||||
and overlay networks, with or without BGP. Calico uses the same engine to enforce network policy for hosts,
|
and overlay networks, with or without BGP. Calico uses the same engine to enforce network policy for hosts,
|
||||||
pods, and (if using Istio and Envoy) applications at the service mesh layer.
|
pods, and (if using Istio and Envoy) applications at the service mesh layer.
|
||||||
|
|
||||||
|
- [canal](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.
|
||||||
|
|
||||||
- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.
|
- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.
|
||||||
|
|
||||||
- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
|
- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
|
||||||
@@ -248,10 +225,7 @@ You can choose among ten network plugins. (default: `calico`, except Vagrant use
|
|||||||
|
|
||||||
- [multus](docs/multus.md): Multus is a meta CNI plugin that provides multiple network interface support to pods. For each interface Multus delegates CNI calls to secondary CNI plugins such as Calico, macvlan, etc.
|
- [multus](docs/multus.md): Multus is a meta CNI plugin that provides multiple network interface support to pods. For each interface Multus delegates CNI calls to secondary CNI plugins such as Calico, macvlan, etc.
|
||||||
|
|
||||||
- [custom_cni](roles/network-plugin/custom_cni/): You can specify manifests that will be applied to the cluster to bring your own CNI, including ones not supported by Kubespray.
|
The choice is defined with the variable `kube_network_plugin`. There is also an
|
||||||
See `tests/files/custom_cni/README.md` and `tests/files/custom_cni/values.yaml` for an example with a CNI provided by a Helm Chart.
|
|
||||||
|
|
||||||
The network plugin to use is defined by the variable `kube_network_plugin`. There is also an
|
|
||||||
option to leverage built-in cloud provider networking instead.
|
option to leverage built-in cloud provider networking instead.
|
||||||
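
For instance, a minimal group_vars entry selecting the plugin could look like the following; `calico` matches the documented default, while the `cloud` value for built-in cloud provider networking is an assumption to verify against your Kubespray release:

```yaml
# inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml (illustrative)
# CNI to deploy; other supported values follow the same pattern
# (flannel, cilium, weave, macvlan, custom_cni, ...).
kube_network_plugin: calico

# Uncomment to rely on the cloud provider's networking instead of a CNI.
# kube_network_plugin: cloud
```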
See also [Network checker](docs/netcheck.md).
|
See also [Network checker](docs/netcheck.md).
|
||||||
|
|
||||||
@@ -272,11 +246,10 @@ See also [Network checker](docs/netcheck.md).
|
|||||||
|
|
||||||
- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/v4/doc/integrations/ansible.rst)
|
- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/v4/doc/integrations/ansible.rst)
|
||||||
- [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform)
|
- [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform)
|
||||||
- [Kubean](https://github.com/kubean-io/kubean)
|
|
||||||
|
|
||||||
## CI Tests
|
## CI Tests
|
||||||
|
|
||||||
[](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/-/pipelines)
|
[](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/pipelines)
|
||||||
|
|
||||||
CI/end-to-end tests sponsored by: [CNCF](https://cncf.io), [Equinix Metal](https://metal.equinix.com/), [OVHcloud](https://www.ovhcloud.com/), [ELASTX](https://elastx.se/).
|
CI/end-to-end tests sponsored by: [CNCF](https://cncf.io), [Equinix Metal](https://metal.equinix.com/), [OVHcloud](https://www.ovhcloud.com/), [ELASTX](https://elastx.se/).
|
||||||
|
|
||||||
|
|||||||
@@ -60,7 +60,7 @@ release-notes --start-sha <The start commit-id> --end-sha <The end commit-id> --
|
|||||||
```
|
```
|
||||||
|
|
||||||
If the release note file (/tmp/kubespray-release-note) contains "### Uncategorized" pull requests, those pull requests don't have a valid kind label (`kind/feature`, etc.).
|
If the release note file(/tmp/kubespray-release-note) contains "### Uncategorized" pull requests, those pull requests don't have a valid kind label(`kind/feature`, etc.).
|
||||||
It is necessary to put a valid label on each pull request and run the above release-notes command again to get a better release note.
|
It is necessary to put a valid label on each pull request and run the above release-notes command again to get a better release note)
|
||||||
|
|
||||||
## Container image creation
|
## Container image creation
|
||||||
|
|
||||||
|
|||||||
@@ -9,7 +9,5 @@
|
|||||||
#
|
#
|
||||||
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
|
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
|
||||||
# INSTRUCTIONS AT https://kubernetes.io/security/
|
# INSTRUCTIONS AT https://kubernetes.io/security/
|
||||||
|
atoms
|
||||||
mattymo
|
mattymo
|
||||||
floryut
|
|
||||||
oomichi
|
|
||||||
cristicalin
|
|
||||||
|
|||||||
42
Vagrantfile
vendored
@@ -19,8 +19,9 @@ SUPPORTED_OS = {
|
|||||||
"flatcar-beta" => {box: "flatcar-beta", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["beta"]},
|
"flatcar-beta" => {box: "flatcar-beta", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["beta"]},
|
||||||
"flatcar-alpha" => {box: "flatcar-alpha", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["alpha"]},
|
"flatcar-alpha" => {box: "flatcar-alpha", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["alpha"]},
|
||||||
"flatcar-edge" => {box: "flatcar-edge", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["edge"]},
|
"flatcar-edge" => {box: "flatcar-edge", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["edge"]},
|
||||||
|
"ubuntu1604" => {box: "generic/ubuntu1604", user: "vagrant"},
|
||||||
|
"ubuntu1804" => {box: "generic/ubuntu1804", user: "vagrant"},
|
||||||
"ubuntu2004" => {box: "generic/ubuntu2004", user: "vagrant"},
|
"ubuntu2004" => {box: "generic/ubuntu2004", user: "vagrant"},
|
||||||
"ubuntu2204" => {box: "generic/ubuntu2204", user: "vagrant"},
|
|
||||||
"centos" => {box: "centos/7", user: "vagrant"},
|
"centos" => {box: "centos/7", user: "vagrant"},
|
||||||
"centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
|
"centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
|
||||||
"centos8" => {box: "centos/8", user: "vagrant"},
|
"centos8" => {box: "centos/8", user: "vagrant"},
|
||||||
@@ -28,9 +29,9 @@ SUPPORTED_OS = {
|
|||||||
"almalinux8" => {box: "almalinux/8", user: "vagrant"},
|
"almalinux8" => {box: "almalinux/8", user: "vagrant"},
|
||||||
"almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
|
"almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
|
||||||
"rockylinux8" => {box: "generic/rocky8", user: "vagrant"},
|
"rockylinux8" => {box: "generic/rocky8", user: "vagrant"},
|
||||||
"fedora37" => {box: "fedora/37-cloud-base", user: "vagrant"},
|
"fedora35" => {box: "fedora/35-cloud-base", user: "vagrant"},
|
||||||
"fedora38" => {box: "fedora/38-cloud-base", user: "vagrant"},
|
"fedora36" => {box: "fedora/36-cloud-base", user: "vagrant"},
|
||||||
"opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"},
|
"opensuse" => {box: "opensuse/Leap-15.3.x86_64", user: "vagrant"},
|
||||||
"opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
|
"opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
|
||||||
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
|
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
|
||||||
"oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
|
"oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
|
||||||
@@ -52,16 +53,16 @@ $shared_folders ||= {}
|
|||||||
$forwarded_ports ||= {}
|
$forwarded_ports ||= {}
|
||||||
$subnet ||= "172.18.8"
|
$subnet ||= "172.18.8"
|
||||||
$subnet_ipv6 ||= "fd3c:b398:0698:0756"
|
$subnet_ipv6 ||= "fd3c:b398:0698:0756"
|
||||||
$os ||= "ubuntu2004"
|
$os ||= "ubuntu1804"
|
||||||
$network_plugin ||= "flannel"
|
$network_plugin ||= "flannel"
|
||||||
# Setting multi_networking to true will install Multus: https://github.com/k8snetworkplumbingwg/multus-cni
|
# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
|
||||||
$multi_networking ||= "False"
|
$multi_networking ||= "False"
|
||||||
$download_run_once ||= "True"
|
$download_run_once ||= "True"
|
||||||
$download_force_cache ||= "False"
|
$download_force_cache ||= "False"
|
||||||
# The first three nodes are etcd servers
|
# The first three nodes are etcd servers
|
||||||
$etcd_instances ||= [$num_instances, 3].min
|
$etcd_instances ||= $num_instances
|
||||||
# The first two nodes are kube masters
|
# The first two nodes are kube masters
|
||||||
$kube_master_instances ||= [$num_instances, 2].min
|
$kube_master_instances ||= $num_instances == 1 ? $num_instances : ($num_instances - 1)
|
||||||
# All nodes are kube nodes
|
# All nodes are kube nodes
|
||||||
$kube_node_instances ||= $num_instances
|
$kube_node_instances ||= $num_instances
|
||||||
# The following only works when using the libvirt provider
|
# The following only works when using the libvirt provider
|
||||||
@@ -81,13 +82,6 @@ $playbook ||= "cluster.yml"
|
|||||||
|
|
||||||
host_vars = {}
|
host_vars = {}
|
||||||
|
|
||||||
# throw error if os is not supported
|
|
||||||
if ! SUPPORTED_OS.key?($os)
|
|
||||||
puts "Unsupported OS: #{$os}"
|
|
||||||
puts "Supported OS are: #{SUPPORTED_OS.keys.join(', ')}"
|
|
||||||
exit 1
|
|
||||||
end
|
|
||||||
|
|
||||||
$box = SUPPORTED_OS[$os][:box]
|
$box = SUPPORTED_OS[$os][:box]
|
||||||
# if $inventory is not set, try to use example
|
# if $inventory is not set, try to use example
|
||||||
$inventory = "inventory/sample" if ! $inventory
|
$inventory = "inventory/sample" if ! $inventory
|
||||||
@@ -207,8 +201,7 @@ Vagrant.configure("2") do |config|
|
|||||||
end
|
end
|
||||||
|
|
||||||
ip = "#{$subnet}.#{i+100}"
|
ip = "#{$subnet}.#{i+100}"
|
||||||
node.vm.network :private_network,
|
node.vm.network :private_network, ip: ip,
|
||||||
:ip => ip,
|
|
||||||
:libvirt__guest_ipv6 => 'yes',
|
:libvirt__guest_ipv6 => 'yes',
|
||||||
:libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
|
:libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
|
||||||
:libvirt__ipv6_prefix => "64",
|
:libvirt__ipv6_prefix => "64",
|
||||||
@@ -218,22 +211,14 @@ Vagrant.configure("2") do |config|
|
|||||||
# Disable swap for each vm
|
# Disable swap for each vm
|
||||||
node.vm.provision "shell", inline: "swapoff -a"
|
node.vm.provision "shell", inline: "swapoff -a"
|
||||||
|
|
||||||
# ubuntu2004 and ubuntu2204 have IPv6 explicitly disabled. This undoes that.
|
# ubuntu1804 and ubuntu2004 have IPv6 explicitly disabled. This undoes that.
|
||||||
if ["ubuntu2004", "ubuntu2204"].include? $os
|
if ["ubuntu1804", "ubuntu2004"].include? $os
|
||||||
node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf"
|
node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf"
|
||||||
node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf"
|
node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf"
|
||||||
end
|
end
|
||||||
# Hack for fedora37/38 to get the IP address of the second interface
|
|
||||||
if ["fedora37", "fedora38"].include? $os
|
|
||||||
config.vm.provision "shell", inline: <<-SHELL
|
|
||||||
nmcli conn modify 'Wired connection 2' ipv4.addresses $(cat /etc/sysconfig/network-scripts/ifcfg-eth1 | grep IPADDR | cut -d "=" -f2)
|
|
||||||
nmcli conn modify 'Wired connection 2' ipv4.method manual
|
|
||||||
service NetworkManager restart
|
|
||||||
SHELL
|
|
||||||
end
|
|
||||||
|
|
||||||
# Disable firewalld on oraclelinux/redhat vms
|
# Disable firewalld on oraclelinux/redhat vms
|
||||||
if ["oraclelinux","oraclelinux8","rhel7","rhel8","rockylinux8"].include? $os
|
if ["oraclelinux","oraclelinux8","rhel7","rhel8"].include? $os
|
||||||
node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld"
|
node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld"
|
||||||
end
|
end
|
||||||
|
|
||||||
@@ -263,7 +248,6 @@ Vagrant.configure("2") do |config|
|
|||||||
if i == $num_instances
|
if i == $num_instances
|
||||||
node.vm.provision "ansible" do |ansible|
|
node.vm.provision "ansible" do |ansible|
|
||||||
ansible.playbook = $playbook
|
ansible.playbook = $playbook
|
||||||
ansible.compatibility_mode = "2.0"
|
|
||||||
ansible.verbose = $ansible_verbosity
|
ansible.verbose = $ansible_verbosity
|
||||||
$ansible_inventory_path = File.join( $inventory, "hosts.ini")
|
$ansible_inventory_path = File.join( $inventory, "hosts.ini")
|
||||||
if File.exist?($ansible_inventory_path)
|
if File.exist?($ansible_inventory_path)
|
||||||
|
|||||||
@@ -1,17 +1,16 @@
|
|||||||
---
|
---
|
||||||
- name: Check Ansible version
|
- hosts: localhost
|
||||||
hosts: localhost
|
|
||||||
gather_facts: false
|
gather_facts: false
|
||||||
become: no
|
become: no
|
||||||
vars:
|
vars:
|
||||||
minimal_ansible_version: 2.14.0
|
minimal_ansible_version: 2.11.0
|
||||||
maximal_ansible_version: 2.15.0
|
maximal_ansible_version: 2.13.0
|
||||||
ansible_connection: local
|
ansible_connection: local
|
||||||
tags: always
|
tags: always
|
||||||
tasks:
|
tasks:
|
||||||
- name: "Check {{ minimal_ansible_version }} <= Ansible version < {{ maximal_ansible_version }}"
|
- name: "Check {{ minimal_ansible_version }} <= Ansible version < {{ maximal_ansible_version }}"
|
||||||
assert:
|
assert:
|
||||||
msg: "Ansible must be between {{ minimal_ansible_version }} and {{ maximal_ansible_version }} exclusive - you have {{ ansible_version.string }}"
|
msg: "Ansible must be between {{ minimal_ansible_version }} and {{ maximal_ansible_version }} exclusive"
|
||||||
that:
|
that:
|
||||||
- ansible_version.string is version(minimal_ansible_version, ">=")
|
- ansible_version.string is version(minimal_ansible_version, ">=")
|
||||||
- ansible_version.string is version(maximal_ansible_version, "<")
|
- ansible_version.string is version(maximal_ansible_version, "<")
|
||||||
129
cluster.yml
@@ -1,3 +1,128 @@
|
|||||||
---
|
---
|
||||||
- name: Install Kubernetes
|
- name: Check ansible version
|
||||||
ansible.builtin.import_playbook: playbooks/cluster.yml
|
import_playbook: ansible_version.yml
|
||||||
|
|
||||||
|
- name: Ensure compatibility with old groups
|
||||||
|
import_playbook: legacy_groups.yml
|
||||||
|
|
||||||
|
- hosts: bastion[0]
|
||||||
|
gather_facts: False
|
||||||
|
environment: "{{ proxy_disable_env }}"
|
||||||
|
roles:
|
||||||
|
- { role: kubespray-defaults }
|
||||||
|
- { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
|
||||||
|
|
||||||
|
- hosts: k8s_cluster:etcd
|
||||||
|
strategy: linear
|
||||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
|
gather_facts: false
|
||||||
|
environment: "{{ proxy_disable_env }}"
|
||||||
|
roles:
|
||||||
|
- { role: kubespray-defaults }
|
||||||
|
- { role: bootstrap-os, tags: bootstrap-os}
|
||||||
|
|
||||||
|
- name: Gather facts
|
||||||
|
tags: always
|
||||||
|
import_playbook: facts.yml
|
||||||
|
|
||||||
|
- hosts: k8s_cluster:etcd
|
||||||
|
gather_facts: False
|
||||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
|
environment: "{{ proxy_disable_env }}"
|
||||||
|
roles:
|
||||||
|
- { role: kubespray-defaults }
|
||||||
|
- { role: kubernetes/preinstall, tags: preinstall }
|
||||||
|
- { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
|
||||||
|
- { role: download, tags: download, when: "not skip_downloads" }
|
||||||
|
|
||||||
|
- hosts: etcd
|
||||||
|
gather_facts: False
|
||||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
|
environment: "{{ proxy_disable_env }}"
|
||||||
|
roles:
|
||||||
|
- { role: kubespray-defaults }
|
||||||
|
- role: etcd
|
||||||
|
tags: etcd
|
||||||
|
vars:
|
||||||
|
etcd_cluster_setup: true
|
||||||
|
etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
|
||||||
|
when: etcd_deployment_type != "kubeadm"
|
||||||
|
|
||||||
|
- hosts: k8s_cluster
|
||||||
|
gather_facts: False
|
||||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
|
environment: "{{ proxy_disable_env }}"
|
||||||
|
roles:
|
||||||
|
- { role: kubespray-defaults }
|
||||||
|
- role: etcd
|
||||||
|
tags: etcd
|
||||||
|
vars:
|
||||||
|
etcd_cluster_setup: false
|
||||||
|
etcd_events_cluster_setup: false
|
||||||
|
when: etcd_deployment_type != "kubeadm"
|
||||||
|
|
||||||
|
- hosts: k8s_cluster
|
||||||
|
gather_facts: False
|
||||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
|
environment: "{{ proxy_disable_env }}"
|
||||||
|
roles:
|
||||||
|
- { role: kubespray-defaults }
|
||||||
|
- { role: kubernetes/node, tags: node }
|
||||||
|
|
||||||
|
- hosts: kube_control_plane
|
||||||
|
gather_facts: False
|
||||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
|
environment: "{{ proxy_disable_env }}"
|
||||||
|
roles:
|
||||||
|
- { role: kubespray-defaults }
|
||||||
|
- { role: kubernetes/control-plane, tags: master }
|
||||||
|
- { role: kubernetes/client, tags: client }
|
||||||
|
- { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
|
||||||
|
|
||||||
|
- hosts: k8s_cluster
|
||||||
|
gather_facts: False
|
||||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
|
environment: "{{ proxy_disable_env }}"
|
||||||
|
roles:
|
||||||
|
- { role: kubespray-defaults }
|
||||||
|
- { role: kubernetes/kubeadm, tags: kubeadm}
|
||||||
|
- { role: kubernetes/node-label, tags: node-label }
|
||||||
|
- { role: network_plugin, tags: network }
|
||||||
|
|
||||||
|
- hosts: calico_rr
|
||||||
|
gather_facts: False
|
||||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
|
environment: "{{ proxy_disable_env }}"
|
||||||
|
roles:
|
||||||
|
- { role: kubespray-defaults }
|
||||||
|
- { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }
|
||||||
|
|
||||||
|
- hosts: kube_control_plane[0]
|
||||||
|
gather_facts: False
|
||||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
|
environment: "{{ proxy_disable_env }}"
|
||||||
|
roles:
|
||||||
|
- { role: kubespray-defaults }
|
||||||
|
- { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
|
||||||
|
|
||||||
|
- hosts: kube_control_plane
|
||||||
|
gather_facts: False
|
||||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
|
environment: "{{ proxy_disable_env }}"
|
||||||
|
roles:
|
||||||
|
- { role: kubespray-defaults }
|
||||||
|
- { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
|
||||||
|
- { role: kubernetes-apps/network_plugin, tags: network }
|
||||||
|
- { role: kubernetes-apps/policy_controller, tags: policy-controller }
|
||||||
|
- { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
|
||||||
|
- { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
|
||||||
|
- { role: kubernetes-apps, tags: apps }
|
||||||
|
|
||||||
|
- name: Apply resolv.conf changes now that cluster DNS is up
|
||||||
|
hosts: k8s_cluster
|
||||||
|
gather_facts: False
|
||||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
|
environment: "{{ proxy_disable_env }}"
|
||||||
|
roles:
|
||||||
|
- { role: kubespray-defaults }
|
||||||
|
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
|
||||||
|
|||||||
@@ -39,7 +39,7 @@ class SearchEC2Tags(object):
|
|||||||
hosts[group] = []
|
hosts[group] = []
|
||||||
tag_key = "kubespray-role"
|
tag_key = "kubespray-role"
|
||||||
tag_value = ["*"+group+"*"]
|
tag_value = ["*"+group+"*"]
|
||||||
region = os.environ['AWS_REGION']
|
region = os.environ['REGION']
|
||||||
|
|
||||||
ec2 = boto3.resource('ec2', region)
|
ec2 = boto3.resource('ec2', region)
|
||||||
filters = [{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}]
|
filters = [{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}]
|
||||||
@@ -67,11 +67,6 @@ class SearchEC2Tags(object):
|
|||||||
if node_labels_tag:
|
if node_labels_tag:
|
||||||
ansible_host['node_labels'] = dict([ label.strip().split('=') for label in node_labels_tag[0]['Value'].split(',') ])
|
ansible_host['node_labels'] = dict([ label.strip().split('=') for label in node_labels_tag[0]['Value'].split(',') ])
|
||||||
|
|
||||||
##Set when instance actually has node_taints
|
|
||||||
node_taints_tag = list(filter(lambda t: t['Key'] == 'kubespray-node-taints', instance.tags))
|
|
||||||
if node_taints_tag:
|
|
||||||
ansible_host['node_taints'] = list([ taint.strip() for taint in node_taints_tag[0]['Value'].split(',') ])
|
|
||||||
|
|
||||||
hosts[group].append(dns_name)
|
hosts[group].append(dns_name)
|
||||||
hosts['_meta']['hostvars'][dns_name] = ansible_host
|
hosts['_meta']['hostvars'][dns_name] = ansible_host
|
||||||
|
|
||||||
|
|||||||
@@ -1 +1 @@
|
|||||||
boto3 # Apache-2.0
|
boto3 # Apache-2.0
|
||||||
2
contrib/azurerm/.gitignore
vendored
@@ -1,2 +1,2 @@
|
|||||||
.generated
|
.generated
|
||||||
/inventory
|
/inventory
|
||||||
@@ -1,6 +1,5 @@
|
|||||||
---
|
---
|
||||||
- name: Generate Azure inventory
|
- hosts: localhost
|
||||||
hosts: localhost
|
|
||||||
gather_facts: False
|
gather_facts: False
|
||||||
roles:
|
roles:
|
||||||
- generate-inventory
|
- generate-inventory
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
---
|
---
|
||||||
- name: Generate Azure inventory
|
- hosts: localhost
|
||||||
hosts: localhost
|
|
||||||
gather_facts: False
|
gather_facts: False
|
||||||
roles:
|
roles:
|
||||||
- generate-inventory_2
|
- generate-inventory_2
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
---
|
---
|
||||||
- name: Generate Azure templates
|
- hosts: localhost
|
||||||
hosts: localhost
|
|
||||||
gather_facts: False
|
gather_facts: False
|
||||||
roles:
|
roles:
|
||||||
- generate-templates
|
- generate-templates
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
---
|
---
|
||||||
|
|
||||||
- name: Query Azure VMs
|
- name: Query Azure VMs # noqa 301
|
||||||
command: azure vm list-ip-address --json {{ azure_resource_group }}
|
command: azure vm list-ip-address --json {{ azure_resource_group }}
|
||||||
register: vm_list_cmd
|
register: vm_list_cmd
|
||||||
|
|
||||||
|
|||||||
@@ -1,14 +1,14 @@
|
|||||||
---
|
---
|
||||||
|
|
||||||
- name: Query Azure VMs IPs
|
- name: Query Azure VMs IPs # noqa 301
|
||||||
command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
|
command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
|
||||||
register: vm_ip_list_cmd
|
register: vm_ip_list_cmd
|
||||||
|
|
||||||
- name: Query Azure VMs Roles
|
- name: Query Azure VMs Roles # noqa 301
|
||||||
command: az vm list -o json --resource-group {{ azure_resource_group }}
|
command: az vm list -o json --resource-group {{ azure_resource_group }}
|
||||||
register: vm_list_cmd
|
register: vm_list_cmd
|
||||||
|
|
||||||
- name: Query Azure Load Balancer Public IP
|
- name: Query Azure Load Balancer Public IP # noqa 301
|
||||||
command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
|
command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
|
||||||
register: lb_pubip_cmd
|
register: lb_pubip_cmd
|
||||||
|
|
||||||
|
|||||||
@@ -31,3 +31,4 @@
|
|||||||
[k8s_cluster:children]
|
[k8s_cluster:children]
|
||||||
kube_node
|
kube_node
|
||||||
kube_control_plane
|
kube_control_plane
|
||||||
|
|
||||||
|
|||||||
@@ -24,14 +24,14 @@ bastionIPAddressName: bastion-pubip
|
|||||||
|
|
||||||
disablePasswordAuthentication: true
|
disablePasswordAuthentication: true
|
||||||
|
|
||||||
sshKeyPath: "/home/{{ admin_username }}/.ssh/authorized_keys"
|
sshKeyPath: "/home/{{admin_username}}/.ssh/authorized_keys"
|
||||||
|
|
||||||
imageReference:
|
imageReference:
|
||||||
publisher: "OpenLogic"
|
publisher: "OpenLogic"
|
||||||
offer: "CentOS"
|
offer: "CentOS"
|
||||||
sku: "7.5"
|
sku: "7.5"
|
||||||
version: "latest"
|
version: "latest"
|
||||||
imageReferenceJson: "{{ imageReference | to_json }}"
|
imageReferenceJson: "{{imageReference|to_json}}"
|
||||||
|
|
||||||
storageAccountName: "sa{{ nameSuffix | replace('-', '') }}"
|
storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
|
||||||
storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}"
|
storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}"
|
||||||
|
|||||||
@@ -27,4 +27,4 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
@@ -103,4 +103,4 @@
|
|||||||
}
|
}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
@@ -5,4 +5,4 @@
|
|||||||
"variables": {},
|
"variables": {},
|
||||||
"resources": [],
|
"resources": [],
|
||||||
"outputs": {}
|
"outputs": {}
|
||||||
}
|
}
|
||||||
@@ -16,4 +16,4 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
@@ -1,11 +1,9 @@
|
|||||||
---
|
---
|
||||||
- name: Create nodes as docker containers
|
- hosts: localhost
|
||||||
hosts: localhost
|
|
||||||
gather_facts: False
|
gather_facts: False
|
||||||
roles:
|
roles:
|
||||||
- { role: dind-host }
|
- { role: dind-host }
|
||||||
|
|
||||||
- name: Customize each node container
|
- hosts: containers
|
||||||
hosts: containers
|
|
||||||
roles:
|
roles:
|
||||||
- { role: dind-cluster }
|
- { role: dind-cluster }
|
||||||
|
|||||||
@@ -1,9 +1,9 @@
|
|||||||
---
|
---
|
||||||
- name: Set_fact distro_setup
|
- name: set_fact distro_setup
|
||||||
set_fact:
|
set_fact:
|
||||||
distro_setup: "{{ distro_settings[node_distro] }}"
|
distro_setup: "{{ distro_settings[node_distro] }}"
|
||||||
|
|
||||||
- name: Set_fact other distro settings
|
- name: set_fact other distro settings
|
||||||
set_fact:
|
set_fact:
|
||||||
distro_user: "{{ distro_setup['user'] }}"
|
distro_user: "{{ distro_setup['user'] }}"
|
||||||
distro_ssh_service: "{{ distro_setup['ssh_service'] }}"
|
distro_ssh_service: "{{ distro_setup['ssh_service'] }}"
|
||||||
@@ -43,7 +43,7 @@
|
|||||||
package:
|
package:
|
||||||
name: "{{ item }}"
|
name: "{{ item }}"
|
||||||
state: present
|
state: present
|
||||||
with_items: "{{ distro_extra_packages + ['rsyslog', 'openssh-server'] }}"
|
with_items: "{{ distro_extra_packages }} + [ 'rsyslog', 'openssh-server' ]"
|
||||||
|
|
||||||
- name: Start needed services
|
- name: Start needed services
|
||||||
service:
|
service:
|
||||||
@@ -66,8 +66,8 @@
|
|||||||
dest: "/etc/sudoers.d/{{ distro_user }}"
|
dest: "/etc/sudoers.d/{{ distro_user }}"
|
||||||
mode: 0640
|
mode: 0640
|
||||||
|
|
||||||
- name: "Add my pubkey to {{ distro_user }} user authorized keys"
|
- name: Add my pubkey to "{{ distro_user }}" user authorized keys
|
||||||
ansible.posix.authorized_key:
|
authorized_key:
|
||||||
user: "{{ distro_user }}"
|
user: "{{ distro_user }}"
|
||||||
state: present
|
state: present
|
||||||
key: "{{ lookup('file', lookup('env', 'HOME') + '/.ssh/id_rsa.pub') }}"
|
key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}"
|
||||||
|
|||||||
@@ -1,9 +1,9 @@
|
|||||||
---
|
---
|
||||||
- name: Set_fact distro_setup
|
- name: set_fact distro_setup
|
||||||
set_fact:
|
set_fact:
|
||||||
distro_setup: "{{ distro_settings[node_distro] }}"
|
distro_setup: "{{ distro_settings[node_distro] }}"
|
||||||
|
|
||||||
- name: Set_fact other distro settings
|
- name: set_fact other distro settings
|
||||||
set_fact:
|
set_fact:
|
||||||
distro_image: "{{ distro_setup['image'] }}"
|
distro_image: "{{ distro_setup['image'] }}"
|
||||||
distro_init: "{{ distro_setup['init'] }}"
|
distro_init: "{{ distro_setup['init'] }}"
|
||||||
@@ -13,7 +13,7 @@
|
|||||||
distro_agetty_svc: "{{ distro_setup['agetty_svc'] }}"
|
distro_agetty_svc: "{{ distro_setup['agetty_svc'] }}"
|
||||||
|
|
||||||
- name: Create dind node containers from "containers" inventory section
|
- name: Create dind node containers from "containers" inventory section
|
||||||
community.docker.docker_container:
|
docker_container:
|
||||||
image: "{{ distro_image }}"
|
image: "{{ distro_image }}"
|
||||||
name: "{{ item }}"
|
name: "{{ item }}"
|
||||||
state: started
|
state: started
|
||||||
@@ -53,7 +53,7 @@
|
|||||||
{{ distro_raw_setup_done }} && echo SKIPPED && exit 0
|
{{ distro_raw_setup_done }} && echo SKIPPED && exit 0
|
||||||
until [ "$(readlink /proc/1/exe)" = "{{ distro_pid1_exe }}" ] ; do sleep 1; done
|
until [ "$(readlink /proc/1/exe)" = "{{ distro_pid1_exe }}" ] ; do sleep 1; done
|
||||||
{{ distro_raw_setup }}
|
{{ distro_raw_setup }}
|
||||||
delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
|
delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
|
||||||
with_items: "{{ containers.results }}"
|
with_items: "{{ containers.results }}"
|
||||||
register: result
|
register: result
|
||||||
changed_when: result.stdout.find("SKIPPED") < 0
|
changed_when: result.stdout.find("SKIPPED") < 0
|
||||||
@@ -63,25 +63,26 @@
|
|||||||
until test -S /var/run/dbus/system_bus_socket; do sleep 1; done
|
until test -S /var/run/dbus/system_bus_socket; do sleep 1; done
|
||||||
systemctl disable {{ distro_agetty_svc }}
|
systemctl disable {{ distro_agetty_svc }}
|
||||||
systemctl stop {{ distro_agetty_svc }}
|
systemctl stop {{ distro_agetty_svc }}
|
||||||
delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
|
delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
|
||||||
with_items: "{{ containers.results }}"
|
with_items: "{{ containers.results }}"
|
||||||
changed_when: false
|
changed_when: false
|
||||||
|
|
||||||
# Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
|
# Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
|
||||||
# handle manually
|
# handle manually
|
||||||
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)
|
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave) # noqa 301
|
||||||
raw: |
|
raw: |
|
||||||
echo {{ item | hash('sha1') }} > /etc/machine-id.new
|
echo {{ item | hash('sha1') }} > /etc/machine-id.new
|
||||||
mv -b /etc/machine-id.new /etc/machine-id
|
mv -b /etc/machine-id.new /etc/machine-id
|
||||||
cmp /etc/machine-id /etc/machine-id~ || true
|
cmp /etc/machine-id /etc/machine-id~ || true
|
||||||
systemctl daemon-reload
|
systemctl daemon-reload
|
||||||
delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
|
delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
|
||||||
with_items: "{{ containers.results }}"
|
with_items: "{{ containers.results }}"
|
||||||
|
|
||||||
- name: Early hack image install to adapt for DIND
|
- name: Early hack image install to adapt for DIND
|
||||||
|
# noqa 302 - this task uses the raw module intentionally
|
||||||
raw: |
|
raw: |
|
||||||
rm -fv /usr/bin/udevadm /usr/sbin/udevadm
|
rm -fv /usr/bin/udevadm /usr/sbin/udevadm
|
||||||
delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
|
delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
|
||||||
with_items: "{{ containers.results }}"
|
with_items: "{{ containers.results }}"
|
||||||
register: result
|
register: result
|
||||||
changed_when: result.stdout.find("removed") >= 0
|
changed_when: result.stdout.find("removed") >= 0
|
||||||
|
|||||||
@@ -1,3 +1,3 @@
|
|||||||
configparser>=3.3.0
|
configparser>=3.3.0
|
||||||
ipaddress
|
|
||||||
ruamel.yaml>=0.15.88
|
ruamel.yaml>=0.15.88
|
||||||
|
ipaddress
|
||||||
|
|||||||
@@ -1,3 +1,3 @@
|
|||||||
hacking>=0.10.2
|
hacking>=0.10.2
|
||||||
mock>=1.3.0
|
|
||||||
pytest>=2.8.0
|
pytest>=2.8.0
|
||||||
|
mock>=1.3.0
|
||||||
|
|||||||
@@ -13,7 +13,7 @@
|
|||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
import inventory
|
import inventory
|
||||||
from io import StringIO
|
from test import support
|
||||||
import unittest
|
import unittest
|
||||||
from unittest import mock
|
from unittest import mock
|
||||||
|
|
||||||
@@ -41,7 +41,7 @@ class TestInventoryPrintHostnames(unittest.TestCase):
|
|||||||
'access_ip': '10.90.0.3'}}}})
|
'access_ip': '10.90.0.3'}}}})
|
||||||
with mock.patch('builtins.open', mock_io):
|
with mock.patch('builtins.open', mock_io):
|
||||||
with self.assertRaises(SystemExit) as cm:
|
with self.assertRaises(SystemExit) as cm:
|
||||||
with mock.patch('sys.stdout', new_callable=StringIO) as stdout:
|
with support.captured_stdout() as stdout:
|
||||||
inventory.KubesprayInventory(
|
inventory.KubesprayInventory(
|
||||||
changed_hosts=["print_hostnames"],
|
changed_hosts=["print_hostnames"],
|
||||||
config_file="file")
|
config_file="file")
|
||||||
|
|||||||
@@ -1,27 +1,21 @@
|
|||||||
[tox]
|
[tox]
|
||||||
minversion = 1.6
|
minversion = 1.6
|
||||||
skipsdist = True
|
skipsdist = True
|
||||||
envlist = pep8
|
envlist = pep8, py33
|
||||||
|
|
||||||
[testenv]
|
[testenv]
|
||||||
allowlist_externals = py.test
|
whitelist_externals = py.test
|
||||||
usedevelop = True
|
usedevelop = True
|
||||||
deps =
|
deps =
|
||||||
-r{toxinidir}/requirements.txt
|
-r{toxinidir}/requirements.txt
|
||||||
-r{toxinidir}/test-requirements.txt
|
-r{toxinidir}/test-requirements.txt
|
||||||
setenv = VIRTUAL_ENV={envdir}
|
setenv = VIRTUAL_ENV={envdir}
|
||||||
passenv =
|
passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
|
||||||
http_proxy
|
|
||||||
HTTP_PROXY
|
|
||||||
https_proxy
|
|
||||||
HTTPS_PROXY
|
|
||||||
no_proxy
|
|
||||||
NO_PROXY
|
|
||||||
commands = pytest -vv #{posargs:./tests}
|
commands = pytest -vv #{posargs:./tests}
|
||||||
|
|
||||||
[testenv:pep8]
|
[testenv:pep8]
|
||||||
usedevelop = False
|
usedevelop = False
|
||||||
allowlist_externals = bash
|
whitelist_externals = bash
|
||||||
commands =
|
commands =
|
||||||
bash -c "find {toxinidir}/* -type f -name '*.py' -print0 | xargs -0 flake8"
|
bash -c "find {toxinidir}/* -type f -name '*.py' -print0 | xargs -0 flake8"
|
||||||
|
|
||||||
|
|||||||
@@ -1,2 +1,3 @@
|
|||||||
#k8s_deployment_user: kubespray
|
#k8s_deployment_user: kubespray
|
||||||
#k8s_deployment_user_pkey_path: /tmp/ssh_rsa
|
#k8s_deployment_user_pkey_path: /tmp/ssh_rsa
|
||||||
|
|
||||||
|
|||||||
@@ -1,9 +1,8 @@
|
|||||||
---
|
---
|
||||||
- name: Prepare Hypervisor to later install kubespray VMs
|
- hosts: localhost
|
||||||
hosts: localhost
|
|
||||||
gather_facts: False
|
gather_facts: False
|
||||||
become: yes
|
become: yes
|
||||||
vars:
|
vars:
|
||||||
bootstrap_os: none
|
- bootstrap_os: none
|
||||||
roles:
|
roles:
|
||||||
- { role: kvm-setup }
|
- kvm-setup
|
||||||
|
|||||||
@@ -22,9 +22,9 @@
|
|||||||
- ntp
|
- ntp
|
||||||
when: ansible_os_family == "Debian"
|
when: ansible_os_family == "Debian"
|
||||||
|
|
||||||
- name: Create deployment user if required
|
# Create deployment user if required
|
||||||
include_tasks: user.yml
|
- include: user.yml
|
||||||
when: k8s_deployment_user is defined
|
when: k8s_deployment_user is defined
|
||||||
|
|
||||||
- name: Set proper sysctl values
|
# Set proper sysctl values
|
||||||
import_tasks: sysctl.yml
|
- include: sysctl.yml
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
---
|
---
|
||||||
- name: Load br_netfilter module
|
- name: Load br_netfilter module
|
||||||
community.general.modprobe:
|
modprobe:
|
||||||
name: br_netfilter
|
name: br_netfilter
|
||||||
state: present
|
state: present
|
||||||
register: br_netfilter
|
register: br_netfilter
|
||||||
@@ -25,7 +25,7 @@
|
|||||||
|
|
||||||
|
|
||||||
- name: Enable net.ipv4.ip_forward in sysctl
|
- name: Enable net.ipv4.ip_forward in sysctl
|
||||||
ansible.posix.sysctl:
|
sysctl:
|
||||||
name: net.ipv4.ip_forward
|
name: net.ipv4.ip_forward
|
||||||
value: 1
|
value: 1
|
||||||
sysctl_file: "{{ sysctl_file_path }}"
|
sysctl_file: "{{ sysctl_file_path }}"
|
||||||
@@ -33,7 +33,7 @@
|
|||||||
reload: yes
|
reload: yes
|
||||||
|
|
||||||
- name: Set bridge-nf-call-{arptables,iptables} to 0
|
- name: Set bridge-nf-call-{arptables,iptables} to 0
|
||||||
ansible.posix.sysctl:
|
sysctl:
|
||||||
name: "{{ item }}"
|
name: "{{ item }}"
|
||||||
state: present
|
state: present
|
||||||
value: 0
|
value: 0
|
||||||
|
|||||||
@@ -1,9 +1,8 @@
|
|||||||
---
|
---
|
||||||
- name: Check ansible version
|
- name: Check ansible version
|
||||||
import_playbook: kubernetes_sigs.kubespray.ansible_version
|
import_playbook: ansible_version.yml
|
||||||
|
|
||||||
- name: Install mitogen
|
- hosts: localhost
|
||||||
hosts: localhost
|
|
||||||
strategy: linear
|
strategy: linear
|
||||||
vars:
|
vars:
|
||||||
mitogen_version: 0.3.2
|
mitogen_version: 0.3.2
|
||||||
@@ -20,25 +19,24 @@
|
|||||||
- "{{ playbook_dir }}/plugins/mitogen"
|
- "{{ playbook_dir }}/plugins/mitogen"
|
||||||
- "{{ playbook_dir }}/dist"
|
- "{{ playbook_dir }}/dist"
|
||||||
|
|
||||||
- name: Download mitogen release
|
- name: download mitogen release
|
||||||
get_url:
|
get_url:
|
||||||
url: "{{ mitogen_url }}"
|
url: "{{ mitogen_url }}"
|
||||||
dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
|
dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
|
||||||
validate_certs: true
|
validate_certs: true
|
||||||
mode: 0644
|
|
||||||
|
|
||||||
- name: Extract archive
|
- name: extract archive
|
||||||
unarchive:
|
unarchive:
|
||||||
src: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
|
src: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
|
||||||
dest: "{{ playbook_dir }}/dist/"
|
dest: "{{ playbook_dir }}/dist/"
|
||||||
|
|
||||||
- name: Copy plugin
|
- name: copy plugin
|
||||||
ansible.posix.synchronize:
|
synchronize:
|
||||||
src: "{{ playbook_dir }}/dist/mitogen-{{ mitogen_version }}/"
|
src: "{{ playbook_dir }}/dist/mitogen-{{ mitogen_version }}/"
|
||||||
dest: "{{ playbook_dir }}/plugins/mitogen"
|
dest: "{{ playbook_dir }}/plugins/mitogen"
|
||||||
|
|
||||||
- name: Add strategy to ansible.cfg
|
- name: add strategy to ansible.cfg
|
||||||
community.general.ini_file:
|
ini_file:
|
||||||
path: ansible.cfg
|
path: ansible.cfg
|
||||||
mode: 0644
|
mode: 0644
|
||||||
section: "{{ item.section | d('defaults') }}"
|
section: "{{ item.section | d('defaults') }}"
|
||||||
|
|||||||
@@ -1,29 +1,24 @@
|
|||||||
---
|
---
|
||||||
- name: Bootstrap hosts
|
- hosts: gfs-cluster
|
||||||
hosts: gfs-cluster
|
|
||||||
gather_facts: false
|
gather_facts: false
|
||||||
vars:
|
vars:
|
||||||
ansible_ssh_pipelining: false
|
ansible_ssh_pipelining: false
|
||||||
roles:
|
roles:
|
||||||
- { role: bootstrap-os, tags: bootstrap-os}
|
- { role: bootstrap-os, tags: bootstrap-os}
|
||||||
|
|
||||||
- name: Gather facts
|
- hosts: all
|
||||||
hosts: all
|
|
||||||
gather_facts: true
|
gather_facts: true
|
||||||
|
|
||||||
- name: Install glusterfs server
|
- hosts: gfs-cluster
|
||||||
hosts: gfs-cluster
|
|
||||||
vars:
|
vars:
|
||||||
ansible_ssh_pipelining: true
|
ansible_ssh_pipelining: true
|
||||||
roles:
|
roles:
|
||||||
- { role: glusterfs/server }
|
- { role: glusterfs/server }
|
||||||
|
|
||||||
- name: Install glusterfs servers
|
- hosts: k8s_cluster
|
||||||
hosts: k8s_cluster
|
|
||||||
roles:
|
roles:
|
||||||
- { role: glusterfs/client }
|
- { role: glusterfs/client }
|
||||||
|
|
||||||
- name: Configure Kubernetes to use glusterfs
|
- hosts: kube_control_plane[0]
|
||||||
hosts: kube_control_plane[0]
|
|
||||||
roles:
|
roles:
|
||||||
- { role: kubernetes-pv }
|
- { role: kubernetes-pv }
|
||||||
|
|||||||
@@ -41,3 +41,4 @@
|
|||||||
|
|
||||||
# [network-storage:children]
|
# [network-storage:children]
|
||||||
# gfs-cluster
|
# gfs-cluster
|
||||||
|
|
||||||
|
|||||||
@@ -6,12 +6,12 @@ galaxy_info:
|
|||||||
description: GlusterFS installation for Linux.
|
description: GlusterFS installation for Linux.
|
||||||
company: "Midwestern Mac, LLC"
|
company: "Midwestern Mac, LLC"
|
||||||
license: "license (BSD, MIT)"
|
license: "license (BSD, MIT)"
|
||||||
min_ansible_version: "2.0"
|
min_ansible_version: 2.0
|
||||||
platforms:
|
platforms:
|
||||||
- name: EL
|
- name: EL
|
||||||
versions:
|
versions:
|
||||||
- "6"
|
- 6
|
||||||
- "7"
|
- 7
|
||||||
- name: Ubuntu
|
- name: Ubuntu
|
||||||
versions:
|
versions:
|
||||||
- precise
|
- precise
|
||||||
|
|||||||
@@ -3,19 +3,14 @@
|
|||||||
# hyperkube and needs to be installed as part of the system.
|
# hyperkube and needs to be installed as part of the system.
|
||||||
|
|
||||||
# Setup/install tasks.
|
# Setup/install tasks.
|
||||||
- name: Setup RedHat distros for glusterfs
|
- include: setup-RedHat.yml
|
||||||
include_tasks: setup-RedHat.yml
|
|
||||||
when: ansible_os_family == 'RedHat' and groups['gfs-cluster'] is defined
|
when: ansible_os_family == 'RedHat' and groups['gfs-cluster'] is defined
|
||||||
|
|
||||||
- name: Setup Debian distros for glusterfs
|
- include: setup-Debian.yml
|
||||||
include_tasks: setup-Debian.yml
|
|
||||||
when: ansible_os_family == 'Debian' and groups['gfs-cluster'] is defined
|
when: ansible_os_family == 'Debian' and groups['gfs-cluster'] is defined
|
||||||
|
|
||||||
- name: Ensure Gluster mount directories exist.
|
- name: Ensure Gluster mount directories exist.
|
||||||
file:
|
file: "path={{ item }} state=directory mode=0775"
|
||||||
path: "{{ item }}"
|
|
||||||
state: directory
|
|
||||||
mode: 0775
|
|
||||||
with_items:
|
with_items:
|
||||||
- "{{ gluster_mount_dir }}"
|
- "{{ gluster_mount_dir }}"
|
||||||
when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined
|
when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined
|
||||||
|
|||||||
@@ -7,7 +7,7 @@
|
|||||||
register: glusterfs_ppa_added
|
register: glusterfs_ppa_added
|
||||||
when: glusterfs_ppa_use
|
when: glusterfs_ppa_use
|
||||||
|
|
||||||
- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa no-handler
|
- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa 503
|
||||||
apt:
|
apt:
|
||||||
name: "{{ item }}"
|
name: "{{ item }}"
|
||||||
state: absent
|
state: absent
|
||||||
|
|||||||
@@ -1,14 +1,10 @@
|
|||||||
---
|
---
|
||||||
- name: Install Prerequisites
|
- name: Install Prerequisites
|
||||||
package:
|
package: name={{ item }} state=present
|
||||||
name: "{{ item }}"
|
|
||||||
state: present
|
|
||||||
with_items:
|
with_items:
|
||||||
- "centos-release-gluster{{ glusterfs_default_release }}"
|
- "centos-release-gluster{{ glusterfs_default_release }}"
|
||||||
|
|
||||||
- name: Install Packages
|
- name: Install Packages
|
||||||
package:
|
package: name={{ item }} state=present
|
||||||
name: "{{ item }}"
|
|
||||||
state: present
|
|
||||||
with_items:
|
with_items:
|
||||||
- glusterfs-client
|
- glusterfs-client
|
||||||
|
|||||||
@@ -6,12 +6,12 @@ galaxy_info:
|
|||||||
description: GlusterFS installation for Linux.
|
description: GlusterFS installation for Linux.
|
||||||
company: "Midwestern Mac, LLC"
|
company: "Midwestern Mac, LLC"
|
||||||
license: "license (BSD, MIT)"
|
license: "license (BSD, MIT)"
|
||||||
min_ansible_version: "2.0"
|
min_ansible_version: 2.0
|
||||||
platforms:
|
platforms:
|
||||||
- name: EL
|
- name: EL
|
||||||
versions:
|
versions:
|
||||||
- "6"
|
- 6
|
||||||
- "7"
|
- 7
|
||||||
- name: Ubuntu
|
- name: Ubuntu
|
||||||
versions:
|
versions:
|
||||||
- precise
|
- precise
|
||||||
|
|||||||
@@ -4,97 +4,78 @@
|
|||||||
include_vars: "{{ ansible_os_family }}.yml"
|
include_vars: "{{ ansible_os_family }}.yml"
|
||||||
|
|
||||||
# Install xfs package
|
# Install xfs package
|
||||||
- name: Install xfs Debian
|
- name: install xfs Debian
|
||||||
apt:
|
apt: name=xfsprogs state=present
|
||||||
name: xfsprogs
|
|
||||||
state: present
|
|
||||||
when: ansible_os_family == "Debian"
|
when: ansible_os_family == "Debian"
|
||||||
|
|
||||||
- name: Install xfs RedHat
|
- name: install xfs RedHat
|
||||||
package:
|
package: name=xfsprogs state=present
|
||||||
name: xfsprogs
|
|
||||||
state: present
|
|
||||||
when: ansible_os_family == "RedHat"
|
when: ansible_os_family == "RedHat"
|
||||||
|
|
||||||
# Format external volumes in xfs
|
# Format external volumes in xfs
|
||||||
- name: Format volumes in xfs
|
- name: Format volumes in xfs
|
||||||
community.general.filesystem:
|
filesystem: "fstype=xfs dev={{ disk_volume_device_1 }}"
|
||||||
fstype: xfs
|
|
||||||
dev: "{{ disk_volume_device_1 }}"
|
|
||||||
|
|
||||||
# Mount external volumes
|
# Mount external volumes
|
||||||
- name: Mounting new xfs filesystem
|
- name: mounting new xfs filesystem
|
||||||
ansible.posix.mount:
|
mount: "name={{ gluster_volume_node_mount_dir }} src={{ disk_volume_device_1 }} fstype=xfs state=mounted"
|
||||||
name: "{{ gluster_volume_node_mount_dir }}"
|
|
||||||
src: "{{ disk_volume_device_1 }}"
|
|
||||||
fstype: xfs
|
|
||||||
state: mounted
|
|
||||||
|
|
||||||
# Setup/install tasks.
|
# Setup/install tasks.
|
||||||
- name: Setup RedHat distros for glusterfs
|
- include: setup-RedHat.yml
|
||||||
include_tasks: setup-RedHat.yml
|
|
||||||
when: ansible_os_family == 'RedHat'
|
when: ansible_os_family == 'RedHat'
|
||||||
|
|
||||||
- name: Setup Debian distros for glusterfs
|
- include: setup-Debian.yml
|
||||||
include_tasks: setup-Debian.yml
|
|
||||||
when: ansible_os_family == 'Debian'
|
when: ansible_os_family == 'Debian'
|
||||||
|
|
||||||
- name: Ensure GlusterFS is started and enabled at boot.
|
- name: Ensure GlusterFS is started and enabled at boot.
|
||||||
service:
|
service: "name={{ glusterfs_daemon }} state=started enabled=yes"
|
||||||
name: "{{ glusterfs_daemon }}"
|
|
||||||
state: started
|
|
||||||
enabled: yes
|
|
||||||
|
|
||||||
- name: Ensure Gluster brick and mount directories exist.
|
- name: Ensure Gluster brick and mount directories exist.
|
||||||
file:
|
file: "path={{ item }} state=directory mode=0775"
|
||||||
path: "{{ item }}"
|
|
||||||
state: directory
|
|
||||||
mode: 0775
|
|
||||||
with_items:
|
with_items:
|
||||||
- "{{ gluster_brick_dir }}"
|
- "{{ gluster_brick_dir }}"
|
||||||
- "{{ gluster_mount_dir }}"
|
- "{{ gluster_mount_dir }}"
|
||||||
|
|
||||||
- name: Configure Gluster volume with replicas
|
- name: Configure Gluster volume with replicas
|
||||||
gluster.gluster.gluster_volume:
|
gluster_volume:
|
||||||
state: present
|
state: present
|
||||||
name: "{{ gluster_brick_name }}"
|
name: "{{ gluster_brick_name }}"
|
||||||
brick: "{{ gluster_brick_dir }}"
|
brick: "{{ gluster_brick_dir }}"
|
||||||
replicas: "{{ groups['gfs-cluster'] | length }}"
|
replicas: "{{ groups['gfs-cluster'] | length }}"
|
||||||
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
|
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
|
||||||
host: "{{ inventory_hostname }}"
|
host: "{{ inventory_hostname }}"
|
||||||
force: yes
|
force: yes
|
||||||
run_once: true
|
run_once: true
|
||||||
when: groups['gfs-cluster'] | length > 1
|
when: groups['gfs-cluster']|length > 1
|
||||||
|
|
||||||
- name: Configure Gluster volume without replicas
|
- name: Configure Gluster volume without replicas
|
||||||
gluster.gluster.gluster_volume:
|
gluster_volume:
|
||||||
state: present
|
state: present
|
||||||
name: "{{ gluster_brick_name }}"
|
name: "{{ gluster_brick_name }}"
|
||||||
brick: "{{ gluster_brick_dir }}"
|
brick: "{{ gluster_brick_dir }}"
|
||||||
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
|
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
|
||||||
host: "{{ inventory_hostname }}"
|
host: "{{ inventory_hostname }}"
|
||||||
force: yes
|
force: yes
|
||||||
run_once: true
|
run_once: true
|
||||||
when: groups['gfs-cluster'] | length <= 1
|
when: groups['gfs-cluster']|length <= 1
|
||||||
|
|
||||||
- name: Mount glusterfs to retrieve disk size
|
- name: Mount glusterfs to retrieve disk size
|
||||||
ansible.posix.mount:
|
mount:
|
||||||
name: "{{ gluster_mount_dir }}"
|
name: "{{ gluster_mount_dir }}"
|
||||||
src: "{{ ip | default(ansible_default_ipv4['address']) }}:/gluster"
|
src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
|
||||||
fstype: glusterfs
|
fstype: glusterfs
|
||||||
opts: "defaults,_netdev"
|
opts: "defaults,_netdev"
|
||||||
state: mounted
|
state: mounted
|
||||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||||
|
|
||||||
- name: Get Gluster disk size
|
- name: Get Gluster disk size
|
||||||
setup:
|
setup: filter=ansible_mounts
|
||||||
filter: ansible_mounts
|
|
||||||
register: mounts_data
|
register: mounts_data
|
||||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||||
|
|
||||||
- name: Set Gluster disk size to variable
|
- name: Set Gluster disk size to variable
|
||||||
set_fact:
|
set_fact:
|
||||||
gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024 * 1024 * 1024)) | int }}"
|
gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024*1024*1024)) | int }}"
|
||||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||||
|
|
||||||
 - name: Create file on GlusterFS
@@ -105,9 +86,9 @@
 when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

 - name: Unmount glusterfs
-ansible.posix.mount:
+mount:
 name: "{{ gluster_mount_dir }}"
 fstype: glusterfs
-src: "{{ ip | default(ansible_default_ipv4['address']) }}:/gluster"
+src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
 state: unmounted
 when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
@@ -7,7 +7,7 @@
 register: glusterfs_ppa_added
 when: glusterfs_ppa_use

-- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa no-handler
+- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa 503
 apt:
 name: "{{ item }}"
 state: absent
@@ -1,15 +1,11 @@
 ---
 - name: Install Prerequisites
-package:
-name: "{{ item }}"
-state: present
+package: name={{ item }} state=present
 with_items:
 - "centos-release-gluster{{ glusterfs_default_release }}"

 - name: Install Packages
-package:
-name: "{{ item }}"
-state: present
+package: name={{ item }} state=present
 with_items:
 - glusterfs-server
 - glusterfs-client
@@ -18,6 +18,6 @@
 kubectl: "{{ bin_dir }}/kubectl"
 resource: "{{ item.item.type }}"
 filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
-state: "{{ item.changed | ternary('latest', 'present') }}"
+state: "{{ item.changed | ternary('latest','present') }}"
 with_items: "{{ gluster_pv.results }}"
 when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined
@@ -21,3 +21,4 @@
 {% endfor %}
 ]
 }
+
@@ -1,11 +1,9 @@
 ---
-- name: Tear down heketi
-hosts: kube_control_plane[0]
+- hosts: kube_control_plane[0]
 roles:
 - { role: tear-down }

-- name: Teardown disks in heketi
-hosts: heketi-node
+- hosts: heketi-node
 become: yes
 roles:
 - { role: tear-down-disks }
@@ -1,11 +1,9 @@
 ---
-- name: Prepare heketi install
-hosts: heketi-node
+- hosts: heketi-node
 roles:
 - { role: prepare }

-- name: Provision heketi
-hosts: kube_control_plane[0]
+- hosts: kube_control_plane[0]
 tags:
 - "provision"
 roles:
@@ -5,7 +5,7 @@
 - "dm_snapshot"
 - "dm_mirror"
 - "dm_thin_pool"
-community.general.modprobe:
+modprobe:
 name: "{{ item }}"
 state: "present"

@@ -1,3 +1,3 @@
 ---
-- name: "Stop port forwarding"
+- name: "stop port forwarding"
 command: "killall "
@@ -7,9 +7,9 @@

 - name: "Bootstrap heketi."
 when:
-- "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Service']\")) | length == 0"
-- "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Deployment']\")) | length == 0"
-- "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Pod']\")) | length == 0"
+- "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Service']\"))|length == 0"
+- "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Deployment']\"))|length == 0"
+- "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod']\"))|length == 0"
 include_tasks: "bootstrap/deploy.yml"

 # Prepare heketi topology
@@ -20,11 +20,11 @@

 - name: "Ensure heketi bootstrap pod is up."
 assert:
-that: "(initial_heketi_pod.stdout | from_json | json_query('items[*]')) | length == 1"
+that: "(initial_heketi_pod.stdout|from_json|json_query('items[*]'))|length == 1"

 - name: Store the initial heketi pod name
 set_fact:
-initial_heketi_pod_name: "{{ initial_heketi_pod.stdout | from_json | json_query(\"items[*].metadata.name | [0]\") }}"
+initial_heketi_pod_name: "{{ initial_heketi_pod.stdout|from_json|json_query(\"items[*].metadata.name|[0]\") }}"

 - name: "Test heketi topology."
 changed_when: false
@@ -32,7 +32,7 @@
 command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"

 - name: "Load heketi topology."
-when: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*]\") | flatten | length == 0"
+when: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*]\")|flatten|length == 0"
 include_tasks: "bootstrap/topology.yml"

 # Provision heketi database volume
@@ -58,7 +58,7 @@
 service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
 job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
 when:
-- "heketi_storage_state.stdout | from_json | json_query(secret_query) | length == 0"
-- "heketi_storage_state.stdout | from_json | json_query(endpoints_query) | length == 0"
-- "heketi_storage_state.stdout | from_json | json_query(service_query) | length == 0"
-- "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 0"
+- "heketi_storage_state.stdout|from_json|json_query(secret_query)|length == 0"
+- "heketi_storage_state.stdout|from_json|json_query(endpoints_query)|length == 0"
+- "heketi_storage_state.stdout|from_json|json_query(service_query)|length == 0"
+- "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 0"
@@ -17,11 +17,11 @@
 register: "initial_heketi_state"
 vars:
 initial_heketi_state: { stdout: "{}" }
-pods_query: "items[?kind=='Pod'].status.conditions | [0][?type=='Ready'].status | [0]"
-deployments_query: "items[?kind=='Deployment'].status.conditions | [0][?type=='Available'].status | [0]"
+pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]"
+deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
 command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
 until:
-- "initial_heketi_state.stdout | from_json | json_query(pods_query) == 'True'"
-- "initial_heketi_state.stdout | from_json | json_query(deployments_query) == 'True'"
+- "initial_heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
+- "initial_heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
 retries: 60
 delay: 5
@@ -15,10 +15,10 @@
 service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
 job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
 when:
-- "heketi_storage_state.stdout | from_json | json_query(secret_query) | length == 0"
-- "heketi_storage_state.stdout | from_json | json_query(endpoints_query) | length == 0"
-- "heketi_storage_state.stdout | from_json | json_query(service_query) | length == 0"
-- "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 0"
+- "heketi_storage_state.stdout|from_json|json_query(secret_query)|length == 0"
+- "heketi_storage_state.stdout|from_json|json_query(endpoints_query)|length == 0"
+- "heketi_storage_state.stdout|from_json|json_query(service_query)|length == 0"
+- "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 0"
 register: "heketi_storage_result"
 - name: "Get state of heketi database copy job."
 command: "{{ bin_dir }}/kubectl get jobs --output=json"
@@ -28,6 +28,6 @@
 heketi_storage_state: { stdout: "{}" }
 job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job' && status.succeeded==1]"
 until:
-- "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 1"
+- "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 1"
 retries: 60
 delay: 5
@@ -5,10 +5,10 @@
 changed_when: false
 - name: "Delete bootstrap Heketi."
 command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
-when: "heketi_resources.stdout | from_json | json_query('items[*]') | length > 0"
-- name: "Ensure there is nothing left over."
+when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0"
+- name: "Ensure there is nothing left over." # noqa 301
 command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
 register: "heketi_result"
-until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
+until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
 retries: 60
 delay: 5
@@ -14,7 +14,7 @@
 - name: "Copy topology configuration into container."
 changed_when: false
 command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
-- name: "Load heketi topology." # noqa no-handler
+- name: "Load heketi topology." # noqa 503
 when: "render.changed"
 command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
 register: "load_heketi"
@@ -22,6 +22,6 @@
 changed_when: false
 register: "heketi_topology"
 command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
-until: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\") | flatten | length == groups['heketi-node'] | length"
+until: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\")|flatten|length == groups['heketi-node']|length"
 retries: 60
 delay: 5
@@ -6,19 +6,19 @@
 - name: "Get heketi volumes."
 changed_when: false
 command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
-with_items: "{{ heketi_volumes.stdout | from_json | json_query(\"volumes[*]\") }}"
+with_items: "{{ heketi_volumes.stdout|from_json|json_query(\"volumes[*]\") }}"
 loop_control: { loop_var: "volume_id" }
 register: "volumes_information"
 - name: "Test heketi database volume."
 set_fact: { heketi_database_volume_exists: true }
 with_items: "{{ volumes_information.results }}"
 loop_control: { loop_var: "volume_information" }
-vars: { volume: "{{ volume_information.stdout | from_json }}" }
+vars: { volume: "{{ volume_information.stdout|from_json }}" }
 when: "volume.name == 'heketidbstorage'"
 - name: "Provision database volume."
 command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
 when: "heketi_database_volume_exists is undefined"
-- name: "Copy configuration from pod."
+- name: "Copy configuration from pod." # noqa 301
 become: true
 command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"
 - name: "Get heketi volume ids."
@@ -28,14 +28,14 @@
 - name: "Get heketi volumes."
 changed_when: false
 command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
-with_items: "{{ heketi_volumes.stdout | from_json | json_query(\"volumes[*]\") }}"
+with_items: "{{ heketi_volumes.stdout|from_json|json_query(\"volumes[*]\") }}"
 loop_control: { loop_var: "volume_id" }
 register: "volumes_information"
 - name: "Test heketi database volume."
 set_fact: { heketi_database_volume_created: true }
 with_items: "{{ volumes_information.results }}"
 loop_control: { loop_var: "volume_information" }
-vars: { volume: "{{ volume_information.stdout | from_json }}" }
+vars: { volume: "{{ volume_information.stdout|from_json }}" }
 when: "volume.name == 'heketidbstorage'"
 - name: "Ensure heketi database volume exists."
 assert: { that: "heketi_database_volume_created is defined", msg: "Heketi database volume does not exist." }
@@ -23,8 +23,8 @@
 changed_when: false
 vars:
 daemonset_state: { stdout: "{}" }
-ready: "{{ daemonset_state.stdout | from_json | json_query(\"status.numberReady\") }}"
-desired: "{{ daemonset_state.stdout | from_json | json_query(\"status.desiredNumberScheduled\") }}"
+ready: "{{ daemonset_state.stdout|from_json|json_query(\"status.numberReady\") }}"
+desired: "{{ daemonset_state.stdout|from_json|json_query(\"status.desiredNumberScheduled\") }}"
 until: "ready | int >= 3"
 retries: 60
 delay: 5
@@ -5,7 +5,7 @@
 changed_when: false

 - name: "Assign storage label"
-when: "label_present.stdout_lines | length == 0"
+when: "label_present.stdout_lines|length == 0"
 command: "{{ bin_dir }}/kubectl label node {{ node }} storagenode=glusterfs"

 - name: Get storage nodes again
@@ -15,5 +15,5 @@

 - name: Ensure the label has been set
 assert:
-that: "label_present | length > 0"
+that: "label_present|length > 0"
 msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs."
@@ -24,11 +24,11 @@
 deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
 command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json"
 until:
-- "heketi_state.stdout | from_json | json_query(pods_query) == 'True'"
-- "heketi_state.stdout | from_json | json_query(deployments_query) == 'True'"
+- "heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
+- "heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
 retries: 60
 delay: 5

 - name: Set the Heketi pod name
 set_fact:
-heketi_pod_name: "{{ heketi_state.stdout | from_json | json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"
+heketi_pod_name: "{{ heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"
@@ -12,7 +12,7 @@
 - name: "Render storage class configuration."
 become: true
 vars:
-endpoint_address: "{{ (heketi_service.stdout | from_json).spec.clusterIP }}"
+endpoint_address: "{{ (heketi_service.stdout|from_json).spec.clusterIP }}"
 template:
 src: "storageclass.yml.j2"
 dest: "{{ kube_config_dir }}/storageclass.yml"
@@ -11,16 +11,16 @@
 src: "topology.json.j2"
 dest: "{{ kube_config_dir }}/topology.json"
 mode: 0644
-- name: "Copy topology configuration into container." # noqa no-handler
+- name: "Copy topology configuration into container." # noqa 503
 when: "rendering.changed"
 command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
-- name: "Load heketi topology." # noqa no-handler
+- name: "Load heketi topology." # noqa 503
 when: "rendering.changed"
 command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
 - name: "Get heketi topology."
 register: "heketi_topology"
 changed_when: false
 command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
-until: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\") | flatten | length == groups['heketi-node'] | length"
+until: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\")|flatten|length == groups['heketi-node']|length"
 retries: 60
 delay: 5
@@ -22,7 +22,7 @@
 ignore_errors: true # noqa ignore-errors
 changed_when: false

-- name: "Remove volume groups."
+- name: "Remove volume groups." # noqa 301
 environment:
 PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
 become: true
@@ -30,7 +30,7 @@
 with_items: "{{ volume_groups.stdout_lines }}"
 loop_control: { loop_var: "volume_group" }

-- name: "Remove physical volume from cluster disks."
+- name: "Remove physical volume from cluster disks." # noqa 301
 environment:
 PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
 become: true
@@ -1,43 +1,43 @@
 ---
-- name: Remove storage class.
+- name: Remove storage class. # noqa 301
 command: "{{ bin_dir }}/kubectl delete storageclass gluster"
 ignore_errors: true # noqa ignore-errors
-- name: Tear down heketi.
+- name: Tear down heketi. # noqa 301
 command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
 ignore_errors: true # noqa ignore-errors
-- name: Tear down heketi.
+- name: Tear down heketi. # noqa 301
 command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
 ignore_errors: true # noqa ignore-errors
 - name: Tear down bootstrap.
 include_tasks: "../../provision/tasks/bootstrap/tear-down.yml"
-- name: Ensure there is nothing left over.
+- name: Ensure there is nothing left over. # noqa 301
 command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
 register: "heketi_result"
-until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
+until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
 retries: 60
 delay: 5
-- name: Ensure there is nothing left over.
+- name: Ensure there is nothing left over. # noqa 301
 command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
 register: "heketi_result"
-until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
+until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
 retries: 60
 delay: 5
-- name: Tear down glusterfs.
+- name: Tear down glusterfs. # noqa 301
 command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
 ignore_errors: true # noqa ignore-errors
-- name: Remove heketi storage service.
+- name: Remove heketi storage service. # noqa 301
 command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
 ignore_errors: true # noqa ignore-errors
-- name: Remove heketi gluster role binding
+- name: Remove heketi gluster role binding # noqa 301
 command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
 ignore_errors: true # noqa ignore-errors
-- name: Remove heketi config secret
+- name: Remove heketi config secret # noqa 301
 command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
 ignore_errors: true # noqa ignore-errors
-- name: Remove heketi db backup
+- name: Remove heketi db backup # noqa 301
 command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
 ignore_errors: true # noqa ignore-errors
-- name: Remove heketi service account
+- name: Remove heketi service account # noqa 301
 command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
 ignore_errors: true # noqa ignore-errors
 - name: Get secrets
@@ -46,6 +46,6 @@
 changed_when: false
 - name: Remove heketi storage secret
 vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" }
-command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout | from_json | json_query(storage_query) }}"
+command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout|from_json|json_query(storage_query) }}"
 when: "storage_query is defined"
 ignore_errors: true # noqa ignore-errors
@@ -27,7 +27,7 @@ manage-offline-container-images.sh register

 ## generate_list.sh

-This script generates the list of downloaded files and the list of container images by `roles/download/defaults/main/main.yml` file.
+This script generates the list of downloaded files and the list of container images by `roles/download/defaults/main.yml` file.

 Run this script will execute `generate_list.yml` playbook in kubespray root directory and generate four files,
 all downloaded files url in files.list, all container images in images.list, jinja2 templates in *.template.
@@ -5,7 +5,7 @@ CURRENT_DIR=$(cd $(dirname $0); pwd)
 TEMP_DIR="${CURRENT_DIR}/temp"
 REPO_ROOT_DIR="${CURRENT_DIR%/contrib/offline}"

-: ${DOWNLOAD_YML:="roles/download/defaults/main/main.yml"}
+: ${DOWNLOAD_YML:="roles/download/defaults/main.yml"}

 mkdir -p ${TEMP_DIR}

@@ -19,7 +19,7 @@ sed -n '/^downloads:/,/download_defaults:/p' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
 | sed 'N;s#\n# #g' | tr ' ' ':' | sed 's/\"//g' > ${TEMP_DIR}/images.list.template

 # add kube-* images to images list template
-# Those container images are downloaded by kubeadm, then roles/download/defaults/main/main.yml
+# Those container images are downloaded by kubeadm, then roles/download/defaults/main.yml
 # doesn't contain those images. That is reason why here needs to put those images into the
 # list separately.
 KUBE_IMAGES="kube-apiserver kube-controller-manager kube-scheduler kube-proxy"
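As an editorial aside to the two hunks above: only the location of the download defaults file differs between the two sides of this diff; the way the list generator is driven stays the same. A minimal sketch of a run from the kubespray repository root follows, assuming the side of the hunk that uses `roles/download/defaults/main/main.yml`; the `DOWNLOAD_YML` override and the `contrib/offline/temp` output directory are taken from the script excerpt above.

```bash
# Sketch only: generate the offline files/images lists from the repo root.
# DOWNLOAD_YML is the env override shown in the hunk above; the value here
# assumes the newer defaults layout and should match your checkout.
DOWNLOAD_YML="roles/download/defaults/main/main.yml" bash contrib/offline/generate_list.sh

# The four generated artifacts land in contrib/offline/temp/:
ls contrib/offline/temp/
# files.list  files.list.template  images.list  images.list.template
```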
@@ -1,6 +1,5 @@
 ---
-- name: Collect container images for offline deployment
-hosts: localhost
+- hosts: localhost
 become: no

 roles:
@@ -12,11 +11,9 @@

 tasks:
 # Generate files.list and images.list files from templates.
-- name: Collect container images for offline deployment
-template:
+- template:
 src: ./contrib/offline/temp/{{ item }}.list.template
 dest: ./contrib/offline/temp/{{ item }}.list
-mode: 0644
 with_items:
 - files
 - images
@@ -39,6 +39,6 @@ if [ $? -ne 0 ]; then
 sudo "${runtime}" run \
 --restart=always -d -p ${NGINX_PORT}:80 \
 --volume "${OFFLINE_FILES_DIR}:/usr/share/nginx/html/download" \
---volume "${CURRENT_DIR}"/nginx.conf:/etc/nginx/nginx.conf \
+--volume "$(pwd)"/nginx.conf:/etc/nginx/nginx.conf \
 --name nginx nginx:alpine
 fi
@@ -1,5 +1,4 @@
 ---
-- name: Disable firewalld/ufw
-hosts: all
+- hosts: all
 roles:
 - { role: prepare }
@@ -1,8 +1,5 @@
 ---
-- name: Disable firewalld and ufw
-when:
-- disable_service_firewall is defined and disable_service_firewall
-block:
+- block:
 - name: List services
 service_facts:

@@ -12,7 +9,7 @@
 state: stopped
 enabled: no
 when:
-"'firewalld.service' in services and services['firewalld.service'].status != 'not-found'"
+"'firewalld.service' in services"

 - name: Disable service ufw
 systemd:
@@ -20,4 +17,7 @@
 state: stopped
 enabled: no
 when:
-"'ufw.service' in services and services['ufw.service'].status != 'not-found'"
+"'ufw.service' in services"
+
+when:
+- disable_service_firewall is defined and disable_service_firewall
@@ -1,15 +0,0 @@
-output "k8s_masters" {
-value = equinix_metal_device.k8s_master.*.access_public_ipv4
-}
-
-output "k8s_masters_no_etc" {
-value = equinix_metal_device.k8s_master_no_etcd.*.access_public_ipv4
-}
-
-output "k8s_etcds" {
-value = equinix_metal_device.k8s_etcd.*.access_public_ipv4
-}
-
-output "k8s_nodes" {
-value = equinix_metal_device.k8s_node.*.access_public_ipv4
-}
@@ -1,17 +0,0 @@
-terraform {
-required_version = ">= 1.0.0"
-
-provider_meta "equinix" {
-module_name = "kubespray"
-}
-required_providers {
-equinix = {
-source = "equinix/equinix"
-version = "~> 1.14"
-}
-}
-}
-
-# Configure the Equinix Metal Provider
-provider "equinix" {
-}
@@ -12,7 +12,7 @@ ssh_public_keys = [
 machines = {
 "master-0" : {
 "node_type" : "master",
-"size" : "standard.medium",
+"size" : "Medium",
 "boot_disk" : {
 "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
 "root_partition_size" : 50,
@@ -22,7 +22,7 @@ machines = {
 },
 "worker-0" : {
 "node_type" : "worker",
-"size" : "standard.large",
+"size" : "Large",
 "boot_disk" : {
 "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
 "root_partition_size" : 50,
@@ -32,7 +32,7 @@ machines = {
 },
 "worker-1" : {
 "node_type" : "worker",
-"size" : "standard.large",
+"size" : "Large",
 "boot_disk" : {
 "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
 "root_partition_size" : 50,
@@ -42,7 +42,7 @@ machines = {
 },
 "worker-2" : {
 "node_type" : "worker",
-"size" : "standard.large",
+"size" : "Large",
 "boot_disk" : {
 "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
 "root_partition_size" : 50,
@@ -3,8 +3,8 @@ provider "exoscale" {}
 module "kubernetes" {
 source = "./modules/kubernetes-cluster"

 prefix = var.prefix
-zone = var.zone
 machines = var.machines

 ssh_public_keys = var.ssh_public_keys
@@ -1,25 +1,29 @@
-data "exoscale_template" "os_image" {
+data "exoscale_compute_template" "os_image" {
 for_each = var.machines

 zone = var.zone
 name = each.value.boot_disk.image_name
 }

-data "exoscale_compute_instance" "master_nodes" {
-for_each = exoscale_compute_instance.master
+data "exoscale_compute" "master_nodes" {
+for_each = exoscale_compute.master

 id = each.value.id
-zone = var.zone
+# Since private IP address is not assigned until the nics are created we need this
+depends_on = [exoscale_nic.master_private_network_nic]
 }

-data "exoscale_compute_instance" "worker_nodes" {
-for_each = exoscale_compute_instance.worker
+data "exoscale_compute" "worker_nodes" {
+for_each = exoscale_compute.worker

 id = each.value.id
-zone = var.zone
+# Since private IP address is not assigned until the nics are created we need this
+depends_on = [exoscale_nic.worker_private_network_nic]
 }

-resource "exoscale_private_network" "private_network" {
+resource "exoscale_network" "private_network" {
 zone = var.zone
 name = "${var.prefix}-network"

@@ -30,29 +34,25 @@ resource "exoscale_private_network" "private_network" {
 netmask = cidrnetmask(var.private_network_cidr)
 }

-resource "exoscale_compute_instance" "master" {
+resource "exoscale_compute" "master" {
 for_each = {
 for name, machine in var.machines :
 name => machine
 if machine.node_type == "master"
 }

-name = "${var.prefix}-${each.key}"
-template_id = data.exoscale_template.os_image[each.key].id
-type = each.value.size
+display_name = "${var.prefix}-${each.key}"
+template_id = data.exoscale_compute_template.os_image[each.key].id
+size = each.value.size
 disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
 state = "Running"
 zone = var.zone
-security_group_ids = [exoscale_security_group.master_sg.id]
-network_interface {
-network_id = exoscale_private_network.private_network.id
-}
-elastic_ip_ids = [exoscale_elastic_ip.control_plane_lb.id]
+security_groups = [exoscale_security_group.master_sg.name]

 user_data = templatefile(
 "${path.module}/templates/cloud-init.tmpl",
 {
-eip_ip_address = exoscale_elastic_ip.ingress_controller_lb.ip_address
+eip_ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
 node_local_partition_size = each.value.boot_disk.node_local_partition_size
 ceph_partition_size = each.value.boot_disk.ceph_partition_size
 root_partition_size = each.value.boot_disk.root_partition_size
@@ -62,29 +62,25 @@ resource "exoscale_compute_instance" "master" {
 )
 }

-resource "exoscale_compute_instance" "worker" {
+resource "exoscale_compute" "worker" {
 for_each = {
 for name, machine in var.machines :
 name => machine
 if machine.node_type == "worker"
 }

-name = "${var.prefix}-${each.key}"
-template_id = data.exoscale_template.os_image[each.key].id
-type = each.value.size
+display_name = "${var.prefix}-${each.key}"
+template_id = data.exoscale_compute_template.os_image[each.key].id
+size = each.value.size
 disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
 state = "Running"
 zone = var.zone
-security_group_ids = [exoscale_security_group.worker_sg.id]
-network_interface {
-network_id = exoscale_private_network.private_network.id
-}
-elastic_ip_ids = [exoscale_elastic_ip.ingress_controller_lb.id]
+security_groups = [exoscale_security_group.worker_sg.name]

 user_data = templatefile(
 "${path.module}/templates/cloud-init.tmpl",
 {
-eip_ip_address = exoscale_elastic_ip.ingress_controller_lb.ip_address
+eip_ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
 node_local_partition_size = each.value.boot_disk.node_local_partition_size
 ceph_partition_size = each.value.boot_disk.ceph_partition_size
 root_partition_size = each.value.boot_disk.root_partition_size
@@ -94,33 +90,41 @@ resource "exoscale_compute_instance" "worker" {
 )
 }

+resource "exoscale_nic" "master_private_network_nic" {
+for_each = exoscale_compute.master
+
+compute_id = each.value.id
+network_id = exoscale_network.private_network.id
+}
+
+resource "exoscale_nic" "worker_private_network_nic" {
+for_each = exoscale_compute.worker
+
+compute_id = each.value.id
+network_id = exoscale_network.private_network.id
+}
+
 resource "exoscale_security_group" "master_sg" {
 name = "${var.prefix}-master-sg"
 description = "Security group for Kubernetes masters"
 }

-resource "exoscale_security_group_rule" "master_sg_rule_ssh" {
+resource "exoscale_security_group_rules" "master_sg_rules" {
 security_group_id = exoscale_security_group.master_sg.id

-for_each = toset(var.ssh_whitelist)
 # SSH
-type = "INGRESS"
-start_port = 22
-end_port = 22
-protocol = "TCP"
-cidr = each.value
-}
+ingress {
+protocol = "TCP"
+cidr_list = var.ssh_whitelist
+ports = ["22"]
+}

-resource "exoscale_security_group_rule" "master_sg_rule_k8s_api" {
-security_group_id = exoscale_security_group.master_sg.id

-for_each = toset(var.api_server_whitelist)
 # Kubernetes API
-type = "INGRESS"
-start_port = 6443
-end_port = 6443
-protocol = "TCP"
-cidr = each.value
+ingress {
+protocol = "TCP"
+cidr_list = var.api_server_whitelist
+ports = ["6443"]
+}
 }

 resource "exoscale_security_group" "worker_sg" {
@@ -128,64 +132,62 @@ resource "exoscale_security_group" "worker_sg" {
 description = "security group for kubernetes worker nodes"
 }

-resource "exoscale_security_group_rule" "worker_sg_rule_ssh" {
+resource "exoscale_security_group_rules" "worker_sg_rules" {
 security_group_id = exoscale_security_group.worker_sg.id

 # SSH
-for_each = toset(var.ssh_whitelist)
-type = "INGRESS"
-start_port = 22
-end_port = 22
-protocol = "TCP"
-cidr = each.value
-}
+ingress {
+protocol = "TCP"
+cidr_list = var.ssh_whitelist
+ports = ["22"]
+}

-resource "exoscale_security_group_rule" "worker_sg_rule_http" {
-security_group_id = exoscale_security_group.worker_sg.id

 # HTTP(S)
-for_each = toset(["80", "443"])
-type = "INGRESS"
-start_port = each.value
-end_port = each.value
-protocol = "TCP"
-cidr = "0.0.0.0/0"
-}
+ingress {
+protocol = "TCP"
+cidr_list = ["0.0.0.0/0"]
+ports = ["80", "443"]
+}

-resource "exoscale_security_group_rule" "worker_sg_rule_nodeport" {
-security_group_id = exoscale_security_group.worker_sg.id
-# HTTP(S)
-for_each = toset(var.nodeport_whitelist)
-type = "INGRESS"
-start_port = 30000
-end_port = 32767
-protocol = "TCP"
-cidr = each.value
-}
+# Kubernetes Nodeport
+ingress {
+protocol = "TCP"
+cidr_list = var.nodeport_whitelist
+ports = ["30000-32767"]
+}

-resource "exoscale_elastic_ip" "ingress_controller_lb" {
-zone = var.zone
-healthcheck {
-mode = "http"
-port = 80
-uri = "/healthz"
-interval = 10
-timeout = 2
-strikes_ok = 2
-strikes_fail = 3
 }
 }

-resource "exoscale_elastic_ip" "control_plane_lb" {
+resource "exoscale_ipaddress" "ingress_controller_lb" {
 zone = var.zone
-healthcheck {
-mode = "tcp"
-port = 6443
-interval = 10
-timeout = 2
-strikes_ok = 2
-strikes_fail = 3
+healthcheck_mode = "http"
+healthcheck_port = 80
+healthcheck_path = "/healthz"
+healthcheck_interval = 10
+healthcheck_timeout = 2
+healthcheck_strikes_ok = 2
+healthcheck_strikes_fail = 3
 }

+resource "exoscale_secondary_ipaddress" "ingress_controller_lb" {
+for_each = exoscale_compute.worker
+
+compute_id = each.value.id
+ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
+}
+
+resource "exoscale_ipaddress" "control_plane_lb" {
+zone = var.zone
+healthcheck_mode = "tcp"
+healthcheck_port = 6443
+healthcheck_interval = 10
+healthcheck_timeout = 2
+healthcheck_strikes_ok = 2
+healthcheck_strikes_fail = 3
+}
+
+resource "exoscale_secondary_ipaddress" "control_plane_lb" {
+for_each = exoscale_compute.master
+
+compute_id = each.value.id
+ip_address = exoscale_ipaddress.control_plane_lb.ip_address
 }
@@ -1,19 +1,19 @@
 output "master_ip_addresses" {
 value = {
-for key, instance in exoscale_compute_instance.master :
+for key, instance in exoscale_compute.master :
 instance.name => {
-"private_ip" = contains(keys(data.exoscale_compute_instance.master_nodes), key) ? data.exoscale_compute_instance.master_nodes[key].private_network_ip_addresses[0] : ""
-"public_ip" = exoscale_compute_instance.master[key].ip_address
+"private_ip" = contains(keys(data.exoscale_compute.master_nodes), key) ? data.exoscale_compute.master_nodes[key].private_network_ip_addresses[0] : ""
+"public_ip" = exoscale_compute.master[key].ip_address
 }
 }
 }

 output "worker_ip_addresses" {
 value = {
-for key, instance in exoscale_compute_instance.worker :
+for key, instance in exoscale_compute.worker :
 instance.name => {
-"private_ip" = contains(keys(data.exoscale_compute_instance.worker_nodes), key) ? data.exoscale_compute_instance.worker_nodes[key].private_network_ip_addresses[0] : ""
-"public_ip" = exoscale_compute_instance.worker[key].ip_address
+"private_ip" = contains(keys(data.exoscale_compute.worker_nodes), key) ? data.exoscale_compute.worker_nodes[key].private_network_ip_addresses[0] : ""
+"public_ip" = exoscale_compute.worker[key].ip_address
 }
 }
 }
@@ -23,9 +23,9 @@ output "cluster_private_network_cidr" {
 }

 output "ingress_controller_lb_ip_address" {
-value = exoscale_elastic_ip.ingress_controller_lb.ip_address
+value = exoscale_ipaddress.ingress_controller_lb.ip_address
 }

 output "control_plane_lb_ip_address" {
-value = exoscale_elastic_ip.control_plane_lb.ip_address
+value = exoscale_ipaddress.control_plane_lb.ip_address
 }
@@ -1,7 +1,7 @@
 terraform {
 required_providers {
 exoscale = {
 source = "exoscale/exoscale"
 version = ">= 0.21"
 }
 }
@@ -75,11 +75,6 @@ ansible-playbook -i contrib/terraform/gcs/inventory.ini cluster.yml -b -v
 * `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server
 * `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on port 30000-32767 (kubernetes nodeports)
 * `ingress_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to ingress on ports 80 and 443
-* `extra_ingress_firewalls`: Additional ingress firewall rules. Key will be used as the name of the rule
-  * `source_ranges`: List of IP ranges (CIDR). Example: `["8.8.8.8"]`
-  * `protocol`: Protocol. Example `"tcp"`
-  * `ports`: List of ports, as string. Example `["53"]`
-  * `target_tags`: List of target tag (either the machine name or `control-plane` or `worker`). Example: `["control-plane", "worker-0"]`

 ### Optional

@@ -34,6 +34,4 @@ module "kubernetes" {
 api_server_whitelist = var.api_server_whitelist
 nodeport_whitelist = var.nodeport_whitelist
 ingress_whitelist = var.ingress_whitelist
-
-extra_ingress_firewalls = var.extra_ingress_firewalls
 }
@@ -219,7 +219,7 @@ resource "google_compute_instance" "master" {
 machine_type = each.value.size
 zone = each.value.zone

-tags = ["control-plane", "master", each.key]
+tags = ["master"]

 boot_disk {
 initialize_params {
@@ -325,7 +325,7 @@ resource "google_compute_instance" "worker" {
 machine_type = each.value.size
 zone = each.value.zone

-tags = ["worker", each.key]
+tags = ["worker"]

 boot_disk {
 initialize_params {
@@ -398,24 +398,3 @@ resource "google_compute_target_pool" "worker_lb" {
 name = "${var.prefix}-worker-lb-pool"
 instances = local.worker_target_list
 }
-
-resource "google_compute_firewall" "extra_ingress_firewall" {
-for_each = {
-for name, firewall in var.extra_ingress_firewalls :
-name => firewall
-}
-
-name = "${var.prefix}-${each.key}-ingress"
-network = google_compute_network.main.name
-
-priority = 100
-
-source_ranges = each.value.source_ranges
-
-target_tags = each.value.target_tags
-
-allow {
-protocol = each.value.protocol
-ports = each.value.ports
-}
-}
@@ -14,7 +14,7 @@ variable "machines" {
 }))
 boot_disk = object({
 image_name = string
 size = number
 })
 }))
 }
@@ -73,14 +73,3 @@ variable "ingress_whitelist" {
 variable "private_network_cidr" {
 default = "10.0.10.0/24"
 }
-
-variable "extra_ingress_firewalls" {
-type = map(object({
-source_ranges = set(string)
-protocol = string
-ports = list(string)
-target_tags = set(string)
-}))
-
-default = {}
-}
@@ -95,14 +95,3 @@ variable "ingress_whitelist" {
 type = list(string)
 default = ["0.0.0.0/0"]
 }
-
-variable "extra_ingress_firewalls" {
-type = map(object({
-source_ranges = set(string)
-protocol = string
-ports = list(string)
-target_tags = set(string)
-}))
-
-default = {}
-}
@@ -56,24 +56,11 @@ cd inventory/$CLUSTER

 Edit `default.tfvars` to match your requirement.

-Flatcar Container Linux instead of the basic Hetzner Images.
-
-```bash
-cd ../../contrib/terraform/hetzner
-```
-
-Edit `main.tf` and reactivate the module `source = "./modules/kubernetes-cluster-flatcar"`and
-comment out the `#source = "./modules/kubernetes-cluster"`.
-
-activate `ssh_private_key_path = var.ssh_private_key_path`. The VM boots into
-Rescue-Mode with the selected image of the `var.machines` but installs Flatcar instead.
-
 Run Terraform to create the infrastructure.

 ```bash
-cd ./kubespray
-terraform -chdir=./contrib/terraform/hetzner/ init
-terraform -chdir=./contrib/terraform/hetzner/ apply --var-file=../../../inventory/$CLUSTER/default.tfvars
+terraform init ../../contrib/terraform/hetzner
+terraform apply --var-file default.tfvars ../../contrib/terraform/hetzner/
 ```

 You should now have a inventory file named `inventory.ini` that you can use with kubespray.
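Once either variant of the Terraform commands above has produced `inventory.ini`, the rest of the flow is the usual kubespray Ansible run. A minimal sketch follows; the inventory location is an assumption (here the `inventory/$CLUSTER` directory used earlier in this guide), so adjust the path to wherever Terraform actually wrote the file.

```bash
# Sketch: run kubespray against the Terraform-generated inventory.
# The inventory path below is assumed, not taken from the diff; point it at
# the inventory.ini produced by the terraform apply step above.
ansible-playbook -i inventory/$CLUSTER/inventory.ini cluster.yml -b -v
```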
@@ -1,6 +1,6 @@
 prefix = "default"
 zone = "hel1"
 network_zone = "eu-central"
 inventory_file = "inventory.ini"

 ssh_public_keys = [
@@ -9,23 +9,21 @@ ssh_public_keys = [
 "ssh-rsa I-did-not-read-the-docs 2",
 ]

-ssh_private_key_path = "~/.ssh/id_rsa"
-
 machines = {
 "master-0" : {
 "node_type" : "master",
 "size" : "cx21",
-"image" : "ubuntu-22.04",
+"image" : "ubuntu-20.04",
 },
 "worker-0" : {
 "node_type" : "worker",
 "size" : "cx21",
-"image" : "ubuntu-22.04",
+"image" : "ubuntu-20.04",
 },
 "worker-1" : {
 "node_type" : "worker",
 "size" : "cx21",
-"image" : "ubuntu-22.04",
+"image" : "ubuntu-20.04",
 }
 }
Some files were not shown because too many files have changed in this diff.