Mirror of https://github.com/kubernetes-sigs/kubespray.git, synced 2025-12-14 13:54:37 +03:00

Compare commits
9 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | 453dbcef1d |  |
|  | 4a6600002f |  |
|  | 6eb313584e |  |
|  | a270632466 |  |
|  | 00550ba832 |  |
|  | b4951da405 |  |
|  | cd93d10688 |  |
|  | e6940d8a7b |  |
|  | dca5cde493 |  |

@@ -7,6 +7,18 @@ skip_list:
  # These rules are intentionally skipped:
  #
  # [E204]: "Lines should be no longer than 160 chars"
  # This could be re-enabled with a major rewrite in the future.
  # For now, there's not enough value gain from strictly limiting line length.
  # (Disabled in May 2019)
  - '204'

  # [E701]: "meta/main.yml should contain relevant info"
  # Roles in Kubespray are not intended to be used/imported by Ansible Galaxy.
  # While it can be useful to have these metadata available, they are also available in the existing documentation.
  # (Disabled in May 2019)
  - '701'

  # [role-name] "meta/main.yml" Role name role-name does not match ``^[a-z][a-z0-9_]+$`` pattern
  # Meta roles in Kubespray don't need proper names
  # (Disabled in June 2021)
@@ -16,23 +28,3 @@ skip_list:
  # In Kubespray we use variables that use camelCase to match their k8s counterparts
  # (Disabled in June 2021)
  - 'var-naming'

  # [fqcn-builtins]
  # Roles in kubespray don't need fully qualified collection names
  # (Disabled in Feb 2023)
  - 'fqcn-builtins'

  # We use template in names
  - 'name[template]'

  # No changed-when on commands
  # (Disabled in June 2023 after ansible upgrade; FIXME)
  - 'no-changed-when'

  # Disable run-once check with free strategy
  # (Disabled in June 2023 after ansible upgrade; FIXME)
  - 'run-once[task]'
exclude_paths:
  # Generated files
  - tests/files/custom_cni/cilium.yaml
  - venv

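For orientation: a `skip_list` like the one above is what ansible-lint reads from its configuration file when run at the repository root. A minimal sketch of exercising it (the playbook path is illustrative):

```ShellSession
# ansible-lint picks up the repository's skip_list/exclude_paths
# configuration automatically when run from the repo root.
ansible-lint -v
# The same rules can also be skipped ad hoc, without a config file:
ansible-lint -x 204,701 playbooks/cluster.yml
```
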
@@ -1,8 +0,0 @@
# This file contains ignores rule violations for ansible-lint
inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml jinja[spacing]
roles/kubernetes/control-plane/defaults/main/kube-proxy.yml jinja[spacing]
roles/kubernetes/control-plane/defaults/main/main.yml jinja[spacing]
roles/kubernetes/kubeadm/defaults/main.yml jinja[spacing]
roles/kubernetes/node/defaults/main.yml jinja[spacing]
roles/kubernetes/preinstall/defaults/main.yml jinja[spacing]
roles/kubespray-defaults/defaults/main.yaml jinja[spacing]

.gitignore vendored (12 changed lines)

@@ -3,19 +3,14 @@
**/vagrant_ansible_inventory
*.iml
temp
contrib/offline/offline-files
contrib/offline/offline-files.tar.gz
.idea
.vscode
.tox
.cache
*.bak
*.tfstate
*.tfstate*backup
*.lock.hcl
*.tfstate.backup
.terraform/
contrib/terraform/aws/credentials.tfvars
.terraform.lock.hcl
/ssh-bastion.conf
**/*.sw[pon]
*~
@@ -113,8 +108,3 @@ roles/**/molecule/**/__pycache__/

# Temp location used by our scripts
scripts/tmp/
tmp.md

# Ansible collection files
kubernetes_sigs-kubespray*tar.gz
ansible_collections

@@ -1,6 +1,5 @@
---
stages:
  - build
  - unit-tests
  - deploy-part1
  - moderator
@@ -9,12 +8,12 @@ stages:
  - deploy-special

variables:
  KUBESPRAY_VERSION: v2.22.1
  KUBESPRAY_VERSION: v2.18.1
  FAILFASTCI_NAMESPACE: 'kargo-ci'
  GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
  ANSIBLE_FORCE_COLOR: "true"
  MAGIC: "ci check this"
  TEST_ID: "$CI_PIPELINE_ID-$CI_JOB_ID"
  TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
  CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
  CI_TEST_REGISTRY_MIRROR: "./tests/common/_docker_hub_registry_mirror.yml"
  CI_TEST_SETTING: "./tests/common/_kubespray_test_settings.yml"
@@ -33,18 +32,21 @@ variables:
  MITOGEN_ENABLE: "false"
  ANSIBLE_LOG_LEVEL: "-vv"
  RECOVER_CONTROL_PLANE_TEST: "false"
  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"
  TERRAFORM_VERSION: 1.3.7
  PIPELINE_IMAGE: "$CI_REGISTRY_IMAGE/pipeline:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}"
  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
  TERRAFORM_VERSION: 1.0.8
  ANSIBLE_MAJOR_VERSION: "2.10"

before_script:
  - ./tests/scripts/rebase.sh
  - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
  - python -m pip uninstall -y ansible ansible-base ansible-core
  - python -m pip install -r tests/requirements-${ANSIBLE_MAJOR_VERSION}.txt
  - mkdir -p /.ssh

.job: &job
  tags:
    - packet
  image: $PIPELINE_IMAGE
  image: quay.io/kubespray/kubespray:$KUBESPRAY_VERSION
  artifacts:
    when: always
    paths:
@@ -53,7 +55,6 @@ before_script:
.testcases: &testcases
  <<: *job
  retry: 1
  interruptible: true
  before_script:
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
    - ./tests/scripts/rebase.sh
@@ -75,7 +76,6 @@ ci-authorized:
  only: []

include:
  - .gitlab-ci/build.yml
  - .gitlab-ci/lint.yml
  - .gitlab-ci/shellcheck.yml
  - .gitlab-ci/terraform.yml

@@ -1,40 +0,0 @@
---
.build:
  stage: build
  image:
    name: moby/buildkit:rootless
    entrypoint: [""]
  variables:
    BUILDKITD_FLAGS: --oci-worker-no-process-sandbox
  before_script:
    - mkdir ~/.docker
    - echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > ~/.docker/config.json

pipeline image:
  extends: .build
  script:
    - |
      buildctl-daemonless.sh build \
        --frontend=dockerfile.v0 \
        --local context=. \
        --local dockerfile=. \
        --opt filename=./pipeline.Dockerfile \
        --output type=image,name=$PIPELINE_IMAGE,push=true \
        --import-cache type=registry,ref=$CI_REGISTRY_IMAGE/pipeline:cache
  rules:
    - if: '$CI_COMMIT_REF_NAME != $CI_DEFAULT_BRANCH'

pipeline image and build cache:
  extends: .build
  script:
    - |
      buildctl-daemonless.sh build \
        --frontend=dockerfile.v0 \
        --local context=. \
        --local dockerfile=. \
        --opt filename=./pipeline.Dockerfile \
        --output type=image,name=$PIPELINE_IMAGE,push=true \
        --import-cache type=registry,ref=$CI_REGISTRY_IMAGE/pipeline:cache \
        --export-cache type=registry,ref=$CI_REGISTRY_IMAGE/pipeline:cache,mode=max
  rules:
    - if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH'

@@ -14,7 +14,7 @@ vagrant-validate:
  stage: unit-tests
  tags: [light]
  variables:
    VAGRANT_VERSION: 2.3.4
    VAGRANT_VERSION: 2.2.19
  script:
    - ./tests/scripts/vagrant-validate.sh
  except: ['triggers', 'master']
@@ -39,34 +39,21 @@ syntax-check:
    ANSIBLE_VERBOSITY: "3"
  script:
    - ansible-playbook --syntax-check cluster.yml
    - ansible-playbook --syntax-check playbooks/cluster.yml
    - ansible-playbook --syntax-check upgrade-cluster.yml
    - ansible-playbook --syntax-check playbooks/upgrade_cluster.yml
    - ansible-playbook --syntax-check reset.yml
    - ansible-playbook --syntax-check playbooks/reset.yml
    - ansible-playbook --syntax-check extra_playbooks/upgrade-only-k8s.yml
  except: ['triggers', 'master']

collection-build-install-sanity-check:
  extends: .job
  stage: unit-tests
  tags: [light]
  variables:
    ANSIBLE_COLLECTIONS_PATH: "./ansible_collections"
  script:
    - ansible-galaxy collection build
    - ansible-galaxy collection install kubernetes_sigs-kubespray-$(grep "^version:" galaxy.yml | awk '{print $2}').tar.gz
    - ansible-galaxy collection list $(egrep -i '(name:\s+|namespace:\s+)' galaxy.yml | awk '{print $2}' | tr '\n' '.' | sed 's|\.$||g') | grep "^kubernetes_sigs.kubespray"
    - test -f ansible_collections/kubernetes_sigs/kubespray/playbooks/cluster.yml
    - test -f ansible_collections/kubernetes_sigs/kubespray/playbooks/reset.yml
  except: ['triggers', 'master']

tox-inventory-builder:
  stage: unit-tests
  tags: [light]
  extends: .job
  before_script:
    - ./tests/scripts/rebase.sh
    - apt-get update && apt-get install -y python3-pip
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
    - python -m pip uninstall -y ansible ansible-base ansible-core
    - python -m pip install -r tests/requirements.txt
  script:
    - pip3 install tox
    - cd contrib/inventory_builder && tox

@@ -81,27 +68,6 @@ markdownlint:
  script:
    - markdownlint $(find . -name '*.md' | grep -vF './.git') --ignore docs/_sidebar.md --ignore contrib/dind/README.md

check-readme-versions:
  stage: unit-tests
  tags: [light]
  image: python:3
  script:
    - tests/scripts/check_readme_versions.sh

check-galaxy-version:
  stage: unit-tests
  tags: [light]
  image: python:3
  script:
    - tests/scripts/check_galaxy_version.sh

check-typo:
  stage: unit-tests
  tags: [light]
  image: python:3
  script:
    - tests/scripts/check_typo.sh

ci-matrix:
  stage: unit-tests
  tags: [light]

@@ -4,11 +4,15 @@
  tags: [c3.small.x86]
  only: [/^pr-.*$/]
  except: ['triggers']
  image: $PIPELINE_IMAGE
  image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
  services: []
  stage: deploy-part1
  before_script:
    - tests/scripts/rebase.sh
    - apt-get update && apt-get install -y python3-pip
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
    - python -m pip uninstall -y ansible ansible-base ansible-core
    - python -m pip install -r tests/requirements.txt
    - ./tests/scripts/vagrant_clean.sh
  script:
    - ./tests/scripts/molecule_run.sh
@@ -40,7 +44,7 @@ molecule_no_container_engines:
molecule_docker:
  extends: .molecule
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
    - ./tests/scripts/molecule_run.sh -i container-engine/docker
  when: on_success

molecule_containerd:
@@ -54,7 +58,13 @@ molecule_cri-o:
  stage: deploy-part2
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/cri-o
  allow_failure: true
  when: on_success

molecule_cri-dockerd:
  extends: .molecule
  stage: deploy-part2
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
  when: on_success

# Stage 3 container engines don't get as much attention so allow them to fail

@@ -23,14 +23,6 @@
  allow_failure: true
  extends: .packet

packet_cleanup_old:
  stage: deploy-part1
  extends: .packet_periodic
  script:
    - cd tests
    - make cleanup-packet
  after_script: []

# The ubuntu20-calico-aio jobs are meant as early stages to prevent running the full CI if something is horribly broken
packet_ubuntu20-calico-aio:
  stage: deploy-part1
@@ -39,24 +31,44 @@ packet_ubuntu20-calico-aio:
  variables:
    RESET_CHECK: "true"

# Exercise ansible variants during the nightly jobs
packet_ubuntu20-calico-aio-ansible-2_9:
  stage: deploy-part1
  extends: .packet_periodic
  when: on_success
  variables:
    ANSIBLE_MAJOR_VERSION: "2.9"
    RESET_CHECK: "true"

packet_ubuntu20-calico-aio-ansible-2_10:
  stage: deploy-part1
  extends: .packet_periodic
  when: on_success
  variables:
    ANSIBLE_MAJOR_VERSION: "2.10"
    RESET_CHECK: "true"

packet_ubuntu20-calico-aio-ansible-2_11:
  stage: deploy-part1
  extends: .packet_periodic
  when: on_success
  variables:
    ANSIBLE_MAJOR_VERSION: "2.11"
    RESET_CHECK: "true"

# ### PR JOBS PART2

packet_ubuntu18-aio-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_ubuntu20-aio-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_ubuntu20-calico-aio-hardening:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_ubuntu22-aio-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_ubuntu22-calico-aio:
packet_ubuntu18-calico-aio:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success
@@ -70,19 +82,28 @@ packet_almalinux8-crio:
  extends: .packet_pr
  stage: deploy-part2
  when: on_success
  allow_failure: true

packet_ubuntu20-crio:
packet_ubuntu18-crio:
  extends: .packet_pr
  stage: deploy-part2
  when: manual

packet_fedora37-crio:
packet_fedora35-crio:
  extends: .packet_pr
  stage: deploy-part2
  when: manual

packet_ubuntu20-flannel-ha:
packet_ubuntu16-canal-ha:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success

packet_ubuntu16-canal-sep:
  stage: deploy-special
  extends: .packet_pr
  when: manual

packet_ubuntu16-flannel-ha:
  stage: deploy-part2
  extends: .packet_pr
  when: manual
@@ -112,21 +133,6 @@ packet_debian11-docker:
  extends: .packet_pr
  when: on_success

packet_debian12-calico:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_debian12-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_debian12-cilium:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success

packet_centos7-calico-ha-once-localhost:
  stage: deploy-part2
  extends: .packet_pr
@@ -139,7 +145,7 @@ packet_centos7-calico-ha-once-localhost:

packet_almalinux8-kube-ovn:
  stage: deploy-part2
  extends: .packet_pr
  extends: .packet_periodic
  when: on_success

packet_almalinux8-calico:
@@ -147,33 +153,20 @@ packet_almalinux8-calico:
  extends: .packet_pr
  when: on_success

packet_rockylinux8-calico:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_rockylinux9-calico:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_rockylinux9-cilium:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success
  variables:
    RESET_CHECK: "true"

packet_almalinux8-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_fedora38-docker-weave:
packet_fedora34-docker-weave:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success
  allow_failure: true

packet_opensuse-canal:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success

packet_opensuse-docker-cilium:
  stage: deploy-part2
@@ -182,17 +175,22 @@ packet_opensuse-docker-cilium:

# ### MANUAL JOBS

packet_ubuntu20-docker-weave-sep:
packet_ubuntu16-docker-weave-sep:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_ubuntu20-cilium-sep:
packet_ubuntu18-cilium-sep:
  stage: deploy-special
  extends: .packet_pr
  when: manual

packet_ubuntu20-flannel-ha-once:
packet_ubuntu18-flannel-ha:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_ubuntu18-flannel-ha-once:
  stage: deploy-part2
  extends: .packet_pr
  when: manual
@@ -203,7 +201,7 @@ packet_almalinux8-calico-ha-ebpf:
  extends: .packet_pr
  when: manual

packet_debian10-macvlan:
packet_debian9-macvlan:
  stage: deploy-part2
  extends: .packet_pr
  when: manual
@@ -218,19 +216,24 @@ packet_centos7-multus-calico:
  extends: .packet_pr
  when: manual

packet_fedora38-docker-calico:
packet_oracle7-canal-ha:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_fedora35-docker-calico:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success
  variables:
    RESET_CHECK: "true"

packet_fedora37-calico-selinux:
packet_fedora34-calico-selinux:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success

packet_fedora37-calico-swap-selinux:
packet_fedora35-calico-swap-selinux:
  stage: deploy-part2
  extends: .packet_pr
  when: manual
@@ -245,25 +248,15 @@ packet_almalinux8-calico-nodelocaldns-secondary:
  extends: .packet_pr
  when: manual

packet_fedora38-kube-ovn:
packet_fedora34-kube-ovn:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success

packet_debian11-custom-cni:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_debian11-kubelet-csr-approver:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

# ### PR JOBS PART3
# Long jobs (45min+)

packet_centos7-weave-upgrade-ha:
packet_centos7-docker-weave-upgrade-ha:
  stage: deploy-part3
  extends: .packet_periodic
  when: on_success
@@ -283,7 +276,7 @@ packet_ubuntu20-calico-ha-wireguard:
  extends: .packet_pr
  when: manual

packet_debian11-calico-upgrade:
packet_debian10-calico-upgrade:
  stage: deploy-part3
  extends: .packet_pr
  when: on_success
@@ -298,30 +291,25 @@ packet_almalinux8-calico-remove-node:
    REMOVE_NODE_CHECK: "true"
    REMOVE_NODE_NAME: "instance-3"

packet_ubuntu20-calico-etcd-kubeadm:
  stage: deploy-part3
  extends: .packet_pr
  when: on_success

packet_debian11-calico-upgrade-once:
packet_debian10-calico-upgrade-once:
  stage: deploy-part3
  extends: .packet_periodic
  when: on_success
  variables:
    UPGRADE_TEST: graceful

packet_ubuntu20-calico-ha-recover:
packet_ubuntu18-calico-ha-recover:
  stage: deploy-part3
  extends: .packet_periodic
  when: on_success
  variables:
    RECOVER_CONTROL_PLANE_TEST: "true"
    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"
    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"

packet_ubuntu20-calico-ha-recover-noquorum:
packet_ubuntu18-calico-ha-recover-noquorum:
  stage: deploy-part3
  extends: .packet_periodic
  when: on_success
  variables:
    RECOVER_CONTROL_PLANE_TEST: "true"
    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:]:kube_control_plane[1:]"
    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube_control_plane[1:]"

@@ -11,6 +11,6 @@ shellcheck:
    - cp shellcheck-"${SHELLCHECK_VERSION}"/shellcheck /usr/bin/
    - shellcheck --version
  script:
    # Run shellcheck for all *.sh
    - find . -name '*.sh' -not -path './.git/*' | xargs shellcheck --severity error
    # Run shellcheck for all *.sh except contrib/
    - find . -name '*.sh' -not -path './contrib/*' -not -path './.git/*' | xargs shellcheck --severity error
  except: ['triggers', 'master']

@@ -60,11 +60,11 @@ tf-validate-openstack:
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-equinix:
tf-validate-metal:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: equinix
    PROVIDER: metal
    CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-aws:
@@ -80,12 +80,6 @@ tf-validate-exoscale:
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: exoscale

tf-validate-hetzner:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: hetzner

tf-validate-vsphere:
  extends: .terraform_validate
  variables:
@@ -100,13 +94,7 @@ tf-validate-upcloud:
    PROVIDER: upcloud
    CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-nifcloud:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: nifcloud

# tf-packet-ubuntu20-default:
# tf-packet-ubuntu16-default:
#   extends: .terraform_apply
#   variables:
#     TF_VERSION: $TERRAFORM_VERSION
@@ -116,9 +104,23 @@ tf-validate-nifcloud:
#     TF_VAR_number_of_k8s_nodes: "1"
#     TF_VAR_plan_k8s_masters: t1.small.x86
#     TF_VAR_plan_k8s_nodes: t1.small.x86
#     TF_VAR_metro: am
#     TF_VAR_facility: ewr1
#     TF_VAR_public_key_path: ""
#     TF_VAR_operating_system: ubuntu_20_04
#     TF_VAR_operating_system: ubuntu_16_04
#
# tf-packet-ubuntu18-default:
#   extends: .terraform_apply
#   variables:
#     TF_VERSION: $TERRAFORM_VERSION
#     PROVIDER: packet
#     CLUSTER: $CI_COMMIT_REF_NAME
#     TF_VAR_number_of_k8s_masters: "1"
#     TF_VAR_number_of_k8s_nodes: "1"
#     TF_VAR_plan_k8s_masters: t1.small.x86
#     TF_VAR_plan_k8s_nodes: t1.small.x86
#     TF_VAR_facility: ams1
#     TF_VAR_public_key_path: ""
#     TF_VAR_operating_system: ubuntu_18_04

.ovh_variables: &ovh_variables
  OS_AUTH_URL: https://auth.cloud.ovh.net/v3
@@ -156,7 +158,7 @@ tf-elastx_cleanup:
  script:
    - ./scripts/openstack-cleanup/main.py

tf-elastx_ubuntu20-calico:
tf-elastx_ubuntu18-calico:
  extends: .terraform_apply
  stage: deploy-part3
  when: on_success
@@ -186,7 +188,7 @@ tf-elastx_ubuntu20-calico:
    TF_VAR_az_list_node: '["sto1"]'
    TF_VAR_flavor_k8s_master: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
    TF_VAR_flavor_k8s_node: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
    TF_VAR_image: ubuntu-20.04-server-latest
    TF_VAR_image: ubuntu-18.04-server-latest
    TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

# OVH voucher expired, commenting job until things are sorted out
@@ -203,7 +205,7 @@ tf-elastx_ubuntu20-calico:
#   script:
#     - ./scripts/openstack-cleanup/main.py

# tf-ovh_ubuntu20-calico:
# tf-ovh_ubuntu18-calico:
#   extends: .terraform_apply
#   when: on_success
#   environment: ovh
@@ -229,5 +231,5 @@ tf-elastx_ubuntu20-calico:
#     TF_VAR_network_name: "Ext-Net"
#     TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
#     TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
#     TF_VAR_image: "Ubuntu 20.04"
#     TF_VAR_image: "Ubuntu 18.04"
#     TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

@@ -10,9 +10,13 @@
  tags: [c3.small.x86]
  only: [/^pr-.*$/]
  except: ['triggers']
  image: $PIPELINE_IMAGE
  image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
  services: []
  before_script:
    - apt-get update && apt-get install -y python3-pip
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
    - python -m pip uninstall -y ansible ansible-base ansible-core
    - python -m pip install -r tests/requirements.txt
    - ./tests/scripts/vagrant_clean.sh
  script:
    - ./tests/scripts/testcases_run.sh
@@ -20,12 +24,17 @@
    - chronic ./tests/scripts/testcases_cleanup.sh
  allow_failure: true

vagrant_ubuntu20-calico-dual-stack:
vagrant_ubuntu18-calico-dual-stack:
  stage: deploy-part2
  extends: .vagrant
  when: on_success

vagrant_ubuntu20-weave-medium:
vagrant_ubuntu18-flannel:
  stage: deploy-part2
  extends: .vagrant
  when: on_success

vagrant_ubuntu18-weave-medium:
  stage: deploy-part2
  extends: .vagrant
  when: manual
@@ -34,25 +43,19 @@ vagrant_ubuntu20-flannel:
  stage: deploy-part2
  extends: .vagrant
  when: on_success
  allow_failure: false

vagrant_ubuntu20-flannel-collection:
  stage: deploy-part2
  extends: .vagrant
  when: on_success

vagrant_ubuntu20-kube-router-sep:
vagrant_ubuntu16-kube-router-sep:
  stage: deploy-part2
  extends: .vagrant
  when: manual

# Service proxy test fails connectivity testing
vagrant_ubuntu20-kube-router-svc-proxy:
vagrant_ubuntu16-kube-router-svc-proxy:
  stage: deploy-part2
  extends: .vagrant
  when: manual

vagrant_fedora37-kube-router:
vagrant_fedora35-kube-router:
  stage: deploy-part2
  extends: .vagrant
  when: on_success

@@ -1,3 +1,2 @@
---
MD013: false
MD029: false

@@ -1,71 +0,0 @@
---
repos:

  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v3.4.0
    hooks:
      - id: check-added-large-files
      - id: check-case-conflict
      - id: check-executables-have-shebangs
      - id: check-xml
      - id: check-merge-conflict
      - id: detect-private-key
      - id: end-of-file-fixer
      - id: forbid-new-submodules
      - id: requirements-txt-fixer
      - id: trailing-whitespace

  - repo: https://github.com/adrienverge/yamllint.git
    rev: v1.27.1
    hooks:
      - id: yamllint
        args: [--strict]

  - repo: https://github.com/markdownlint/markdownlint
    rev: v0.11.0
    hooks:
      - id: markdownlint
        args: [ -r, "~MD013,~MD029" ]
        exclude: "^.git"

  - repo: https://github.com/jumanjihouse/pre-commit-hooks
    rev: 3.0.0
    hooks:
      - id: shellcheck
        args: [ --severity, "error" ]
        exclude: "^.git"
        files: "\\.sh$"

  - repo: local
    hooks:
      - id: ansible-lint
        name: ansible-lint
        entry: ansible-lint -v
        language: python
        pass_filenames: false
        additional_dependencies:
          - .[community]

      - id: ansible-syntax-check
        name: ansible-syntax-check
        entry: env ANSIBLE_INVENTORY=inventory/local-tests.cfg ANSIBLE_REMOTE_USER=root ANSIBLE_BECOME="true" ANSIBLE_BECOME_USER=root ANSIBLE_VERBOSITY="3" ansible-playbook --syntax-check
        language: python
        files: "^cluster.yml|^upgrade-cluster.yml|^reset.yml|^extra_playbooks/upgrade-only-k8s.yml"

      - id: tox-inventory-builder
        name: tox-inventory-builder
        entry: bash -c "cd contrib/inventory_builder && tox"
        language: python
        pass_filenames: false

      - id: check-readme-versions
        name: check-readme-versions
        entry: tests/scripts/check_readme_versions.sh
        language: script
        pass_filenames: false

      - id: ci-matrix
        name: ci-matrix
        entry: tests/scripts/md-table/test.sh
        language: script
        pass_filenames: false

@@ -3,8 +3,6 @@ extends: default

ignore: |
  .git/
  # Generated file
  tests/files/custom_cni/cilium.yaml

rules:
  braces:

@@ -1 +0,0 @@
# See our release notes on [GitHub](https://github.com/kubernetes-sigs/kubespray/releases)

@@ -12,17 +12,11 @@ To install development dependencies you can set up a python virtual env with the
virtualenv venv
source venv/bin/activate
pip install -r tests/requirements.txt
ansible-galaxy install -r tests/requirements.yml
```

#### Linting

Kubespray uses [pre-commit](https://pre-commit.com) hook configuration to run several linters, please install this tool and use it to run validation tests before submitting a PR.

```ShellSession
pre-commit install
pre-commit run -a # To run pre-commit hook on all files in the repository, even if they were not modified
```

Kubespray uses `yamllint` and `ansible-lint`. To run them locally use `yamllint .` and `ansible-lint`. It is a good idea to add calls to these tools as part of your pre-commit hook and avoid a lot of back and forth on fixing linting issues (<https://support.gitkraken.com/working-with-repositories/githooksexample/>).

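The same checks can also be run directly, mirroring the hook arguments shown in the configuration above; a minimal sketch:

```ShellSession
# Run the linters the pre-commit hooks wire up, straight from the repo root.
yamllint --strict .
ansible-lint -v
```
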
#### Molecule

@@ -39,9 +33,7 @@ Vagrant with VirtualBox or libvirt driver helps you to quickly spin test cluster
1. Submit an issue describing your proposed change to the repo in question.
2. The [repo owners](OWNERS) will respond to your issue promptly.
3. Fork the desired repo, develop and test your code changes.
4. Install [pre-commit](https://pre-commit.com) and enable it in your development repo.
5. Address any pre-commit validation failures.
6. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
7. Submit a pull request.
8. Work with the reviewers on their suggestions.
9. Ensure to rebase to the HEAD of your target branch and squash un-necessary commits (<https://blog.carbonfive.com/always-squash-and-rebase-your-git-commits/>) before final merger of your contribution.
4. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
5. Submit a pull request.
6. Work with the reviewers on their suggestions.
7. Ensure to rebase to the HEAD of your target branch and squash un-necessary commits (<https://blog.carbonfive.com/always-squash-and-rebase-your-git-commits/>) before final merger of your contribution.

Dockerfile (72 changed lines)

@@ -1,45 +1,37 @@
# Use immutable image tags rather than mutable tags (like ubuntu:22.04)
FROM ubuntu:jammy-20230308
# Use immutable image tags rather than mutable tags (like ubuntu:20.04)
FROM ubuntu:focal-20220316

ARG ARCH=amd64
ARG TZ=Etc/UTC
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

RUN apt update -y \
    && apt install -y \
    libssl-dev python3-dev sshpass apt-transport-https jq moreutils \
    ca-certificates curl gnupg2 software-properties-common python3-pip unzip rsync git \
    && rm -rf /var/lib/apt/lists/*
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
    && add-apt-repository \
    "deb [arch=$ARCH] https://download.docker.com/linux/ubuntu \
    $(lsb_release -cs) \
    stable" \
    && apt update -y && apt-get install --no-install-recommends -y docker-ce \
    && rm -rf /var/lib/apt/lists/*

# Some tools like yamllint need this
# Pip needs this as well at the moment to install ansible
# (and potentially other packages)
# See: https://github.com/pypa/pip/issues/10219
ENV LANG=C.UTF-8 \
    DEBIAN_FRONTEND=noninteractive \
    PYTHONDONTWRITEBYTECODE=1
WORKDIR /kubespray
COPY *.yml ./
COPY *.cfg ./
COPY roles ./roles
COPY contrib ./contrib
COPY inventory ./inventory
COPY library ./library
COPY extra_playbooks ./extra_playbooks
COPY playbooks ./playbooks
COPY plugins ./plugins
ENV LANG=C.UTF-8

RUN apt update -q \
    && apt install -yq --no-install-recommends \
    curl \
    python3 \
    python3-pip \
    sshpass \
    vim \
    rsync \
    openssh-client \
    && pip install --no-compile --no-cache-dir \
    ansible==7.6.0 \
    ansible-core==2.14.6 \
    cryptography==41.0.1 \
    jinja2==3.1.2 \
    netaddr==0.8.0 \
    jmespath==1.0.1 \
    MarkupSafe==2.1.3 \
    ruamel.yaml==0.17.21 \
    passlib==1.7.4 \
    && KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
    && curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \
    && echo $(curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \
    && chmod a+x /usr/local/bin/kubectl \
    && rm -rf /var/lib/apt/lists/* /var/log/* \
    && find /usr -type d -name '*__pycache__' -prune -exec rm -rf {} \;
WORKDIR /kubespray
COPY . .
RUN /usr/bin/python3 -m pip install --no-cache-dir pip -U \
    && /usr/bin/python3 -m pip install --no-cache-dir -r tests/requirements.txt \
    && python3 -m pip install --no-cache-dir -r requirements.txt \
    && update-alternatives --install /usr/bin/python python /usr/bin/python3 1

RUN KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
    && curl -LO https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$ARCH/kubectl \
    && chmod a+x kubectl \
    && mv kubectl /usr/local/bin/kubectl

@@ -8,9 +8,6 @@ aliases:
    - floryut
    - oomichi
    - cristicalin
    - liupeng0518
    - yankay
    - mzaian
  kubespray-reviewers:
    - holmsten
    - bozzo
@@ -19,11 +16,6 @@ aliases:
    - jayonlau
    - cristicalin
    - liupeng0518
    - yankay
    - cyclinder
    - mzaian
    - mrfreezeex
    - erikjiang
  kubespray-emeritus_approvers:
    - riverzhang
    - atoms

README.md (130 changed lines)

@@ -13,7 +13,7 @@ You can get your invite [here](http://slack.k8s.io/)

## Quick Start

Below are several ways to use Kubespray to deploy a Kubernetes cluster.
To deploy the cluster you can use :

### Ansible

@@ -34,13 +34,6 @@ CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inv
cat inventory/mycluster/group_vars/all/all.yml
cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml

# Clean up old Kubernetes cluster with Ansible Playbook - run the playbook as root
# The option `--become` is required, as for example cleaning up SSL keys in /etc/,
# uninstalling old packages and interacting with various systemd daemons.
# Without --become the playbook will fail to run!
# And be mindful that it will remove the current kubernetes cluster (if it's running)!
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root reset.yml

# Deploy Kubespray with Ansible Playbook - run the playbook as root
# The option `--become` is required, as for example writing SSL keys in /etc/,
# installing packages and interacting with various systemd daemons.
@@ -48,50 +41,34 @@ ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
```

Note: When Ansible is already installed via system packages on the control node,
Python packages installed via `sudo pip install -r requirements.txt` will go to
a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on
Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on
Ubuntu). As a consequence, the `ansible-playbook` command will fail with:
Note: When Ansible is already installed via system packages on the control machine, other python packages installed via `sudo pip install -r requirements.txt` will go to a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on Ubuntu).
As a consequence, `ansible-playbook` command will fail with:

```raw
ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path.
```

This likely indicates that a task depends on a module present in ``requirements.txt``.
probably pointing on a task depending on a module present in requirements.txt.

One way of addressing this is to uninstall the system Ansible package then
reinstall Ansible via ``pip``, but this is not always possible and one must
take care regarding package versions.
A workaround consists of setting the `ANSIBLE_LIBRARY`
and `ANSIBLE_MODULE_UTILS` environment variables respectively to
the `ansible/modules` and `ansible/module_utils` subdirectories of the ``pip``
installation location, which is the ``Location`` shown by running
`pip show [package]` before executing `ansible-playbook`.
One way of solving this would be to uninstall the Ansible package and then, to install it via pip but it is not always possible.
A workaround consists of setting `ANSIBLE_LIBRARY` and `ANSIBLE_MODULE_UTILS` environment variables respectively to the `ansible/modules` and `ansible/module_utils` subdirectories of pip packages installation location, which can be found in the Location field of the output of `pip show [package]` before executing `ansible-playbook`.

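A minimal sketch of that workaround (the path is illustrative; take the real one from the `Location` field of your own `pip show` output):

```ShellSession
# Find where pip installed Ansible (the "Location:" field).
pip show ansible | grep Location
# Suppose it printed: Location: /usr/local/lib/python2.7/dist-packages
export ANSIBLE_LIBRARY=/usr/local/lib/python2.7/dist-packages/ansible/modules
export ANSIBLE_MODULE_UTILS=/usr/local/lib/python2.7/dist-packages/ansible/module_utils
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
```
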
A simple way to ensure you get all the correct version of Ansible is to use
the [pre-built docker image from Quay](https://quay.io/repository/kubespray/kubespray?tab=tags).
You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/)
to access the inventory and SSH key in the container, like this:
A simple way to ensure you get all the correct version of Ansible is to use the [pre-built docker image from Quay](https://quay.io/repository/kubespray/kubespray?tab=tags).
You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this:

```ShellSession
git checkout v2.22.1
docker pull quay.io/kubespray/kubespray:v2.22.1
docker pull quay.io/kubespray/kubespray:v2.19.0
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
  --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
  quay.io/kubespray/kubespray:v2.22.1 bash
  quay.io/kubespray/kubespray:v2.19.0 bash
# Inside the container you may now run the kubespray playbooks:
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
```

#### Collection

See [here](docs/ansible_collection.md) if you wish to use this repository as an Ansible collection

### Vagrant

For Vagrant we need to install Python dependencies for provisioning tasks.
Check that ``Python`` and ``pip`` are installed:
For Vagrant we need to install python dependencies for provisioning tasks.
Check if Python and pip are installed:

```ShellSession
python -V && pip -V
@@ -134,82 +111,65 @@ vagrant up
- [Adding/replacing a node](docs/nodes.md)
- [Upgrades basics](docs/upgrades.md)
- [Air-Gap installation](docs/offline-environment.md)
- [NTP](docs/ntp.md)
- [Hardening](docs/hardening.md)
- [Mirror](docs/mirror.md)
- [Roadmap](docs/roadmap.md)

## Supported Linux Distributions

- **Flatcar Container Linux by Kinvolk**
- **Debian** Bookworm, Bullseye, Buster
- **Ubuntu** 20.04, 22.04
- **CentOS/RHEL** 7, [8, 9](docs/centos.md#centos-8)
- **Fedora** 37, 38
- **Debian** Bullseye, Buster, Jessie, Stretch
- **Ubuntu** 16.04, 18.04, 20.04
- **CentOS/RHEL** 7, [8](docs/centos8.md)
- **Fedora** 34, 35
- **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
- **openSUSE** Leap 15.x/Tumbleweed
- **Oracle Linux** 7, [8, 9](docs/centos.md#centos-8)
- **Alma Linux** [8, 9](docs/centos.md#centos-8)
- **Rocky Linux** [8, 9](docs/centos.md#centos-8)
- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/kylinlinux.md))
- **Oracle Linux** 7, [8](docs/centos8.md)
- **Alma Linux** [8](docs/centos8.md)
- **Rocky Linux** [8](docs/centos8.md)
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md))
- **UOS Linux** (experimental: see [uos linux notes](docs/uoslinux.md))
- **openEuler** (experimental: see [openEuler notes](docs/openeuler.md))

Note: Upstart/SysV init based OS types are not supported.

## Supported Components

- Core
  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.27.7
  - [etcd](https://github.com/etcd-io/etcd) v3.5.9
  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.23.7
  - [etcd](https://github.com/etcd-io/etcd) v3.5.3
  - [docker](https://www.docker.com/) v20.10 (see note)
  - [containerd](https://containerd.io/) v1.7.5
  - [cri-o](http://cri-o.io/) v1.27 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
  - [containerd](https://containerd.io/) v1.6.4
  - [cri-o](http://cri-o.io/) v1.22 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
- Network Plugin
  - [cni-plugins](https://github.com/containernetworking/plugins) v1.2.0
  - [calico](https://github.com/projectcalico/calico) v3.25.2
  - [cilium](https://github.com/cilium/cilium) v1.13.4
  - [flannel](https://github.com/flannel-io/flannel) v0.22.0
  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.11.5
  - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1
  - [multus](https://github.com/k8snetworkplumbingwg/multus-cni) v3.8
  - [cni-plugins](https://github.com/containernetworking/plugins) v1.1.1
  - [calico](https://github.com/projectcalico/calico) v3.22.3
  - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
  - [cilium](https://github.com/cilium/cilium) v1.11.3
  - [flanneld](https://github.com/flannel-io/flannel) v0.17.0
  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.9.2
  - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.4.0
  - [multus](https://github.com/intel/multus-cni) v3.8
  - [weave](https://github.com/weaveworks/weave) v2.8.1
  - [kube-vip](https://github.com/kube-vip/kube-vip) v0.5.12
- Application
  - [cert-manager](https://github.com/jetstack/cert-manager) v1.11.1
  - [coredns](https://github.com/coredns/coredns) v1.10.1
  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.8.1
  - [krew](https://github.com/kubernetes-sigs/krew) v0.4.4
  - [argocd](https://argoproj.github.io/) v2.8.0
  - [helm](https://helm.sh/) v3.12.3
  - [metallb](https://metallb.universe.tf/) v0.13.9
  - [registry](https://github.com/distribution/distribution) v2.8.1
- Storage Plugin
  - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
  - [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
  - [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) v0.5.0
  - [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0
  - [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.22.0
  - [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.9.2
  - [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.24
  - [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0
  - [cert-manager](https://github.com/jetstack/cert-manager) v1.8.0
  - [coredns](https://github.com/coredns/coredns) v1.8.6
  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.2.1

## Container Runtime Notes

- Supported Docker versions are 18.09, 19.03, 20.10, 23.0 and 24.0. The *recommended* Docker version is 20.10 (except on Debian Bookworm, which no longer supports 20.10 and below). `Kubelet` might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. the YUM ``versionlock`` plugin or ``apt pin``.
- The list of available docker version is 18.09, 19.03 and 20.10. The recommended docker version is 20.10. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin.
- The cri-o version should be aligned with the respective kubernetes version (i.e. kube_version=1.20.x, crio_version=1.20)

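A hedged sketch of the version pinning mentioned in the Docker note above (package names vary by distribution; `docker-ce` is illustrative):

```ShellSession
# Debian/Ubuntu: hold the Docker packages at their current version.
apt-mark hold docker-ce docker-ce-cli
# RHEL/CentOS: the yum versionlock plugin achieves the same.
yum install -y yum-plugin-versionlock
yum versionlock add docker-ce
```
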
## Requirements

- **Minimum required version of Kubernetes is v1.25**
- **Ansible v2.14+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
- **Minimum required version of Kubernetes is v1.21**
- **Ansible v2.9.x, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
- The target servers are configured to allow **IPv4 forwarding**.
- If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**.
- The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
  In order to avoid any issue during deployment you should disable your firewall.
- If kubespray is run from non-root user account, correct privilege escalation method
- If kubespray is ran from non-root user account, correct privilege escalation method
  should be configured in the target servers. Then the `ansible_become` flag
  or command parameters `--become or -b` should be specified.

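A minimal sketch of verifying the IPv4 forwarding requirement on a target server (the persistence file name is illustrative):

```ShellSession
# Check IPv4 forwarding (1 = enabled).
sysctl net.ipv4.ip_forward
# Enable it immediately and persist it across reboots.
sysctl -w net.ipv4.ip_forward=1
echo 'net.ipv4.ip_forward = 1' > /etc/sysctl.d/99-forwarding.conf
```
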
@@ -223,15 +183,17 @@ These limits are safeguarded by Kubespray. Actual requirements for your workload

## Network Plugins

You can choose among ten network plugins. (default: `calico`, except Vagrant uses `flannel`)
You can choose between 10 network plugins. (default: `calico`, except Vagrant uses `flannel`)

- [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.

- [Calico](https://docs.tigera.io/calico/latest/about/) is a networking and network policy provider. Calico supports a flexible set of networking options
- [Calico](https://docs.projectcalico.org/latest/introduction/) is a networking and network policy provider. Calico supports a flexible set of networking options
  designed to give you the most efficient networking across a range of situations, including non-overlay
  and overlay networks, with or without BGP. Calico uses the same engine to enforce network policy for hosts,
  pods, and (if using Istio and Envoy) applications at the service mesh layer.

- [canal](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.

- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.

- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
@@ -248,10 +210,7 @@ You can choose among ten network plugins. (default: `calico`, except Vagrant use

- [multus](docs/multus.md): Multus is a meta CNI plugin that provides multiple network interface support to pods. For each interface Multus delegates CNI calls to secondary CNI plugins such as Calico, macvlan, etc.

- [custom_cni](roles/network-plugin/custom_cni/): You can specify some manifests that will be applied to the clusters to bring your own CNI and use ones not supported by Kubespray.
  See `tests/files/custom_cni/README.md` and `tests/files/custom_cni/values.yaml` for an example with a CNI provided by a Helm Chart.

The network plugin to use is defined by the variable `kube_network_plugin`. There is also an
The choice is defined with the variable `kube_network_plugin`. There is also an
option to leverage built-in cloud provider networking instead.
See also [Network checker](docs/netcheck.md).

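A minimal sketch of selecting a plugin (the inventory paths follow the sample layout shown earlier in this README):

```ShellSession
# kube_network_plugin lives in the cluster group_vars...
grep kube_network_plugin inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
# ...or can be overridden for a single run on the command line:
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root \
  -e kube_network_plugin=cilium cluster.yml
```
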
@@ -272,11 +231,10 @@ See also [Network checker](docs/netcheck.md).

- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/v4/doc/integrations/ansible.rst)
- [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform)
- [Kubean](https://github.com/kubean-io/kubean)

## CI Tests

[](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/-/pipelines)
[](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/pipelines)

CI/end-to-end tests sponsored by: [CNCF](https://cncf.io), [Equinix Metal](https://metal.equinix.com/), [OVHcloud](https://www.ovhcloud.com/), [ELASTX](https://elastx.se/).

RELEASE.md (26 changed lines)

@@ -9,10 +9,10 @@ The Kubespray Project is released on an as-needed basis. The process is as follo
5. Create the release note with [Kubernetes Release Notes Generator](https://github.com/kubernetes/release/blob/master/cmd/release-notes/README.md). See the following `Release note creation` section for the details.
6. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes
7. An approver creates a release branch in the form `release-X.Y`
8. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) container images are built and tagged. See the following `Container image creation` section for the details.
8. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) docker images are built and tagged
9. The `KUBESPRAY_VERSION` variable is updated in `.gitlab-ci.yml`
10. The release issue is closed
11. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
11. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
12. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`

## Major/minor releases and milestones

@@ -60,24 +60,4 @@ release-notes --start-sha <The start commit-id> --end-sha <The end commit-id> --
```

If the release note file (/tmp/kubespray-release-note) contains "### Uncategorized" pull requests, those pull requests don't have a valid kind label (`kind/feature`, etc.).
It is necessary to put a valid label on each pull request and run the above release-notes command again to get a better release note

## Container image creation

The container image `quay.io/kubespray/kubespray:vX.Y.Z` can be created from the Dockerfile in the kubespray root directory:

```shell
cd kubespray/
nerdctl build -t quay.io/kubespray/kubespray:vX.Y.Z .
nerdctl push quay.io/kubespray/kubespray:vX.Y.Z
```

The container image `quay.io/kubespray/vagrant:vX.Y.Z` can be created from build.sh of test-infra/vagrant-docker/:

```shell
cd kubespray/test-infra/vagrant-docker/
./build vX.Y.Z
```

Please note that the above operation requires the permission to push container images into quay.io/kubespray/.
If you don't have the permission, please ask for it on the #kubespray-dev channel.
It is necessary to put a valid label on each pull request and run the above release-notes command again to get a better release note)

@@ -9,7 +9,5 @@
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://kubernetes.io/security/
atoms
mattymo
floryut
oomichi
cristicalin

Vagrantfile vendored (42 changed lines)

@@ -19,18 +19,18 @@ SUPPORTED_OS = {
  "flatcar-beta" => {box: "flatcar-beta", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["beta"]},
  "flatcar-alpha" => {box: "flatcar-alpha", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["alpha"]},
  "flatcar-edge" => {box: "flatcar-edge", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["edge"]},
  "ubuntu1604" => {box: "generic/ubuntu1604", user: "vagrant"},
  "ubuntu1804" => {box: "generic/ubuntu1804", user: "vagrant"},
  "ubuntu2004" => {box: "generic/ubuntu2004", user: "vagrant"},
  "ubuntu2204" => {box: "generic/ubuntu2204", user: "vagrant"},
  "centos" => {box: "centos/7", user: "vagrant"},
  "centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
  "centos8" => {box: "centos/8", user: "vagrant"},
  "centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
  "almalinux8" => {box: "almalinux/8", user: "vagrant"},
  "almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
  "rockylinux8" => {box: "generic/rocky8", user: "vagrant"},
  "fedora37" => {box: "fedora/37-cloud-base", user: "vagrant"},
  "fedora38" => {box: "fedora/38-cloud-base", user: "vagrant"},
  "opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"},
  "fedora34" => {box: "fedora/34-cloud-base", user: "vagrant"},
  "fedora35" => {box: "fedora/35-cloud-base", user: "vagrant"},
  "opensuse" => {box: "opensuse/Leap-15.3.x86_64", user: "vagrant"},
  "opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
  "oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
  "oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
@@ -52,16 +52,16 @@ $shared_folders ||= {}
$forwarded_ports ||= {}
$subnet ||= "172.18.8"
$subnet_ipv6 ||= "fd3c:b398:0698:0756"
$os ||= "ubuntu2004"
$os ||= "ubuntu1804"
$network_plugin ||= "flannel"
# Setting multi_networking to true will install Multus: https://github.com/k8snetworkplumbingwg/multus-cni
# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
$multi_networking ||= "False"
$download_run_once ||= "True"
$download_force_cache ||= "False"
# The first three nodes are etcd servers
$etcd_instances ||= [$num_instances, 3].min
$etcd_instances ||= $num_instances
# The first two nodes are kube masters
$kube_master_instances ||= [$num_instances, 2].min
$kube_master_instances ||= $num_instances == 1 ? $num_instances : ($num_instances - 1)
# All nodes are kube nodes
$kube_node_instances ||= $num_instances
# The following only works when using the libvirt provider
@@ -81,13 +81,6 @@ $playbook ||= "cluster.yml"

host_vars = {}

# throw error if os is not supported
if ! SUPPORTED_OS.key?($os)
  puts "Unsupported OS: #{$os}"
  puts "Supported OS are: #{SUPPORTED_OS.keys.join(', ')}"
  exit 1
end

$box = SUPPORTED_OS[$os][:box]
# if $inventory is not set, try to use example
$inventory = "inventory/sample" if ! $inventory
@@ -207,8 +200,7 @@ Vagrant.configure("2") do |config|
    end

    ip = "#{$subnet}.#{i+100}"
    node.vm.network :private_network,
      :ip => ip,
    node.vm.network :private_network, ip: ip,
      :libvirt__guest_ipv6 => 'yes',
      :libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
      :libvirt__ipv6_prefix => "64",
@@ -218,22 +210,14 @@ Vagrant.configure("2") do |config|
    # Disable swap for each vm
    node.vm.provision "shell", inline: "swapoff -a"

    # ubuntu2004 and ubuntu2204 have IPv6 explicitly disabled. This undoes that.
    if ["ubuntu2004", "ubuntu2204"].include? $os
    # ubuntu1804 and ubuntu2004 have IPv6 explicitly disabled. This undoes that.
    if ["ubuntu1804", "ubuntu2004"].include? $os
      node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf"
      node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf"
    end
    # Hack for fedora37/38 to get the IP address of the second interface
    if ["fedora37", "fedora38"].include? $os
      config.vm.provision "shell", inline: <<-SHELL
        nmcli conn modify 'Wired connection 2' ipv4.addresses $(cat /etc/sysconfig/network-scripts/ifcfg-eth1 | grep IPADDR | cut -d "=" -f2)
        nmcli conn modify 'Wired connection 2' ipv4.method manual
        service NetworkManager restart
      SHELL
    end

    # Disable firewalld on oraclelinux/redhat vms
    if ["oraclelinux","oraclelinux8","rhel7","rhel8","rockylinux8"].include? $os
    if ["oraclelinux","oraclelinux8","rhel7","rhel8"].include? $os
      node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld"
    end

|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
[ssh_connection]
pipelining=True
ansible_ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
[defaults]
# https://github.com/ansible/ansible/issues/56930 (to ignore group names with - and .)
@@ -10,11 +10,11 @@ host_key_checking=False
gathering = smart
fact_caching = jsonfile
fact_caching_connection = /tmp
fact_caching_timeout = 86400
fact_caching_timeout = 7200
stdout_callback = default
display_skipped_hosts = no
library = ./library
callbacks_enabled = profile_tasks,ara_default
callback_whitelist = profile_tasks,ara_default
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
deprecation_warnings=False
inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds, .gpg
@@ -1,23 +1,34 @@
---
- name: Check Ansible version
  hosts: localhost
- hosts: localhost
  gather_facts: false
  become: no
  vars:
    minimal_ansible_version: 2.14.0
    maximal_ansible_version: 2.15.0
    minimal_ansible_version: 2.9.0
    minimal_ansible_version_2_10: 2.10.11
    maximal_ansible_version: 2.13.0
    ansible_connection: local
  tags: always
  tasks:
    - name: "Check {{ minimal_ansible_version }} <= Ansible version < {{ maximal_ansible_version }}"
      assert:
        msg: "Ansible must be between {{ minimal_ansible_version }} and {{ maximal_ansible_version }} exclusive - you have {{ ansible_version.string }}"
        msg: "Ansible must be between {{ minimal_ansible_version }} and {{ maximal_ansible_version }}"
        that:
          - ansible_version.string is version(minimal_ansible_version, ">=")
          - ansible_version.string is version(maximal_ansible_version, "<")
      tags:
        - check

    - name: "Check Ansible version > {{ minimal_ansible_version_2_10 }} when using ansible 2.10"
      assert:
        msg: "When using Ansible 2.10, the minimum supported version is {{ minimal_ansible_version_2_10 }}"
        that:
          - ansible_version.string is version(minimal_ansible_version_2_10, ">=")
          - ansible_version.string is version(maximal_ansible_version, "<")
      when:
        - ansible_version.string is version('2.10.0', ">=")
      tags:
        - check

    - name: "Check that python netaddr is installed"
      assert:
        msg: "Python netaddr is not present"
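The bounds that this playbook asserts can also be checked by hand before a run; a minimal sketch, assuming Ansible was installed into the active Python environment:

```shell
ansible --version | head -n1
python3 -c 'import ansible; print(ansible.__version__)'
```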
129
cluster.yml
@@ -1,3 +1,128 @@
---
- name: Install Kubernetes
  ansible.builtin.import_playbook: playbooks/cluster.yml
- name: Check ansible version
  import_playbook: ansible_version.yml

- name: Ensure compatibility with old groups
  import_playbook: legacy_groups.yml

- hosts: bastion[0]
  gather_facts: False
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }

- hosts: k8s_cluster:etcd
  strategy: linear
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  gather_facts: false
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: bootstrap-os, tags: bootstrap-os}

- name: Gather facts
  tags: always
  import_playbook: facts.yml

- hosts: k8s_cluster:etcd
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, tags: preinstall }
    - { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
    - { role: download, tags: download, when: "not skip_downloads" }

- hosts: etcd
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - role: etcd
      tags: etcd
      vars:
        etcd_cluster_setup: true
        etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
      when: etcd_deployment_type != "kubeadm"

- hosts: k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - role: etcd
      tags: etcd
      vars:
        etcd_cluster_setup: false
        etcd_events_cluster_setup: false
      when: etcd_deployment_type != "kubeadm"

- hosts: k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/node, tags: node }

- hosts: kube_control_plane
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/control-plane, tags: master }
    - { role: kubernetes/client, tags: client }
    - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }

- hosts: k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/kubeadm, tags: kubeadm}
    - { role: kubernetes/node-label, tags: node-label }
    - { role: network_plugin, tags: network }

- hosts: calico_rr
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }

- hosts: kube_control_plane[0]
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }

- hosts: kube_control_plane
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
    - { role: kubernetes-apps/network_plugin, tags: network }
    - { role: kubernetes-apps/policy_controller, tags: policy-controller }
    - { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
    - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
    - { role: kubernetes-apps, tags: apps }

- name: Apply resolv.conf changes now that cluster DNS is up
  hosts: k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
@@ -39,7 +39,7 @@ class SearchEC2Tags(object):
hosts[group] = []
tag_key = "kubespray-role"
tag_value = ["*"+group+"*"]
region = os.environ['AWS_REGION']
region = os.environ['REGION']

ec2 = boto3.resource('ec2', region)
filters = [{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}]
@@ -67,11 +67,6 @@ class SearchEC2Tags(object):
if node_labels_tag:
ansible_host['node_labels'] = dict([ label.strip().split('=') for label in node_labels_tag[0]['Value'].split(',') ])

##Set when instance actually has node_taints
node_taints_tag = list(filter(lambda t: t['Key'] == 'kubespray-node-taints', instance.tags))
if node_taints_tag:
ansible_host['node_taints'] = list([ taint.strip() for taint in node_taints_tag[0]['Value'].split(',') ])

hosts[group].append(dns_name)
hosts['_meta']['hostvars'][dns_name] = ansible_host
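With the script now reading the standard `AWS_REGION` variable, a hypothetical invocation of the dynamic inventory could look like the following sketch; the script path is an assumption based on the kubespray tree layout, not stated in this diff:

```shell
export AWS_REGION=eu-west-1
ansible-playbook -i contrib/aws_inventory/kubespray-aws-inventory.py cluster.yml -b
```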
@@ -1,6 +1,5 @@
---
- name: Generate Azure inventory
  hosts: localhost
- hosts: localhost
  gather_facts: False
  roles:
    - generate-inventory

@@ -1,6 +1,5 @@
---
- name: Generate Azure inventory
  hosts: localhost
- hosts: localhost
  gather_facts: False
  roles:
    - generate-inventory_2

@@ -1,6 +1,5 @@
---
- name: Generate Azure templates
  hosts: localhost
- hosts: localhost
  gather_facts: False
  roles:
    - generate-templates

@@ -1,6 +1,6 @@
---

- name: Query Azure VMs
- name: Query Azure VMs # noqa 301
  command: azure vm list-ip-address --json {{ azure_resource_group }}
  register: vm_list_cmd

@@ -1,14 +1,14 @@
---

- name: Query Azure VMs IPs
- name: Query Azure VMs IPs # noqa 301
  command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
  register: vm_ip_list_cmd

- name: Query Azure VMs Roles
- name: Query Azure VMs Roles # noqa 301
  command: az vm list -o json --resource-group {{ azure_resource_group }}
  register: vm_list_cmd

- name: Query Azure Load Balancer Public IP
- name: Query Azure Load Balancer Public IP # noqa 301
  command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
  register: lb_pubip_cmd

@@ -31,3 +31,4 @@
[k8s_cluster:children]
kube_node
kube_control_plane
@@ -1,11 +1,9 @@
---
- name: Create nodes as docker containers
  hosts: localhost
- hosts: localhost
  gather_facts: False
  roles:
    - { role: dind-host }

- name: Customize each node containers
  hosts: containers
- hosts: containers
  roles:
    - { role: dind-cluster }

@@ -1,9 +1,9 @@
---
- name: Set_fact distro_setup
- name: set_fact distro_setup
  set_fact:
    distro_setup: "{{ distro_settings[node_distro] }}"

- name: Set_fact other distro settings
- name: set_fact other distro settings
  set_fact:
    distro_user: "{{ distro_setup['user'] }}"
    distro_ssh_service: "{{ distro_setup['ssh_service'] }}"
@@ -43,7 +43,7 @@
  package:
    name: "{{ item }}"
    state: present
  with_items: "{{ distro_extra_packages + ['rsyslog', 'openssh-server'] }}"
  with_items: "{{ distro_extra_packages }} + [ 'rsyslog', 'openssh-server' ]"

- name: Start needed services
  service:
@@ -66,8 +66,8 @@
    dest: "/etc/sudoers.d/{{ distro_user }}"
    mode: 0640

- name: "Add my pubkey to {{ distro_user }} user authorized keys"
  ansible.posix.authorized_key:
- name: Add my pubkey to "{{ distro_user }}" user authorized keys
  authorized_key:
    user: "{{ distro_user }}"
    state: present
    key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}"

@@ -1,9 +1,9 @@
---
- name: Set_fact distro_setup
- name: set_fact distro_setup
  set_fact:
    distro_setup: "{{ distro_settings[node_distro] }}"

- name: Set_fact other distro settings
- name: set_fact other distro settings
  set_fact:
    distro_image: "{{ distro_setup['image'] }}"
    distro_init: "{{ distro_setup['init'] }}"
@@ -13,7 +13,7 @@
    distro_agetty_svc: "{{ distro_setup['agetty_svc'] }}"

- name: Create dind node containers from "containers" inventory section
  community.docker.docker_container:
  docker_container:
    image: "{{ distro_image }}"
    name: "{{ item }}"
    state: started
@@ -69,7 +69,7 @@

# Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
# handle manually
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave) # noqa 301
  raw: |
    echo {{ item | hash('sha1') }} > /etc/machine-id.new
    mv -b /etc/machine-id.new /etc/machine-id
@@ -79,6 +79,7 @@
  with_items: "{{ containers.results }}"

- name: Early hack image install to adapt for DIND
  # noqa 302 - this task uses the raw module intentionally
  raw: |
    rm -fv /usr/bin/udevadm /usr/sbin/udevadm
  delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
@@ -17,7 +17,7 @@ pass_or_fail() {
test_distro() {
local distro=${1:?};shift
local extra="${*:-}"
local prefix="${distro[${extra}]}"
local prefix="$distro[${extra}]}"
ansible-playbook -i hosts dind-cluster.yaml -e node_distro=$distro
pass_or_fail "$prefix: dind-nodes" || return 1
(cd ../..
@@ -71,15 +71,15 @@ for spec in ${SPECS}; do
echo "Loading file=${spec} ..."
. ${spec} || continue
: ${DISTROS:?} || continue
echo "DISTROS:" "${DISTROS[@]}"
echo "DISTROS=${DISTROS[@]}"
echo "EXTRAS->"
printf " %s\n" "${EXTRAS[@]}"
let n=1
for distro in "${DISTROS[@]}"; do
for distro in ${DISTROS[@]}; do
for extra in "${EXTRAS[@]:-NULL}"; do
# Magic value to let this for run once:
[[ ${extra} == NULL ]] && unset extra
docker rm -f "${NODES[@]}"
docker rm -f ${NODES[@]}
printf -v file_out "%s/%s-%02d.out" ${OUTPUT_DIR} ${spec} $((n++))
{
info "${distro}[${extra}] START: file_out=${file_out}"
@@ -1,3 +1,3 @@
configparser>=3.3.0
ipaddress
ruamel.yaml>=0.15.88
ipaddress

@@ -1,3 +1,3 @@
hacking>=0.10.2
mock>=1.3.0
pytest>=2.8.0
mock>=1.3.0
@@ -13,7 +13,7 @@
# under the License.

import inventory
from io import StringIO
from test import support
import unittest
from unittest import mock

@@ -41,7 +41,7 @@ class TestInventoryPrintHostnames(unittest.TestCase):
    'access_ip': '10.90.0.3'}}}})
with mock.patch('builtins.open', mock_io):
    with self.assertRaises(SystemExit) as cm:
        with mock.patch('sys.stdout', new_callable=StringIO) as stdout:
        with support.captured_stdout() as stdout:
            inventory.KubesprayInventory(
                changed_hosts=["print_hostnames"],
                config_file="file")
@@ -1,27 +1,21 @@
[tox]
minversion = 1.6
skipsdist = True
envlist = pep8
envlist = pep8, py33

[testenv]
allowlist_externals = py.test
whitelist_externals = py.test
usedevelop = True
deps =
  -r{toxinidir}/requirements.txt
  -r{toxinidir}/test-requirements.txt
setenv = VIRTUAL_ENV={envdir}
passenv =
  http_proxy
  HTTP_PROXY
  https_proxy
  HTTPS_PROXY
  no_proxy
  NO_PROXY
passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
commands = pytest -vv #{posargs:./tests}

[testenv:pep8]
usedevelop = False
allowlist_externals = bash
whitelist_externals = bash
commands =
  bash -c "find {toxinidir}/* -type f -name '*.py' -print0 | xargs -0 flake8"
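To exercise the updated config locally, the lint environment named in `envlist` can be run on its own; a minimal sketch, assuming tox is installed:

```shell
tox -e pep8
```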
@@ -1,2 +1,3 @@
#k8s_deployment_user: kubespray
#k8s_deployment_user_pkey_path: /tmp/ssh_rsa

@@ -1,9 +1,8 @@
---
- name: Prepare Hypervisor to later install kubespray VMs
  hosts: localhost
- hosts: localhost
  gather_facts: False
  become: yes
  vars:
    bootstrap_os: none
  - bootstrap_os: none
  roles:
    - { role: kvm-setup }
    - kvm-setup

@@ -22,9 +22,9 @@
    - ntp
  when: ansible_os_family == "Debian"

- name: Create deployment user if required
  include_tasks: user.yml
# Create deployment user if required
- include: user.yml
  when: k8s_deployment_user is defined

- name: Set proper sysctl values
  import_tasks: sysctl.yml
# Set proper sysctl values
- include: sysctl.yml
@@ -1,6 +1,6 @@
---
- name: Load br_netfilter module
  community.general.modprobe:
  modprobe:
    name: br_netfilter
    state: present
  register: br_netfilter
@@ -25,7 +25,7 @@

- name: Enable net.ipv4.ip_forward in sysctl
  ansible.posix.sysctl:
  sysctl:
    name: net.ipv4.ip_forward
    value: 1
    sysctl_file: "{{ sysctl_file_path }}"
@@ -33,7 +33,7 @@
    reload: yes

- name: Set bridge-nf-call-{arptables,iptables} to 0
  ansible.posix.sysctl:
  sysctl:
    name: "{{ item }}"
    state: present
    value: 0
@@ -1,9 +1,8 @@
---
- name: Check ansible version
  import_playbook: kubernetes_sigs.kubespray.ansible_version
  import_playbook: ansible_version.yml

- name: Install mitogen
  hosts: localhost
- hosts: localhost
  strategy: linear
  vars:
    mitogen_version: 0.3.2
@@ -20,25 +19,24 @@
      - "{{ playbook_dir }}/plugins/mitogen"
      - "{{ playbook_dir }}/dist"

  - name: Download mitogen release
  - name: download mitogen release
    get_url:
      url: "{{ mitogen_url }}"
      dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
      validate_certs: true
      mode: 0644

  - name: Extract archive
  - name: extract archive
    unarchive:
      src: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
      dest: "{{ playbook_dir }}/dist/"

  - name: Copy plugin
    ansible.posix.synchronize:
  - name: copy plugin
    synchronize:
      src: "{{ playbook_dir }}/dist/mitogen-{{ mitogen_version }}/"
      dest: "{{ playbook_dir }}/plugins/mitogen"

  - name: Add strategy to ansible.cfg
    community.general.ini_file:
  - name: add strategy to ansible.cfg
    ini_file:
      path: ansible.cfg
      mode: 0644
      section: "{{ item.section | d('defaults') }}"
@@ -1,29 +1,24 @@
---
- name: Bootstrap hosts
  hosts: gfs-cluster
- hosts: gfs-cluster
  gather_facts: false
  vars:
    ansible_ssh_pipelining: false
  roles:
    - { role: bootstrap-os, tags: bootstrap-os}

- name: Gather facts
  hosts: all
- hosts: all
  gather_facts: true

- name: Install glusterfs server
  hosts: gfs-cluster
- hosts: gfs-cluster
  vars:
    ansible_ssh_pipelining: true
  roles:
    - { role: glusterfs/server }

- name: Install glusterfs servers
  hosts: k8s_cluster
- hosts: k8s_cluster
  roles:
    - { role: glusterfs/client }

- name: Configure Kubernetes to use glusterfs
  hosts: kube_control_plane[0]
- hosts: kube_control_plane[0]
  roles:
    - { role: kubernetes-pv }

@@ -41,3 +41,4 @@

# [network-storage:children]
# gfs-cluster
@@ -14,16 +14,12 @@ This role performs basic installation and setup of Gluster, but it does not conf

Available variables are listed below, along with default values (see `defaults/main.yml`):

```yaml
glusterfs_default_release: ""
```

You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy).

```yaml
glusterfs_ppa_use: yes
glusterfs_ppa_version: "3.5"
```

For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](https://docs.gluster.org/en/latest/Quick-Start-Guide/Quickstart/) for more info.

@@ -33,11 +29,9 @@ None.

## Example Playbook

```yaml
- hosts: server
  roles:
    - geerlingguy.glusterfs
```

For a real-world use example, read through [Simple GlusterFS Setup with Ansible](http://www.jeffgeerling.com/blog/simple-glusterfs-setup-ansible), a blog post by this role's author, which is included in Chapter 8 of [Ansible for DevOps](https://www.ansiblefordevops.com/).
@@ -6,12 +6,12 @@ galaxy_info:
  description: GlusterFS installation for Linux.
  company: "Midwestern Mac, LLC"
  license: "license (BSD, MIT)"
  min_ansible_version: "2.0"
  min_ansible_version: 2.0
  platforms:
    - name: EL
      versions:
        - "6"
        - "7"
        - 6
        - 7
    - name: Ubuntu
      versions:
        - precise
@@ -3,19 +3,14 @@
# hyperkube and needs to be installed as part of the system.

# Setup/install tasks.
- name: Setup RedHat distros for glusterfs
  include_tasks: setup-RedHat.yml
- include: setup-RedHat.yml
  when: ansible_os_family == 'RedHat' and groups['gfs-cluster'] is defined

- name: Setup Debian distros for glusterfs
  include_tasks: setup-Debian.yml
- include: setup-Debian.yml
  when: ansible_os_family == 'Debian' and groups['gfs-cluster'] is defined

- name: Ensure Gluster mount directories exist.
  file:
    path: "{{ item }}"
    state: directory
    mode: 0775
  file: "path={{ item }} state=directory mode=0775"
  with_items:
    - "{{ gluster_mount_dir }}"
  when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined
@@ -7,7 +7,7 @@
  register: glusterfs_ppa_added
  when: glusterfs_ppa_use

- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa no-handler
- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa 503
  apt:
    name: "{{ item }}"
    state: absent
@@ -1,14 +1,10 @@
---
- name: Install Prerequisites
  package:
    name: "{{ item }}"
    state: present
  package: name={{ item }} state=present
  with_items:
    - "centos-release-gluster{{ glusterfs_default_release }}"

- name: Install Packages
  package:
    name: "{{ item }}"
    state: present
  package: name={{ item }} state=present
  with_items:
    - glusterfs-client
@@ -6,12 +6,12 @@ galaxy_info:
  description: GlusterFS installation for Linux.
  company: "Midwestern Mac, LLC"
  license: "license (BSD, MIT)"
  min_ansible_version: "2.0"
  min_ansible_version: 2.0
  platforms:
    - name: EL
      versions:
        - "6"
        - "7"
        - 6
        - 7
    - name: Ubuntu
      versions:
        - precise
@@ -4,58 +4,40 @@
  include_vars: "{{ ansible_os_family }}.yml"

# Install xfs package
- name: Install xfs Debian
  apt:
    name: xfsprogs
    state: present
- name: install xfs Debian
  apt: name=xfsprogs state=present
  when: ansible_os_family == "Debian"

- name: Install xfs RedHat
  package:
    name: xfsprogs
    state: present
- name: install xfs RedHat
  package: name=xfsprogs state=present
  when: ansible_os_family == "RedHat"

# Format external volumes in xfs
- name: Format volumes in xfs
  community.general.filesystem:
    fstype: xfs
    dev: "{{ disk_volume_device_1 }}"
  filesystem: "fstype=xfs dev={{ disk_volume_device_1 }}"

# Mount external volumes
- name: Mounting new xfs filesystem
  ansible.posix.mount:
    name: "{{ gluster_volume_node_mount_dir }}"
    src: "{{ disk_volume_device_1 }}"
    fstype: xfs
    state: mounted
- name: mounting new xfs filesystem
  mount: "name={{ gluster_volume_node_mount_dir }} src={{ disk_volume_device_1 }} fstype=xfs state=mounted"

# Setup/install tasks.
- name: Setup RedHat distros for glusterfs
  include_tasks: setup-RedHat.yml
- include: setup-RedHat.yml
  when: ansible_os_family == 'RedHat'

- name: Setup Debian distros for glusterfs
  include_tasks: setup-Debian.yml
- include: setup-Debian.yml
  when: ansible_os_family == 'Debian'

- name: Ensure GlusterFS is started and enabled at boot.
  service:
    name: "{{ glusterfs_daemon }}"
    state: started
    enabled: yes
  service: "name={{ glusterfs_daemon }} state=started enabled=yes"

- name: Ensure Gluster brick and mount directories exist.
  file:
    path: "{{ item }}"
    state: directory
    mode: 0775
  file: "path={{ item }} state=directory mode=0775"
  with_items:
    - "{{ gluster_brick_dir }}"
    - "{{ gluster_mount_dir }}"

- name: Configure Gluster volume with replicas
  gluster.gluster.gluster_volume:
  gluster_volume:
    state: present
    name: "{{ gluster_brick_name }}"
    brick: "{{ gluster_brick_dir }}"
@@ -67,7 +49,7 @@
  when: groups['gfs-cluster']|length > 1

- name: Configure Gluster volume without replicas
  gluster.gluster.gluster_volume:
  gluster_volume:
    state: present
    name: "{{ gluster_brick_name }}"
    brick: "{{ gluster_brick_dir }}"
@@ -78,7 +60,7 @@
  when: groups['gfs-cluster']|length <= 1

- name: Mount glusterfs to retrieve disk size
  ansible.posix.mount:
  mount:
    name: "{{ gluster_mount_dir }}"
    src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
    fstype: glusterfs
@@ -87,8 +69,7 @@
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

- name: Get Gluster disk size
  setup:
    filter: ansible_mounts
  setup: filter=ansible_mounts
  register: mounts_data
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

@@ -105,7 +86,7 @@
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

- name: Unmount glusterfs
  ansible.posix.mount:
  mount:
    name: "{{ gluster_mount_dir }}"
    fstype: glusterfs
    src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
@@ -7,7 +7,7 @@
  register: glusterfs_ppa_added
  when: glusterfs_ppa_use

- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa no-handler
- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa 503
  apt:
    name: "{{ item }}"
    state: absent
@@ -1,15 +1,11 @@
---
- name: Install Prerequisites
  package:
    name: "{{ item }}"
    state: present
  package: name={{ item }} state=present
  with_items:
    - "centos-release-gluster{{ glusterfs_default_release }}"

- name: Install Packages
  package:
    name: "{{ item }}"
    state: present
  package: name={{ item }} state=present
  with_items:
    - glusterfs-server
    - glusterfs-client

@@ -21,3 +21,4 @@
{% endfor %}
]
}
@@ -1,11 +1,9 @@
---
- name: Tear down heketi
  hosts: kube_control_plane[0]
- hosts: kube_control_plane[0]
  roles:
    - { role: tear-down }

- name: Teardown disks in heketi
  hosts: heketi-node
- hosts: heketi-node
  become: yes
  roles:
    - { role: tear-down-disks }

@@ -1,11 +1,9 @@
---
- name: Prepare heketi install
  hosts: heketi-node
- hosts: heketi-node
  roles:
    - { role: prepare }

- name: Provision heketi
  hosts: kube_control_plane[0]
- hosts: kube_control_plane[0]
  tags:
    - "provision"
  roles:
@@ -5,7 +5,7 @@
    - "dm_snapshot"
    - "dm_mirror"
    - "dm_thin_pool"
  community.general.modprobe:
  modprobe:
    name: "{{ item }}"
    state: "present"

@@ -1,3 +1,3 @@
---
- name: "Stop port forwarding"
- name: "stop port forwarding"
  command: "killall "
@@ -6,7 +6,7 @@
- name: "Delete bootstrap Heketi."
  command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
  when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0"
- name: "Ensure there is nothing left over."
- name: "Ensure there is nothing left over." # noqa 301
  command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
  register: "heketi_result"
  until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"

@@ -14,7 +14,7 @@
- name: "Copy topology configuration into container."
  changed_when: false
  command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
- name: "Load heketi topology." # noqa no-handler
- name: "Load heketi topology." # noqa 503
  when: "render.changed"
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
  register: "load_heketi"

@@ -18,7 +18,7 @@
- name: "Provision database volume."
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
  when: "heketi_database_volume_exists is undefined"
- name: "Copy configuration from pod."
- name: "Copy configuration from pod." # noqa 301
  become: true
  command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"
- name: "Get heketi volume ids."
@@ -11,10 +11,10 @@
    src: "topology.json.j2"
    dest: "{{ kube_config_dir }}/topology.json"
    mode: 0644
- name: "Copy topology configuration into container." # noqa no-handler
- name: "Copy topology configuration into container." # noqa 503
  when: "rendering.changed"
  command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
- name: "Load heketi topology." # noqa no-handler
- name: "Load heketi topology." # noqa 503
  when: "rendering.changed"
  command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
- name: "Get heketi topology."
@@ -22,7 +22,7 @@
  ignore_errors: true # noqa ignore-errors
  changed_when: false

- name: "Remove volume groups."
- name: "Remove volume groups." # noqa 301
  environment:
    PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
  become: true
@@ -30,7 +30,7 @@
  with_items: "{{ volume_groups.stdout_lines }}"
  loop_control: { loop_var: "volume_group" }

- name: "Remove physical volume from cluster disks."
- name: "Remove physical volume from cluster disks." # noqa 301
  environment:
    PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
  become: true
@@ -1,43 +1,43 @@
---
- name: Remove storage class.
- name: Remove storage class. # noqa 301
  command: "{{ bin_dir }}/kubectl delete storageclass gluster"
  ignore_errors: true # noqa ignore-errors
- name: Tear down heketi.
- name: Tear down heketi. # noqa 301
  command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
  ignore_errors: true # noqa ignore-errors
- name: Tear down heketi.
- name: Tear down heketi. # noqa 301
  command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
  ignore_errors: true # noqa ignore-errors
- name: Tear down bootstrap.
  include_tasks: "../../provision/tasks/bootstrap/tear-down.yml"
- name: Ensure there is nothing left over.
- name: Ensure there is nothing left over. # noqa 301
  command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
  register: "heketi_result"
  until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
  retries: 60
  delay: 5
- name: Ensure there is nothing left over.
- name: Ensure there is nothing left over. # noqa 301
  command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
  register: "heketi_result"
  until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
  retries: 60
  delay: 5
- name: Tear down glusterfs.
- name: Tear down glusterfs. # noqa 301
  command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
  ignore_errors: true # noqa ignore-errors
- name: Remove heketi storage service.
- name: Remove heketi storage service. # noqa 301
  command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
  ignore_errors: true # noqa ignore-errors
- name: Remove heketi gluster role binding
- name: Remove heketi gluster role binding # noqa 301
  command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
  ignore_errors: true # noqa ignore-errors
- name: Remove heketi config secret
- name: Remove heketi config secret # noqa 301
  command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
  ignore_errors: true # noqa ignore-errors
- name: Remove heketi db backup
- name: Remove heketi db backup # noqa 301
  command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
  ignore_errors: true # noqa ignore-errors
- name: Remove heketi service account
- name: Remove heketi service account # noqa 301
  command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
  ignore_errors: true # noqa ignore-errors
- name: Get secrets
@@ -27,7 +27,7 @@ manage-offline-container-images.sh register

## generate_list.sh

This script generates the list of downloaded files and the list of container images based on the `roles/download/defaults/main/main.yml` file.
This script generates the list of downloaded files and the list of container images based on the `roles/download/defaults/main.yml` file.

Running this script will execute the `generate_list.yml` playbook in the kubespray root directory and generate four files:
all downloaded file URLs in files.list, all container images in images.list, and jinja2 templates in *.template.
@@ -45,21 +45,3 @@ temp

In some cases you may want to update a component version; you can declare version variables in the ansible inventory file or group_vars,
then run `./generate_list.sh -i [inventory_file]` to update files.list and images.list.
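For example, a hypothetical override pinning a component version before regenerating the lists; the variable name and inventory paths below are illustrative, not prescribed by this document:

```shell
echo 'kube_version: v1.27.5' >> inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml
./generate_list.sh -i inventory/sample/inventory.ini
```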
## manage-offline-files.sh

This script downloads all files listed in `temp/files.list` and runs an nginx container to serve them for offline download.

Step (1): generate `files.list`

```shell
./generate_list.sh
```

Step (2): download the files and run the nginx container

```shell
./manage-offline-files.sh
```

When the nginx container is running, the files can be accessed at <http://127.0.0.1:8080/>.
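A quick way to verify the file server is up, assuming the script's default port 8080:

```shell
curl -I http://127.0.0.1:8080/
```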
@@ -5,7 +5,7 @@ CURRENT_DIR=$(cd $(dirname $0); pwd)
TEMP_DIR="${CURRENT_DIR}/temp"
REPO_ROOT_DIR="${CURRENT_DIR%/contrib/offline}"

: ${DOWNLOAD_YML:="roles/download/defaults/main/main.yml"}
: ${DOWNLOAD_YML:="roles/download/defaults/main.yml"}

mkdir -p ${TEMP_DIR}

@@ -19,7 +19,7 @@ sed -n '/^downloads:/,/download_defaults:/p' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
  | sed 'N;s#\n# #g' | tr ' ' ':' | sed 's/\"//g' > ${TEMP_DIR}/images.list.template

# add kube-* images to images list template
# Those container images are downloaded by kubeadm, then roles/download/defaults/main/main.yml
# Those container images are downloaded by kubeadm, then roles/download/defaults/main.yml
# doesn't contain those images. That is reason why here needs to put those images into the
# list separately.
KUBE_IMAGES="kube-apiserver kube-controller-manager kube-scheduler kube-proxy"
@@ -1,6 +1,5 @@
---
- name: Collect container images for offline deployment
  hosts: localhost
- hosts: localhost
  become: no

  roles:
@@ -12,11 +11,9 @@

  tasks:
    # Generate files.list and images.list files from templates.
    - name: Collect container images for offline deployment
      template:
    - template:
        src: ./contrib/offline/temp/{{ item }}.list.template
        dest: ./contrib/offline/temp/{{ item }}.list
        mode: 0644
      with_items:
        - files
        - images
@@ -15,7 +15,7 @@ function create_container_image_tar() {
IMAGES=$(kubectl describe pods --all-namespaces | grep " Image:" | awk '{print $2}' | sort | uniq)
# NOTE: etcd and pause cannot be seen as pods.
# The pause image is used for --pod-infra-container-image option of kubelet.
EXT_IMAGES=$(kubectl cluster-info dump | egrep "quay.io/coreos/etcd:|registry.k8s.io/pause:" | sed s@\"@@g)
EXT_IMAGES=$(kubectl cluster-info dump | egrep "quay.io/coreos/etcd:|k8s.gcr.io/pause:" | sed s@\"@@g)
IMAGES="${IMAGES} ${EXT_IMAGES}"

rm -f ${IMAGE_TAR_FILE}
@@ -46,12 +46,12 @@ function create_container_image_tar() {

# NOTE: Here removes the following repo parts from each image
# so that these parts will be replaced with Kubespray.
# - kube_image_repo: "registry.k8s.io"
# - kube_image_repo: "k8s.gcr.io"
# - gcr_image_repo: "gcr.io"
# - docker_image_repo: "docker.io"
# - quay_image_repo: "quay.io"
FIRST_PART=$(echo ${image} | awk -F"/" '{print $1}')
if [ "${FIRST_PART}" = "registry.k8s.io" ] ||
if [ "${FIRST_PART}" = "k8s.gcr.io" ] ||
   [ "${FIRST_PART}" = "gcr.io" ] ||
   [ "${FIRST_PART}" = "docker.io" ] ||
   [ "${FIRST_PART}" = "quay.io" ] ||
@@ -1,44 +0,0 @@
#!/bin/bash

CURRENT_DIR=$( dirname "$(readlink -f "$0")" )
OFFLINE_FILES_DIR_NAME="offline-files"
OFFLINE_FILES_DIR="${CURRENT_DIR}/${OFFLINE_FILES_DIR_NAME}"
OFFLINE_FILES_ARCHIVE="${CURRENT_DIR}/offline-files.tar.gz"
FILES_LIST=${FILES_LIST:-"${CURRENT_DIR}/temp/files.list"}
NGINX_PORT=8080

# download files
if [ ! -f "${FILES_LIST}" ]; then
  echo "${FILES_LIST} should exist, run ./generate_list.sh first."
  exit 1
fi

rm -rf "${OFFLINE_FILES_DIR}"
rm "${OFFLINE_FILES_ARCHIVE}"
mkdir "${OFFLINE_FILES_DIR}"

wget -x -P "${OFFLINE_FILES_DIR}" -i "${FILES_LIST}"
tar -czvf "${OFFLINE_FILES_ARCHIVE}" "${OFFLINE_FILES_DIR_NAME}"

[ -n "$NO_HTTP_SERVER" ] && echo "skip to run nginx" && exit 0

# run nginx container server
if command -v nerdctl 1>/dev/null 2>&1; then
  runtime="nerdctl"
elif command -v podman 1>/dev/null 2>&1; then
  runtime="podman"
elif command -v docker 1>/dev/null 2>&1; then
  runtime="docker"
else
  echo "No supported container runtime found"
  exit 1
fi

sudo "${runtime}" container inspect nginx >/dev/null 2>&1
if [ $? -ne 0 ]; then
  sudo "${runtime}" run \
    --restart=always -d -p ${NGINX_PORT}:80 \
    --volume "${OFFLINE_FILES_DIR}:/usr/share/nginx/html/download" \
    --volume "${CURRENT_DIR}"/nginx.conf:/etc/nginx/nginx.conf \
    --name nginx nginx:alpine
fi
@@ -1,39 +0,0 @@
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
include /usr/share/nginx/modules/*.conf;
events {
  worker_connections 1024;
}
http {
  log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                  '$status $body_bytes_sent "$http_referer" '
                  '"$http_user_agent" "$http_x_forwarded_for"';
  access_log /var/log/nginx/access.log main;
  sendfile on;
  tcp_nopush on;
  tcp_nodelay on;
  keepalive_timeout 65;
  types_hash_max_size 2048;
  default_type application/octet-stream;
  include /etc/nginx/conf.d/*.conf;
  server {
    listen 80 default_server;
    listen [::]:80 default_server;
    server_name _;
    include /etc/nginx/default.d/*.conf;
    location / {
      root /usr/share/nginx/html/download;
      autoindex on;
      autoindex_exact_size off;
      autoindex_localtime on;
    }
    error_page 404 /404.html;
    location = /40x.html {
    }
    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
    }
  }
}
@@ -1,5 +1,4 @@
---
- name: Disable firewalld/ufw
  hosts: all
- hosts: all
  roles:
    - { role: prepare }

@@ -1,8 +1,5 @@
---
- name: Disable firewalld and ufw
  when:
    - disable_service_firewall is defined and disable_service_firewall
  block:
- block:
    - name: List services
      service_facts:

@@ -12,7 +9,7 @@
        state: stopped
        enabled: no
      when:
        "'firewalld.service' in services and services['firewalld.service'].status != 'not-found'"
        "'firewalld.service' in services"

    - name: Disable service ufw
      systemd:
@@ -20,4 +17,7 @@
        state: stopped
        enabled: no
      when:
        "'ufw.service' in services and services['ufw.service'].status != 'not-found'"
        "'ufw.service' in services"

  when:
    - disable_service_firewall is defined and disable_service_firewall
@@ -36,7 +36,8 @@ terraform apply -var-file=credentials.tfvars
```

- Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`
- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts with ssh using bastion host use generated `ssh-bastion.conf`. Ansible automatically detects bastion and changes `ssh_args`
- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts with ssh using bastion host use generated ssh-bastion.conf.
  Ansible automatically detects bastion and changes ssh_args

```commandline
ssh -F ./ssh-bastion.conf user@$ip
@@ -1,15 +0,0 @@
output "k8s_masters" {
  value = equinix_metal_device.k8s_master.*.access_public_ipv4
}

output "k8s_masters_no_etc" {
  value = equinix_metal_device.k8s_master_no_etcd.*.access_public_ipv4
}

output "k8s_etcds" {
  value = equinix_metal_device.k8s_etcd.*.access_public_ipv4
}

output "k8s_nodes" {
  value = equinix_metal_device.k8s_node.*.access_public_ipv4
}
@@ -1,17 +0,0 @@
terraform {
  required_version = ">= 1.0.0"

  provider_meta "equinix" {
    module_name = "kubespray"
  }
  required_providers {
    equinix = {
      source  = "equinix/equinix"
      version = "~> 1.14"
    }
  }
}

# Configure the Equinix Metal Provider
provider "equinix" {
}
@@ -31,7 +31,9 @@ The setup looks like following

## Requirements

* Terraform 0.13.0 or newer (0.12 also works if you modify the provider block to include version and remove all `versions.tf` files)
* Terraform 0.13.0 or newer

*0.12 also works if you modify the provider block to include version and remove all `versions.tf` files*

## Quickstart
@@ -4,7 +4,7 @@ module "kubernetes" {
  source = "./modules/kubernetes-cluster"

  prefix = var.prefix
  zone   = var.zone

  machines = var.machines

  ssh_public_keys = var.ssh_public_keys
@@ -75,11 +75,6 @@ ansible-playbook -i contrib/terraform/gcs/inventory.ini cluster.yml -b -v
* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server
* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on port 30000-32767 (kubernetes nodeports)
* `ingress_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to ingress on ports 80 and 443
* `extra_ingress_firewalls`: Additional ingress firewall rules. Key will be used as the name of the rule
  * `source_ranges`: List of IP ranges (CIDR). Example: `["8.8.8.8"]`
  * `protocol`: Protocol. Example `"tcp"`
  * `ports`: List of ports, as strings. Example `["53"]`
  * `target_tags`: List of target tags (either the machine name or `control-plane` or `worker`). Example: `["control-plane", "worker-0"]`

### Optional
@@ -34,6 +34,4 @@ module "kubernetes" {
  api_server_whitelist = var.api_server_whitelist
  nodeport_whitelist   = var.nodeport_whitelist
  ingress_whitelist    = var.ingress_whitelist

  extra_ingress_firewalls = var.extra_ingress_firewalls
}
@@ -219,7 +219,7 @@ resource "google_compute_instance" "master" {
  machine_type = each.value.size
  zone         = each.value.zone

  tags = ["control-plane", "master", each.key]
  tags = ["master"]

  boot_disk {
    initialize_params {
@@ -325,7 +325,7 @@ resource "google_compute_instance" "worker" {
  machine_type = each.value.size
  zone         = each.value.zone

  tags = ["worker", each.key]
  tags = ["worker"]

  boot_disk {
    initialize_params {
@@ -398,24 +398,3 @@ resource "google_compute_target_pool" "worker_lb" {
  name      = "${var.prefix}-worker-lb-pool"
  instances = local.worker_target_list
}

resource "google_compute_firewall" "extra_ingress_firewall" {
  for_each = {
    for name, firewall in var.extra_ingress_firewalls :
    name => firewall
  }

  name    = "${var.prefix}-${each.key}-ingress"
  network = google_compute_network.main.name

  priority = 100

  source_ranges = each.value.source_ranges

  target_tags = each.value.target_tags

  allow {
    protocol = each.value.protocol
    ports    = each.value.ports
  }
}
@@ -73,14 +73,3 @@ variable "ingress_whitelist" {
variable "private_network_cidr" {
  default = "10.0.10.0/24"
}

variable "extra_ingress_firewalls" {
  type = map(object({
    source_ranges = set(string)
    protocol      = string
    ports         = list(string)
    target_tags   = set(string)
  }))

  default = {}
}

@@ -95,14 +95,3 @@ variable "ingress_whitelist" {
  type    = list(string)
  default = ["0.0.0.0/0"]
}

variable "extra_ingress_firewalls" {
  type = map(object({
    source_ranges = set(string)
    protocol      = string
    ports         = list(string)
    target_tags   = set(string)
  }))

  default = {}
}
@@ -56,24 +56,11 @@ cd inventory/$CLUSTER

Edit `default.tfvars` to match your requirements.

To use Flatcar Container Linux instead of the basic Hetzner images:

```bash
cd ../../contrib/terraform/hetzner
```

Edit `main.tf`, reactivate the module `source = "./modules/kubernetes-cluster-flatcar"` and
comment out the `#source = "./modules/kubernetes-cluster"`.

Also activate `ssh_private_key_path = var.ssh_private_key_path`. The VM boots into
rescue mode with the image selected in `var.machines` but installs Flatcar instead.

Run Terraform to create the infrastructure.

```bash
cd ./kubespray
terraform -chdir=./contrib/terraform/hetzner/ init
terraform -chdir=./contrib/terraform/hetzner/ apply --var-file=../../../inventory/$CLUSTER/default.tfvars
terraform init ../../contrib/terraform/hetzner
terraform apply --var-file default.tfvars ../../contrib/terraform/hetzner/
```

You should now have an inventory file named `inventory.ini` that you can use with kubespray.
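With the inventory generated, the cluster can then be deployed the same way as in the other Terraform guides in this repository (this mirrors the invocation shown in the GCS guide above):

```shell
ansible-playbook -i inventory.ini cluster.yml -b -v
```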
@@ -9,23 +9,21 @@ ssh_public_keys = [
  "ssh-rsa I-did-not-read-the-docs 2",
]

ssh_private_key_path = "~/.ssh/id_rsa"

machines = {
  "master-0" : {
    "node_type" : "master",
    "size" : "cx21",
    "image" : "ubuntu-22.04",
    "image" : "ubuntu-20.04",
  },
  "worker-0" : {
    "node_type" : "worker",
    "size" : "cx21",
    "image" : "ubuntu-22.04",
    "image" : "ubuntu-20.04",
  },
  "worker-1" : {
    "node_type" : "worker",
    "size" : "cx21",
    "image" : "ubuntu-22.04",
    "image" : "ubuntu-20.04",
  }
}
@@ -2,7 +2,6 @@ provider "hcloud" {}

module "kubernetes" {
  source = "./modules/kubernetes-cluster"
  # source = "./modules/kubernetes-cluster-flatcar"

  prefix = var.prefix

@@ -10,9 +9,6 @@ module "kubernetes" {

  machines = var.machines

  #only for flatcar
  #ssh_private_key_path = var.ssh_private_key_path

  ssh_public_keys = var.ssh_public_keys
  network_zone    = var.network_zone

@@ -26,10 +22,10 @@ module "kubernetes" {
# Generate ansible inventory
#

locals {
  inventory = templatefile(
    "${path.module}/templates/inventory.tpl",
    {
data "template_file" "inventory" {
  template = file("${path.module}/templates/inventory.tpl")

  vars = {
      connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s etcd_member_name=etcd%d",
        keys(module.kubernetes.master_ip_addresses),
        values(module.kubernetes.master_ip_addresses).*.public_ip,
@@ -43,15 +39,14 @@ locals {
      list_worker = join("\n", keys(module.kubernetes.worker_ip_addresses))
      network_id  = module.kubernetes.network_id
    }
  )
}

resource "null_resource" "inventories" {
  provisioner "local-exec" {
    command = "echo '${local.inventory}' > ${var.inventory_file}"
    command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}"
  }

  triggers = {
    template = local.inventory
    template = data.template_file.inventory.rendered
  }
}
@@ -1,144 +0,0 @@
resource "hcloud_network" "kubernetes" {
  name     = "${var.prefix}-network"
  ip_range = var.private_network_cidr
}

resource "hcloud_network_subnet" "kubernetes" {
  type         = "cloud"
  network_id   = hcloud_network.kubernetes.id
  network_zone = var.network_zone
  ip_range     = var.private_subnet_cidr
}

resource "hcloud_ssh_key" "first" {
  name       = var.prefix
  public_key = var.ssh_public_keys.0
}

resource "hcloud_server" "machine" {
  for_each = {
    for name, machine in var.machines :
    name => machine
  }

  name     = "${var.prefix}-${each.key}"
  ssh_keys = [hcloud_ssh_key.first.id]
  # boot into rescue OS
  rescue = "linux64"
  # dummy value for the OS because Flatcar is not available
  image       = each.value.image
  server_type = each.value.size
  location    = var.zone
  connection {
    host        = self.ipv4_address
    timeout     = "5m"
    private_key = file(var.ssh_private_key_path)
  }
  firewall_ids = each.value.node_type == "master" ? [hcloud_firewall.master.id] : [hcloud_firewall.worker.id]
  provisioner "file" {
    content     = data.ct_config.machine-ignitions[each.key].rendered
    destination = "/root/ignition.json"
  }

  provisioner "remote-exec" {
    inline = [
      "set -ex",
      "apt update",
      "apt install -y gawk",
      "curl -fsSLO --retry-delay 1 --retry 60 --retry-connrefused --retry-max-time 60 --connect-timeout 20 https://raw.githubusercontent.com/flatcar/init/flatcar-master/bin/flatcar-install",
      "chmod +x flatcar-install",
      "./flatcar-install -s -i /root/ignition.json -C stable",
      "shutdown -r +1",
    ]
  }

  # optional:
  provisioner "remote-exec" {
    connection {
      host        = self.ipv4_address
      private_key = file(var.ssh_private_key_path)
      timeout     = "3m"
      user        = var.user_flatcar
    }

    inline = [
      "sudo hostnamectl set-hostname ${self.name}",
    ]
  }
}
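The deleted Flatcar module above relies on a neat trick: boot the server into Hetzner's rescue Linux, push the rendered Ignition config, and reinstall the disk with Flatcar. The manual equivalent of those provisioner steps would be roughly the following, run inside the rescue system (retry flags omitted for brevity):

```bash
# Approximate manual replay of the remote-exec provisioner above.
curl -fsSLO https://raw.githubusercontent.com/flatcar/init/flatcar-master/bin/flatcar-install
chmod +x flatcar-install
./flatcar-install -s -i /root/ignition.json -C stable  # -s: smallest disk, -C: release channel
shutdown -r +1                                         # reboot into the freshly installed Flatcar
```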
resource "hcloud_server_network" "machine" {
|
||||
for_each = {
|
||||
for name, machine in var.machines :
|
||||
name => hcloud_server.machine[name]
|
||||
}
|
||||
server_id = each.value.id
|
||||
subnet_id = hcloud_network_subnet.kubernetes.id
|
||||
}
|
||||
|
||||
data "ct_config" "machine-ignitions" {
|
||||
for_each = {
|
||||
for name, machine in var.machines :
|
||||
name => machine
|
||||
}
|
||||
|
||||
strict = false
|
||||
content = templatefile(
|
||||
"${path.module}/templates/machine.yaml.tmpl",
|
||||
{
|
||||
ssh_keys = jsonencode(var.ssh_public_keys)
|
||||
user_flatcar = var.user_flatcar
|
||||
name = each.key
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
resource "hcloud_firewall" "master" {
|
||||
name = "${var.prefix}-master-firewall"
|
||||
|
||||
rule {
|
||||
direction = "in"
|
||||
protocol = "tcp"
|
||||
port = "22"
|
||||
source_ips = var.ssh_whitelist
|
||||
}
|
||||
|
||||
rule {
|
||||
direction = "in"
|
||||
protocol = "tcp"
|
||||
port = "6443"
|
||||
source_ips = var.api_server_whitelist
|
||||
}
|
||||
}
|
||||
|
||||
resource "hcloud_firewall" "worker" {
|
||||
name = "${var.prefix}-worker-firewall"
|
||||
|
||||
rule {
|
||||
direction = "in"
|
||||
protocol = "tcp"
|
||||
port = "22"
|
||||
source_ips = var.ssh_whitelist
|
||||
}
|
||||
|
||||
rule {
|
||||
direction = "in"
|
||||
protocol = "tcp"
|
||||
port = "80"
|
||||
source_ips = var.ingress_whitelist
|
||||
}
|
||||
|
||||
rule {
|
||||
direction = "in"
|
||||
protocol = "tcp"
|
||||
port = "443"
|
||||
source_ips = var.ingress_whitelist
|
||||
}
|
||||
|
||||
rule {
|
||||
direction = "in"
|
||||
protocol = "tcp"
|
||||
port = "30000-32767"
|
||||
source_ips = var.nodeport_whitelist
|
||||
}
|
||||
}
|
||||
@@ -1,29 +0,0 @@
output "master_ip_addresses" {
  value = {
    for name, machine in var.machines :
    name => {
      "private_ip" = hcloud_server_network.machine[name].ip
      "public_ip"  = hcloud_server.machine[name].ipv4_address
    }
    if machine.node_type == "master"
  }
}

output "worker_ip_addresses" {
  value = {
    for name, machine in var.machines :
    name => {
      "private_ip" = hcloud_server_network.machine[name].ip
      "public_ip"  = hcloud_server.machine[name].ipv4_address
    }
    if machine.node_type == "worker"
  }
}

output "cluster_private_network_cidr" {
  value = var.private_subnet_cidr
}

output "network_id" {
  value = hcloud_network.kubernetes.id
}
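After an apply, these outputs are the quickest way to sanity-check the addressing, for example:

```bash
# Inspect the module outputs once `terraform apply` has finished.
terraform output master_ip_addresses
terraform output -json worker_ip_addresses | jq .  # assumes jq is installed
```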
@@ -1,19 +0,0 @@
variant: flatcar
version: 1.0.0

passwd:
  users:
    - name: ${user_flatcar}
      ssh_authorized_keys: ${ssh_keys}

storage:
  files:
    - path: /home/core/works
      filesystem: root
      mode: 0755
      contents:
        inline: |
          #!/bin/bash
          set -euo pipefail
          hostname="$(hostname)"
          echo My name is ${name} and the hostname is $${hostname}
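The escaping in the last line is easy to misread: `${name}` is interpolated by Terraform's `templatefile()`, while `$${hostname}` is Terraform's escape for a literal `${hostname}`, left for the shell to expand at runtime. For `name = "worker-0"` the rendered script body would read:

```bash
#!/bin/bash
set -euo pipefail
hostname="$(hostname)"
echo My name is worker-0 and the hostname is ${hostname}  # the shell expands this one
```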
@@ -1,60 +0,0 @@

variable "zone" {
  type    = string
  default = "fsn1"
}

variable "prefix" {
  default = "k8s"
}

variable "user_flatcar" {
  type    = string
  default = "core"
}

variable "machines" {
  type = map(object({
    node_type = string
    size      = string
    image     = string
  }))
}

variable "ssh_public_keys" {
  type = list(string)
}

variable "ssh_private_key_path" {
  type    = string
  default = "~/.ssh/id_rsa"
}

variable "ssh_whitelist" {
  type = list(string)
}

variable "api_server_whitelist" {
  type = list(string)
}

variable "nodeport_whitelist" {
  type = list(string)
}

variable "ingress_whitelist" {
  type = list(string)
}

variable "private_network_cidr" {
  default = "10.0.0.0/16"
}

variable "private_subnet_cidr" {
  default = "10.0.10.0/24"
}

variable "network_zone" {
  default = "eu-central"
}
@@ -1,14 +0,0 @@
terraform {
  required_providers {
    hcloud = {
      source = "hetznercloud/hcloud"
    }
    ct = {
      source  = "poseidon/ct"
      version = "0.11.0"
    }
    null = {
      source = "hashicorp/null"
    }
  }
}
@@ -14,3 +14,4 @@ ssh_authorized_keys:
%{ for ssh_public_key in ssh_public_keys ~}
  - ${ssh_public_key}
%{ endfor ~}
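The `%{ for }` / `%{ endfor }` directives expand once per list element, and the `~` strip markers trim the directive lines' own whitespace. Given two keys, the rendered cloud-init fragment would look like this (key values are hypothetical):

```yaml
ssh_authorized_keys:
  - ssh-rsa AAAA...key-one  # hypothetical
  - ssh-rsa AAAA...key-two  # hypothetical
```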
@@ -2,7 +2,7 @@ terraform {
  required_providers {
    hcloud = {
      source  = "hetznercloud/hcloud"
      version = "1.38.2"
      version = "1.31.1"
    }
  }
  required_version = ">= 0.14"
@@ -1,46 +0,0 @@
prefix = "default"
zone   = "hel1"
network_zone   = "eu-central"
inventory_file = "inventory.ini"

ssh_public_keys = [
  # Put your public SSH key here
  "ssh-rsa I-did-not-read-the-docs",
  "ssh-rsa I-did-not-read-the-docs 2",
]

ssh_private_key_path = "~/.ssh/id_rsa"

machines = {
  "master-0" : {
    "node_type" : "master",
    "size" : "cx21",
    "image" : "ubuntu-22.04",
  },
  "worker-0" : {
    "node_type" : "worker",
    "size" : "cx21",
    "image" : "ubuntu-22.04",
  },
  "worker-1" : {
    "node_type" : "worker",
    "size" : "cx21",
    "image" : "ubuntu-22.04",
  }
}

nodeport_whitelist = [
  "0.0.0.0/0"
]

ingress_whitelist = [
  "0.0.0.0/0"
]

ssh_whitelist = [
  "0.0.0.0/0"
]

api_server_whitelist = [
  "0.0.0.0/0"
]
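Worth flagging: every whitelist in this sample is `0.0.0.0/0`, i.e. fully open. For anything longer-lived than a test cluster you would normally pin at least SSH and the API server to known ranges, for example (addresses below are hypothetical placeholders from the TEST-NET-3 documentation range):

```hcl
ssh_whitelist = [
  "203.0.113.7/32",  # hypothetical admin workstation
]

api_server_whitelist = [
  "203.0.113.0/24",  # hypothetical office network
]
```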
@@ -1 +0,0 @@
../../../../inventory/sample/group_vars
@@ -2,18 +2,18 @@
${connection_strings_master}
${connection_strings_worker}

[kube_control_plane]
[kube-master]
${list_master}

[etcd]
${list_master}

[kube_node]
[kube-node]
${list_worker}

[k8s_cluster:children]
[k8s-cluster:children]
kube-master
kube-node

[k8s_cluster:vars]
[k8s-cluster:vars]
network_id=${network_id}
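Filled in, this template yields a standard Kubespray inventory. Using the underscore-style group names from one side of the diff and hypothetical addresses (192.0.2.x is a documentation range; 10.0.10.x matches `private_subnet_cidr`), it would render to something like:

```ini
k8s-master-0 ansible_user=ubuntu ansible_host=192.0.2.10 ip=10.0.10.2 etcd_member_name=etcd1
k8s-worker-0 ansible_user=ubuntu ansible_host=192.0.2.11 ip=10.0.10.3

[kube_control_plane]
k8s-master-0

[etcd]
k8s-master-0

[kube_node]
k8s-worker-0
```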
@@ -25,12 +25,6 @@ variable "ssh_public_keys" {
  type = list(string)
}

variable "ssh_private_key_path" {
  description = "Private SSH key used to connect to the VMs."
  type        = string
  default     = "~/.ssh/id_rsa"
}

variable "ssh_whitelist" {
  description = "List of IP ranges (CIDR) to whitelist for ssh"
  type        = list(string)
@@ -2,11 +2,14 @@ terraform {
  required_providers {
    hcloud = {
      source  = "hetznercloud/hcloud"
      version = "1.38.2"
      version = "1.31.1"
    }
    null = {
      source = "hashicorp/null"
    }
    template = {
      source = "hashicorp/template"
    }
  }
  required_version = ">= 0.14"
}
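This hunk pairs the version pin change with the `hashicorp/template` provider declaration. One caveat worth knowing (context, not part of the diff): that provider is archived upstream and ships no `darwin_arm64` build, so `terraform init` fails on Apple Silicon hosts; the `templatefile()` form seen earlier avoids the dependency entirely. Checking which providers a configuration actually requires is one command:

```bash
# List the providers this configuration depends on, with their version constraints.
terraform providers
```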