Compare commits

..

1 Commit

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| ant31 | 2214071a82 | CI: reduce VM resources requests to improve scheduling | 2024-05-31 11:36:15 +02:00 |
414 changed files with 2104 additions and 37402 deletions

View File

@@ -4,6 +4,4 @@ updates:
directory: "/"
schedule:
interval: "weekly"
labels:
- dependencies
- release-note-none
labels: [ "dependencies" ]
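
Both sides of this hunk are equivalent YAML; the flow-style form is just more compact. For context, a minimal sketch of the whole file this hunk belongs to — `version` and `package-ecosystem` are not visible in the hunk and are assumed here:

```yaml
# Hypothetical .github/dependabot.yml; only directory/schedule/labels
# are taken from the hunk above.
version: 2
updates:
  - package-ecosystem: "github-actions"   # assumed ecosystem
    directory: "/"
    schedule:
      interval: "weekly"
    labels: [ "dependencies" ]
```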

View File

@@ -1,9 +1,12 @@
---
stages:
- build
- test
- unit-tests
- deploy-part1
- deploy-extended
- moderator
- deploy-part2
- deploy-part3
- deploy-special
variables:
KUBESPRAY_VERSION: v2.25.0
@@ -40,26 +43,15 @@ before_script:
.job: &job
tags:
- ffci
- packet
image: $PIPELINE_IMAGE
artifacts:
when: always
paths:
- cluster-dump/
needs:
- pipeline-image
.job-moderated:
extends: .job
needs:
- pipeline-image
- ci-not-authorized
- check-galaxy-version # lint
- pre-commit # lint
- vagrant-validate # lint
.testcases: &testcases
extends: .job-moderated
<<: *job
retry: 1
interruptible: true
before_script:
@@ -69,34 +61,18 @@ before_script:
script:
- ./tests/scripts/testcases_run.sh
after_script:
- ./tests/scripts/testcases_cleanup.sh
- chronic ./tests/scripts/testcases_cleanup.sh
# For failfast, at least 1 job must be defined in .gitlab-ci.yml
# Premoderated with manual actions
ci-not-authorized:
stage: build
before_script: []
after_script: []
rules:
# LGTM or ok-to-test labels
- if: $PR_LABELS =~ /.*,(lgtm|approved|ok-to-test).*|^(lgtm|approved|ok-to-test).*/i
variables:
CI_OK_TO_TEST: '0'
when: always
- if: $CI_PIPELINE_SOURCE == "schedule" || $CI_PIPELINE_SOURCE == "trigger"
variables:
CI_OK_TO_TEST: '0'
- if: $CI_COMMIT_BRANCH == "master"
variables:
CI_OK_TO_TEST: '0'
- when: always
variables:
CI_OK_TO_TEST: '1'
ci-authorized:
extends: .job
stage: moderator
script:
- exit $CI_OK_TO_TEST
tags:
- ffci
needs: []
- /bin/sh scripts/premoderator.sh
except: ['triggers', 'master']
# Disable ci moderator
only: []
include:
- .gitlab-ci/build.yml
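
Reassembling the interleaved added/removed lines above, the new moderation gate appears to be a single build-stage job: GitLab evaluates `rules` top-down and the first match wins, each rule sets `CI_OK_TO_TEST`, and the script turns that value into the job's exit code. `.job-moderated` lists the gate under `needs`, so unmoderated pipelines stop before any deploy job runs. A condensed sketch assembled from this hunk (not verbatim):

```yaml
ci-not-authorized:
  stage: build
  rules:
    # First match wins: labeled PRs, scheduled/triggered pipelines,
    # and master are authorized (CI_OK_TO_TEST=0)...
    - if: $PR_LABELS =~ /.*,(lgtm|approved|ok-to-test).*|^(lgtm|approved|ok-to-test).*/i
      variables: { CI_OK_TO_TEST: '0' }
      when: always
    - if: $CI_PIPELINE_SOURCE == "schedule" || $CI_PIPELINE_SOURCE == "trigger"
      variables: { CI_OK_TO_TEST: '0' }
    - if: $CI_COMMIT_BRANCH == "master"
      variables: { CI_OK_TO_TEST: '0' }
    # ...everything else must be moderated manually (CI_OK_TO_TEST=1)
    - when: always
      variables: { CI_OK_TO_TEST: '1' }
  script:
    - exit $CI_OK_TO_TEST   # non-zero exit fails the gate

.job-moderated:
  extends: .job
  needs:
    - pipeline-image
    - ci-not-authorized     # moderated jobs wait on the gate
```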

View File

@@ -1,32 +1,40 @@
---
.build-container:
cache:
key: $CI_COMMIT_REF_SLUG
paths:
- image-cache
tags:
- ffci
.build:
stage: build
image:
name: gcr.io/kaniko-project/executor:debug
entrypoint: ['']
name: moby/buildkit:rootless
entrypoint: [""]
variables:
TAG: $CI_COMMIT_SHORT_SHA
PROJECT_DIR: $CI_PROJECT_DIR
DOCKERFILE: Dockerfile
GODEBUG: "http2client=0"
BUILDKITD_FLAGS: --oci-worker-no-process-sandbox
before_script:
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"auth\":\"$(echo -n ${CI_REGISTRY_USER}:${CI_REGISTRY_PASSWORD} | base64)\"}}}" > /kaniko/.docker/config.json
script:
- /kaniko/executor --cache=true
--cache-dir=image-cache
--context $PROJECT_DIR
--dockerfile $PROJECT_DIR/$DOCKERFILE
--label 'git-branch'=$CI_COMMIT_REF_SLUG
--label 'git-tag=$CI_COMMIT_TAG'
--destination $PIPELINE_IMAGE
- mkdir ~/.docker
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > ~/.docker/config.json
pipeline-image:
extends: .build-container
variables:
DOCKERFILE: pipeline.Dockerfile
pipeline image:
extends: .build
script:
- |
buildctl-daemonless.sh build \
--frontend=dockerfile.v0 \
--local context=. \
--local dockerfile=. \
--opt filename=./pipeline.Dockerfile \
--output type=image,name=$PIPELINE_IMAGE,push=true \
--import-cache type=registry,ref=$CI_REGISTRY_IMAGE/pipeline:cache
rules:
- if: '$CI_COMMIT_REF_NAME != $CI_DEFAULT_BRANCH'
pipeline image and build cache:
extends: .build
script:
- |
buildctl-daemonless.sh build \
--frontend=dockerfile.v0 \
--local context=. \
--local dockerfile=. \
--opt filename=./pipeline.Dockerfile \
--output type=image,name=$PIPELINE_IMAGE,push=true \
--import-cache type=registry,ref=$CI_REGISTRY_IMAGE/pipeline:cache \
--export-cache type=registry,ref=$CI_REGISTRY_IMAGE/pipeline:cache,mode=max
rules:
- if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH'
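
A note on the cache design visible above: both jobs pass `--import-cache`, but only the default branch also passes `--export-cache` with `mode=max` (which caches intermediate build stages, not just the final image's layers), so PR pipelines read the shared registry cache without being able to overwrite it. Condensed, the only differences between the two jobs are:

```yaml
pipeline image:                     # non-default branches: read-only cache
  rules:
    - if: '$CI_COMMIT_REF_NAME != $CI_DEFAULT_BRANCH'

pipeline image and build cache:     # default branch: also writes the cache
  # extra flag: --export-cache type=registry,ref=$CI_REGISTRY_IMAGE/pipeline:cache,mode=max
  rules:
    - if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH'
```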

View File

@@ -1,35 +1,40 @@
---
pre-commit:
stage: test
tags:
- ffci
image: 'ghcr.io/pre-commit-ci/runner-image@sha256:aaf2c7b38b22286f2d381c11673bec571c28f61dd086d11b43a1c9444a813cef'
variables:
PRE_COMMIT_HOME: /pre-commit-cache
generate-pre-commit:
image: 'mikefarah/yq@sha256:bcb889a1f9bdb0613c8a054542d02360c2b1b35521041be3e1bd8fbd0534d411'
stage: build
before_script: []
script:
- pre-commit run --all-files
cache:
key: pre-commit-all
- >
yq -r < .pre-commit-config.yaml '.repos[].hooks[].id' |
sed 's/^/ - /' |
cat .gitlab-ci/pre-commit-dynamic-stub.yml - > pre-commit-generated.yml
artifacts:
paths:
- /pre-commit-cache
needs: []
- pre-commit-generated.yml
run-pre-commit:
stage: unit-tests
trigger:
include:
- artifact: pre-commit-generated.yml
job: generate-pre-commit
strategy: depend
vagrant-validate:
extends: .job
stage: test
tags: [ffci]
stage: unit-tests
tags: [light]
variables:
VAGRANT_VERSION: 2.3.7
script:
- ./tests/scripts/vagrant-validate.sh
- ./tests/scripts/vagrant-validate.sh
except: ['triggers', 'master']
# TODO: convert to pre-commit hook
check-galaxy-version:
needs: []
stage: test
tags: [ffci]
stage: unit-tests
tags: [light]
image: python:3
script:
- tests/scripts/check_galaxy_version.sh
- tests/scripts/check_galaxy_version.sh
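
The `generate-pre-commit`/`run-pre-commit` pair above is a dynamic child pipeline: `yq` lists every hook id in `.pre-commit-config.yaml`, prefixes each with `- `, concatenates the result onto `.gitlab-ci/pre-commit-dynamic-stub.yml` (whose diff appears further down), and the artifact is launched with `strategy: depend` so the parent pipeline reflects the child's status. A plausible shape for the generated file, assuming the stub ends in a `parallel:matrix` list that the appended hook ids complete:

```yaml
# Hypothetical pre-commit-generated.yml -- the stub's tail is not shown in
# this compare, so the parallel:matrix framing is an assumption.
pre-commit:
  variables:
    PRE_COMMIT_HOME: /pre-commit-cache
  script:
    - pre-commit run -a $HOOK_ID
  cache:
    key: pre-commit-$HOOK_ID   # one cache per hook, as in the stub diff below
  parallel:
    matrix:
      - HOOK_ID:               # one entry per hook id emitted by yq
          - ansible-lint
          - yamllint
```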

View File

@@ -1,42 +1,30 @@
---
.molecule:
tags: [ffci-vm-med]
tags: [c3.small.x86]
only: [/^pr-.*$/]
except: ['triggers']
image: quay.io/kubespray/vm-kubespray-ci:v6
image: $PIPELINE_IMAGE
services: []
stage: deploy-part1
needs: []
# - ci-not-authorized
variables:
VAGRANT_DEFAULT_PROVIDER: "libvirt"
before_script:
- groups
- python3 -m venv citest
- source citest/bin/activate
- vagrant plugin expunge --reinstall --force --no-tty
- vagrant plugin install vagrant-libvirt
- pip install --no-compile --no-cache-dir pip -U
- pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/requirements.txt
- pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/tests/requirements.txt
- ./tests/scripts/rebase.sh
- ./tests/scripts/vagrant_clean.sh
- tests/scripts/rebase.sh
- ./tests/scripts/vagrant_clean.sh
script:
- ./tests/scripts/molecule_run.sh
- ./tests/scripts/molecule_run.sh
after_script:
- ./tests/scripts/molecule_logs.sh
- chronic ./tests/scripts/molecule_logs.sh
artifacts:
when: always
paths:
- molecule_logs/
- molecule_logs/
# CI template for periodic CI jobs
# Enabled when PERIODIC_CI_ENABLED var is set
.molecule_periodic:
only:
variables:
- $PERIODIC_CI_ENABLED
- $PERIODIC_CI_ENABLED
allow_failure: true
extends: .molecule
@@ -46,50 +34,50 @@ molecule_full:
molecule_no_container_engines:
extends: .molecule
script:
- ./tests/scripts/molecule_run.sh -e container-engine
- ./tests/scripts/molecule_run.sh -e container-engine
when: on_success
molecule_docker:
extends: .molecule
script:
- ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
- ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
when: on_success
molecule_containerd:
extends: .molecule
script:
- ./tests/scripts/molecule_run.sh -i container-engine/containerd
- ./tests/scripts/molecule_run.sh -i container-engine/containerd
when: on_success
molecule_cri-o:
extends: .molecule
stage: deploy-part1
stage: deploy-part2
script:
- ./tests/scripts/molecule_run.sh -i container-engine/cri-o
- ./tests/scripts/molecule_run.sh -i container-engine/cri-o
allow_failure: true
when: on_success
# # Stage 3 container engines don't get as much attention so allow them to fail
# molecule_kata:
# extends: .molecule
# stage: deploy-extended
# script:
# - ./tests/scripts/molecule_run.sh -i container-engine/kata-containers
# when: manual
# # FIXME: this test is broken (perma-failing)
# Stage 3 container engines don't get as much attention so allow them to fail
molecule_kata:
extends: .molecule
stage: deploy-part3
script:
- ./tests/scripts/molecule_run.sh -i container-engine/kata-containers
when: manual
# FIXME: this test is broken (perma-failing)
molecule_gvisor:
extends: .molecule
stage: deploy-extended
stage: deploy-part3
script:
- ./tests/scripts/molecule_run.sh -i container-engine/gvisor
- ./tests/scripts/molecule_run.sh -i container-engine/gvisor
when: manual
# FIXME: this test is broken (perma-failing)
molecule_youki:
extends: .molecule
stage: deploy-extended
stage: deploy-part3
script:
- ./tests/scripts/molecule_run.sh -i container-engine/youki
- ./tests/scripts/molecule_run.sh -i container-engine/youki
when: manual
# FIXME: this test is broken (perma-failing)

View File

@@ -6,56 +6,14 @@
CI_PLATFORM: packet
SSH_USER: kubespray
tags:
- ffci
needs:
- pipeline-image
- ci-not-authorized
- packet
except: [triggers]
# CI template for PRs
.packet_pr:
stage: deploy-part1
rules:
- if: $PR_LABELS =~ /.*ci-short.*/
when: manual
allow_failure: true
- if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
when: on_success
- when: manual
allow_failure: true
only: [/^pr-.*$/]
extends: .packet
## Uncomment this to have multiple stages
# needs:
# - packet_ubuntu20-calico-all-in-one
.packet_pr_short:
stage: deploy-part1
extends: .packet
rules:
- if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
when: on_success
- when: manual
allow_failure: true
.packet_pr_manual:
extends: .packet_pr
stage: deploy-extended
rules:
- if: $PR_LABELS =~ /.*ci-full.*/
when: on_success
# Else run as manual
- when: manual
allow_failure: true
.packet_pr_extended:
extends: .packet_pr
stage: deploy-extended
rules:
- if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
when: on_success
- when: manual
allow_failure: true
# CI template for periodic CI jobs
# Enabled when PERIODIC_CI_ENABLED var is set
.packet_periodic:
@@ -76,182 +34,314 @@ packet_cleanup_old:
# The ubuntu20-calico-all-in-one jobs are meant as early stages to prevent running the full CI if something is horribly broken
packet_ubuntu20-calico-all-in-one:
stage: deploy-part1
extends: .packet_pr_short
extends: .packet_pr
when: on_success
variables:
RESET_CHECK: "true"
# ### PR JOBS PART2
packet_ubuntu20-crio:
extends: .packet_pr_manual
packet_ubuntu20-all-in-one-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu20-calico-all-in-one-hardening:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu22-all-in-one-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu22-calico-all-in-one:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu22-calico-all-in-one-upgrade:
packet_ubuntu24-all-in-one-docker:
stage: deploy-part2
extends: .packet_pr
variables:
UPGRADE_TEST: graceful
when: on_success
packet_ubuntu24-calico-all-in-one:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu24-calico-etcd-datastore:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_centos7-flannel-addons-ha:
extends: .packet_pr
stage: deploy-part2
when: on_success
packet_almalinux8-crio:
extends: .packet_pr
stage: deploy-part2
when: on_success
allow_failure: true
packet_almalinux8-kube-ovn:
packet_ubuntu20-crio:
extends: .packet_pr
stage: deploy-part2
when: manual
packet_fedora37-crio:
extends: .packet_pr
stage: deploy-part2
when: manual
packet_ubuntu20-flannel-ha:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_debian10-cilium-svc-proxy:
stage: deploy-part2
extends: .packet_periodic
when: on_success
packet_debian10-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_debian10-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_debian11-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_debian11-macvlan:
packet_debian11-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_debian12-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_debian12-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_debian12-cilium:
stage: deploy-part2
extends: .packet_periodic
when: on_success
packet_centos7-calico-ha-once-localhost:
stage: deploy-part2
extends: .packet_pr
when: on_success
variables:
# This will instruct Docker not to start over TLS.
DOCKER_TLS_CERTDIR: ""
services:
- docker:19.03.9-dind
packet_almalinux8-kube-ovn:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_almalinux8-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_rockylinux8-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_rockylinux9-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_rockylinux9-cilium:
stage: deploy-part2
extends: .packet_pr
when: on_success
variables:
RESET_CHECK: "true"
# Need an update of the container image to use schema v2
# update: quay.io/kubespray/vm-amazon-linux-2:latest
packet_almalinux8-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_amazon-linux-2-all-in-one:
extends: .packet_pr_manual
rules:
- when: manual
allow_failure: true
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_fedora38-docker-weave:
stage: deploy-part2
extends: .packet_pr
when: on_success
allow_failure: true
packet_opensuse-docker-cilium:
stage: deploy-part2
extends: .packet_pr
packet_ubuntu20-cilium-sep:
extends: .packet_pr
## Extended
packet_debian11-docker:
extends: .packet_pr_extended
packet_debian12-docker:
extends: .packet_pr_extended
packet_debian12-calico:
extends: .packet_pr_extended
packet_almalinux8-calico-remove-node:
extends: .packet_pr_extended
variables:
REMOVE_NODE_CHECK: "true"
REMOVE_NODE_NAME: "instance-3"
packet_rockylinux9-calico:
extends: .packet_pr_extended
packet_almalinux8-calico:
extends: .packet_pr_extended
packet_almalinux8-docker:
extends: .packet_pr_extended
packet_ubuntu20-calico-all-in-one-hardening:
extends: .packet_pr_extended
packet_ubuntu24-calico-all-in-one:
extends: .packet_pr_extended
packet_ubuntu20-calico-etcd-kubeadm:
extends: .packet_pr_extended
packet_ubuntu24-all-in-one-docker:
extends: .packet_pr_extended
packet_ubuntu22-all-in-one-docker:
extends: .packet_pr_extended
when: on_success
# ### MANUAL JOBS
packet_fedora37-crio:
extends: .packet_pr_manual
packet_ubuntu20-flannel-ha:
extends: .packet_pr_manual
packet_ubuntu20-docker-weave-sep:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_ubuntu20-all-in-one-docker:
extends: .packet_pr_manual
packet_ubuntu20-cilium-sep:
stage: deploy-special
extends: .packet_pr
when: manual
packet_ubuntu20-flannel-ha-once:
extends: .packet_pr_manual
packet_fedora37-calico-swap-selinux:
extends: .packet_pr_manual
stage: deploy-part2
extends: .packet_pr
when: manual
# Calico HA eBPF
packet_almalinux8-calico-ha-ebpf:
extends: .packet_pr_manual
stage: deploy-part2
extends: .packet_pr
when: manual
packet_almalinux8-calico-nodelocaldns-secondary:
extends: .packet_pr_manual
packet_debian10-macvlan:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_debian11-custom-cni:
extends: .packet_pr_manual
packet_centos7-calico-ha:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_debian11-kubelet-csr-approver:
extends: .packet_pr_manual
packet_centos7-multus-calico:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_debian12-custom-cni-helm:
extends: .packet_pr_manual
packet_ubuntu20-calico-ha-wireguard:
extends: .packet_pr_manual
# PERIODIC
packet_fedora38-docker-calico:
stage: deploy-extended
stage: deploy-part2
extends: .packet_periodic
when: on_success
variables:
RESET_CHECK: "true"
packet_fedora37-calico-selinux:
stage: deploy-extended
stage: deploy-part2
extends: .packet_periodic
when: on_success
packet_fedora37-calico-swap-selinux:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha:
stage: deploy-extended
packet_almalinux8-calico-nodelocaldns-secondary:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_fedora38-kube-ovn:
stage: deploy-part2
extends: .packet_periodic
when: on_success
packet_debian11-custom-cni:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_debian11-kubelet-csr-approver:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_debian12-custom-cni-helm:
stage: deploy-part2
extends: .packet_pr
when: manual
# ### PR JOBS PART3
# Long jobs (45min+)
packet_centos7-weave-upgrade-ha:
stage: deploy-part3
extends: .packet_periodic
when: on_success
variables:
UPGRADE_TEST: basic
packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha:
stage: deploy-part3
extends: .packet_periodic
when: on_success
variables:
UPGRADE_TEST: basic
# Calico HA Wireguard
packet_ubuntu20-calico-ha-wireguard:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_debian11-calico-upgrade:
stage: deploy-part3
extends: .packet_pr
when: on_success
variables:
UPGRADE_TEST: graceful
packet_almalinux8-calico-remove-node:
stage: deploy-part3
extends: .packet_pr
when: on_success
variables:
REMOVE_NODE_CHECK: "true"
REMOVE_NODE_NAME: "instance-3"
packet_ubuntu20-calico-etcd-kubeadm:
stage: deploy-part3
extends: .packet_pr
when: on_success
packet_debian11-calico-upgrade-once:
stage: deploy-extended
stage: deploy-part3
extends: .packet_periodic
when: on_success
variables:
UPGRADE_TEST: graceful
packet_ubuntu20-calico-ha-recover:
stage: deploy-extended
stage: deploy-part3
extends: .packet_periodic
when: on_success
variables:
RECOVER_CONTROL_PLANE_TEST: "true"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"
packet_ubuntu20-calico-ha-recover-noquorum:
stage: deploy-extended
stage: deploy-part3
extends: .packet_periodic
when: on_success
variables:
RECOVER_CONTROL_PLANE_TEST: "true"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:]:kube_control_plane[1:]"
packet_debian11-calico-upgrade:
stage: deploy-extended
extends: .packet_periodic
variables:
UPGRADE_TEST: graceful
packet_debian12-cilium-svc-proxy:
stage: deploy-extended
extends: .packet_periodic

View File

@@ -7,7 +7,7 @@ pre-commit:
variables:
PRE_COMMIT_HOME: /pre-commit-cache
script:
- pre-commit run --all-files
- pre-commit run -a $HOOK_ID
cache:
key: pre-commit-$HOOK_ID
paths:

View File

@@ -2,10 +2,6 @@
# Tests for contrib/terraform/
.terraform_install:
extends: .job
needs:
- ci-not-authorized
- pipeline-image
stage: deploy-part1
before_script:
- update-alternatives --install /usr/bin/python python /usr/bin/python3 1
- ./tests/scripts/rebase.sh
@@ -28,19 +24,17 @@
.terraform_validate:
extends: .terraform_install
tags: [ffci]
stage: unit-tests
tags: [light]
only: ['master', /^pr-.*$/]
script:
- terraform -chdir="contrib/terraform/$PROVIDER" validate
- terraform -chdir="contrib/terraform/$PROVIDER" fmt -check -diff
stage: test
needs:
- pipeline-image
.terraform_apply:
extends: .terraform_install
tags: [ffci]
stage: deploy-extended
tags: [light]
stage: deploy-part3
when: manual
only: [/^pr-.*$/]
artifacts:
@@ -57,7 +51,7 @@
- tests/scripts/testcases_run.sh
after_script:
# Cleanup regardless of exit code
- ./tests/scripts/testcases_cleanup.sh
- chronic ./tests/scripts/testcases_cleanup.sh
tf-validate-openstack:
extends: .terraform_validate
@@ -152,7 +146,8 @@ tf-validate-nifcloud:
TF_VAR_router_id: "ab95917c-41fb-4881-b507-3a6dfe9403df"
tf-elastx_cleanup:
tags: [ffci]
stage: unit-tests
tags: [light]
image: python
variables:
<<: *elastx_variables
@@ -160,11 +155,10 @@ tf-elastx_cleanup:
- pip install -r scripts/openstack-cleanup/requirements.txt
script:
- ./scripts/openstack-cleanup/main.py
allow_failure: true
tf-elastx_ubuntu20-calico:
extends: .terraform_apply
stage: deploy-part1
stage: deploy-part3
when: on_success
allow_failure: true
variables:

View File

@@ -1,63 +1,64 @@
---
.vagrant:
extends: .testcases
needs:
- ci-not-authorized
variables:
CI_PLATFORM: "vagrant"
SSH_USER: "vagrant"
VAGRANT_DEFAULT_PROVIDER: "libvirt"
KUBESPRAY_VAGRANT_CONFIG: tests/files/${CI_JOB_NAME}.rb
DOCKER_NAME: vagrant
VAGRANT_ANSIBLE_TAGS: facts
tags: [ffci-vm-large]
# only: [/^pr-.*$/]
# except: ['triggers']
image: quay.io/kubespray/vm-kubespray-ci:v6
tags: [c3.small.x86]
only: [/^pr-.*$/]
except: ['triggers']
image: $PIPELINE_IMAGE
services: []
before_script:
- echo $USER
- python3 -m venv citest
- source citest/bin/activate
- vagrant plugin expunge --reinstall --force --no-tty
- vagrant plugin install vagrant-libvirt
- pip install --no-compile --no-cache-dir pip -U
- pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/requirements.txt
- pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/tests/requirements.txt
- ./tests/scripts/vagrant_clean.sh
script:
- ./tests/scripts/testcases_run.sh
after_script:
- chronic ./tests/scripts/testcases_cleanup.sh
vagrant_ubuntu20-calico-dual-stack:
stage: deploy-extended
stage: deploy-part2
extends: .vagrant
when: manual
# FIXME: this test is broken (perma-failing)
vagrant_ubuntu20-weave-medium:
stage: deploy-part2
extends: .vagrant
when: manual
vagrant_ubuntu20-flannel:
stage: deploy-part1
stage: deploy-part2
extends: .vagrant
when: on_success
allow_failure: false
vagrant_ubuntu20-flannel-collection:
stage: deploy-extended
stage: deploy-part2
extends: .vagrant
when: manual
when: on_success
vagrant_ubuntu20-kube-router-sep:
stage: deploy-extended
stage: deploy-part2
extends: .vagrant
when: manual
# Service proxy test fails connectivity testing
vagrant_ubuntu20-kube-router-svc-proxy:
stage: deploy-extended
stage: deploy-part2
extends: .vagrant
when: manual
vagrant_fedora37-kube-router:
stage: deploy-extended
stage: deploy-part2
extends: .vagrant
when: manual
# FIXME: this test is broken (perma-failing)
vagrant_centos7-kube-router:
stage: deploy-part2
extends: .vagrant
when: manual

View File

@@ -39,14 +39,15 @@ repos:
hooks:
- id: ansible-lint
additional_dependencies:
- ansible==9.8.0
- ansible==9.5.1
- jsonschema==4.22.0
- jmespath==1.0.1
- netaddr==1.3.0
- netaddr==1.2.1
- distlib
- repo: https://github.com/golangci/misspell
rev: v0.6.0
- repo: https://github.com/VannTen/misspell
# Waiting on https://github.com/golangci/misspell/pull/19 to get merged
rev: 8592a4e
hooks:
- id: misspell
exclude: "OWNERS_ALIASES$"
@@ -61,6 +62,14 @@ repos:
additional_dependencies:
- ansible==9.5.1
- id: tox-inventory-builder
name: tox-inventory-builder
entry: bash -c "cd contrib/inventory_builder && tox"
language: python
pass_filenames: false
additional_dependencies:
- tox==4.15.0
- id: check-readme-versions
name: check-readme-versions
entry: tests/scripts/check_readme_versions.sh

View File

@@ -6,7 +6,7 @@ ignore: |
.github/
# Generated file
tests/files/custom_cni/cilium.yaml
# https://ansible.readthedocs.io/projects/lint/rules/yaml/
rules:
braces:
min-spaces-inside: 0
@@ -14,15 +14,9 @@ rules:
brackets:
min-spaces-inside: 0
max-spaces-inside: 1
comments:
min-spaces-from-content: 1
# https://github.com/adrienverge/yamllint/issues/384
comments-indentation: false
indentation:
spaces: 2
indent-sequences: consistent
line-length: disable
new-line-at-end-of-file: disable
octal-values:
forbid-implicit-octal: true # yamllint defaults to false
forbid-explicit-octal: true # yamllint defaults to false
truthy: disable

View File

@@ -7,13 +7,11 @@ aliases:
- oomichi
- yankay
- ant31
- vannten
kubespray-reviewers:
- cyclinder
- erikjiang
- mrfreezeex
- mzaian
- tico88612
- vannten
- yankay
kubespray-emeritus_approvers:

View File

@@ -141,13 +141,13 @@ vagrant up
## Supported Linux Distributions
- **Flatcar Container Linux by Kinvolk**
- **Debian** Bookworm, Bullseye
- **Debian** Bookworm, Bullseye, Buster
- **Ubuntu** 20.04, 22.04, 24.04
- **CentOS/RHEL** [8, 9](docs/operating_systems/centos.md#centos-8)
- **CentOS/RHEL** 7, [8, 9](docs/operating_systems/centos.md#centos-8)
- **Fedora** 37, 38
- **Fedora CoreOS** (see [fcos Note](docs/operating_systems/fcos.md))
- **openSUSE** Leap 15.x/Tumbleweed
- **Oracle Linux** [8, 9](docs/operating_systems/centos.md#centos-8)
- **Oracle Linux** 7, [8, 9](docs/operating_systems/centos.md#centos-8)
- **Alma Linux** [8, 9](docs/operating_systems/centos.md#centos-8)
- **Rocky Linux** [8, 9](docs/operating_systems/centos.md#centos-8)
- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/operating_systems/kylinlinux.md))
@@ -160,28 +160,28 @@ Note: Upstart/SysV init based OS types are not supported.
## Supported Components
- Core
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.30.6
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.29.5
- [etcd](https://github.com/etcd-io/etcd) v3.5.12
- [docker](https://www.docker.com/) v26.1
- [containerd](https://containerd.io/) v1.7.23
- [cri-o](http://cri-o.io/) v1.30.3 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). Only on fedora, ubuntu and centos based OS)
- [containerd](https://containerd.io/) v1.7.16
- [cri-o](http://cri-o.io/) v1.29.1 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). Only on fedora, ubuntu and centos based OS)
- Network Plugin
- [cni-plugins](https://github.com/containernetworking/plugins) v1.2.0
- [calico](https://github.com/projectcalico/calico) v3.28.1
- [calico](https://github.com/projectcalico/calico) v3.27.3
- [cilium](https://github.com/cilium/cilium) v1.15.4
- [flannel](https://github.com/flannel-io/flannel) v0.22.0
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.12.21
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.11.5
- [kube-router](https://github.com/cloudnativelabs/kube-router) v2.0.0
- [multus](https://github.com/k8snetworkplumbingwg/multus-cni) v3.8
- [weave](https://github.com/rajch/weave) v2.8.7
- [weave](https://github.com/weaveworks/weave) v2.8.1
- [kube-vip](https://github.com/kube-vip/kube-vip) v0.8.0
- Application
- [cert-manager](https://github.com/jetstack/cert-manager) v1.14.7
- [cert-manager](https://github.com/jetstack/cert-manager) v1.13.2
- [coredns](https://github.com/coredns/coredns) v1.11.1
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.11.5
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.10.1
- [krew](https://github.com/kubernetes-sigs/krew) v0.4.4
- [argocd](https://argoproj.github.io/) v2.11.0
- [helm](https://helm.sh/) v3.15.4
- [helm](https://helm.sh/) v3.14.2
- [metallb](https://metallb.universe.tf/) v0.13.9
- [registry](https://github.com/distribution/distribution) v2.8.1
- Storage Plugin
@@ -189,11 +189,11 @@ Note: Upstart/SysV init based OS types are not supported.
- [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
- [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) v0.5.0
- [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0
- [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.30.0
- [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.29.0
- [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.9.2
- [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.24
- [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0
- [node-feature-discovery](https://github.com/kubernetes-sigs/node-feature-discovery) v0.16.4
- [node-feature-discovery](https://github.com/kubernetes-sigs/node-feature-discovery) v0.14.2
## Container Runtime Notes

View File

@@ -16,7 +16,6 @@ The Kubespray Project is released on an as-needed basis. The process is as follo
1. The release issue is closed
1. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
1. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
1. Create/Update Issue for upgrading kubernetes and [k8s-conformance](https://github.com/cncf/k8s-conformance)
## Major/minor releases and milestones

Vagrantfile (vendored)
View File

@@ -1,7 +1,7 @@
# -*- mode: ruby -*-
# # vi: set ft=ruby :
# For help on using kubespray with vagrant, check out docs/developers/vagrant.md
# For help on using kubespray with vagrant, check out docs/vagrant.md
require 'fileutils'
@@ -22,6 +22,8 @@ SUPPORTED_OS = {
"ubuntu2004" => {box: "generic/ubuntu2004", user: "vagrant"},
"ubuntu2204" => {box: "generic/ubuntu2204", user: "vagrant"},
"ubuntu2404" => {box: "bento/ubuntu-24.04", user: "vagrant"},
"centos" => {box: "centos/7", user: "vagrant"},
"centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
"centos8" => {box: "centos/8", user: "vagrant"},
"centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
"almalinux8" => {box: "almalinux/8", user: "vagrant"},
@@ -34,6 +36,7 @@ SUPPORTED_OS = {
"opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
"oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
"rhel7" => {box: "generic/rhel7", user: "vagrant"},
"rhel8" => {box: "generic/rhel8", user: "vagrant"},
"debian11" => {box: "debian/bullseye64", user: "vagrant"},
"debian12" => {box: "debian/bookworm64", user: "vagrant"},
@@ -275,7 +278,6 @@ Vagrant.configure("2") do |config|
"local_path_provisioner_enabled": "#{$local_path_provisioner_enabled}",
"local_path_provisioner_claim_root": "#{$local_path_provisioner_claim_root}",
"ansible_ssh_user": SUPPORTED_OS[$os][:user],
"ansible_ssh_private_key_file": File.join(Dir.home, ".vagrant.d", "insecure_private_key"),
"unsafe_show_logs": "True"
}

View File

@@ -11,7 +11,6 @@ gathering = smart
fact_caching = jsonfile
fact_caching_connection = /tmp
fact_caching_timeout = 86400
timeout = 300
stdout_callback = default
display_skipped_hosts = no
library = ./library

View File

@@ -1,6 +1,6 @@
---
- name: Generate Azure inventory
hosts: localhost
gather_facts: false
gather_facts: False
roles:
- generate-inventory
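
This `gather_facts` pair is the first of many identical boolean spelling changes throughout this compare. YAML 1.1 accepts `yes`/`no`/`True`/`False` as booleans, but yamllint's truthy rule and ansible-lint prefer the lowercase `true`/`false` spellings, e.g.:

```yaml
# Equivalent booleans in YAML 1.1; lint tooling prefers the lowercase forms
gather_facts: false   # rather than False
become: true          # rather than yes
```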

View File

@@ -1,6 +1,6 @@
---
- name: Generate Azure inventory
hosts: localhost
gather_facts: false
gather_facts: False
roles:
- generate-inventory_2

View File

@@ -1,6 +1,6 @@
---
- name: Generate Azure templates
hosts: localhost
gather_facts: false
gather_facts: False
roles:
- generate-templates

View File

@@ -12,4 +12,4 @@
template:
src: inventory.j2
dest: "{{ playbook_dir }}/inventory"
mode: "0644"
mode: 0644
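
The quoted/unquoted `mode` pair above recurs across dozens of task files in this compare. The distinction is not cosmetic: YAML 1.1 parses an unquoted leading-zero number such as `0644` as an octal integer, while the quoted form reaches Ansible as a literal string, which is why ansible-lint requires quoting file modes:

```yaml
# What the YAML parser hands to Ansible in each case:
mode: 0644     # integer 420 (0644 octal) -- easy to get wrong, e.g. 644 -> 0o1204
mode: "0644"   # string "0644" -- unambiguous octal file mode
```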

View File

@@ -22,10 +22,10 @@
template:
src: inventory.j2
dest: "{{ playbook_dir }}/inventory"
mode: "0644"
mode: 0644
- name: Generate Load Balancer variables
template:
src: loadbalancer_vars.j2
dest: "{{ playbook_dir }}/loadbalancer_vars.yml"
mode: "0644"
mode: 0644

View File

@@ -8,13 +8,13 @@
path: "{{ base_dir }}"
state: directory
recurse: true
mode: "0755"
mode: 0755
- name: Store json files in base_dir
template:
src: "{{ item }}"
dest: "{{ base_dir }}/{{ item }}"
mode: "0644"
mode: 0644
with_items:
- network.json
- storage.json

View File

@@ -1,7 +1,7 @@
---
- name: Create nodes as docker containers
hosts: localhost
gather_facts: false
gather_facts: False
roles:
- { role: dind-host }

View File

@@ -18,7 +18,7 @@ distro_settings:
init: |
/sbin/init
centos: &CENTOS
image: "centos:8"
image: "centos:7"
user: "centos"
pid1_exe: /usr/lib/systemd/systemd
init: |

View File

@@ -15,7 +15,7 @@ docker_storage_options: -s overlay2 --storage-opt overlay2.override_kernel_check
dns_mode: coredns
deploy_netchecker: true
deploy_netchecker: True
netcheck_agent_image_repo: quay.io/l23network/k8s-netchecker-agent
netcheck_server_image_repo: quay.io/l23network/k8s-netchecker-server
netcheck_agent_image_tag: v1.0

View File

@@ -14,7 +14,7 @@
src: "/bin/true"
dest: "{{ item }}"
state: link
force: true
force: yes
with_items:
# DIND box may have swap enable, don't bother
- /sbin/swapoff
@@ -35,7 +35,7 @@
path-exclude=/usr/share/doc/*
path-include=/usr/share/doc/*/copyright
dest: /etc/dpkg/dpkg.cfg.d/01_nodoc
mode: "0644"
mode: 0644
when:
- ansible_os_family == 'Debian'
@@ -58,13 +58,13 @@
name: "{{ distro_user }}"
uid: 1000
# groups: sudo
append: true
append: yes
- name: Allow password-less sudo to "{{ distro_user }}"
copy:
content: "{{ distro_user }} ALL=(ALL) NOPASSWD:ALL"
dest: "/etc/sudoers.d/{{ distro_user }}"
mode: "0640"
mode: 0640
- name: "Add my pubkey to {{ distro_user }} user authorized keys"
ansible.posix.authorized_key:

View File

@@ -19,7 +19,7 @@
state: started
hostname: "{{ item }}"
command: "{{ distro_init }}"
# recreate: true
# recreate: yes
privileged: true
tmpfs:
- /sys/module/nf_conntrack/parameters
@@ -42,7 +42,7 @@
template:
src: inventory_builder.sh.j2
dest: /tmp/kubespray.dind.inventory_builder.sh
mode: "0755"
mode: 0755
tags:
- addresses

View File

@@ -1,8 +1,8 @@
---
- name: Prepare Hypervisor to later install kubespray VMs
hosts: localhost
gather_facts: false
become: true
gather_facts: False
become: yes
vars:
bootstrap_os: none
roles:

View File

@@ -11,12 +11,12 @@
- name: Install required packages
apt:
upgrade: true
update_cache: true
upgrade: yes
update_cache: yes
cache_valid_time: 3600
name: "{{ item }}"
state: present
install_recommends: false
install_recommends: no
with_items:
- dnsutils
- ntp

View File

@@ -20,7 +20,7 @@
br-netfilter
owner: root
group: root
mode: "0644"
mode: 0644
when: br_netfilter is defined
@@ -30,7 +30,7 @@
value: 1
sysctl_file: "{{ sysctl_file_path }}"
state: present
reload: true
reload: yes
- name: Set bridge-nf-call-{arptables,iptables} to 0
ansible.posix.sysctl:
@@ -38,7 +38,7 @@
state: present
value: 0
sysctl_file: "{{ sysctl_file_path }}"
reload: true
reload: yes
with_items:
- net.bridge.bridge-nf-call-arptables
- net.bridge.bridge-nf-call-ip6tables

View File

@@ -11,7 +11,7 @@
state: directory
owner: "{{ k8s_deployment_user }}"
group: "{{ k8s_deployment_user }}"
mode: "0700"
mode: 0700
- name: Configure sudo for deployment user
copy:
@@ -20,13 +20,13 @@
dest: "/etc/sudoers.d/55-k8s-deployment"
owner: root
group: root
mode: "0644"
mode: 0644
- name: Write private SSH key
copy:
src: "{{ k8s_deployment_user_pkey_path }}"
dest: "/home/{{ k8s_deployment_user }}/.ssh/id_rsa"
mode: "0400"
mode: 0400
owner: "{{ k8s_deployment_user }}"
group: "{{ k8s_deployment_user }}"
when: k8s_deployment_user_pkey_path is defined
@@ -41,7 +41,7 @@
- name: Fix ssh-pub-key permissions
file:
path: "/home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
mode: "0600"
mode: 0600
owner: "{{ k8s_deployment_user }}"
group: "{{ k8s_deployment_user }}"
when: k8s_deployment_user_pkey_path is defined

View File

@@ -14,7 +14,7 @@
file:
path: "{{ item }}"
state: directory
mode: "0755"
mode: 0755
become: false
loop:
- "{{ playbook_dir }}/plugins/mitogen"
@@ -25,7 +25,7 @@
url: "{{ mitogen_url }}"
dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
validate_certs: true
mode: "0644"
mode: 0644
- name: Extract archive
unarchive:
@@ -40,7 +40,7 @@
- name: Add strategy to ansible.cfg
community.general.ini_file:
path: ansible.cfg
mode: "0644"
mode: 0644
section: "{{ item.section | d('defaults') }}"
option: "{{ item.option }}"
value: "{{ item.value }}"

View File

@@ -21,7 +21,7 @@ glusterfs_default_release: ""
You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy).
```yaml
glusterfs_ppa_use: true
glusterfs_ppa_use: yes
glusterfs_ppa_version: "3.5"
```

View File

@@ -1,7 +1,7 @@
---
# For Ubuntu.
glusterfs_default_release: ""
glusterfs_ppa_use: true
glusterfs_ppa_use: yes
glusterfs_ppa_version: "4.1"
# Gluster configuration.

View File

@@ -15,7 +15,7 @@
file:
path: "{{ item }}"
state: directory
mode: "0775"
mode: 0775
with_items:
- "{{ gluster_mount_dir }}"
when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined

View File

@@ -3,7 +3,7 @@
apt_repository:
repo: 'ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}'
state: present
update_cache: true
update_cache: yes
register: glusterfs_ppa_added
when: glusterfs_ppa_use

View File

@@ -1,7 +1,7 @@
---
# For Ubuntu.
glusterfs_default_release: ""
glusterfs_ppa_use: true
glusterfs_ppa_use: yes
glusterfs_ppa_version: "3.12"
# Gluster configuration.

View File

@@ -43,13 +43,13 @@
service:
name: "{{ glusterfs_daemon }}"
state: started
enabled: true
enabled: yes
- name: Ensure Gluster brick and mount directories exist.
file:
path: "{{ item }}"
state: directory
mode: "0775"
mode: 0775
with_items:
- "{{ gluster_brick_dir }}"
- "{{ gluster_mount_dir }}"
@@ -62,7 +62,7 @@
replicas: "{{ groups['gfs-cluster'] | length }}"
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
host: "{{ inventory_hostname }}"
force: true
force: yes
run_once: true
when: groups['gfs-cluster'] | length > 1
@@ -73,7 +73,7 @@
brick: "{{ gluster_brick_dir }}"
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
host: "{{ inventory_hostname }}"
force: true
force: yes
run_once: true
when: groups['gfs-cluster'] | length <= 1
@@ -101,7 +101,7 @@
template:
dest: "{{ gluster_mount_dir }}/.test-file.txt"
src: test-file.txt
mode: "0644"
mode: 0644
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
- name: Unmount glusterfs

View File

@@ -3,7 +3,7 @@
apt_repository:
repo: 'ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}'
state: present
update_cache: true
update_cache: yes
register: glusterfs_ppa_added
when: glusterfs_ppa_use

View File

@@ -3,7 +3,7 @@
template:
src: "{{ item.file }}"
dest: "{{ kube_config_dir }}/{{ item.dest }}"
mode: "0644"
mode: 0644
with_items:
- { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
- { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}

View File

@@ -6,6 +6,6 @@
- name: Teardown disks in heketi
hosts: heketi-node
become: true
become: yes
roles:
- { role: tear-down-disks }

View File

@@ -4,7 +4,7 @@
template:
src: "heketi-bootstrap.json.j2"
dest: "{{ kube_config_dir }}/heketi-bootstrap.json"
mode: "0640"
mode: 0640
register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi Bootstrap"
kube:

View File

@@ -10,7 +10,7 @@
template:
src: "topology.json.j2"
dest: "{{ kube_config_dir }}/topology.json"
mode: "0644"
mode: 0644
- name: "Copy topology configuration into container."
changed_when: false
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"

View File

@@ -3,7 +3,7 @@
template:
src: "glusterfs-daemonset.json.j2"
dest: "{{ kube_config_dir }}/glusterfs-daemonset.json"
mode: "0644"
mode: 0644
become: true
register: "rendering"
- name: "Kubernetes Apps | Install and configure GlusterFS daemonset"
@@ -33,7 +33,7 @@
template:
src: "heketi-service-account.json.j2"
dest: "{{ kube_config_dir }}/heketi-service-account.json"
mode: "0644"
mode: 0644
become: true
register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi Service Account"

View File

@@ -4,7 +4,7 @@
template:
src: "heketi-deployment.json.j2"
dest: "{{ kube_config_dir }}/heketi-deployment.json"
mode: "0644"
mode: 0644
register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi"

View File

@@ -28,7 +28,7 @@
template:
src: "heketi.json.j2"
dest: "{{ kube_config_dir }}/heketi.json"
mode: "0644"
mode: 0644
- name: "Deploy Heketi config secret"
when: "secret_state.stdout | length == 0"

View File

@@ -5,7 +5,7 @@
template:
src: "heketi-storage.json.j2"
dest: "{{ kube_config_dir }}/heketi-storage.json"
mode: "0644"
mode: 0644
register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi Storage"
kube:

View File

@@ -16,7 +16,7 @@
template:
src: "storageclass.yml.j2"
dest: "{{ kube_config_dir }}/storageclass.yml"
mode: "0644"
mode: 0644
register: "rendering"
- name: "Kubernetes Apps | Install and configure Storace Class"
kube:

View File

@@ -10,7 +10,7 @@
template:
src: "topology.json.j2"
dest: "{{ kube_config_dir }}/topology.json"
mode: "0644"
mode: 0644
- name: "Copy topology configuration into container." # noqa no-handler
when: "rendering.changed"
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"

View File

@@ -1,7 +1,7 @@
---
- name: Collect container images for offline deployment
hosts: localhost
become: false
become: no
roles:
# Just load default variables from roles.
@@ -16,7 +16,7 @@
template:
src: ./contrib/offline/temp/{{ item }}.list.template
dest: ./contrib/offline/temp/{{ item }}.list
mode: "0644"
mode: 0644
with_items:
- files
- images

View File

@@ -146,7 +146,7 @@ function register_container_images() {
if [ "${org_image}" == "ID:" ]; then
org_image=$(echo "${load_image}" | awk '{print $4}')
fi
image_id=$(sudo ${runtime} image inspect --format "{{.Id}}" "${org_image}")
image_id=$(sudo ${runtime} image inspect ${org_image} | grep "\"Id\":" | awk -F: '{print $3}'| sed s/'\",'//)
if [ -z "${file_name}" ]; then
echo "Failed to get file_name for line ${line}"
exit 1

View File

@@ -7,17 +7,17 @@
service_facts:
- name: Disable service firewalld
systemd_service:
systemd:
name: firewalld
state: stopped
enabled: false
enabled: no
when:
"'firewalld.service' in services and services['firewalld.service'].status != 'not-found'"
- name: Disable service ufw
systemd_service:
systemd:
name: ufw
state: stopped
enabled: false
enabled: no
when:
"'ufw.service' in services and services['ufw.service'].status != 'not-found'"

View File

@@ -1,11 +1,5 @@
terraform {
required_version = ">= 0.12.0"
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 5.0"
}
}
}
provider "aws" {

View File

@@ -12,8 +12,8 @@ ${list_master}
${list_worker}
[k8s_cluster:children]
kube_control_plane
kube_node
kube-master
kube-node
[k8s_cluster:vars]
network_id=${network_id}

View File

@@ -368,7 +368,7 @@ def iter_host_ips(hosts, ips):
'ansible_host': ip,
})
if 'use_access_ip' in host[1]['metadata'] and host[1]['metadata']['use_access_ip'] == "0" and 'access_ip' in host[1]:
if 'use_access_ip' in host[1]['metadata'] and host[1]['metadata']['use_access_ip'] == "0":
host[1].pop('access_ip')
yield host

View File

@@ -1,11 +1,5 @@
# See: https://developers.upcloud.com/1.3/5-zones/
zone = "fi-hel1"
private_cloud = false
# Only used if private_cloud = true, public zone equivalent
# For example use finnish public zone for finnish private zone
public_zone = "fi-hel2"
zone = "fi-hel1"
username = "ubuntu"
# Prefix to use for all resources to separate them from other resources

View File

@@ -11,10 +11,8 @@ provider "upcloud" {
module "kubernetes" {
source = "./modules/kubernetes-cluster"
prefix = var.prefix
zone = var.zone
private_cloud = var.private_cloud
public_zone = var.public_zone
prefix = var.prefix
zone = var.zone
template_name = var.template_name
username = var.username

View File

@@ -54,12 +54,11 @@ resource "upcloud_server" "master" {
if machine.node_type == "master"
}
hostname = "${local.resource-prefix}${each.key}"
plan = each.value.plan
cpu = each.value.plan == null ? null : each.value.cpu
mem = each.value.plan == null ? null : each.value.mem
zone = var.zone
server_group = each.value.server_group == null ? null : upcloud_server_group.server_groups[each.value.server_group].id
hostname = "${local.resource-prefix}${each.key}"
plan = each.value.plan
cpu = each.value.plan == null ? each.value.cpu : null
mem = each.value.plan == null ? each.value.mem : null
zone = var.zone
template {
storage = var.template_name
@@ -112,13 +111,11 @@ resource "upcloud_server" "worker" {
if machine.node_type == "worker"
}
hostname = "${local.resource-prefix}${each.key}"
plan = each.value.plan
cpu = each.value.plan == null ? null : each.value.cpu
mem = each.value.plan == null ? null : each.value.mem
zone = var.zone
server_group = each.value.server_group == null ? null : upcloud_server_group.server_groups[each.value.server_group].id
hostname = "${local.resource-prefix}${each.key}"
plan = each.value.plan
cpu = each.value.plan == null ? each.value.cpu : null
mem = each.value.plan == null ? each.value.mem : null
zone = var.zone
template {
storage = var.template_name
@@ -515,18 +512,8 @@ resource "upcloud_loadbalancer" "lb" {
configured_status = "started"
name = "${local.resource-prefix}lb"
plan = var.loadbalancer_plan
zone = var.private_cloud ? var.public_zone : var.zone
networks {
name = "Private-Net"
type = "private"
family = "IPv4"
network = upcloud_network.private.id
}
networks {
name = "Public-Net"
type = "public"
family = "IPv4"
}
zone = var.zone
network = upcloud_network.private.id
}
resource "upcloud_loadbalancer_backend" "lb_backend" {
@@ -547,9 +534,6 @@ resource "upcloud_loadbalancer_frontend" "lb_frontend" {
mode = "tcp"
port = each.value.port
default_backend_name = upcloud_loadbalancer_backend.lb_backend[each.key].name
networks {
name = "Public-Net"
}
}
resource "upcloud_loadbalancer_static_backend_member" "lb_backend_member" {
@@ -573,9 +557,5 @@ resource "upcloud_server_group" "server_groups" {
title = each.key
anti_affinity_policy = each.value.anti_affinity_policy
labels = {}
# Managed upstream via upcloud_server resource
members = []
lifecycle {
ignore_changes = [members]
}
members = [for server in each.value.servers : merge(upcloud_server.master, upcloud_server.worker)[server].id]
}

View File

@@ -6,14 +6,6 @@ variable "zone" {
type = string
}
variable "private_cloud" {
type = bool
}
variable "public_zone" {
type = string
}
variable "template_name" {}
variable "username" {}
@@ -28,7 +20,6 @@ variable "machines" {
cpu = string
mem = string
disk_size = number
server_group : string
additional_disks = map(object({
size = number
tier = string
@@ -113,5 +104,6 @@ variable "server_groups" {
type = map(object({
anti_affinity_policy = string
servers = list(string)
}))
}

View File

@@ -3,7 +3,7 @@ terraform {
required_providers {
upcloud = {
source = "UpCloudLtd/upcloud"
version = "~>5.6.0"
version = "~>2.12.0"
}
}
required_version = ">= 0.13"

View File

@@ -9,15 +9,6 @@ variable "zone" {
description = "The zone where to run the cluster"
}
variable "private_cloud" {
description = "Whether the environment is in the private cloud region"
default = false
}
variable "public_zone" {
description = "The public zone equivalent if the cluster is running in a private cloud zone"
}
variable "template_name" {
description = "Block describing the preconfigured operating system"
}
@@ -41,7 +32,6 @@ variable "machines" {
cpu = string
mem = string
disk_size = number
server_group : string
additional_disks = map(object({
size = number
tier = string
@@ -152,6 +142,7 @@ variable "server_groups" {
type = map(object({
anti_affinity_policy = string
servers = list(string)
}))
default = {}

View File

@@ -3,7 +3,7 @@ terraform {
required_providers {
upcloud = {
source = "UpCloudLtd/upcloud"
version = "~>5.6.0"
version = "~>2.12.0"
}
}
required_version = ">= 0.13"

View File

@@ -424,7 +424,7 @@ calico_wireguard_enabled: true
The following OSes will require enabling the EPEL repo in order to bring in wireguard tools:
* CentOS 8
* CentOS 7 & 8
* AlmaLinux 8
* Rocky Linux 8
* Amazon Linux 2

View File

@@ -132,7 +132,7 @@ Wireguard option is only available in Cilium 1.10.0 and newer.
### IPsec Encryption
For further information, make sure to check the official [Cilium documentation.](https://docs.cilium.io/en/stable/security/network/encryption-ipsec/)
For further information, make sure to check the official [Cilium documentation.](https://docs.cilium.io/en/stable/gettingstarted/encryption-ipsec/)
To enable IPsec encryption, you just need to set three variables.
@@ -157,7 +157,7 @@ echo "cilium_ipsec_key: "$(echo -n "3 rfc4106(gcm(aes)) $(echo $(dd if=/dev/uran
### Wireguard Encryption
For further information, make sure to check the official [Cilium documentation.](https://docs.cilium.io/en/stable/security/network/encryption-wireguard/)
For further information, make sure to check the official [Cilium documentation.](https://docs.cilium.io/en/stable/gettingstarted/encryption-wireguard/)
To enable Wireguard encryption, you just need to set two variables.
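
Both encryption hunks cut off before listing the variables. For reference, a sketch of what they are: `cilium_ipsec_key` is confirmed by the key-generation command above, while the other names are assumed from Kubespray's cilium variable scheme:

```yaml
# Assumed variable names (only cilium_ipsec_key appears in this compare):
cilium_encryption_enabled: true
cilium_encryption_type: "wireguard"   # the two Wireguard variables
# For IPsec, use "ipsec" instead and add the generated key:
# cilium_encryption_type: "ipsec"
# cilium_ipsec_key: "<output of the dd/base64 command above>"
```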

View File

@@ -16,6 +16,14 @@ Enabling the `overlay2` graph driver:
docker_storage_options: -s overlay2
```
Enabling `docker_container_storage_setup` will configure the devicemapper driver on CentOS 7 or RedHat 7.
Deployers must define a disk path for `docker_container_storage_setup_devs`; otherwise, docker-storage-setup will be executed incorrectly.
```yaml
docker_container_storage_setup: true
docker_container_storage_setup_devs: /dev/vdb
```
Changing the Docker cgroup driver (native.cgroupdriver); valid options are `systemd` or `cgroupfs`, default is `systemd`:
```yaml

View File

@@ -216,8 +216,6 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m
The percent is calculated by dividing this field value by 100, so the field value must be between 0 and 100, inclusive.
When specified, the value must be less than imageGCHighThresholdPercent. Default: 80
* *kubelet_max_parallel_image_pulls* - Sets the maximum number of image pulls in parallel. The value is `1` by default, which means images are pulled serially; set it to an integer greater than `1` to enable parallel image pulling.
* *kubelet_make_iptables_util_chains* - If `true`, causes the kubelet ensures a set of `iptables` rules are present on host.
* *kubelet_cpu_manager_policy* - If set to `static`, allows pods with certain resource characteristics to be granted increased CPU affinity and exclusivity on the node. And it should be set with `kube_reserved` or `system-reserved`, enable this with the following guide:[Control CPU Management Policies on the Node](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/)
@@ -245,10 +243,6 @@ kubelet_cpu_manager_policy_options:
By default the `kubelet_secure_addresses` is set with the `10.0.0.110` the ansible control host uses `eth0` to connect to the machine. In case you want to use `eth1` as the outgoing interface on which `kube-apiserver` connects to the `kubelet`s, you should override the variable in this way: `kubelet_secure_addresses: "192.168.1.110"`.
* *kubelet_systemd_wants_dependencies* - List of kubelet service dependencies, other than container runtime.
If you use nfs dynamically mounted volumes, sometimes rpc-statd does not start within the kubelet. You can fix it with this parameter : `kubelet_systemd_wants_dependencies: ["rpc-statd.service"]` This will add `Wants=rpc-statd.service` in `[Unit]` section of /etc/systemd/system/kubelet.service
* *node_labels* - Labels applied to nodes via `kubectl label node`.
For example, labels can be set in the inventory as variables or more widely in group_vars.
*node_labels* can only be defined as a dict:
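
The hunk ends at the colon above; a minimal illustration of the dict form (hypothetical keys and values):

```yaml
node_labels:
  node-role.example.com/infra: ""   # hypothetical label key
  environment: staging
```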

View File

@@ -5,8 +5,8 @@
1. build: build a docker image to be used in the pipeline
2. unit-tests: fast jobs for fast feedback (linting, etc...)
3. deploy-part1: small number of jobs to test if the PR works with default settings
4. deploy-extended: slow jobs testing different platforms, OS, settings, CNI, etc...
5. deploy-extended: very slow jobs (upgrades, etc...)
4. deploy-part2: slow jobs testing different platforms, OS, settings, CNI, etc...
5. deploy-part3: very slow jobs (upgrades, etc...)
## Runners

View File

@@ -8,8 +8,9 @@ To generate this Matrix run `./tests/scripts/md-table/main.py`
|---| --- | --- | --- | --- | --- | --- | --- | --- |
almalinux8 | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
amazon | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos8 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: |
debian11 | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :x: |
centos7 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: |
debian10 | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: |
debian11 | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
debian12 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
fedora37 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: |
fedora38 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
@@ -26,7 +27,8 @@ ubuntu24 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|---| --- | --- | --- | --- | --- | --- | --- | --- |
almalinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian11 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian12 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora37 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
@@ -44,7 +46,8 @@ ubuntu24 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|---| --- | --- | --- | --- | --- | --- | --- | --- |
almalinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian10 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian11 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian12 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora37 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |

View File

@@ -80,7 +80,7 @@ cat << EOF > vagrant/config.rb
\$instance_name_prefix = "kub"
\$vm_cpus = 1
\$num_instances = 3
\$os = "centos8-bento"
\$os = "centos-bento"
\$subnet = "10.0.20"
\$network_plugin = "flannel"
\$inventory = "$INV"

View File

@@ -35,7 +35,7 @@ kubectl create clusterrolebinding cluster-admin-binding \
The following **Mandatory Command** is required for all deployments except for AWS. See below for the AWS version.
```console
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.11.2/deploy/static/provider/cloud/deploy.yaml
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.10.1/deploy/static/provider/cloud/deploy.yaml
```
### Provider Specific Steps

View File

@@ -1,5 +1,10 @@
# CentOS and derivatives
## CentOS 7
The maximum Python version officially supported on CentOS 7 is 3.6. Ansible, as of version 5 (ansible-core 2.12.x), raised its Python requirement to Python 3.8 and above.
Kubespray supports multiple Ansible versions, but only the default (5.x) gets wide testing coverage. If your deployment host is CentOS 7, it is recommended to use one of the earlier versions still supported.
## CentOS 8
If you have containers that are using iptables in the host network namespace (`hostNetwork=true`),

View File

@@ -1,6 +1,6 @@
# cgroups
To avoid resource contention between containers and host daemons in Kubernetes, the kubelet components can use cgroups to limit resource usage.
To avoid the rivals for resources between containers or the impact on the host in Kubernetes, the kubelet components will rely on cgroups to limit the containers resources usage.
## Enforcing Node Allocatable
@@ -20,9 +20,8 @@ Here is an example:
```yaml
kubelet_enforce_node_allocatable: "pods,kube-reserved,system-reserved"
# Set kube_reserved to true to run kubelet and container-engine daemons in a dedicated cgroup.
# This is required if you want to enforce limits on the resource usage of these daemons.
# It is not required if you just want to make resource reservations (kube_memory_reserved, kube_cpu_reserved, etc.)
# Reserve this space for kube resources
# Set to true to reserve resources for kube daemons
kube_reserved: true
kube_reserved_cgroups_for_service_slice: kube.slice
kube_reserved_cgroups: "/{{ kube_reserved_cgroups_for_service_slice }}"

View File

@@ -30,12 +30,12 @@ loadbalancer. If you wish to control the name of the loadbalancer container,
you can set the variable `loadbalancer_apiserver_pod_name`.
If you choose to NOT use the local internal loadbalancer, you will need to
use the [kube-vip](/docs/ingress/kube-vip.md) ansible role or configure your own loadbalancer to achieve HA. By default, it only configures a non-HA endpoint, which points to the
use the [kube-vip](kube-vip.md) ansible role or configure your own loadbalancer to achieve HA. By default, it only configures a non-HA endpoint, which points to the
`access_ip` or IP address of the first server node in the `kube_control_plane` group.
It can also configure clients to use endpoints for a given loadbalancer type.
The following diagram shows how traffic to the apiserver is directed.
![Image](/docs/figures/loadbalancer_localhost.png?raw=true)
![Image](figures/loadbalancer_localhost.png?raw=true)
A user may opt to use an external loadbalancer (LB) instead. An external LB
provides access for external clients, while the internal LB accepts client

View File

@@ -12,7 +12,7 @@
- name: Setup ssh config to use the bastion
hosts: localhost
gather_facts: false
gather_facts: False
roles:
- { role: kubespray-defaults}
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}

View File

@@ -14,12 +14,7 @@ documentation: https://kubespray.io
license_file: LICENSE
dependencies:
ansible.utils: '>=2.5.0'
community.general: '>=7.0.0'
ansible.netcommon: '>=5.3.0'
ansible.posix: '>=1.5.4'
community.docker: '>=3.11.0'
kubernetes.core: '>=2.4.2'
community.general: '>=3.0.0'
manifest:
directives:
- recursive-exclude tests **
- recursive-include roles **/files/*

View File

@@ -24,21 +24,8 @@
# containerd_grpc_max_recv_message_size: 16777216
# containerd_grpc_max_send_message_size: 16777216
# Containerd debug socket location: unix or tcp format
# containerd_debug_address: ""
# Containerd log level
# containerd_debug_level: "info"
# Containerd logs format, supported values: text, json
# containerd_debug_format: ""
# Containerd debug socket UID
# containerd_debug_uid: 0
# Containerd debug socket GID
# containerd_debug_gid: 0
# containerd_metrics_address: ""
# containerd_metrics_grpc_histogram: false
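Taken together, the commented defaults above can be overridden in the inventory to expose containerd's debug and metrics endpoints; the socket path and listen address below are illustrative, not recommendations:
```yaml
# Illustrative overrides built from the commented defaults above
containerd_debug_address: "/run/containerd/debug.sock"  # placeholder unix socket
containerd_debug_level: "debug"
containerd_metrics_address: "127.0.0.1:1338"            # placeholder tcp listen address
containerd_metrics_grpc_histogram: true
```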

View File

@@ -18,7 +18,7 @@
# quay_image_repo: "{{ registry_host }}"
## Kubernetes components
# kubeadm_download_url: "{{ files_repo }}/dl.k8s.io/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubeadm"
# kubeadm_download_url: "{{ files_repo }}/dl.k8s.io/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm"
# kubectl_download_url: "{{ files_repo }}/dl.k8s.io/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl"
# kubelet_download_url: "{{ files_repo }}/dl.k8s.io/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet"
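In an offline setup these templates resolve against a single mirror; a sketch, assuming an internal mirror at files.example.com (a placeholder host):
```yaml
# Offline inventory sketch; files.example.com is a placeholder mirror
files_repo: "https://files.example.com"
kubectl_download_url: "{{ files_repo }}/dl.k8s.io/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl"
kubelet_download_url: "{{ files_repo }}/dl.k8s.io/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet"
```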
@@ -82,7 +82,7 @@
# krew_download_url: "{{ files_repo }}/github.com/kubernetes-sigs/krew/releases/download/{{ krew_version }}/krew-{{ host_os }}_{{ image_arch }}.tar.gz"
## CentOS/Redhat/AlmaLinux
### For EL8, baseos and appstream must be available,
### For EL7, base and extras repo must be available, for EL8, baseos and appstream
### By default we enable those repo automatically
# rhel_enable_repos: false
### Docker / Containerd

View File

@@ -33,6 +33,3 @@
# etcd_experimental_distributed_tracing_sample_rate: 100
# etcd_experimental_distributed_tracing_address: "localhost:4317"
# etcd_experimental_distributed_tracing_service_name: etcd
## The interval for etcd watch progress notify events
# etcd_experimental_watch_progress_notify_interval: 5s
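Uncommenting the variables above enables etcd's experimental distributed tracing; a sketch using only the variables listed, with the OTLP collector address as a placeholder:
```yaml
# Illustrative etcd tracing overrides, using only the variables listed above
etcd_experimental_distributed_tracing_address: "localhost:4317"  # placeholder OTLP endpoint
etcd_experimental_distributed_tracing_service_name: etcd
etcd_experimental_distributed_tracing_sample_rate: 100
```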

View File

@@ -96,16 +96,10 @@ rbd_provisioner_enabled: false
# rbd_provisioner_storage_class: rbd
# rbd_provisioner_reclaim_policy: Delete
# Gateway API CRDs
gateway_api_enabled: false
# gateway_api_experimental_channel: false
# Nginx ingress controller deployment
ingress_nginx_enabled: false
# ingress_nginx_host_network: false
# ingress_nginx_service_type: LoadBalancer
# ingress_nginx_service_nodeport_http: 30080
# ingress_nginx_service_nodeport_https: 30081
ingress_publish_status_address: ""
# ingress_nginx_nodeselector:
# kubernetes.io/os: "linux"
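A hedged example of turning the nginx ingress addon on; the values shown simply mirror the commented defaults above, not tuning advice:
```yaml
# Example addon toggles; NodePort values mirror the commented defaults above
ingress_nginx_enabled: true
ingress_nginx_host_network: false
ingress_nginx_service_type: LoadBalancer
ingress_nginx_service_nodeport_http: 30080
ingress_nginx_service_nodeport_https: 30081
```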

View File

@@ -17,7 +17,7 @@ kube_token_dir: "{{ kube_config_dir }}/tokens"
kube_api_anonymous_auth: true
## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.30.4
kube_version: v1.29.5
# Where the binaries will be downloaded.
# Note: ensure that you have enough disk space (about 1G)
@@ -262,7 +262,7 @@ default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
# kubelet_runtime_cgroups_cgroupfs: "/system.slice/{{ container_manager }}.service"
# kubelet_kubelet_cgroups_cgroupfs: "/system.slice/kubelet.service"
# Whether to run kubelet and container-engine daemons in a dedicated cgroup.
# Optionally reserve this space for kube daemons.
# kube_reserved: false
## Uncomment to override default values
## The following two items need to be set when kube_reserved is true
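A sketch of such an override, assuming the reservation variables hinted at above (kube_memory_reserved, kube_cpu_reserved); the sizes are placeholders:
```yaml
# Illustrative reservation; sizes are placeholders, tune per node capacity
kube_reserved: true
kube_memory_reserved: 256Mi
kube_cpu_reserved: 100m
```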

View File

@@ -163,13 +163,6 @@ cilium_l2announcements: false
### Enable auto generate certs if cilium_hubble_install: true
# cilium_hubble_tls_generate: false
### Tune cilium_hubble_event_buffer_capacity & cilium_hubble_event_queue_size values to avoid dropping events when hubble is under heavy load
### Capacity of Hubble events buffer. The provided value must be one less than an integer power of two and no larger than 65535
### (ie: 1, 3, ..., 2047, 4095, ..., 65535) (default 4095)
# cilium_hubble_event_buffer_capacity: 4095
### Buffer size of the channel to receive monitor events.
# cilium_hubble_event_queue_size: 50
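Following the constraint above (one less than a power of two, at most 65535), a tuning sketch might look like this; the values are illustrative:
```yaml
# Illustrative Hubble tuning: 8191 = 2^13 - 1, within the stated bounds
cilium_hubble_event_buffer_capacity: 8191
cilium_hubble_event_queue_size: 100
```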
# IP address management mode for v1.9+.
# https://docs.cilium.io/en/v1.9/concepts/networking/ipam/
# cilium_ipam_mode: kubernetes

View File

@@ -4,7 +4,7 @@ FROM ubuntu:jammy-20230308
# Pip needs this as well at the moment to install ansible
# (and potentially other packages)
# See: https://github.com/pypa/pip/issues/10219
ENV VAGRANT_VERSION=2.4.1 \
ENV VAGRANT_VERSION=2.3.7 \
VAGRANT_DEFAULT_PROVIDER=libvirt \
VAGRANT_ANSIBLE_TAGS=facts \
LANG=C.UTF-8 \
@@ -30,9 +30,6 @@ RUN apt update -q \
software-properties-common \
unzip \
libvirt-clients \
qemu-utils \
qemu-kvm \
dnsmasq \
&& curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
&& add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" \
&& apt update -q \
@@ -40,15 +37,13 @@ RUN apt update -q \
&& apt autoremove -yqq --purge && apt clean && rm -rf /var/lib/apt/lists/* /var/log/*
WORKDIR /kubespray
ADD ./requirements.txt /kubespray/requirements.txt
ADD ./tests/requirements.txt /kubespray/tests/requirements.txt
ADD ./roles/kubespray-defaults/defaults/main/main.yml /kubespray/roles/kubespray-defaults/defaults/main/main.yml
RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1 \
RUN --mount=type=bind,target=./requirements.txt,src=./requirements.txt \
--mount=type=bind,target=./tests/requirements.txt,src=./tests/requirements.txt \
--mount=type=bind,target=./roles/kubespray-defaults/defaults/main/main.yml,src=./roles/kubespray-defaults/defaults/main/main.yml \
update-alternatives --install /usr/bin/python python /usr/bin/python3 1 \
&& pip install --no-compile --no-cache-dir pip -U \
&& pip install --no-compile --no-cache-dir -r tests/requirements.txt \
&& pip install --no-compile --no-cache-dir -r requirements.txt \
&& KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main/main.yml) \
&& curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \
&& echo $(curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \

View File

@@ -2,7 +2,7 @@
- name: Check Ansible version
hosts: all
gather_facts: false
become: false
become: no
run_once: true
vars:
minimal_ansible_version: 2.16.4
@@ -25,6 +25,7 @@
tags:
- check
# CentOS 7 provides a Jinja version that is too old
- name: "Check that jinja is not too old (install via pip)"
assert:
msg: "Your Jinja version is too old, install via pip"

View File

@@ -51,7 +51,7 @@
- name: Install bastion ssh config
hosts: bastion[0]
gather_facts: false
gather_facts: False
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }

View File

@@ -7,7 +7,7 @@
- name: Prepare for etcd install
hosts: k8s_cluster:etcd
gather_facts: false
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@@ -21,7 +21,7 @@
- name: Install Kubernetes nodes
hosts: k8s_cluster
gather_facts: false
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@@ -30,7 +30,7 @@
- name: Install the control plane
hosts: kube_control_plane
gather_facts: false
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@@ -41,7 +41,7 @@
- name: Invoke kubeadm and install a CNI
hosts: k8s_cluster
gather_facts: false
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@@ -54,7 +54,7 @@
- name: Install Calico Route Reflector
hosts: calico_rr
gather_facts: false
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@@ -63,7 +63,7 @@
- name: Patch Kubernetes for Windows
hosts: kube_control_plane[0]
gather_facts: false
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@@ -72,7 +72,7 @@
- name: Install Kubernetes apps
hosts: kube_control_plane
gather_facts: false
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@@ -86,7 +86,7 @@
- name: Apply resolv.conf changes now that cluster DNS is up
hosts: k8s_cluster
gather_facts: false
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:

View File

@@ -15,7 +15,7 @@
- name: Gather facts
hosts: k8s_cluster:etcd:calico_rr
gather_facts: false
gather_facts: False
tags: always
tasks:
- name: Gather minimal facts

View File

@@ -16,7 +16,7 @@
- name: Install etcd
hosts: etcd:kube_control_plane:_kubespray_needs_etcd
gather_facts: false
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:

View File

@@ -4,13 +4,13 @@
- name: Confirm node removal
hosts: "{{ node | default('etcd:k8s_cluster:calico_rr') }}"
gather_facts: false
gather_facts: no
tasks:
- name: Confirm Execution
pause:
prompt: "Are you sure you want to delete nodes state? Type 'yes' to delete nodes."
register: pause_result
run_once: true
run_once: True
when:
- not (skip_confirmation | default(false) | bool)
@@ -25,12 +25,8 @@
- name: Reset node
hosts: "{{ node | default('kube_node') }}"
gather_facts: false
gather_facts: no
environment: "{{ proxy_disable_env }}"
pre_tasks:
- name: Gather information about installed services
service_facts:
when: reset_nodes | default(True) | bool
roles:
- { role: kubespray-defaults, when: reset_nodes | default(True) | bool }
- { role: remove-node/pre-remove, tags: pre-remove }
@@ -40,7 +36,7 @@
# Currently cannot remove first master or etcd
- name: Post node removal
hosts: "{{ node | default('kube_control_plane[1:]:etcd[1:]') }}"
gather_facts: false
gather_facts: no
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults, when: reset_nodes | default(True) | bool }

View File

@@ -7,13 +7,13 @@
- name: Reset cluster
hosts: etcd:k8s_cluster:calico_rr
gather_facts: false
gather_facts: False
pre_tasks:
- name: Reset Confirmation
pause:
prompt: "Are you sure you want to reset cluster state? Type 'yes' to reset your cluster."
register: reset_confirmation_prompt
run_once: true
run_once: True
when:
- not (skip_confirmation | default(false) | bool)
- reset_confirmation is not defined

View File

@@ -7,7 +7,7 @@
- name: Generate the etcd certificates beforehand
hosts: etcd:kube_control_plane
gather_facts: false
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@@ -24,7 +24,7 @@
- name: Download images to ansible host cache via first kube_control_plane node
hosts: kube_control_plane[0]
gather_facts: false
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@@ -34,7 +34,7 @@
- name: Target only workers to get kubelet installed and checking in on any new nodes (engine)
hosts: kube_node
gather_facts: false
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@@ -53,7 +53,7 @@
- name: Target only workers to get kubelet installed and checking in on any new nodes (node)
hosts: kube_node
gather_facts: false
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@@ -63,7 +63,7 @@
- name: Upload control plane certs and retrieve encryption key
hosts: kube_control_plane | first
environment: "{{ proxy_disable_env }}"
gather_facts: false
gather_facts: False
tags: kubeadm
roles:
- { role: kubespray-defaults }
@@ -84,7 +84,7 @@
- name: Target only workers to get kubelet installed and checking in on any new nodes (network)
hosts: kube_node
gather_facts: false
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@@ -96,7 +96,7 @@
- name: Apply resolv.conf changes now that cluster DNS is up
hosts: k8s_cluster
gather_facts: false
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:

View File

@@ -7,7 +7,7 @@
- name: Download images to ansible host cache via first kube_control_plane node
hosts: kube_control_plane[0]
gather_facts: false
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@@ -17,7 +17,7 @@
- name: Prepare nodes for upgrade
hosts: k8s_cluster:etcd:calico_rr
gather_facts: false
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@@ -27,7 +27,7 @@
- name: Upgrade container engine on non-cluster nodes
hosts: etcd:calico_rr:!k8s_cluster
gather_facts: false
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
serial: "{{ serial | default('20%') }}"
@@ -39,7 +39,7 @@
import_playbook: install_etcd.yml
- name: Handle upgrades to master components first to maintain backwards compat.
gather_facts: false
gather_facts: False
hosts: kube_control_plane
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
@@ -62,7 +62,7 @@
- name: Upgrade calico and external cloud provider on all masters, calico-rrs, and nodes
hosts: kube_control_plane:calico_rr:kube_node
gather_facts: false
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
serial: "{{ serial | default('20%') }}"
environment: "{{ proxy_disable_env }}"
@@ -75,7 +75,7 @@
- name: Finally handle worker upgrades, based on given batch size
hosts: kube_node:calico_rr:!kube_control_plane
gather_facts: false
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
serial: "{{ serial | default('20%') }}"
@@ -93,7 +93,7 @@
- name: Patch Kubernetes for Windows
hosts: kube_control_plane[0]
gather_facts: false
gather_facts: False
any_errors_fatal: true
environment: "{{ proxy_disable_env }}"
roles:
@@ -102,7 +102,7 @@
- name: Install Calico Route Reflector
hosts: calico_rr
gather_facts: false
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@@ -111,7 +111,7 @@
- name: Install Kubernetes apps
hosts: kube_control_plane
gather_facts: false
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
@@ -122,7 +122,7 @@
- name: Apply resolv.conf changes now that cluster DNS is up
hosts: k8s_cluster
gather_facts: false
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:

View File

@@ -1,7 +1,10 @@
ansible==9.8.0
# Needed for jinja2 json_query templating
ansible==9.6.0
cryptography==42.0.7
jinja2==3.1.4
jmespath==1.0.1
# Needed for ansible.utils.validate module
jsonschema==4.23.0
# Needed for ansible.utils.ipaddr
netaddr==1.3.0
jsonschema==4.22.0
MarkupSafe==2.1.5
netaddr==1.2.1
pbr==6.0.0
ruamel.yaml==0.18.6
ruamel.yaml.clib==0.2.8

View File

@@ -7,14 +7,14 @@ addusers:
etcd:
name: etcd
comment: "Etcd user"
create_home: false
system: true
create_home: no
system: yes
shell: /sbin/nologin
kube:
name: kube
comment: "Kubernetes user"
create_home: false
system: true
create_home: no
system: yes
shell: /sbin/nologin
group: "{{ kube_cert_group }}"

View File

@@ -3,6 +3,6 @@ addusers:
- name: kube
comment: "Kubernetes user"
shell: /sbin/nologin
system: true
system: yes
group: "{{ kube_cert_group }}"
create_home: false
create_home: no

View File

@@ -2,14 +2,14 @@
addusers:
- name: etcd
comment: "Etcd user"
create_home: true
create_home: yes
home: "{{ etcd_data_dir }}"
system: true
system: yes
shell: /sbin/nologin
- name: kube
comment: "Kubernetes user"
create_home: false
system: true
create_home: no
system: yes
shell: /sbin/nologin
group: "{{ kube_cert_group }}"

View File

@@ -2,14 +2,14 @@
addusers:
- name: etcd
comment: "Etcd user"
create_home: true
create_home: yes
home: "{{ etcd_data_dir }}"
system: true
system: yes
shell: /sbin/nologin
- name: kube
comment: "Kubernetes user"
create_home: false
system: true
create_home: no
system: yes
shell: /sbin/nologin
group: "{{ kube_cert_group }}"

View File

@@ -12,4 +12,4 @@
dest: "{{ ssh_bastion_confing__name }}"
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: "0644"
mode: 0644

View File

@@ -19,4 +19,4 @@
template:
src: "{{ ssh_bastion_confing__name }}.j2"
dest: "{{ playbook_dir }}/{{ ssh_bastion_confing__name }}"
mode: "0640"
mode: 0640

View File

@@ -11,10 +11,6 @@ coreos_locksmithd_disable: false
# Install public repo on Oracle Linux
use_oracle_public_repo: true
## Ubuntu specific variables
# Disable unattended-upgrades for the Linux kernel and all packages starting with linux- on Ubuntu
ubuntu_kernel_unattended_upgrades_disabled: false
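An opt-in sketch using the variable above:
```yaml
# Hold back kernel and linux-* packages from unattended-upgrades on Ubuntu
ubuntu_kernel_unattended_upgrades_disabled: true
```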
fedora_coreos_packages:
- python
- python3-libselinux

View File

@@ -1,6 +1,6 @@
---
- name: Converge
hosts: all
gather_facts: false
gather_facts: no
roles:
- role: bootstrap-os

Some files were not shown because too many files have changed in this diff.