Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2025-12-14 13:54:37 +03:00)

Comparing branches ant31-patc...revert-118: 1 commit (2720c8137a)
@@ -37,7 +37,5 @@ exclude_paths:
  - tests/files/custom_cni/cilium.yaml
  - venv
  - .github
  - .ansible
  - .cache
mock_modules:
  - gluster.gluster.gluster_volume
.github/ISSUE_TEMPLATE/bug-report.yaml (vendored, 28 lines changed)
@@ -36,35 +36,11 @@ body:
    attributes:
      value: '### Environment'

  - type: dropdown
  - type: textarea
    id: os
    attributes:
      label: OS
      options:
        - 'RHEL 9'
        - 'RHEL 8'
        - 'Fedora 40'
        - 'Ubuntu 24'
        - 'Ubuntu 22'
        - 'Ubuntu 20'
        - 'Debian 12'
        - 'Debian 11'
        - 'Flatcar Container Linux'
        - 'openSUSE Leap'
        - 'openSUSE Tumbleweed'
        - 'Oracle Linux 9'
        - 'Oracle Linux 8'
        - 'AlmaLinux 9'
        - 'AlmaLinux 8'
        - 'Rocky Linux 9'
        - 'Rocky Linux 8'
        - 'Amazon Linux 2'
        - 'Kylin Linux Advanced Server V10'
        - 'UOS Linux 20'
        - 'openEuler 24'
        - 'openEuler 22'
        - 'openEuler 20'
        - 'Other|Unsupported'
      placeholder: 'printf "$(uname -srm)\n$(cat /etc/os-release)\n"'
    validations:
      required: true
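Read together, the hunk above replaces the OS dropdown (and its long options list) with a free-form textarea. A sketch of the resulting form field, reconstructed under the assumption of standard GitHub issue-form indentation:

```yaml
# Hypothetical reconstruction of the new field in bug-report.yaml
- type: textarea
  id: os
  attributes:
    label: OS
    placeholder: 'printf "$(uname -srm)\n$(cat /etc/os-release)\n"'
  validations:
    required: true
```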
.github/workflows/auto-label-os.yml (vendored, 32 lines changed)
@@ -1,32 +0,0 @@
name: Issue labeler
on:
  issues:
    types: [opened]

permissions:
  contents: read

jobs:
  label-component:
    runs-on: ubuntu-latest
    permissions:
      issues: write

    steps:
      - uses: actions/checkout@v3

      - name: Parse issue form
        uses: stefanbuck/github-issue-parser@v3
        id: issue-parser
        with:
          template-path: .github/ISSUE_TEMPLATE/bug-report.yaml

      - name: Set labels based on OS field
        uses: redhat-plumbers-in-action/advanced-issue-labeler@v2
        with:
          issue-form: ${{ steps.issue-parser.outputs.jsonString }}
          section: os
          block-list: |
            None
            Other
          token: ${{ secrets.GITHUB_TOKEN }}
@@ -6,24 +6,18 @@ stages:
  - deploy-extended

variables:
  KUBESPRAY_VERSION: v2.27.0
  FAILFASTCI_NAMESPACE: 'kargo-ci'
  GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
  GIT_CONFIG_COUNT: 1
  GIT_CONFIG_KEY_0: user.key
  GIT_CONFIG_VALUE_0: "ci@kubespray.io"
  GIT_CONFIG_KEY_1: user.name
  GIT_CONFIG_VALUE_1: "CI"
  ANSIBLE_FORCE_COLOR: "true"
  MAGIC: "ci check this"
  GS_ACCESS_KEY_ID: $GS_KEY
  GS_SECRET_ACCESS_KEY: $GS_SECRET
  CONTAINER_ENGINE: docker
  SSH_USER: root
  GCE_PREEMPTIBLE: "false"
  ANSIBLE_KEEP_REMOTE_FILES: "1"
  ANSIBLE_CONFIG: ./tests/ansible.cfg
  ANSIBLE_REMOTE_USER: kubespray
  ANSIBLE_PRIVATE_KEY_FILE: /tmp/id_rsa
  ANSIBLE_INVENTORY: /tmp/inventory
  RESET_CHECK: "false"
  REMOVE_NODE_CHECK: "false"
  UPGRADE_TEST: "false"

@@ -56,6 +50,7 @@ before_script:
  needs:
    - pipeline-image
    - ci-not-authorized
    - check-galaxy-version # lint
    - pre-commit # lint
    - vagrant-validate # lint
@@ -3,16 +3,15 @@ pre-commit:
  stage: test
  tags:
    - ffci
  image: 'ghcr.io/pre-commit-ci/runner-image@sha256:fe01a6ec51b298412990b88627c3973b1146c7304f930f469bafa29ba60bcde9'
  image: 'ghcr.io/pre-commit-ci/runner-image@sha256:aaf2c7b38b22286f2d381c11673bec571c28f61dd086d11b43a1c9444a813cef'
  variables:
    PRE_COMMIT_HOME: ${CI_PROJECT_DIR}/.cache/pre-commit
    PRE_COMMIT_HOME: /pre-commit-cache
  script:
    - pre-commit run --all-files --show-diff-on-failure
  cache:
    key: pre-commit-2
    key: pre-commit-all
    paths:
      - ${PRE_COMMIT_HOME}
    when: 'always'
      - /pre-commit-cache
  needs: []

vagrant-validate:
@@ -24,3 +23,13 @@ vagrant-validate:
  script:
    - ./tests/scripts/vagrant-validate.sh
  except: ['triggers', 'master']


# TODO: convert to pre-commit hook
check-galaxy-version:
  needs: []
  stage: test
  tags: [ffci]
  image: python:3
  script:
    - tests/scripts/check_galaxy_version.sh
@@ -1,15 +1,29 @@
---
.molecule:
  tags: [ffci]
  tags: [ffci-vm-med]
  only: [/^pr-.*$/]
  except: ['triggers']
  image: quay.io/kubespray/vm-kubespray-ci:v13
  services: []
  stage: deploy-part1
  image: $PIPELINE_IMAGE
  needs:
    - pipeline-image
  needs: []
  #  - ci-not-authorized
  variables:
    VAGRANT_DEFAULT_PROVIDER: "libvirt"
    VAGRANT_HOME: "$CI_PROJECT_DIR/.vagrant.d"
    PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip"
  before_script:
    - mkdir -p $VAGRANT_HOME
    - groups
    - python3 -m venv citest
    - source citest/bin/activate
    - vagrant plugin expunge --reinstall --force --no-tty
    - vagrant plugin install vagrant-libvirt
    - pip install --no-compile --no-cache-dir pip -U
    - pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/requirements.txt
    - pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/tests/requirements.txt
    - ./tests/scripts/rebase.sh
    - ./tests/scripts/vagrant_clean.sh
  script:
    - ./tests/scripts/molecule_run.sh
  after_script:
@@ -18,39 +32,72 @@
    when: always
    paths:
      - molecule_logs/

molecule:
  extends: .molecule
  script:
    - ./tests/scripts/molecule_run.sh -i $ROLE
  parallel:
    matrix:
      - ROLE:
          - container-engine/cri-dockerd
          - container-engine/containerd
          - container-engine/cri-o
          - adduser
          - bastion-ssh-config
          - bootstrap-os
  cache:
    key: $CI_JOB_NAME_SLUG
    paths:
      - .vagrant.d/boxes
      - .cache/pip
    policy: pull-push # TODO: change to "pull" when not on main

# CI template for periodic CI jobs
# Enabled when PERIODIC_CI_ENABLED var is set
molecule_full:
.molecule_periodic:
  only:
    variables:
      - $PERIODIC_CI_ENABLED
  allow_failure: true
  extends: molecule
  parallel:
    matrix:
      - ROLE:
          - container-engine/cri-dockerd
          - container-engine/containerd
          - container-engine/cri-o
          - adduser
          - bastion-ssh-config
          - bootstrap-os
          # FIXME : tests below are perma-failing
          - container-engine/kata-containers
          - container-engine/gvisor
          - container-engine/youki
  extends: .molecule

molecule_full:
  extends: .molecule_periodic

molecule_no_container_engines:
  extends: .molecule
  script:
    - ./tests/scripts/molecule_run.sh -e container-engine
  when: on_success

molecule_docker:
  extends: .molecule
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
  when: on_success

molecule_containerd:
  extends: .molecule
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/containerd
  when: on_success

molecule_cri-o:
  extends: .molecule
  stage: deploy-part1
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/cri-o
  allow_failure: true
  when: on_success

# # Stage 3 container engines don't get as much attention so allow them to fail
# molecule_kata:
#   extends: .molecule
#   stage: deploy-extended
#   script:
#     - ./tests/scripts/molecule_run.sh -i container-engine/kata-containers
#   when: manual
# # FIXME: this test is broken (perma-failing)

molecule_gvisor:
  extends: .molecule
  stage: deploy-extended
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/gvisor
  when: manual
# FIXME: this test is broken (perma-failing)

molecule_youki:
  extends: .molecule
  stage: deploy-extended
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/youki
  when: manual
# FIXME: this test is broken (perma-failing)
@@ -114,13 +114,8 @@ packet_rockylinux9-cilium:
  variables:
    RESET_CHECK: "true"

# Need an update of the container image to use schema v2
# update: quay.io/kubespray/vm-amazon-linux-2:latest
packet_amazon-linux-2-all-in-one:
  extends: .packet_pr_manual
  rules:
    - when: manual
      allow_failure: true
  extends: .packet_pr

packet_opensuse-docker-cilium:
  extends: .packet_pr
.gitlab-ci/pre-commit-dynamic-stub.yml (new file, 17 lines)
@@ -0,0 +1,17 @@
---
# stub pipeline for dynamic generation
pre-commit:
  tags:
    - light
  image: 'ghcr.io/pre-commit-ci/runner-image@sha256:aaf2c7b38b22286f2d381c11673bec571c28f61dd086d11b43a1c9444a813cef'
  variables:
    PRE_COMMIT_HOME: /pre-commit-cache
  script:
    - pre-commit run --all-files
  cache:
    key: pre-commit-$HOOK_ID
    paths:
      - /pre-commit-cache
  parallel:
    matrix:
      - HOOK_ID:
@@ -36,21 +36,11 @@
      - .cache/pip
    policy: pull-push # TODO: change to "pull" when not on main

vagrant_ubuntu24-calico-dual-stack:
vagrant_ubuntu20-calico-dual-stack:
  stage: deploy-extended
  extends: .vagrant
  rules:
    - if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
      when: on_success
      allow_failure: false

vagrant_ubuntu24-calico-ipv6only-stack:
  stage: deploy-extended
  extends: .vagrant
  rules:
    - if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
      when: on_success
      allow_failure: false
      when: manual
# FIXME: this test if broken (perma-failing)

vagrant_ubuntu20-flannel:
  stage: deploy-part1
@@ -29,7 +29,7 @@ repos:
        files: "\\.sh$"

  - repo: https://github.com/ansible/ansible-lint
    rev: v25.1.1
    rev: v25.1.0
    hooks:
      - id: ansible-lint
        additional_dependencies:
@@ -70,14 +70,6 @@ repos:
          - pathlib
          - pyaml

      - id: check-galaxy-version
        name: Verify correct version for galaxy.yml
        entry: scripts/galaxy_version.py
        language: python
        pass_filenames: false
        additional_dependencies:
          - ruamel.yaml

      - id: jinja-syntax-check
        name: jinja-syntax-check
        entry: tests/scripts/check-templates.py
@@ -87,22 +79,14 @@ repos:
        additional_dependencies:
          - jinja2

      - id: propagate-ansible-variables
        name: Update static files referencing default kubespray values
      - id: render-readme-versions
        name: Update versions in README.md to match their defaults values
        language: python
        additional_dependencies:
          - ansible-core>=2.16.4
        entry: scripts/propagate_ansible_variables.yml
        entry: scripts/render_readme_version.yml
        pass_filenames: false

      - id: check-checksums-sorted
        name: Check that our checksums are correctly sorted by version
        entry: scripts/assert-sorted-checksums.yml
        language: python
        pass_filenames: false
        additional_dependencies:
          - ansible

  - repo: https://github.com/markdownlint/markdownlint
    rev: v0.12.0
    hooks:
@@ -34,9 +34,11 @@ RUN --mount=type=bind,source=requirements.txt,target=requirements.txt \

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

RUN OS_ARCHITECTURE=$(dpkg --print-architecture) \
    && curl -L "https://dl.k8s.io/release/v1.32.3/bin/linux/${OS_ARCHITECTURE}/kubectl" -o /usr/local/bin/kubectl \
    && echo "$(curl -L "https://dl.k8s.io/release/v1.32.3/bin/linux/${OS_ARCHITECTURE}/kubectl.sha256")" /usr/local/bin/kubectl | sha256sum --check \
RUN --mount=type=bind,source=roles/kubespray-defaults/defaults/main/main.yml,target=roles/kubespray-defaults/defaults/main/main.yml \
    KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main/main.yml) \
    OS_ARCHITECTURE=$(dpkg --print-architecture) \
    && curl -L "https://dl.k8s.io/release/${KUBE_VERSION}/bin/linux/${OS_ARCHITECTURE}/kubectl" -o /usr/local/bin/kubectl \
    && echo "$(curl -L "https://dl.k8s.io/release/${KUBE_VERSION}/bin/linux/${OS_ARCHITECTURE}/kubectl.sha256")" /usr/local/bin/kubectl | sha256sum --check \
    && chmod a+x /usr/local/bin/kubectl

COPY *.yml ./
README.md (73 lines changed)
@@ -1,4 +1,3 @@
test
# Deploy a Production Ready Kubernetes Cluster


@@ -16,18 +15,6 @@ You can get your invite [here](http://slack.k8s.io/)

Below are several ways to use Kubespray to deploy a Kubernetes cluster.

### Docker

Ensure you have installed Docker then

```ShellSession
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
  --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
  quay.io/kubespray/kubespray:v2.27.0 bash
# Inside the container you may now run the kubespray playbooks:
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
```

### Ansible

#### Usage

@@ -112,39 +99,39 @@ Note:
<!-- BEGIN ANSIBLE MANAGED BLOCK -->

- Core
  - [kubernetes](https://github.com/kubernetes/kubernetes) 1.32.3
  - [etcd](https://github.com/etcd-io/etcd) 3.5.16
  - [docker](https://www.docker.com/) 26.1
  - [containerd](https://containerd.io/) 2.0.3
  - [cri-o](http://cri-o.io/) 1.32.0 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). Only on fedora, ubuntu and centos based OS)
  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.32.0
  - [etcd](https://github.com/etcd-io/etcd) v3.5.16
  - [docker](https://www.docker.com/) v26.1
  - [containerd](https://containerd.io/) v1.7.24
  - [cri-o](http://cri-o.io/) v1.32.0 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). Only on fedora, ubuntu and centos based OS)
- Network Plugin
  - [cni-plugins](https://github.com/containernetworking/plugins) 1.4.1
  - [calico](https://github.com/projectcalico/calico) 3.29.2
  - [cilium](https://github.com/cilium/cilium) 1.15.9
  - [flannel](https://github.com/flannel-io/flannel) 0.22.0
  - [kube-ovn](https://github.com/alauda/kube-ovn) 1.12.21
  - [kube-router](https://github.com/cloudnativelabs/kube-router) 2.0.0
  - [multus](https://github.com/k8snetworkplumbingwg/multus-cni) 4.1.0
  - [weave](https://github.com/rajch/weave) 2.8.7
  - [kube-vip](https://github.com/kube-vip/kube-vip) 0.8.0
  - [cni-plugins](https://github.com/containernetworking/plugins) v1.4.0
  - [calico](https://github.com/projectcalico/calico) v3.29.1
  - [cilium](https://github.com/cilium/cilium) v1.15.9
  - [flannel](https://github.com/flannel-io/flannel) v0.22.0
  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.12.21
  - [kube-router](https://github.com/cloudnativelabs/kube-router) v2.0.0
  - [multus](https://github.com/k8snetworkplumbingwg/multus-cni) v4.1.0
  - [weave](https://github.com/rajch/weave) v2.8.7
  - [kube-vip](https://github.com/kube-vip/kube-vip) v0.8.0
- Application
  - [cert-manager](https://github.com/jetstack/cert-manager) 1.15.3
  - [coredns](https://github.com/coredns/coredns) 1.11.3
  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) 1.12.0
  - [argocd](https://argoproj.github.io/) 2.14.5
  - [helm](https://helm.sh/) 3.16.4
  - [metallb](https://metallb.universe.tf/) 0.13.9
  - [registry](https://github.com/distribution/distribution) 2.8.1
  - [cert-manager](https://github.com/jetstack/cert-manager) v1.15.3
  - [coredns](https://github.com/coredns/coredns) v1.11.3
  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.12.0
  - [argocd](https://argoproj.github.io/) v2.11.0
  - [helm](https://helm.sh/) v3.16.4
  - [metallb](https://metallb.universe.tf/) v0.13.9
  - [registry](https://github.com/distribution/distribution) v2.8.1
- Storage Plugin
  - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) 2.1.0-k8s1.11
  - [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) 2.1.1-k8s1.11
  - [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) 0.5.0
  - [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) 1.10.0
  - [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) 1.30.0
  - [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) 1.9.2
  - [local-path-provisioner](https://github.com/rancher/local-path-provisioner) 0.0.24
  - [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) 2.5.0
  - [node-feature-discovery](https://github.com/kubernetes-sigs/node-feature-discovery) 0.16.4
  - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
  - [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
  - [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) v0.5.0
  - [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0
  - [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.30.0
  - [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.9.2
  - [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.24
  - [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0
  - [node-feature-discovery](https://github.com/kubernetes-sigs/node-feature-discovery) v0.16.4

<!-- END ANSIBLE MANAGED BLOCK -->
@@ -12,6 +12,7 @@ The Kubespray Project is released on an as-needed basis. The process is as follows:
1. (For major releases) On the `master` branch: bump the version in `galaxy.yml` to the next expected major release (X.y.0 with y = Y + 1), make a Pull Request.
1. (For minor releases) On the `release-X.Y` branch: bump the version in `galaxy.yml` to the next expected minor release (X.Y.z with z = Z + 1), make a Pull Request.
1. The corresponding versions of the [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) container images are built and tagged. See the following `Container image creation` section for the details.
1. (Only for major releases) The `KUBESPRAY_VERSION` in `.gitlab-ci.yml` is upgraded to the version we just released # TODO clarify this, this variable is for testing upgrades.
1. The release issue is closed
1. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
1. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
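For illustration, the `galaxy.yml` bump in the first two steps is a one-line change. A sketch only; the namespace/name fields and version numbers shown here are assumptions:

```yaml
# galaxy.yml (excerpt, hypothetical values)
namespace: kubernetes_sigs
name: kubespray
version: 2.28.0  # next major is X.(Y+1).0 on master; next minor is X.Y.(Z+1) on release-X.Y
```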
Vagrantfile (vendored, 28 lines changed)
@@ -63,22 +63,12 @@ $inventories ||= []
$multi_networking ||= "False"
$download_run_once ||= "True"
$download_force_cache ||= "False"
# Modify those to have separate groups (for instance, to test separate etcd:)
# first_control_plane = 1
# first_etcd = 4
# control_plane_instances = 3
# etcd_instances = 3
$first_node ||= 1
$first_control_plane ||= 1
$first_etcd ||= 1

# The first three nodes are etcd servers
$etcd_instances ||= [$num_instances, 3].min
# The first two nodes are kube masters
$control_plane_instances ||= [$num_instances, 2].min
$kube_master_instances ||= [$num_instances, 2].min
# All nodes are kube nodes
$kube_node_instances ||= $num_instances - $first_node + 1

$kube_node_instances ||= $num_instances
# The following only works when using the libvirt provider
$kube_node_instances_with_disks ||= false
$kube_node_instances_with_disks_size ||= "20G"
@@ -220,20 +210,14 @@ Vagrant.configure("2") do |config|
    end

    ip = "#{$subnet}.#{i+100}"
    ip6 = "#{$subnet_ipv6}::#{i+100}"
    node.vm.network :private_network,
      :ip => ip,
      :libvirt__guest_ipv6 => 'yes',
      :libvirt__ipv6_address => ip6,
      :libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
      :libvirt__ipv6_prefix => "64",
      :libvirt__forward_mode => "none",
      :libvirt__dhcp_enabled => false

    # libvirt__ipv6_address does not work as intended, the address is obtained with the desired prefix, but auto-generated(like fd3c:b398:698:756:5054:ff:fe48:c61e/64)
    # add default route for detect ansible_default_ipv6
    # TODO: fix libvirt__ipv6 or use $subnet in shell
    config.vm.provision "shell", inline: "ip -6 r a fd3c:b398:698:756::/64 dev eth1;ip -6 r add default via fd3c:b398:0698:0756::1 dev eth1 || true"

    # Disable swap for each vm
    node.vm.provision "shell", inline: "swapoff -a"

@@ -307,9 +291,9 @@ Vagrant.configure("2") do |config|
        ansible.tags = [$ansible_tags]
      end
      ansible.groups = {
        "etcd" => ["#{$instance_name_prefix}-[#{$first_etcd}:#{$etcd_instances + $first_etcd - 1}]"],
        "kube_control_plane" => ["#{$instance_name_prefix}-[#{$first_control_plane}:#{$control_plane_instances + $first_control_plane - 1}]"],
        "kube_node" => ["#{$instance_name_prefix}-[#{$first_node}:#{$kube_node_instances + $first_node - 1}]"],
        "etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
        "kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
        "kube_node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
        "k8s_cluster:children" => ["kube_control_plane", "kube_node"],
      }
    end
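For reference, with the defaults above (three etcd nodes, two control-plane nodes, every node a worker) the `ansible.groups` hash corresponds to an inventory shaped roughly like this YAML sketch. The `k8s` instance name prefix and the three-instance count are assumptions:

```yaml
# Hypothetical expansion of ansible.groups for 3 instances named k8s-1..k8s-3
all:
  children:
    etcd:
      hosts: {k8s-1: {}, k8s-2: {}, k8s-3: {}}
    kube_control_plane:
      hosts: {k8s-1: {}, k8s-2: {}}
    kube_node:
      hosts: {k8s-1: {}, k8s-2: {}, k8s-3: {}}
    k8s_cluster:
      children: {kube_control_plane: {}, kube_node: {}}
```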
@@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash

OPTION=$1
CURRENT_DIR=$(cd $(dirname $0); pwd)
@@ -118,8 +118,6 @@ function register_container_images() {
    cp ${CURRENT_DIR}/registries.conf ${TEMP_DIR}/registries.conf
    sed -i s@"HOSTNAME"@"$(hostname)"@ ${TEMP_DIR}/registries.conf
    sudo cp ${TEMP_DIR}/registries.conf /etc/containers/registries.conf
  elif [ "$(uname)" == "Darwin" ]; then
    echo "This is a Mac, no configuration changes are required"
  else
    echo "runtime package(docker-ce, podman, nerctl, etc.) should be installed"
    exit 1
@@ -15,7 +15,7 @@ resource "openstack_networking_floatingip_v2" "k8s_master" {
}

resource "openstack_networking_floatingip_v2" "k8s_masters" {
  for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 ? { for key, value in var.k8s_masters : key => value if value.floating_ip && (lookup(value, "reserved_floating_ip", "") == "") } : tomap({})
  for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 ? { for key, value in var.k8s_masters : key => value if value.floating_ip && (lookup(value, "reserved_floating_ip", "") == "") } : {}
  pool = var.floatingip_pool
  depends_on = [null_resource.dummy_dependency]
}
@@ -40,7 +40,7 @@ resource "openstack_networking_floatingip_v2" "bastion" {
}

resource "openstack_networking_floatingip_v2" "k8s_nodes" {
  for_each = var.number_of_k8s_nodes == 0 ? { for key, value in var.k8s_nodes : key => value if value.floating_ip && (lookup(value, "reserved_floating_ip", "") == "") } : tomap({})
  for_each = var.number_of_k8s_nodes == 0 ? { for key, value in var.k8s_nodes : key => value if value.floating_ip && (lookup(value, "reserved_floating_ip", "") == "") } : {}
  pool = var.floatingip_pool
  depends_on = [null_resource.dummy_dependency]
}
@@ -273,7 +273,6 @@ def openstack_host(resource, module_name):
        'access_ip_v4': raw_attrs['access_ip_v4'],
        'access_ip_v6': raw_attrs['access_ip_v6'],
        'access_ip': raw_attrs['access_ip_v4'],
        'access_ip6': raw_attrs['access_ip_v6'],
        'ip': raw_attrs['network.0.fixed_ip_v4'],
        'flavor': parse_dict(raw_attrs, 'flavor',
                             sep='_'),
@@ -134,40 +134,10 @@ terraform destroy --var-file cluster-settings.tfvars \
* `end_address`: End of address range to allow
* `loadbalancer_enabled`: Enable managed load balancer
* `loadbalancer_plan`: Plan to use for load balancer *(development|production-small)*
* `loadbalancer_legacy_network`: If the loadbalancer should use the deprecated network field instead of networks blocks. You probably want to have this set to false (default value)
* `loadbalancers`: Ports to load balance and which machines to forward to. Key of this object will be used as the name of the load balancer frontends/backends
  * `port`: Port to load balance.
  * `target_port`: Port to the backend servers.
  * `backend_servers`: List of servers that traffic to the port should be forwarded to.
* `router_enable`: If a router should be connected to the private network or not
* `gateways`: Gateways that should be connected to the router, requires `router_enable` is set to true
  * `features`: List of features for the gateway
  * `plan`: Plan to use for the gateway
  * `connections`: The connections and tunnels to create for the gateway
    * `type`: What type of connection
    * `local_routes`: Map of local routes for the connection
      * `type`: Type of route
      * `static_network`: Destination prefix of the route; needs to be a valid IPv4 prefix
    * `remote_routes`: Map of remote routes for the connection
      * `type`: Type of route
      * `static_network`: Destination prefix of the route; needs to be a valid IPv4 prefix
    * `tunnels`: The tunnels to create for this connection
      * `remote_address`: The remote address for the tunnel
      * `ipsec_properties`: Set properties of IPSec; if not set, defaults will be used
        * `child_rekey_time`: IKE child SA rekey time in seconds
        * `dpd_delay`: Delay before sending Dead Peer Detection packets if no traffic is detected, in seconds
        * `dpd_timeout`: Timeout period for DPD reply before considering the peer to be dead, in seconds
        * `ike_lifetime`: Maximum IKE SA lifetime in seconds
        * `rekey_time`: IKE SA rekey time in seconds
        * `phase1_algorithms`: List of Phase 1 proposal algorithms
        * `phase1_dh_group_numbers`: List of Phase 1 Diffie-Hellman group numbers
        * `phase1_integrity_algorithms`: List of Phase 1 integrity algorithms
        * `phase2_algorithms`: List of Phase 2 Security Association algorithms
        * `phase2_dh_group_numbers`: List of Phase 2 Diffie-Hellman group numbers
        * `phase2_integrity_algorithms`: List of Phase 2 integrity algorithms
* `gateway_vpn_psks`: Separate variable for providing PSKs for connection tunnels. The environment variable can be exported in the following format `export TF_VAR_gateway_vpn_psks='{"${gateway-name}-${connection-name}-tunnel":{psk:"..."}}'`
* `static_routes`: Static routes to apply to the router, requires `router_enable` is set to true
* `network_peerings`: Other UpCloud private networks to peer with, requires `router_enable` is set to true
* `server_groups`: Group servers together
  * `servers`: The servers that should be included in the group.
  * `anti_affinity_policy`: Defines if a server group is an anti-affinity group. The value can be "strict", "yes" or "no". "strict" places all servers in the group on separate compute hosts and does not allow two group members on the same host; "yes" is a best-effort policy that tries to put servers on different hosts, but this is not guaranteed.
@@ -153,46 +153,3 @@ server_groups = {
  # anti_affinity_policy = "yes"
  # }
}

router_enable = false
gateways = {
  # "gateway" : {
  #   features: [ "vpn" ]
  #   plan = "production"
  #   connections = {
  #     "connection" = {
  #       name = "connection"
  #       type = "ipsec"
  #       remote_routes = {
  #         "them" = {
  #           type = "static"
  #           static_network = "1.2.3.4/24"
  #         }
  #       }
  #       local_routes = {
  #         "me" = {
  #           type = "static"
  #           static_network = "4.3.2.1/24"
  #         }
  #       }
  #       tunnels = {
  #         "tunnel1" = {
  #           remote_address = "1.2.3.4"
  #         }
  #       }
  #     }
  #   }
  # }
}
# gateway_vpn_psks = {} # Should be loaded as an environment variable
static_routes = {
  # "route": {
  #   route: "1.2.3.4/24"
  #   nexthop: "4.3.2.1"
  # }
}
network_peerings = {
  # "peering": {
  #   remote_network: "uuid"
  # }
}
@@ -36,15 +36,8 @@ module "kubernetes" {
  loadbalancer_enabled = var.loadbalancer_enabled
  loadbalancer_plan = var.loadbalancer_plan
  loadbalancer_outbound_proxy_protocol = var.loadbalancer_proxy_protocol ? "v2" : ""
  loadbalancer_legacy_network = var.loadbalancer_legacy_network
  loadbalancers = var.loadbalancers

  router_enable = var.router_enable
  gateways = var.gateways
  gateway_vpn_psks = var.gateway_vpn_psks
  static_routes = var.static_routes
  network_peerings = var.network_peerings

  server_groups = var.server_groups
}
@@ -20,36 +20,6 @@ locals {
    ]
  ])

  gateway_connections = flatten([
    for gateway_name, gateway in var.gateways : [
      for connection_name, connection in gateway.connections : {
        "gateway_id" = upcloud_gateway.gateway[gateway_name].id
        "gateway_name" = gateway_name
        "connection_name" = connection_name
        "type" = connection.type
        "local_routes" = connection.local_routes
        "remote_routes" = connection.remote_routes
      }
    ]
  ])

  gateway_connection_tunnels = flatten([
    for gateway_name, gateway in var.gateways : [
      for connection_name, connection in gateway.connections : [
        for tunnel_name, tunnel in connection.tunnels : {
          "gateway_id" = upcloud_gateway.gateway[gateway_name].id
          "gateway_name" = gateway_name
          "connection_id" = upcloud_gateway_connection.gateway_connection["${gateway_name}-${connection_name}"].id
          "connection_name" = connection_name
          "tunnel_name" = tunnel_name
          "local_address_name" = tolist(upcloud_gateway.gateway[gateway_name].address).0.name
          "remote_address" = tunnel.remote_address
          "ipsec_properties" = tunnel.ipsec_properties
        }
      ]
    ]
  ])

  # If prefix is set, all resources will be prefixed with "${var.prefix}-"
  # Else don't prefix with anything
  resource-prefix = "%{if var.prefix != ""}${var.prefix}-%{endif}"
@@ -60,13 +30,10 @@ resource "upcloud_network" "private" {
  zone = var.zone

  ip_network {
    address = var.private_network_cidr
    dhcp_default_route = var.router_enable
    dhcp = true
    family = "IPv4"
    address = var.private_network_cidr
    dhcp = true
    family = "IPv4"
  }

  router = var.router_enable ? upcloud_router.router[0].id : null
}

resource "upcloud_storage" "additional_disks" {
@@ -549,31 +516,16 @@ resource "upcloud_loadbalancer" "lb" {
  name = "${local.resource-prefix}lb"
  plan = var.loadbalancer_plan
  zone = var.private_cloud ? var.public_zone : var.zone
  network = var.loadbalancer_legacy_network ? upcloud_network.private.id : null

  dynamic "networks" {
    for_each = var.loadbalancer_legacy_network ? [] : [1]

    content {
      name = "Private-Net"
      type = "private"
      family = "IPv4"
      network = upcloud_network.private.id
    }
  networks {
    name = "Private-Net"
    type = "private"
    family = "IPv4"
    network = upcloud_network.private.id
  }

  dynamic "networks" {
    for_each = var.loadbalancer_legacy_network ? [] : [1]

    content {
      name = "Public-Net"
      type = "public"
      family = "IPv4"
    }
  }

  lifecycle {
    ignore_changes = [ maintenance_dow, maintenance_time ]
  networks {
    name = "Public-Net"
    type = "public"
    family = "IPv4"
  }
}
@@ -595,21 +547,8 @@ resource "upcloud_loadbalancer_frontend" "lb_frontend" {
  mode = "tcp"
  port = each.value.port
  default_backend_name = upcloud_loadbalancer_backend.lb_backend[each.key].name

  dynamic "networks" {
    for_each = var.loadbalancer_legacy_network ? [] : [1]

    content {
      name = "Public-Net"
    }
  }

  dynamic "networks" {
    for_each = each.value.allow_internal_frontend ? [1] : []

    content {
      name = "Private-Net"
    }
  networks {
    name = "Public-Net"
  }
}
@@ -640,111 +579,3 @@ resource "upcloud_server_group" "server_groups" {
    ignore_changes = [members]
  }
}

resource "upcloud_router" "router" {
  count = var.router_enable ? 1 : 0

  name = "${local.resource-prefix}router"

  dynamic "static_route" {
    for_each = var.static_routes

    content {
      name = static_route.key

      nexthop = static_route.value["nexthop"]
      route = static_route.value["route"]
    }
  }

}

resource "upcloud_gateway" "gateway" {
  for_each = var.router_enable ? var.gateways : {}
  name = "${local.resource-prefix}${each.key}-gateway"
  zone = var.zone

  features = each.value.features
  plan = each.value.plan

  router {
    id = upcloud_router.router[0].id
  }
}

resource "upcloud_gateway_connection" "gateway_connection" {
  for_each = {
    for gc in local.gateway_connections : "${gc.gateway_name}-${gc.connection_name}" => gc
  }

  gateway = each.value.gateway_id
  name = "${local.resource-prefix}${each.key}-gateway-connection"
  type = each.value.type

  dynamic "local_route" {
    for_each = each.value.local_routes

    content {
      name = local_route.key
      type = local_route.value["type"]
      static_network = local_route.value["static_network"]
    }
  }

  dynamic "remote_route" {
    for_each = each.value.remote_routes

    content {
      name = remote_route.key
      type = remote_route.value["type"]
      static_network = remote_route.value["static_network"]
    }
  }
}

resource "upcloud_gateway_connection_tunnel" "gateway_connection_tunnel" {
  for_each = {
    for gct in local.gateway_connection_tunnels : "${gct.gateway_name}-${gct.connection_name}-${gct.tunnel_name}-tunnel" => gct
  }

  connection_id = each.value.connection_id
  name = each.key
  local_address_name = each.value.local_address_name
  remote_address = each.value.remote_address

  ipsec_auth_psk {
    psk = var.gateway_vpn_psks[each.key].psk
  }

  dynamic "ipsec_properties" {
    for_each = each.value.ipsec_properties != null ? { "ip": each.value.ipsec_properties } : {}

    content {
      child_rekey_time = ipsec_properties.value["child_rekey_time"]
      dpd_delay = ipsec_properties.value["dpd_delay"]
      dpd_timeout = ipsec_properties.value["dpd_timeout"]
      ike_lifetime = ipsec_properties.value["ike_lifetime"]
      rekey_time = ipsec_properties.value["rekey_time"]
      phase1_algorithms = ipsec_properties.value["phase1_algorithms"]
      phase1_dh_group_numbers = ipsec_properties.value["phase1_dh_group_numbers"]
      phase1_integrity_algorithms = ipsec_properties.value["phase1_integrity_algorithms"]
      phase2_algorithms = ipsec_properties.value["phase2_algorithms"]
      phase2_dh_group_numbers = ipsec_properties.value["phase2_dh_group_numbers"]
      phase2_integrity_algorithms = ipsec_properties.value["phase2_integrity_algorithms"]
    }
  }
}

resource "upcloud_network_peering" "peering" {
  for_each = var.network_peerings

  name = "${local.resource-prefix}${each.key}"

  network {
    uuid = upcloud_network.private.id
  }

  peer_network {
    uuid = each.value.remote_network
  }
}
@@ -98,19 +98,13 @@ variable "loadbalancer_outbound_proxy_protocol" {
  type = string
}

variable "loadbalancer_legacy_network" {
  type = bool
  default = false
}

variable "loadbalancers" {
  description = "Load balancers"

  type = map(object({
    port = number
    target_port = number
    allow_internal_frontend = optional(bool)
    backend_servers = list(string)
    port = number
    target_port = number
    backend_servers = list(string)
  }))
}
@@ -121,72 +115,3 @@ variable "server_groups" {
    anti_affinity_policy = string
  }))
}

variable "router_enable" {
  description = "If a router should be enabled and connected to the private network or not"

  type = bool
}

variable "gateways" {
  description = "Gateways that should be connected to the router, requires router_enable is set to true"

  type = map(object({
    features = list(string)
    plan = optional(string)
    connections = optional(map(object({
      type = string
      local_routes = optional(map(object({
        type = string
        static_network = string
      })))
      remote_routes = optional(map(object({
        type = string
        static_network = string
      })))
      tunnels = optional(map(object({
        remote_address = string
        ipsec_properties = optional(object({
          child_rekey_time = number
          dpd_delay = number
          dpd_timeout = number
          ike_lifetime = number
          rekey_time = number
          phase1_algorithms = set(string)
          phase1_dh_group_numbers = set(string)
          phase1_integrity_algorithms = set(string)
          phase2_algorithms = set(string)
          phase2_dh_group_numbers = set(string)
          phase2_integrity_algorithms = set(string)
        }))
      })))
    })))
  }))
}

variable "gateway_vpn_psks" {
  description = "Separate variable for providing psks for connection tunnels"

  type = map(object({
    psk = string
  }))
  default = {}
  sensitive = true
}

variable "static_routes" {
  description = "Static routes to apply to the router, requires router_enable is set to true"

  type = map(object({
    nexthop = string
    route = string
  }))
}

variable "network_peerings" {
  description = "Other UpCloud private networks to peer with, requires router_enable is set to true"

  type = map(object({
    remote_network = string
  }))
}
@@ -3,7 +3,7 @@ terraform {
  required_providers {
    upcloud = {
      source = "UpCloudLtd/upcloud"
      version = "~>5.9.0"
      version = "~>5.6.0"
    }
  }
  required_version = ">= 0.13"
@@ -136,21 +136,13 @@ variable "loadbalancer_proxy_protocol" {
  default = false
}

variable "loadbalancer_legacy_network" {
  description = "If the loadbalancer should use the deprecated network field instead of networks blocks. You probably want to have this set to false"

  type = bool
  default = false
}

variable "loadbalancers" {
  description = "Load balancers"

  type = map(object({
    port = number
    target_port = number
    allow_internal_frontend = optional(bool, false)
    backend_servers = list(string)
    port = number
    target_port = number
    backend_servers = list(string)
  }))
  default = {}
}
@@ -164,76 +156,3 @@ variable "server_groups" {

  default = {}
}

variable "router_enable" {
  description = "If a router should be enabled and connected to the private network or not"

  type = bool
  default = false
}

variable "gateways" {
  description = "Gateways that should be connected to the router, requires router_enable is set to true"

  type = map(object({
    features = list(string)
    plan = optional(string)
    connections = optional(map(object({
      type = string
      local_routes = optional(map(object({
        type = string
        static_network = string
      })), {})
      remote_routes = optional(map(object({
        type = string
        static_network = string
      })), {})
      tunnels = optional(map(object({
        remote_address = string
        ipsec_properties = optional(object({
          child_rekey_time = number
          dpd_delay = number
          dpd_timeout = number
          ike_lifetime = number
          rekey_time = number
          phase1_algorithms = set(string)
          phase1_dh_group_numbers = set(string)
          phase1_integrity_algorithms = set(string)
          phase2_algorithms = set(string)
          phase2_dh_group_numbers = set(string)
          phase2_integrity_algorithms = set(string)
        }))
      })), {})
    })), {})
  }))
  default = {}
}

variable "gateway_vpn_psks" {
  description = "Separate variable for providing psks for connection tunnels"

  type = map(object({
    psk = string
  }))
  default = {}
  sensitive = true
}

variable "static_routes" {
  description = "Static routes to apply to the router, requires router_enable is set to true"

  type = map(object({
    nexthop = string
    route = string
  }))
  default = {}
}

variable "network_peerings" {
  description = "Other UpCloud private networks to peer with, requires router_enable is set to true"

  type = map(object({
    remote_network = string
  }))
  default = {}
}
@@ -3,7 +3,7 @@ terraform {
  required_providers {
    upcloud = {
      source = "UpCloudLtd/upcloud"
      version = "~>5.9.0"
      version = "~>5.6.0"
    }
  }
  required_version = ">= 0.13"
@@ -96,7 +96,7 @@ You can tune many more [settings][runtime-spec] by supplying your own file name
containerd_base_runtime_specs:
  cri-spec-custom.json: |
    {
      "ociVersion": "1.1.0",
      "ociVersion": "1.0.2-dev",
      "process": {
        "user": {
          "uid": 0,
@@ -79,24 +79,6 @@ The `allowed_annotations` configures `crio.conf` accordingly.
The `crio_remap_enable` configures the `/etc/subuid` and `/etc/subgid` files to add an entry for the **containers** user.
By default, 16M uids and gids are reserved for user namespaces (256 pods * 65536 uids/gids) at the end of the uid/gid space.

The `crio_default_capabilities` variable configures the default container capabilities for CRI-O.
The default capabilities are:

```yaml
crio_default_capabilities:
  - CHOWN
  - DAC_OVERRIDE
  - FSETID
  - FOWNER
  - SETGID
  - SETUID
  - SETPCAP
  - NET_BIND_SERVICE
  - KILL
```

You can add MKNOD to the list for a Rancher deployment, as sketched below.
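A minimal sketch of that override in inventory group_vars; the variable name and the default list come from the documentation above, while the file path is an assumption based on the sample inventory layout:

```yaml
# e.g. inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml (hypothetical path)
crio_default_capabilities:
  - CHOWN
  - DAC_OVERRIDE
  - FSETID
  - FOWNER
  - SETGID
  - SETUID
  - SETPCAP
  - NET_BIND_SERVICE
  - KILL
  - MKNOD  # added for Rancher deployments
```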

## Optional : NRI

[Node Resource Interface](https://github.com/containerd/nri) (NRI) is disabled by default for CRI-O. If you
@@ -41,12 +41,8 @@ Some variables of note include:
* *ansible_default_ipv4.address* - Not Kubespray-specific, but it is used if ip
  and access_ip are undefined
* *ip6* - IPv6 address to use for binding services. (host var)
  If *ipv6_stack* (*enable_dual_stack_networks* is deprecated) is set to ``true`` and *ip6* is defined,
  If *enable_dual_stack_networks* is set to ``true`` and *ip6* is defined,
  kubelet's ``--node-ip`` and the node's ``InternalIP`` will be the combination of *ip* and *ip6*.
  The same scheme applies to IPv6-only clusters.
* *access_ip6* - similar to ``access_ip``, but for IPv6
* *ansible_default_ipv6.address* - Not Kubespray-specific, but it is used if ip6
  and access_ip6 are undefined
* *loadbalancer_apiserver* - If defined, all hosts will connect to this
  address instead of localhost for kube_control_planes and kube_control_plane[0] for
  kube_nodes. See more details in the
@@ -56,20 +52,6 @@ Some variables of note include:
  `loadbalancer_apiserver`. See more details in the
  [HA guide](/docs/operations/ha-mode.md).

## Special network variables

These variables help avoid a large number of if/else constructs throughout the code associated with enabling different network stacks.
They are used in all templates.
By default, only ipv4_stack is enabled, so it is given priority in dual-stack mode.
Don't change these variables if you don't understand what you're doing.

* *main_access_ip* - equal to ``access_ip`` when ipv4_stack is enabled (even in the dual-stack case),
  and ``access_ip6`` for IPv6-only clusters
* *main_ip* - equal to ``ip`` when ipv4_stack is enabled (even in the dual-stack case),
  and ``ip6`` for IPv6-only clusters
* *main_access_ips* - list of ``access_ip`` and ``access_ip6`` for dual stack, or the single corresponding variable for single stack
* *main_ips* - list of ``ip`` and ``ip6`` for dual stack, or the single corresponding variable for single stack; see the inspection sketch below
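A quick way to see what these resolve to on a given host is an ad-hoc debug task (a sketch; the variable names come from the list above, and the play assumes an existing Kubespray inventory):

```yaml
# debug-main-ips.yml - minimal sketch, run against a Kubespray inventory
- hosts: k8s_cluster
  gather_facts: false
  tasks:
    - name: Show the derived main IP variables for each host
      ansible.builtin.debug:
        msg: "main_ip={{ main_ip }}, main_access_ip={{ main_access_ip }}, main_ips={{ main_ips }}"
```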

## Cluster variables

Kubernetes needs some parameters in order to get deployed. These are the
@@ -101,18 +83,12 @@ following default cluster parameters:
  (assertion not applicable to calico which doesn't use this as a hard limit, see
  [Calico IP block sizes](https://docs.projectcalico.org/reference/resources/ippool#block-sizes)).

* *enable_dual_stack_networks* - Setting this to true will provision both IPv4 and IPv6 networking for pods and services.

* *kube_service_addresses_ipv6* - Subnet for cluster IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1000/116``). Must not overlap with ``kube_pods_subnet_ipv6``.

* *kube_service_subnets* - All service subnets separated by commas (default is a mix of ``kube_service_addresses`` and ``kube_service_addresses_ipv6`` depending on the ``ipv4_stack`` and ``ipv6_stack`` options),
  for example ``10.233.0.0/18,fd85:ee78:d8a6:8607::1000/116`` for dual stack (ipv4_stack/ipv6_stack set to `true`).
  It is not recommended to change this variable directly.

* *kube_pods_subnet_ipv6* - Subnet for Pod IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1:0000/112``). Must not overlap with ``kube_service_addresses_ipv6``.

* *kube_pods_subnets* - All pod subnets separated by commas (default is a mix of ``kube_pods_subnet`` and ``kube_pods_subnet_ipv6`` depending on the ``ipv4_stack`` and ``ipv6_stack`` options),
  for example ``10.233.64.0/18,fd85:ee78:d8a6:8607::1:0000/112`` for dual stack (ipv4_stack/ipv6_stack set to `true`).
  It is not recommended to change this variable directly.

* *kube_network_node_prefix_ipv6* - Subnet allocated per-node for pod IPv6 IPs. The remaining bits in ``kube_pods_subnet_ipv6`` dictate how many kube_nodes can be in the cluster.

* *skydns_server* - Cluster IP for DNS (default is 10.233.0.3)
@@ -176,14 +152,9 @@ Note, if cloud providers have any use of the ``10.233.0.0/16``, like instances'
private addresses, make sure to pick other values for ``kube_service_addresses``
and ``kube_pods_subnet``, for example from the ``172.18.0.0/16``.

## Enabling Dual Stack (IPV4 + IPV6) or IPV6 only networking
## Enabling Dual Stack (IPV4 + IPV6) networking

The IPv4 stack is enabled via *ipv4_stack*, which is set to ``true`` by default.
The IPv6 stack is enabled via *ipv6_stack*, which is set to ``false`` by default.
This will use the default IPv4 and IPv6 subnets specified in the defaults file in the ``kubespray-defaults`` role, unless overridden of course. The default config will give you room for up to 256 nodes with 126 pods per node, and up to 4096 services.
Set both variables to ``true`` for Dual Stack mode.
IPv4 has higher priority in Dual Stack mode (e.g. in the variables `main_ip`, `main_access_ip` and others).
You can also build IPv6-only clusters by setting *ipv4_stack* to ``false``; see the sketch below.
If *enable_dual_stack_networks* is set to ``true``, Dual Stack networking will be enabled in the cluster. This will use the default IPv4 and IPv6 subnets specified in the defaults file in the ``kubespray-defaults`` role, unless overridden of course. The default config will give you room for up to 256 nodes with 126 pods per node, and up to 4096 services.
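A minimal group_vars sketch for the stack toggles described above (the file path follows the sample inventory layout and is an assumption):

```yaml
# e.g. inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml (hypothetical path)
ipv4_stack: true   # default
ipv6_stack: true   # set both to true for dual stack

# For an IPv6-only cluster instead:
# ipv4_stack: false
# ipv6_stack: true
```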

## DNS variables

@@ -88,7 +88,7 @@ $ pip install -r requirements.txt
$ vagrant up

# Access the cluster
$ export INV=.vagrant/provisioners/ansible/inventory
$ export INV=.vagrant/provisionners/ansible/inventory
$ export KUBECONFIG=${INV}/artifacts/admin.conf
# make the kubectl binary available
$ export PATH=$PATH:$PWD/$INV/artifacts
@@ -56,7 +56,7 @@
# crun_download_url: "{{ files_repo }}/github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}"

# [Optional] kata: only if you set kata_containers_enabled: true
# kata_containers_download_url: "{{ files_repo }}/github.com/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ image_arch }}.tar.xz"
# kata_containers_download_url: "{{ files_repo }}/github.com/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz"

# [Optional] cri-dockerd: only if you set container_manager: docker
# cri_dockerd_download_url: "{{ files_repo }}/github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz"
@@ -242,7 +242,7 @@ metallb_namespace: "metallb-system"
#   - pool2

argocd_enabled: false
# argocd_version: v2.14.5
# argocd_version: v2.11.0
# argocd_namespace: argocd
# Default password:
#   - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli
@@ -17,7 +17,7 @@ kube_token_dir: "{{ kube_config_dir }}/tokens"
kube_api_anonymous_auth: true

## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.32.2
kube_version: v1.32.0

# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
@@ -97,24 +97,27 @@ kube_pods_subnet: 10.233.64.0/18
#  - kubelet_max_pods: 110
kube_network_node_prefix: 24

# Configure Dual Stack networking (i.e. both IPv4 and IPv6)
enable_dual_stack_networks: false

# Kubernetes internal network for IPv6 services, unused block of space.
# This is only used if ipv6_stack is set to true
# This is only used if enable_dual_stack_networks is set to true
# This provides 4096 IPv6 IPs
kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116

# Internal network. When used, it will assign IPv6 addresses from this range to individual pods.
# This network must not already be in your network infrastructure!
# This is only used if ipv6_stack is set to true.
# This is only used if enable_dual_stack_networks is set to true.
# This provides room for 256 nodes with 254 pods per node.
kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112

# IPv6 subnet size allocated to each for pods.
# This is only used if ipv6_stack is set to true
# This is only used if enable_dual_stack_networks is set to true
# This provides room for 254 pods per node.
kube_network_node_prefix_ipv6: 120

# The port the API Server will be listening on.
kube_apiserver_ip: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}"
kube_apiserver_ip: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}"
kube_apiserver_port: 6443 # (https)

# Kube-proxy proxyMode configuration.
@@ -212,8 +215,8 @@ resolvconf_mode: host_resolvconf
# Deploy netchecker app to verify DNS resolve as an HTTP service
deploy_netchecker: false
# Ip address of the kubernetes skydns service
skydns_server: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(3) | ansible.utils.ipaddr('address') }}"
skydns_server_secondary: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(4) | ansible.utils.ipaddr('address') }}"
skydns_server: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(3) | ansible.utils.ipaddr('address') }}"
skydns_server_secondary: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(4) | ansible.utils.ipaddr('address') }}"
dns_domain: "{{ cluster_name }}"

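As an aside, the ``ansible.utils.ipaddr`` filter chains above carve well-known addresses out of the service subnet. A standalone sketch of what they evaluate to with the default ``10.233.0.0/18`` (requires the ``ansible.utils`` collection):

```yaml
# check-service-ips.yml - run with: ansible-playbook check-service-ips.yml
- hosts: localhost
  gather_facts: false
  vars:
    kube_service_addresses: 10.233.0.0/18
  tasks:
    - name: First host address of the service subnet (API server ClusterIP)
      ansible.builtin.debug:
        msg: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}"  # 10.233.0.1
    - name: Third host address (cluster DNS, skydns_server)
      ansible.builtin.debug:
        msg: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(3) | ansible.utils.ipaddr('address') }}"  # 10.233.0.3
```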

## Container runtime

@@ -11,7 +11,7 @@ calico_cni_name: k8s-pod-network

# Enables Internet connectivity from containers
# nat_outgoing: true
# nat_outgoing_ipv6: true
# nat_outgoing_ipv6: false

# Enables Calico CNI "host-local" IPAM plugin
# calico_ipam_host_local: true
@@ -154,7 +154,7 @@ cilium_l2announcements: false
# cilium_enable_hubble: false
### Enable Hubble-ui
### Installed by default when hubble is enabled. To disable set to false
# cilium_enable_hubble_ui: "{{ cilium_enable_hubble }}"
# cilium_enable_hubble_ui: "{{ cilium_enable_hubble }}
### Enable Hubble Metrics
# cilium_enable_hubble_metrics: false
### if cilium_enable_hubble_metrics: true
@@ -42,13 +42,16 @@ RUN apt update -q \
WORKDIR /kubespray
ADD ./requirements.txt /kubespray/requirements.txt
ADD ./tests/requirements.txt /kubespray/tests/requirements.txt
ADD ./roles/kubespray-defaults/defaults/main/main.yml /kubespray/roles/kubespray-defaults/defaults/main/main.yml


RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1 \
    && pip install --no-compile --no-cache-dir pip -U \
    && pip install --no-compile --no-cache-dir -r tests/requirements.txt \
    && pip install --no-compile --no-cache-dir -r requirements.txt \
    && curl -L https://dl.k8s.io/release/v1.32.3/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \
    && echo $(curl -L https://dl.k8s.io/release/v1.32.3/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \
    && KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main/main.yml) \
    && curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \
    && echo $(curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \
    && chmod a+x /usr/local/bin/kubectl \
    # Install Vagrant
    && curl -LO https://releases.hashicorp.com/vagrant/${VAGRANT_VERSION}/vagrant_${VAGRANT_VERSION}-1_$(dpkg --print-architecture).deb \
@@ -1,6 +1,6 @@
|
||||
ansible==9.13.0
|
||||
# Needed for community.crypto module
|
||||
cryptography==44.0.2
|
||||
cryptography==44.0.0
|
||||
# Needed for jinja2 json_query templating
|
||||
jmespath==1.0.1
|
||||
# Needed for ansible.utils.ipaddr
|
||||
|
||||
@@ -2,18 +2,22 @@
|
||||
role_name_check: 1
|
||||
dependency:
|
||||
name: galaxy
|
||||
driver:
|
||||
name: vagrant
|
||||
provider:
|
||||
name: libvirt
|
||||
platforms:
|
||||
- name: ubuntu20
|
||||
cloud_image: ubuntu-2004
|
||||
vm_cpu_cores: 1
|
||||
vm_memory: 512
|
||||
- name: adduser-01
|
||||
box: generic/ubuntu2004
|
||||
cpus: 1
|
||||
memory: 512
|
||||
provider_options:
|
||||
driver: kvm
|
||||
provisioner:
|
||||
name: ansible
|
||||
config_options:
|
||||
defaults:
|
||||
callbacks_enabled: profile_tasks
|
||||
timeout: 120
|
||||
playbooks:
|
||||
create: ../../../../tests/cloud_playbooks/create-packet.yml
|
||||
verifier:
|
||||
name: testinfra
|
||||
|
||||
@@ -2,11 +2,17 @@
|
||||
role_name_check: 1
|
||||
dependency:
|
||||
name: galaxy
|
||||
driver:
|
||||
name: vagrant
|
||||
provider:
|
||||
name: libvirt
|
||||
platforms:
|
||||
- name: bastion-01
|
||||
cloud_image: ubuntu-2004
|
||||
vm_cpu_cores: 1
|
||||
vm_memory: 512
|
||||
box: generic/ubuntu2004
|
||||
cpus: 1
|
||||
memory: 512
|
||||
provider_options:
|
||||
driver: kvm
|
||||
provisioner:
|
||||
name: ansible
|
||||
config_options:
|
||||
@@ -21,7 +27,5 @@ provisioner:
|
||||
bastion:
|
||||
hosts:
|
||||
bastion-01:
|
||||
playbooks:
|
||||
create: ../../../../tests/cloud_playbooks/create-packet.yml
|
||||
verifier:
|
||||
name: testinfra
|
||||
|
||||
@@ -2,6 +2,5 @@
|
||||
- name: Converge
|
||||
hosts: all
|
||||
gather_facts: false
|
||||
become: true
|
||||
roles:
|
||||
- role: bootstrap-os
|
||||
|
||||
@@ -2,23 +2,35 @@
|
||||
role_name_check: 1
|
||||
dependency:
|
||||
name: galaxy
|
||||
driver:
|
||||
name: vagrant
|
||||
provider:
|
||||
name: libvirt
|
||||
platforms:
|
||||
- name: ubuntu20
|
||||
cloud_image: ubuntu-2004
|
||||
vm_cpu_cores: 1
|
||||
vm_memory: 512
|
||||
box: generic/ubuntu2004
|
||||
cpus: 1
|
||||
memory: 512
|
||||
provider_options:
|
||||
driver: kvm
|
||||
- name: ubuntu22
|
||||
cloud_image: ubuntu-2204
|
||||
vm_cpu_cores: 1
|
||||
vm_memory: 512
|
||||
box: generic/ubuntu2204
|
||||
cpus: 1
|
||||
memory: 1024
|
||||
provider_options:
|
||||
driver: kvm
|
||||
- name: almalinux9
|
||||
cloud_image: almalinux-9
|
||||
vm_cpu_cores: 1
|
||||
vm_memory: 512
|
||||
- name: debian12
|
||||
cloud_image: debian-12
|
||||
vm_cpu_cores: 1
|
||||
vm_memory: 512
|
||||
box: almalinux/9
|
||||
cpus: 1
|
||||
memory: 512
|
||||
provider_options:
|
||||
driver: kvm
|
||||
- name: debian10
|
||||
box: generic/debian10
|
||||
cpus: 1
|
||||
memory: 512
|
||||
provider_options:
|
||||
driver: kvm
|
||||
provisioner:
|
||||
name: ansible
|
||||
config_options:
|
||||
@@ -31,7 +43,5 @@ provisioner:
|
||||
user:
|
||||
name: foo
|
||||
comment: My test comment
|
||||
playbooks:
|
||||
create: ../../../../tests/cloud_playbooks/create-packet.yml
|
||||
verifier:
|
||||
name: testinfra
|
||||
|
||||
@@ -92,7 +92,7 @@ containerd_registry_auth: []
|
||||
# Configure containerd service
|
||||
containerd_limit_proc_num: "infinity"
|
||||
containerd_limit_core: "infinity"
|
||||
containerd_limit_open_file_num: 1048576
|
||||
containerd_limit_open_file_num: "infinity"
|
||||
containerd_limit_mem_lock: "infinity"
|
||||
|
||||
# OS distributions that already support containerd
|
||||
@@ -122,7 +122,7 @@ enable_cdi: false
|
||||
# For containerd tracing configuration please check out the official documentation:
|
||||
# https://github.com/containerd/containerd/blob/main/docs/tracing.md
|
||||
containerd_tracing_enabled: false
|
||||
containerd_tracing_endpoint: "[::]:4317"
|
||||
containerd_tracing_endpoint: "0.0.0.0:4317"
|
||||
containerd_tracing_protocol: "grpc"
|
||||
containerd_tracing_sampling_ratio: 1.0
|
||||
containerd_tracing_service_name: "containerd"
|
||||
|
||||
@@ -1,30 +1,40 @@
---
role_name_check: 1
driver:
name: vagrant
provider:
name: libvirt
platforms:
- cloud_image: ubuntu-2004
name: ubuntu20
vm_cpu_cores: 1
vm_memory: 1024
node_groups:
- name: ubuntu20
box: generic/ubuntu2004
cpus: 1
memory: 1024
groups:
- kube_control_plane
- kube_node
- k8s_cluster
- cloud_image: debian-11
name: debian11
vm_cpu_cores: 1
vm_memory: 1024
node_groups:
provider_options:
driver: kvm
- name: debian11
box: generic/debian11
cpus: 1
memory: 1024
groups:
- kube_control_plane
- kube_node
- k8s_cluster
- cloud_image: almalinux-9
name: almalinux9
vm_cpu_cores: 1
vm_memory: 1024
node_groups:
provider_options:
driver: kvm
- name: almalinux9
box: almalinux/9
cpus: 1
memory: 1024
groups:
- kube_control_plane
- kube_node
- k8s_cluster
provider_options:
driver: kvm
provisioner:
name: ansible
env:
@@ -33,7 +43,5 @@ provisioner:
defaults:
callbacks_enabled: profile_tasks
timeout: 120
playbooks:
create: ../../../../../tests/cloud_playbooks/create-packet.yml
verifier:
name: testinfra

@@ -108,7 +108,7 @@

- name: Containerd | Copy containerd config file
template:
src: "{{ 'config.toml.j2' if containerd_version is version('2.0.0', '>=') else 'config-v1.toml.j2' }}"
src: config.toml.j2
dest: "{{ containerd_cfg_dir }}/config.toml"
owner: "root"
mode: "0640"

@@ -1,102 +0,0 @@
# This is for containerd v1 for compatibility
version = 2

root = "{{ containerd_storage_dir }}"
state = "{{ containerd_state_dir }}"
oom_score = {{ containerd_oom_score }}

{% if containerd_extra_args is defined %}
{{ containerd_extra_args }}
{% endif %}

[grpc]
max_recv_message_size = {{ containerd_grpc_max_recv_message_size }}
max_send_message_size = {{ containerd_grpc_max_send_message_size }}

[debug]
address = "{{ containerd_debug_address }}"
level = "{{ containerd_debug_level }}"
format = "{{ containerd_debug_format }}"
uid = {{ containerd_debug_uid }}
gid = {{ containerd_debug_gid }}

[metrics]
address = "{{ containerd_metrics_address }}"
grpc_histogram = {{ containerd_metrics_grpc_histogram | lower }}

[plugins]
[plugins."io.containerd.grpc.v1.cri"]
sandbox_image = "{{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}"
max_container_log_line_size = {{ containerd_max_container_log_line_size }}
enable_unprivileged_ports = {{ containerd_enable_unprivileged_ports | lower }}
enable_unprivileged_icmp = {{ containerd_enable_unprivileged_icmp | lower }}
enable_selinux = {{ containerd_enable_selinux | lower }}
disable_apparmor = {{ containerd_disable_apparmor | lower }}
tolerate_missing_hugetlb_controller = {{ containerd_tolerate_missing_hugetlb_controller | lower }}
disable_hugetlb_controller = {{ containerd_disable_hugetlb_controller | lower }}
image_pull_progress_timeout = "{{ containerd_image_pull_progress_timeout }}"
{% if enable_cdi %}
enable_cdi = true
cdi_spec_dirs = ["/etc/cdi", "/var/run/cdi"]
{% endif %}
[plugins."io.containerd.grpc.v1.cri".containerd]
default_runtime_name = "{{ containerd_default_runtime }}"
snapshotter = "{{ containerd_snapshotter }}"
discard_unpacked_layers = {{ containerd_discard_unpacked_layers | lower }}
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
{% for runtime in [containerd_runc_runtime] + containerd_additional_runtimes %}
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.{{ runtime.name }}]
runtime_type = "{{ runtime.type }}"
runtime_engine = "{{ runtime.engine }}"
runtime_root = "{{ runtime.root }}"
{% if runtime.base_runtime_spec is defined %}
base_runtime_spec = "{{ containerd_cfg_dir }}/{{ runtime.base_runtime_spec }}"
{% endif %}

[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.{{ runtime.name }}.options]
{% for key, value in runtime.options.items() %}
{% if value | string != "true" and value | string != "false" %}
{{ key }} = "{{ value }}"
{% else %}
{{ key }} = {{ value }}
{% endif %}
{% endfor %}
{% endfor %}
{% if kata_containers_enabled %}
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata-qemu]
runtime_type = "io.containerd.kata-qemu.v2"
{% endif %}
{% if gvisor_enabled %}
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runsc]
runtime_type = "io.containerd.runsc.v1"
{% endif %}
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "{{ containerd_cfg_dir }}/certs.d"
{% for registry in containerd_registry_auth if registry['registry'] is defined %}
{% if (registry['username'] is defined and registry['password'] is defined) or registry['auth'] is defined %}
[plugins."io.containerd.grpc.v1.cri".registry.configs."{{ registry['registry'] }}".auth]
{% if registry['username'] is defined and registry['password'] is defined %}
password = "{{ registry['password'] }}"
username = "{{ registry['username'] }}"
{% else %}
auth = "{{ registry['auth'] }}"
{% endif %}
{% endif %}
{% endfor %}

{% if nri_enabled and containerd_version is version('1.7.0', '>=') %}
[plugins."io.containerd.nri.v1.nri"]
disable = false
{% endif %}

{% if containerd_tracing_enabled %}
[plugins."io.containerd.tracing.processor.v1.otlp"]
endpoint = "{{ containerd_tracing_endpoint }}"
protocol = "{{ containerd_tracing_protocol }}"
{% if containerd_tracing_protocol == "grpc" %}
insecure = false
{% endif %}
[plugins."io.containerd.internal.v1.tracing"]
sampling_ratio = {{ containerd_tracing_sampling_ratio }}
service_name = "{{ containerd_tracing_service_name }}"
{% endif %}
@@ -1,5 +1,4 @@
version = 3

version = 2
root = "{{ containerd_storage_dir }}"
state = "{{ containerd_state_dir }}"
oom_score = {{ containerd_oom_score }}
@@ -24,7 +23,8 @@ oom_score = {{ containerd_oom_score }}
grpc_histogram = {{ containerd_metrics_grpc_histogram | lower }}

[plugins]
[plugins."io.containerd.cri.v1.runtime"]
[plugins."io.containerd.grpc.v1.cri"]
sandbox_image = "{{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}"
max_container_log_line_size = {{ containerd_max_container_log_line_size }}
enable_unprivileged_ports = {{ containerd_enable_unprivileged_ports | lower }}
enable_unprivileged_icmp = {{ containerd_enable_unprivileged_icmp | lower }}
@@ -32,51 +32,57 @@ oom_score = {{ containerd_oom_score }}
disable_apparmor = {{ containerd_disable_apparmor | lower }}
tolerate_missing_hugetlb_controller = {{ containerd_tolerate_missing_hugetlb_controller | lower }}
disable_hugetlb_controller = {{ containerd_disable_hugetlb_controller | lower }}
image_pull_progress_timeout = "{{ containerd_image_pull_progress_timeout }}"
{% if enable_cdi %}
enable_cdi = true
cdi_spec_dirs = ["/etc/cdi", "/var/run/cdi"]
{% endif %}

[plugins."io.containerd.cri.v1.runtime".containerd]
default_runtime_name = "{{ containerd_default_runtime }}"
[plugins."io.containerd.cri.v1.runtime".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd]
default_runtime_name = "{{ containerd_default_runtime }}"
snapshotter = "{{ containerd_snapshotter }}"
discard_unpacked_layers = {{ containerd_discard_unpacked_layers | lower }}
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
{% for runtime in [containerd_runc_runtime] + containerd_additional_runtimes %}
[plugins."io.containerd.cri.v1.runtime".containerd.runtimes.{{ runtime.name }}]
runtime_type = "{{ runtime.type }}"
runtime_engine = "{{ runtime.engine }}"
runtime_root = "{{ runtime.root }}"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.{{ runtime.name }}]
runtime_type = "{{ runtime.type }}"
runtime_engine = "{{ runtime.engine }}"
runtime_root = "{{ runtime.root }}"
{% if runtime.base_runtime_spec is defined %}
base_runtime_spec = "{{ containerd_cfg_dir }}/{{ runtime.base_runtime_spec }}"
base_runtime_spec = "{{ containerd_cfg_dir }}/{{ runtime.base_runtime_spec }}"
{% endif %}

[plugins."io.containerd.cri.v1.runtime".containerd.runtimes.{{ runtime.name }}.options]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.{{ runtime.name }}.options]
{% for key, value in runtime.options.items() %}
{% if value | string != "true" and value | string != "false" %}
{{ key }} = "{{ value }}"
{{ key }} = "{{ value }}"
{% else %}
{{ key }} = {{ value }}
{{ key }} = {{ value }}
{% endif %}
{% endfor %}
{% endfor %}
{% if kata_containers_enabled %}
[plugins."io.containerd.cri.v1.runtime".containerd.runtimes.kata-qemu]
runtime_type = "io.containerd.kata-qemu.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata-qemu]
runtime_type = "io.containerd.kata-qemu.v2"
{% endif %}
{% if gvisor_enabled %}
[plugins."io.containerd.cri.v1.runtime".containerd.runtimes.runsc]
runtime_type = "io.containerd.runsc.v1"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runsc]
runtime_type = "io.containerd.runsc.v1"
{% endif %}
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "{{ containerd_cfg_dir }}/certs.d"
{% for registry in containerd_registry_auth if registry['registry'] is defined %}
{% if (registry['username'] is defined and registry['password'] is defined) or registry['auth'] is defined %}
[plugins."io.containerd.grpc.v1.cri".registry.configs."{{ registry['registry'] }}".auth]
{% if registry['username'] is defined and registry['password'] is defined %}
password = "{{ registry['password'] }}"
username = "{{ registry['username'] }}"
{% else %}
auth = "{{ registry['auth'] }}"
{% endif %}
{% endif %}
{% endfor %}

[plugins."io.containerd.cri.v1.images"]
snapshotter = "{{ containerd_snapshotter }}"
discard_unpacked_layers = {{ containerd_discard_unpacked_layers | lower }}
image_pull_progress_timeout = "{{ containerd_image_pull_progress_timeout }}"
[plugins."io.containerd.cri.v1.images".pinned_images]
sandbox = "{{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}"
[plugins."io.containerd.cri.v1.images".registry]
config_path = "{{ containerd_cfg_dir }}/certs.d"

{% if nri_enabled %}
{% if nri_enabled and containerd_version is version('1.7.0', '>=') %}
[plugins."io.containerd.nri.v1.nri"]
disable = false
{% endif %}

@@ -1,18 +1,28 @@
---
role_name_check: 1
driver:
name: vagrant
provider:
name: libvirt
platforms:
- name: almalinux9
cloud_image: almalinux-9
vm_cpu_cores: 1
vm_memory: 1024
node_groups:
box: almalinux/9
cpus: 1
memory: 1024
nested: true
groups:
- kube_control_plane
provider_options:
driver: kvm
- name: ubuntu20
cloud_image: ubuntu-2004
vm_cpu_cores: 1
vm_memory: 1024
node_groups:
box: generic/ubuntu2004
cpus: 1
memory: 1024
nested: true
groups:
- kube_control_plane
provider_options:
driver: kvm
provisioner:
name: ansible
env:
@@ -25,7 +35,5 @@ provisioner:
group_vars:
all:
become: true
playbooks:
create: ../../../../../tests/cloud_playbooks/create-packet.yml
verifier:
name: testinfra

@@ -7,7 +7,7 @@ Requires=cri-dockerd.socket

[Service]
Type=notify
ExecStart={{ bin_dir }}/cri-dockerd --container-runtime-endpoint {{ cri_socket }} --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin --network-plugin=cni --pod-cidr={{ kube_pods_subnets }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_version }} --log-level {{ cri_dockerd_log_level }} {% if ipv6_stack %}--ipv6-dual-stack=True{% endif %}
ExecStart={{ bin_dir }}/cri-dockerd --container-runtime-endpoint {{ cri_socket }} --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin --network-plugin=cni --pod-cidr={{ kube_pods_subnet }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_version }} --log-level {{ cri_dockerd_log_level }} {% if enable_dual_stack_networks %}--ipv6-dual-stack=True{% endif %}

ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0

@@ -37,7 +37,7 @@ crio_signature_policy: "{% if ansible_os_family == 'ClearLinux' %}/usr/share/def

crio_stream_port: "10010"

crio_required_version: "{{ kube_version | regex_replace('^(?P<major>\\d+).(?P<minor>\\d+).(?P<patch>\\d+)$', '\\g<major>.\\g<minor>') }}"
crio_required_version: "{{ kube_version | regex_replace('^v(?P<major>\\d+).(?P<minor>\\d+).(?P<patch>\\d+)$', '\\g<major>.\\g<minor>') }}"

crio_root: "/var/lib/containers/storage"

@@ -99,15 +99,3 @@ crio_man_files:

# If set to true, it will enable the CRIU support in cri-o
crio_criu_support_enabled: false

# Configure default_capabilities in crio.conf
crio_default_capabilities:
- CHOWN
- DAC_OVERRIDE
- FSETID
- FOWNER
- SETGID
- SETUID
- SETPCAP
- NET_BIND_SERVICE
- KILL

@@ -1,38 +1,50 @@
---
role_name_check: 1
driver:
name: vagrant
provider:
name: libvirt
platforms:
- name: ubuntu20
cloud_image: ubuntu-2004
vm_cpu_cores: 2
vm_memory: 1024
node_groups:
box: generic/ubuntu2004
cpus: 2
memory: 1024
groups:
- kube_control_plane
- kube_node
- k8s_cluster
provider_options:
driver: kvm
- name: almalinux9
cloud_image: almalinux-9
vm_cpu_cores: 2
vm_memory: 1024
node_groups:
box: almalinux/9
cpus: 2
memory: 1024
groups:
- kube_control_plane
- kube_node
- k8s_cluster
provider_options:
driver: kvm
- name: fedora
cloud_image: fedora-39
vm_cpu_cores: 2
vm_memory: 1024
node_groups:
box: fedora/38-cloud-base
cpus: 2
memory: 2048
groups:
- kube_control_plane
- kube_node
- k8s_cluster
- name: debian12
cloud_image: debian-12
vm_cpu_cores: 2
vm_memory: 1024
node_groups:
provider_options:
driver: kvm
- name: debian10
box: generic/debian10
cpus: 2
memory: 1024
groups:
- kube_control_plane
- kube_node
- k8s_cluster
provider_options:
driver: kvm
provisioner:
name: ansible
env:
@@ -41,7 +53,5 @@ provisioner:
defaults:
callbacks_enabled: profile_tasks
timeout: 120
playbooks:
create: ../../../../../tests/cloud_playbooks/create-packet.yml
verifier:
name: testinfra

@@ -1,8 +1,8 @@
---
- name: Cri-o | include vars/v1.29.yml
include_vars: v1.29.yml
when: crio_version is version("1.29.0", operator=">=")
when: crio_version is version("v1.29.0", operator=">=")

- name: Cri-o | include vars/v1.31.yml
include_vars: v1.31.yml
when: crio_version is version("1.31.0", operator=">=")
when: crio_version is version("v1.31.0", operator=">=")

@@ -19,7 +19,7 @@

- name: CRI-O | Remove cri-o apt repo
apt_repository:
repo: "deb {{ crio_download_crio }}v{{ crio_version }}/{{ crio_kubic_debian_repo_name }}/ /"
repo: "deb {{ crio_download_crio }}{{ crio_version }}/{{ crio_kubic_debian_repo_name }}/ /"
state: absent
filename: devel-kubic-libcontainers-stable-cri-o
when: crio_kubic_debian_repo_name is defined
@@ -36,7 +36,7 @@

- name: CRI-O | Remove CRI-O kubic yum repo
yum_repository:
name: "devel_kubic_libcontainers_stable_cri-o_v{{ crio_version }}"
name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}"
state: absent
when:
- ansible_os_family == "RedHat"

@@ -155,9 +155,17 @@ cgroup_manager = "{{ crio_cgroup_manager }}"
# only the capabilities defined in the containers json file by the user/kube
# will be added.
default_capabilities = [
{%- for item in crio_default_capabilities %}
"{{ item }}",
{%- endfor %}
"CHOWN",
"DAC_OVERRIDE",
"FSETID",
"FOWNER",
"NET_RAW",
"SETGID",
"SETUID",
"SETPCAP",
"NET_BIND_SERVICE",
"SYS_CHROOT",
"KILL",
]

# List of default sysctls. If it is empty or commented out, only the sysctls
@@ -374,7 +382,7 @@ enable_metrics = {{ crio_enable_metrics | bool | lower }}
# The port on which the metrics server will listen.
metrics_port = {{ crio_metrics_port }}

{% if nri_enabled and crio_version is version('1.26.0', operator='>=') %}
{% if nri_enabled and crio_version is version('v1.26.0', operator='>=') %}
[crio.nri]

enable_nri=true

@@ -8,7 +8,6 @@
download_force_cache: "{{ true if download_run_once else download_force_cache }}"

- name: Download_file | Show url of file to download
when: unsafe_show_logs | bool
debug:
msg: "{{ download.url }}"
run_once: "{{ download_run_once }}"
@@ -62,7 +61,7 @@
dest: "{{ file_path_cached if download_force_cache else download.dest }}"
owner: "{{ omit if download_localhost else (download.owner | default(omit)) }}"
mode: "{{ omit if download_localhost else (download.mode | default(omit)) }}"
checksum: "{{ download.checksum }}"
checksum: "{{ 'sha256:' + download.sha256 if download.sha256 else omit }}"
validate_certs: "{{ download_validate_certs }}"
url_username: "{{ download.username | default(omit) }}"
url_password: "{{ download.password | default(omit) }}"

@@ -19,7 +19,7 @@
src: "kubeadm-images.yaml.j2"
dest: "{{ kube_config_dir }}/kubeadm-images.yaml"
mode: "0644"
validate: "{{ kubeadm_config_validate_enabled | ternary(bin_dir + '/kubeadm config validate --config %s', omit) }}"
validate: "{{ bin_dir }}/kubeadm config validate --config %s"
when:
- not skip_kubeadm_images | default(false)


@@ -6,7 +6,7 @@ nodeRegistration:
apiVersion: kubeadm.k8s.io/{{ kubeadm_config_api_version }}
kind: ClusterConfiguration
imageRepository: {{ kube_image_repo }}
kubernetesVersion: v{{ kube_version }}
kubernetesVersion: {{ kube_version }}
etcd:
{% if etcd_deployment_type == "kubeadm" %}
local:

@@ -34,6 +34,8 @@ etcd_script_dir: "{{ bin_dir }}/etcd-scripts"
etcd_heartbeat_interval: "250"
etcd_election_timeout: "5000"

# etcd_snapshot_count: "10000"

etcd_metrics: "basic"

# Define in inventory to set a separate port for etcd to expose metrics on

@@ -24,7 +24,7 @@

- name: Wait for etcd up
uri:
url: "https://{% if 'etcd' in group_names %}{{ etcd_address | ansible.utils.ipwrap }}{% else %}127.0.0.1{% endif %}:2379/health"
url: "https://{% if 'etcd' in group_names %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health"
validate_certs: false
client_cert: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem"
client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem"
@@ -39,7 +39,7 @@

- name: Wait for etcd-events up
uri:
url: "https://{% if 'etcd' in group_names %}{{ etcd_address | ansible.utils.ipwrap }}{% else %}127.0.0.1{% endif %}:2383/health"
url: "https://{% if 'etcd' in group_names %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2383/health"
validate_certs: false
client_cert: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem"
client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem"

@@ -145,7 +145,7 @@
ETCDCTL_ENDPOINTS: "{{ etcd_events_access_addresses }}"

- name: Configure | Check if member is in etcd cluster
shell: "{{ bin_dir }}/etcdctl member list | grep -w -q {{ etcd_access_address | replace('[', '') | replace(']', '') }}"
shell: "{{ bin_dir }}/etcdctl member list | grep -w -q {{ etcd_access_address }}"
register: etcd_member_in_cluster
ignore_errors: true # noqa ignore-errors
changed_when: false
@@ -163,7 +163,7 @@
ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"

- name: Configure | Check if member is in etcd-events cluster
shell: "{{ bin_dir }}/etcdctl member list | grep -w -q {{ etcd_access_address | replace('[', '') | replace(']', '') }}"
shell: "{{ bin_dir }}/etcdctl member list | grep -w -q {{ etcd_access_address }}"
register: etcd_events_member_in_cluster
ignore_errors: true # noqa ignore-errors
changed_when: false

@@ -3,7 +3,6 @@
command: "{{ bin_dir }}/etcd --version"
register: etcd_current_host_version
# There's a chance this play could run before etcd is installed at all
# TODO: figure out whether this happens. "A chance" is not enough information
ignore_errors: true
when: etcd_cluster_setup

@@ -12,18 +11,18 @@
notify: Restart etcd
when:
- etcd_cluster_setup
- etcd_version not in etcd_current_host_version.stdout | default('')
- etcd_version.lstrip('v') not in etcd_current_host_version.stdout | default('')

- name: Restart etcd-events if necessary
command: /bin/true
notify: Restart etcd-events
when:
- etcd_events_cluster_setup
- etcd_version not in etcd_current_host_version.stdout | default('')
- etcd_version.lstrip('v') not in etcd_current_host_version.stdout | default('')

- name: Install | Copy etcd binary from download dir
copy:
src: "{{ local_release_dir }}/etcd-v{{ etcd_version }}-linux-{{ host_architecture }}/{{ item }}"
src: "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-{{ host_architecture }}/{{ item }}"
dest: "{{ bin_dir }}/{{ item }}"
mode: "0755"
remote_src: true

@@ -19,7 +19,7 @@
etcd_events_peer_addresses: >-
{% for host in groups['etcd'] -%}
{%- if hostvars[host]['etcd_events_member_in_cluster'].rc == 0 -%}
{{ "etcd" + loop.index | string }}="https://{{ hostvars[host].etcd_events_access_address | default(hostvars[host]['main_ip']) | ansible.utils.ipwrap }}:2382",
{{ "etcd" + loop.index | string }}=https://{{ hostvars[host].etcd_events_access_address | default(hostvars[host].ip | default(hostvars[host]['fallback_ip'])) }}:2382,
{%- endif -%}
{%- if loop.last -%}
{{ etcd_member_name }}={{ etcd_events_peer_url }}

@@ -20,7 +20,7 @@
etcd_peer_addresses: >-
{% for host in groups['etcd'] -%}
{%- if hostvars[host]['etcd_member_in_cluster'].rc == 0 -%}
{{ "etcd" + loop.index | string }}="https://{{ hostvars[host].etcd_access_address | default(hostvars[host]['main_ip']) | ansible.utils.ipwrap }}:2380",
{{ "etcd" + loop.index | string }}=https://{{ hostvars[host].etcd_access_address | default(hostvars[host].ip | default(hostvars[host]['fallback_ip'])) }}:2380,
{%- endif -%}
{%- if loop.last -%}
{{ etcd_member_name }}={{ etcd_peer_url }}

@@ -4,11 +4,11 @@ ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_events_peer_url }}
ETCD_INITIAL_CLUSTER_STATE={% if etcd_events_cluster_is_healthy.rc == 0 | bool %}existing{% else %}new{% endif %}

ETCD_METRICS={{ etcd_metrics }}
ETCD_LISTEN_CLIENT_URLS=https://{{ etcd_address | ansible.utils.ipwrap }}:2383,https://127.0.0.1:2383
ETCD_LISTEN_CLIENT_URLS=https://{{ etcd_address }}:2383,https://127.0.0.1:2383
ETCD_ELECTION_TIMEOUT={{ etcd_election_timeout }}
ETCD_HEARTBEAT_INTERVAL={{ etcd_heartbeat_interval }}
ETCD_INITIAL_CLUSTER_TOKEN=k8s_events_etcd
ETCD_LISTEN_PEER_URLS=https://{{ etcd_address | ansible.utils.ipwrap }}:2382
ETCD_LISTEN_PEER_URLS=https://{{ etcd_address }}:2382
ETCD_NAME={{ etcd_member_name }}-events
ETCD_PROXY=off
ETCD_INITIAL_CLUSTER={{ etcd_events_peer_addresses }}

@@ -8,13 +8,13 @@ ETCD_METRICS={{ etcd_metrics }}
{% if etcd_listen_metrics_urls is defined %}
ETCD_LISTEN_METRICS_URLS={{ etcd_listen_metrics_urls }}
{% elif etcd_metrics_port is defined %}
ETCD_LISTEN_METRICS_URLS=http://{{ etcd_address | ansible.utils.ipwrap }}:{{ etcd_metrics_port }},http://127.0.0.1:{{ etcd_metrics_port }}
ETCD_LISTEN_METRICS_URLS=http://{{ etcd_address }}:{{ etcd_metrics_port }},http://127.0.0.1:{{ etcd_metrics_port }}
{% endif %}
ETCD_LISTEN_CLIENT_URLS=https://{{ etcd_address | ansible.utils.ipwrap }}:2379,https://127.0.0.1:2379
ETCD_LISTEN_CLIENT_URLS=https://{{ etcd_address }}:2379,https://127.0.0.1:2379
ETCD_ELECTION_TIMEOUT={{ etcd_election_timeout }}
ETCD_HEARTBEAT_INTERVAL={{ etcd_heartbeat_interval }}
ETCD_INITIAL_CLUSTER_TOKEN=k8s_etcd
ETCD_LISTEN_PEER_URLS=https://{{ etcd_address | ansible.utils.ipwrap }}:2380
ETCD_LISTEN_PEER_URLS=https://{{ etcd_address }}:2380
ETCD_NAME={{ etcd_member_name }}
ETCD_PROXY=off
ETCD_INITIAL_CLUSTER={{ etcd_peer_addresses }}

@@ -42,16 +42,9 @@ DNS.{{ counter["dns"] }} = {{ etcd_alt_name }}{{ increment(counter, 'dns') }}
{% if hostvars[host]['access_ip'] is defined %}
IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip'] }}{{ increment(counter, 'ip') }}
{% endif %}
{% if hostvars[host]['access_ip6'] is defined %}
IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip6'] }}{{ increment(counter, 'ip') }}
{% endif %}
{% if ipv6_stack %}
IP.{{ counter["ip"] }} = {{ hostvars[host]['ip6'] | default(hostvars[host]['fallback_ip6']) }}{{ increment(counter, 'ip') }}
{% endif %}
IP.{{ counter["ip"] }} = {{ hostvars[host]['main_ip'] }}{{ increment(counter, 'ip') }}
IP.{{ counter["ip"] }} = {{ hostvars[host]['ip'] | default(hostvars[host]['fallback_ip']) }}{{ increment(counter, 'ip') }}
{% endfor %}
{% for cert_alt_ip in etcd_cert_alt_ips %}
IP.{{ counter["ip"] }} = {{ cert_alt_ip }}{{ increment(counter, 'ip') }}
{% endfor %}
IP.{{ counter["ip"] }} = 127.0.0.1{{ increment(counter, 'ip') }}
IP.{{ counter["ip"] }} = ::1
IP.{{ counter["ip"] }} = 127.0.0.1

@@ -29,7 +29,7 @@

- name: Copy etcdctl and etcdutl binary from download dir
copy:
src: "{{ local_release_dir }}/etcd-v{{ etcd_version }}-linux-{{ host_architecture }}/{{ item }}"
src: "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-{{ host_architecture }}/{{ item }}"
dest: "{{ bin_dir }}/{{ item }}"
mode: "0755"
remote_src: true

@@ -13,10 +13,10 @@ coredns_manifests:
- coredns-sa.yml.j2
- coredns-svc.yml.j2
- "{{ dns_autoscaler_manifests if enable_dns_autoscaler else [] }}"
- "{{ 'coredns-poddisruptionbudget.yml.j2' if coredns_pod_disruption_budget else [] }}"
- "{{ coredns-poddisruptionbudget.yml.j2 if coredns_pod_disruption_budget else [] }}"

nodelocaldns_manifests:
- nodelocaldns-config.yml.j2
- nodelocaldns-daemonset.yml.j2
- nodelocaldns-sa.yml.j2
- "{{ 'nodelocaldns-second-daemonset.yml.j2' if enable_nodelocaldns_secondary else [] }}"
- "{{ nodelocaldns-second-daemonset.yml.j2 if enable_nodelocaldns_secondary else [] }}"

@@ -1,6 +1,6 @@
---
argocd_enabled: false
argocd_version: 2.14.5
argocd_version: v2.11.0
argocd_namespace: argocd
# argocd_admin_password:
argocd_install_url: "https://raw.githubusercontent.com/argoproj/argo-cd/v{{ argocd_version }}/manifests/install.yaml"
argocd_install_url: "https://raw.githubusercontent.com/argoproj/argo-cd/{{ argocd_version }}/manifests/install.yaml"

@@ -1,4 +1,4 @@
---
gateway_api_enabled: false
gateway_api_version: 1.1.0
gateway_api_version: v1.1.0
gateway_api_experimental_channel: false

@@ -121,7 +121,7 @@ dependencies:
- role: kubernetes-apps/scheduler_plugins
when:
- scheduler_plugins_enabled
- kube_major_version is version('1.29', '<')
- kube_major_version is version('v1.29', '<')
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- scheduler_plugins

@@ -1716,7 +1716,7 @@ spec:
value: memberlist
- name: METALLB_DEPLOYMENT
value: controller
image: "{{ metallb_controller_image_repo }}:v{{ metallb_version }}"
image: "{{ metallb_controller_image_repo }}:{{ metallb_version }}"
livenessProbe:
failureThreshold: 3
httpGet:
@@ -1824,7 +1824,7 @@ spec:
secretKeyRef:
key: secretkey
name: memberlist
image: "{{ metallb_speaker_image_repo }}:v{{ metallb_version }}"
image: "{{ metallb_speaker_image_repo }}:{{ metallb_version }}"
livenessProbe:
failureThreshold: 3
httpGet:

@@ -101,7 +101,6 @@ rules:
verbs:
# read its own config
- get
- list
# create a default if none exists
- create
# update status

@@ -71,7 +71,7 @@
user_certs: "{{ admin_kubeconfig['users'][0]['user'] }}"
username: "kubernetes-admin-{{ cluster_name }}"
context: "kubernetes-admin-{{ cluster_name }}@{{ cluster_name }}"
override_cluster_name: "{{ {'clusters': [{'cluster': (cluster_infos | combine({'server': 'https://' + (external_apiserver_address | ansible.utils.ipwrap) + ':' + (external_apiserver_port | string)})), 'name': cluster_name}]} }}"
override_cluster_name: "{{ {'clusters': [{'cluster': (cluster_infos | combine({'server': 'https://' + external_apiserver_address + ':' + (external_apiserver_port | string)})), 'name': cluster_name}]} }}"
override_context: "{{ {'contexts': [{'context': {'user': username, 'cluster': cluster_name}, 'name': context}], 'current-context': context} }}"
override_user: "{{ {'users': [{'name': username, 'user': user_certs}]} }}"
when: kubeconfig_localhost

@@ -14,6 +14,8 @@ etcd_cert_alt_ips: []
etcd_heartbeat_interval: "250"
etcd_election_timeout: "5000"

# etcd_snapshot_count: "10000"

etcd_metrics: "basic"

## A dictionary of extra environment variables to add to etcd.env, formatted like:

@@ -4,7 +4,7 @@ kube_kubeadm_scheduler_extra_args: {}

# Associated interface must be reachable by the rest of the cluster, and by
# CLI/web clients.
kube_scheduler_bind_address: "::"
kube_scheduler_bind_address: 0.0.0.0

# ClientConnection options (e.g. Burst, QPS) except from kubeconfig.
kube_scheduler_client_conn_extra_opts: {}

@@ -6,7 +6,7 @@ upgrade_cluster_setup: false
# listen on a specific address/interface.
# NOTE: If you specific address/interface and use loadbalancer_apiserver_localhost
# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on control plane nodes on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too.
kube_apiserver_bind_address: "::"
kube_apiserver_bind_address: 0.0.0.0

# A port range to reserve for services with NodePort visibility.
# Inclusive at both ends of the range.
@@ -29,7 +29,7 @@ kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem

# Associated interfaces must be reachable by the rest of the cluster, and by
# CLI/web clients.
kube_controller_manager_bind_address: "::"
kube_controller_manager_bind_address: 0.0.0.0

# Leader election lease durations and timeouts for controller-manager
kube_controller_manager_leader_elect_lease_duration: 15s
@@ -110,11 +110,11 @@ kube_apiserver_admission_event_rate_limits: {}
## PodSecurityAdmission plugin configuration
kube_pod_security_use_default: false
kube_pod_security_default_enforce: baseline
kube_pod_security_default_enforce_version: "v{{ kube_major_version }}"
kube_pod_security_default_enforce_version: "{{ kube_major_version }}"
kube_pod_security_default_audit: restricted
kube_pod_security_default_audit_version: "v{{ kube_major_version }}"
kube_pod_security_default_audit_version: "{{ kube_major_version }}"
kube_pod_security_default_warn: restricted
kube_pod_security_default_warn_version: "v{{ kube_major_version }}"
kube_pod_security_default_warn_version: "{{ kube_major_version }}"
kube_pod_security_exemptions_usernames: []
kube_pod_security_exemptions_runtime_class_names: []
kube_pod_security_exemptions_namespaces:
@@ -242,15 +242,12 @@ kubeadm_upgrade_auto_cert_renewal: true

## Enable distributed tracing for kube-apiserver
kube_apiserver_tracing: false
kube_apiserver_tracing_endpoint: "[::]:4317"
kube_apiserver_tracing_endpoint: 0.0.0.0:4317
kube_apiserver_tracing_sampling_rate_per_million: 100

# Enable kubeadm file discovery if anonymous access has been removed
kubeadm_use_file_discovery: "{{ remove_anonymous_access }}"

# imagePullSerial specifies if image pulling performed by kubeadm must be done serially or in parallel. Default: true
kubeadm_image_pull_serial: true

# Supported asymmetric encryption algorithm types for the cluster's keys and certificates.
# can be one of RSA-2048(default), RSA-3072, RSA-4096, ECDSA-P256
# ref: https://kubernetes.io/docs/reference/config-api/kubeadm-config.v1beta4/#kubeadm-k8s-io-v1beta4-ClusterConfiguration

@@ -22,7 +22,7 @@
listen: Control plane | Restart apiserver

- name: Control plane | Remove apiserver container containerd/crio
shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name 'kube-apiserver*' -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
args:
executable: /bin/bash
register: remove_apiserver_container
@@ -44,7 +44,7 @@
listen: Control plane | Restart kube-scheduler

- name: Control plane | Remove scheduler container containerd/crio
shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name 'kube-scheduler*' -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-scheduler* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
args:
executable: /bin/bash
register: remove_scheduler_container
@@ -66,7 +66,7 @@
listen: Control plane | Restart kube-controller-manager

- name: Control plane | Remove controller manager container containerd/crio
shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name 'kube-controller-manager*' -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
args:
executable: /bin/bash
register: remove_cm_container
@@ -78,7 +78,7 @@

- name: Control plane | wait for kube-scheduler
vars:
endpoint: "{{ kube_scheduler_bind_address if kube_scheduler_bind_address != '::' else 'localhost' }}"
endpoint: "{{ kube_scheduler_bind_address if kube_scheduler_bind_address != '0.0.0.0' else 'localhost' }}"
uri:
url: https://{{ endpoint }}:10259/healthz
validate_certs: false
@@ -92,7 +92,7 @@

- name: Control plane | wait for kube-controller-manager
vars:
endpoint: "{{ kube_controller_manager_bind_address if kube_controller_manager_bind_address != '::' else 'localhost' }}"
endpoint: "{{ kube_controller_manager_bind_address if kube_controller_manager_bind_address != '0.0.0.0' else 'localhost' }}"
uri:
url: https://{{ endpoint }}:10257/healthz
validate_certs: false

@@ -1,10 +0,0 @@
---
- name: Kubeadm | Check api is up
uri:
url: "https://{{ ip | default(fallback_ip) }}:{{ kube_apiserver_port }}/healthz"
validate_certs: false
when: ('kube_control_plane' in group_names)
register: _result
retries: 60
delay: 5
until: _result.status == 200
@@ -4,7 +4,7 @@
# noqa: jinja[spacing]
kubeadm_discovery_address: >-
{%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
{{ first_kube_control_plane_address | ansible.utils.ipwrap }}:{{ kube_apiserver_port }}
{{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}
{%- else -%}
{{ kube_apiserver_endpoint | regex_replace('https://', '') }}
{%- endif %}
@@ -36,15 +36,15 @@
dest: "{{ kube_config_dir }}/kubeadm-controlplane.yaml"
mode: "0640"
backup: true
validate: "{{ kubeadm_config_validate_enabled | ternary(bin_dir + '/kubeadm config validate --config %s', omit) }}"
validate: "{{ bin_dir }}/kubeadm config validate --config %s"
when:
- inventory_hostname != first_kube_control_plane
- not kubeadm_already_run.stat.exists

- name: Wait for k8s apiserver
wait_for:
host: "{{ kubeadm_discovery_address | regex_replace('\\]?:\\d+$', '') | regex_replace('^\\[', '') }}"
port: "{{ kubeadm_discovery_address.split(':')[-1] }}"
host: "{{ kubeadm_discovery_address.split(':')[0] }}"
port: "{{ kubeadm_discovery_address.split(':')[1] }}"
timeout: 180
@@ -35,13 +35,12 @@
- "{{ kube_apiserver_ip }}"
- "localhost"
- "127.0.0.1"
- "::1"
sans_lb: "{{ [apiserver_loadbalancer_domain_name] if apiserver_loadbalancer_domain_name is defined else [] }}"
sans_lb_ip: "{{ [loadbalancer_apiserver.address] if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined else [] }}"
sans_supp: "{{ supplementary_addresses_in_ssl_keys if supplementary_addresses_in_ssl_keys is defined else [] }}"
sans_access_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'main_access_ip') | list | select('defined') | list }}"
sans_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'main_ip') | list | select('defined') | list }}"
sans_address: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_default_ipv6', 'ansible_default_ipv4', 'address']) | list | select('defined') | list }}"
sans_access_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'access_ip') | list | select('defined') | list }}"
sans_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'ip') | list | select('defined') | list }}"
sans_address: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | list | select('defined') | list }}"
sans_override: "{{ [kube_override_hostname] if kube_override_hostname else [] }}"
sans_hostname: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_hostname']) | list | select('defined') | list }}"
sans_fqdn: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_fqdn']) | list | select('defined') | list }}"
@@ -94,7 +93,7 @@
src: "kubeadm-config.{{ kubeadm_config_api_version }}.yaml.j2"
dest: "{{ kube_config_dir }}/kubeadm-config.yaml"
mode: "0640"
validate: "{{ kubeadm_config_validate_enabled | ternary(bin_dir + '/kubeadm config validate --config %s', omit) }}"
validate: "{{ bin_dir }}/kubeadm config validate --config %s"

- name: Kubeadm | Create directory to store admission control configurations
file:
@@ -229,7 +228,7 @@
- name: Kubeadm | Join other control plane nodes
include_tasks: kubeadm-secondary.yml

- name: Kubeadm | upgrade kubernetes cluster to {{ kube_version }}
- name: Kubeadm | upgrade kubernetes cluster
include_tasks: kubeadm-upgrade.yml
when:
- upgrade_cluster_setup

@@ -1,81 +1,56 @@
---
- name: Ensure kube-apiserver is up before upgrade
import_tasks: check-api.yml
- name: Kubeadm | Check api is up
uri:
url: "https://{{ ip | default(fallback_ip) }}:{{ kube_apiserver_port }}/healthz"
validate_certs: false
when: ('kube_control_plane' in group_names)
register: _result
retries: 60
delay: 5
until: _result.status == 200

# kubeadm-config.v1beta4 with UpgradeConfiguration requires some values that were previously allowed as args to be specified in the config file
- name: Kubeadm | Upgrade first control plane node
command: >-
timeout -k 600s 600s
{{ bin_dir }}/kubeadm upgrade apply -y v{{ kube_version }}
{%- if kubeadm_config_api_version == 'v1beta3' %}
{{ bin_dir }}/kubeadm
upgrade apply -y {{ kube_version }}
--certificate-renewal={{ kubeadm_upgrade_auto_cert_renewal }}
--ignore-preflight-errors={{ kubeadm_ignore_preflight_errors | join(',') }}
--allow-experimental-upgrades
--etcd-upgrade={{ (etcd_deployment_type == "kubeadm") | lower }}
{% if kubeadm_patches | length > 0 %}--patches={{ kubeadm_patches_dir }}{% endif %}
--force
{%- else %}
--config={{ kube_config_dir }}/kubeadm-config.yaml
{%- endif -%}
register: kubeadm_upgrade
# Retry is because upload config sometimes fails
retries: 3
until: kubeadm_upgrade.rc == 0
when: inventory_hostname == first_kube_control_plane
failed_when: kubeadm_upgrade.rc != 0 and "field is immutable" not in kubeadm_upgrade.stderr
environment:
PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
notify: Control plane | restart kubelet

- name: Kubeadm | Upgrade other control plane nodes
command: >-
{{ bin_dir }}/kubeadm upgrade node
{%- if kubeadm_config_api_version == 'v1beta3' %}
timeout -k 600s 600s
{{ bin_dir }}/kubeadm
upgrade apply -y {{ kube_version }}
--certificate-renewal={{ kubeadm_upgrade_auto_cert_renewal }}
--ignore-preflight-errors={{ kubeadm_ignore_preflight_errors | join(',') }}
--allow-experimental-upgrades
--etcd-upgrade={{ (etcd_deployment_type == "kubeadm") | lower }}
{% if kubeadm_patches | length > 0 %}--patches={{ kubeadm_patches_dir }}{% endif %}
{%- else %}
--config={{ kube_config_dir }}/kubeadm-config.yaml
{%- endif -%}
--force
register: kubeadm_upgrade
# Retry is because upload config sometimes fails
retries: 3
until: kubeadm_upgrade.rc == 0
when: inventory_hostname != first_kube_control_plane
failed_when: kubeadm_upgrade.rc != 0 and "field is immutable" not in kubeadm_upgrade.stderr
environment:
PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"

# kubeadm upgrade no longer reconciles ClusterConfiguration and KubeProxyConfiguration changes, this must be done separately after upgrade to ensure the latest config is applied
- name: Update kubeadm and kubelet configmaps after upgrade
command: "{{ bin_dir }}/kubeadm init phase upload-config all --config {{ kube_config_dir }}/kubeadm-config.yaml"
register: kubeadm_upload_config
# Retry is because upload config sometimes fails
retries: 3
until: kubeadm_upload_config.rc == 0
when:
- inventory_hostname == first_kube_control_plane

- name: Update kube-proxy configmap after upgrade
command: "{{ bin_dir }}/kubeadm init phase addon kube-proxy --config {{ kube_config_dir }}/kubeadm-config.yaml"
register: kube_proxy_upload_config
# Retry is because upload config sometimes fails
retries: 3
until: kube_proxy_upload_config.rc == 0
when:
- inventory_hostname == first_kube_control_plane
- ('addon/kube-proxy' not in kubeadm_init_phases_skip)

- name: Rewrite kubeadm managed etcd static pod manifests with updated configmap
command: "{{ bin_dir }}/kubeadm init phase etcd local --config {{ kube_config_dir }}/kubeadm-config.yaml"
when:
- etcd_deployment_type == "kubeadm"
notify: Control plane | restart kubelet

- name: Rewrite kubernetes control plane static pod manifests with updated configmap
command: "{{ bin_dir }}/kubeadm init phase control-plane all --config {{ kube_config_dir }}/kubeadm-config.yaml"
notify: Control plane | restart kubelet

- name: Flush kubelet handlers
meta: flush_handlers

- name: Ensure kube-apiserver is up after upgrade and control plane configuration updates
import_tasks: check-api.yml

- name: Kubeadm | Remove binding to anonymous user
command: "{{ kubectl }} -n kube-public delete rolebinding kubeadm:bootstrap-signer-clusterinfo --ignore-not-found"
when: remove_anonymous_access
@@ -85,8 +60,8 @@
|
||||
path: "{{ item }}"
|
||||
state: absent
|
||||
with_items:
|
||||
- /root/.kube/cache
|
||||
- /root/.kube/http-cache
|
||||
- /root/.kube/cache
|
||||
- /root/.kube/http-cache
|
||||
|
||||
# FIXME: https://github.com/kubernetes/kubeadm/issues/1318
|
||||
- name: Kubeadm | scale down coredns replicas to 0 if not using coredns dns_mode
|
||||
@@ -100,6 +75,6 @@
|
||||
until: scale_down_coredns is succeeded
|
||||
run_once: true
|
||||
when:
|
||||
- kubeadm_scale_down_coredns_enabled
|
||||
- dns_mode not in ['coredns', 'coredns_dual']
|
||||
- kubeadm_scale_down_coredns_enabled
|
||||
- dns_mode not in ['coredns', 'coredns_dual']
|
||||
changed_when: false
|
||||
|
||||
@@ -21,11 +21,11 @@
|
||||
- name: Create structured AuthorizationConfiguration file
|
||||
copy:
|
||||
content: "{{ authz_config | to_nice_yaml(indent=2, sort_keys=false) }}"
|
||||
dest: "{{ kube_config_dir }}/apiserver-authorization-config-{{ kube_apiserver_authorization_config_api_version }}.yaml"
|
||||
dest: "{{ kube_config_dir }}/apiserver-authorization-config.yaml"
|
||||
mode: "0640"
|
||||
vars:
|
||||
authz_config:
|
||||
apiVersion: apiserver.config.k8s.io/{{ kube_apiserver_authorization_config_api_version }}
|
||||
apiVersion: apiserver.config.k8s.io/{{ 'v1alpha1' if kube_version is version('v1.30.0', '<') else 'v1beta1' if kube_version is version('v1.32.0', '<') else 'v1' }}
|
||||
kind: AuthorizationConfiguration
|
||||
authorizers: "{{ kube_apiserver_authorization_config_authorizers }}"
|
||||
when: kube_apiserver_use_authorization_config_file
|
||||
@@ -105,13 +105,6 @@
|
||||
- name: Include kubeadm secondary server apiserver fixes
|
||||
include_tasks: kubeadm-fix-apiserver.yml
|
||||
|
||||
- name: Cleanup unused AuthorizationConfiguration file versions
|
||||
file:
|
||||
path: "{{ kube_config_dir }}/apiserver-authorization-config-{{ item }}.yaml"
|
||||
state: absent
|
||||
loop: "{{ ['v1alpha1', 'v1beta1', 'v1'] | reject('equalto', kube_apiserver_authorization_config_api_version) | list }}"
|
||||
when: kube_apiserver_use_authorization_config_file
|
||||
|
||||
- name: Include kubelet client cert rotation fixes
|
||||
include_tasks: kubelet-fix-client-cert-rotation.yml
|
||||
when: kubelet_rotate_certificates
|
||||
|
||||
@@ -7,7 +7,7 @@ bootstrapTokens:
ttl: "24h"
{% endif %}
localAPIEndpoint:
advertiseAddress: "{{ kube_apiserver_address }}"
advertiseAddress: {{ kube_apiserver_address }}
bindPort: {{ kube_apiserver_port }}
{% if kubeadm_certificate_key is defined %}
certificateKey: {{ kubeadm_certificate_key }}
@@ -41,7 +41,7 @@ etcd:
external:
endpoints:
{% for endpoint in etcd_access_addresses.split(',') %}
- "{{ endpoint }}"
- {{ endpoint }}
{% endfor %}
caFile: {{ etcd_cert_dir }}/{{ kube_etcd_cacert_file }}
certFile: {{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}
@@ -94,9 +94,9 @@ dns:
imageTag: {{ coredns_image_tag }}
networking:
dnsDomain: {{ dns_domain }}
serviceSubnet: "{{ kube_service_subnets }}"
serviceSubnet: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}"
{% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %}
podSubnet: "{{ kube_pods_subnets }}"
podSubnet: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}"
{% endif %}
{% if kubeadm_feature_gates %}
featureGates:
@@ -104,11 +104,11 @@ featureGates:
{{ feature | replace("=", ": ") }}
{% endfor %}
{% endif %}
kubernetesVersion: v{{ kube_version }}
kubernetesVersion: {{ kube_version }}
{% if kubeadm_config_api_fqdn is defined %}
controlPlaneEndpoint: "{{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}"
controlPlaneEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
{% else %}
controlPlaneEndpoint: "{{ main_ip | ansible.utils.ipwrap }}:{{ kube_apiserver_port }}"
controlPlaneEndpoint: {{ ip | default(fallback_ip) }}:{{ kube_apiserver_port }}
{% endif %}
certificatesDir: {{ kube_cert_dir }}
imageRepository: {{ kube_image_repo }}
@@ -127,11 +127,11 @@ apiServer:
anonymous-auth: "{{ kube_api_anonymous_auth }}"
{% endif %}
{% if kube_apiserver_use_authorization_config_file %}
authorization-config: "{{ kube_config_dir }}/apiserver-authorization-config-{{ kube_apiserver_authorization_config_api_version }}.yaml"
authorization-config: "{{ kube_config_dir }}/apiserver-authorization-config.yaml"
{% else %}
authorization-mode: {{ authorization_modes | join(',') }}
{% endif %}
bind-address: "{{ kube_apiserver_bind_address }}"
bind-address: {{ kube_apiserver_bind_address }}
{% if kube_apiserver_enable_admission_plugins | length > 0 %}
enable-admission-plugins: {{ kube_apiserver_enable_admission_plugins | join(',') }}
{% endif %}
@@ -147,7 +147,7 @@ apiServer:
etcd-servers-overrides: "/events#{{ etcd_events_access_addresses_semicolon }}"
{% endif %}
service-node-port-range: {{ kube_apiserver_node_port_range }}
service-cluster-ip-range: "{{ kube_service_subnets }}"
service-cluster-ip-range: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}"
kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}"
profiling: "{{ kube_profiling }}"
request-timeout: "{{ kube_apiserver_request_timeout }}"
@@ -249,8 +249,8 @@ apiServer:
{% endif %}
{% if kube_apiserver_use_authorization_config_file %}
- name: authorization-config
hostPath: {{ kube_config_dir }}/apiserver-authorization-config-{{ kube_apiserver_authorization_config_api_version }}.yaml
mountPath: {{ kube_config_dir }}/apiserver-authorization-config-{{ kube_apiserver_authorization_config_api_version }}.yaml
hostPath: {{ kube_config_dir }}/apiserver-authorization-config.yaml
mountPath: {{ kube_config_dir }}/apiserver-authorization-config.yaml
{% endif %}
{% if kubernetes_audit or kubernetes_audit_webhook %}
- name: {{ audit_policy_name }}
@@ -294,7 +294,7 @@ apiServer:
{% endif %}
certSANs:
{% for san in apiserver_sans %}
- {{ san }}
- "{{ san }}"
{% endfor %}
timeoutForControlPlane: 5m0s
controllerManager:
@@ -302,22 +302,22 @@ controllerManager:
node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }}
node-monitor-period: {{ kube_controller_node_monitor_period }}
{% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %}
cluster-cidr: "{{ kube_pods_subnets }}"
cluster-cidr: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}"
{% endif %}
service-cluster-ip-range: "{{ kube_service_subnets }}"
service-cluster-ip-range: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}"
{% if kube_network_plugin is defined and kube_network_plugin == "calico" and not calico_ipam_host_local %}
allocate-node-cidrs: "false"
{% else %}
{% if ipv4_stack %}
{% if enable_dual_stack_networks %}
node-cidr-mask-size-ipv4: "{{ kube_network_node_prefix }}"
{% endif %}
{% if ipv6_stack %}
node-cidr-mask-size-ipv6: "{{ kube_network_node_prefix_ipv6 }}"
{% else %}
node-cidr-mask-size: "{{ kube_network_node_prefix }}"
{% endif %}
{% endif %}
profiling: "{{ kube_profiling }}"
terminated-pod-gc-threshold: "{{ kube_controller_terminated_pod_gc_threshold }}"
bind-address: "{{ kube_controller_manager_bind_address }}"
bind-address: {{ kube_controller_manager_bind_address }}
leader-elect-lease-duration: {{ kube_controller_manager_leader_elect_lease_duration }}
leader-elect-renew-deadline: {{ kube_controller_manager_leader_elect_renew_deadline }}
{% if kube_controller_feature_gates or kube_feature_gates %}
@@ -350,7 +350,7 @@ controllerManager:
{% endif %}
scheduler:
extraArgs:
bind-address: "{{ kube_scheduler_bind_address }}"
bind-address: {{ kube_scheduler_bind_address }}
config: {{ kube_config_dir }}/kubescheduler-config.yaml
{% if kube_scheduler_feature_gates or kube_feature_gates %}
feature-gates: "{{ kube_scheduler_feature_gates | default(kube_feature_gates, true) | join(',') }}"
@@ -384,7 +384,7 @@ scheduler:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
bindAddress: "{{ kube_proxy_bind_address }}"
bindAddress: {{ kube_proxy_bind_address }}
clientConnection:
acceptContentTypes: {{ kube_proxy_client_accept_content_types }}
burst: {{ kube_proxy_client_burst }}
@@ -392,7 +392,7 @@ clientConnection:
kubeconfig: {{ kube_proxy_client_kubeconfig }}
qps: {{ kube_proxy_client_qps }}
{% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %}
clusterCIDR: "{{ kube_pods_subnets }}"
clusterCIDR: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}"
{% endif %}
configSyncPeriod: {{ kube_proxy_config_sync_period }}
conntrack:
@@ -401,7 +401,7 @@ conntrack:
tcpCloseWaitTimeout: {{ kube_proxy_conntrack_tcp_close_wait_timeout }}
tcpEstablishedTimeout: {{ kube_proxy_conntrack_tcp_established_timeout }}
enableProfiling: {{ kube_proxy_enable_profiling }}
healthzBindAddress: "{{ kube_proxy_healthz_bind_address }}"
healthzBindAddress: {{ kube_proxy_healthz_bind_address }}
hostnameOverride: "{{ kube_override_hostname }}"
iptables:
masqueradeAll: {{ kube_proxy_masquerade_all }}
@@ -417,7 +417,7 @@ ipvs:
tcpTimeout: {{ kube_proxy_tcp_timeout }}
tcpFinTimeout: {{ kube_proxy_tcp_fin_timeout }}
udpTimeout: {{ kube_proxy_udp_timeout }}
metricsBindAddress: "{{ kube_proxy_metrics_bind_address }}"
metricsBindAddress: {{ kube_proxy_metrics_bind_address }}
mode: {{ kube_proxy_mode }}
nodePortAddresses: {{ kube_proxy_nodeport_addresses }}
oomScoreAdj: {{ kube_proxy_oom_score_adj }}

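Both sides of the serviceSubnet/podSubnet hunks above aim at the same rendered output for a dual-stack cluster, one via the combined kube_service_subnets/kube_pods_subnets variables, the other by concatenating the IPv4 and IPv6 values. A sketch of the rendered networking stanza, assuming the stock kube_service_addresses of 10.233.0.0/18, the pod and IPv6 defaults shown further down in this diff, and dns_domain resolving to cluster.local:

  networking:
    dnsDomain: cluster.local
    serviceSubnet: "10.233.0.0/18,fd85:ee78:d8a6:8607::1000/116"
    podSubnet: "10.233.64.0/18,fd85:ee78:d8a6:8607::1:0000/112"
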
@@ -7,7 +7,7 @@ bootstrapTokens:
ttl: "24h"
{% endif %}
localAPIEndpoint:
advertiseAddress: "{{ kube_apiserver_address }}"
advertiseAddress: {{ kube_apiserver_address }}
bindPort: {{ kube_apiserver_port }}
{% if kubeadm_certificate_key is defined %}
certificateKey: {{ kubeadm_certificate_key }}
@@ -29,8 +29,6 @@ nodeRegistration:
- name: cloud-provider
value: external
{% endif %}
imagePullPolicy: {{ k8s_image_pull_policy }}
imagePullSerial: {{ kubeadm_image_pull_serial | lower }}
{% if kubeadm_patches | length > 0 %}
patches:
directory: {{ kubeadm_patches_dir }}
@@ -45,7 +43,7 @@ etcd:
external:
endpoints:
{% for endpoint in etcd_access_addresses.split(',') %}
- "{{ endpoint }}"
- {{ endpoint }}
{% endfor %}
caFile: {{ etcd_cert_dir }}/{{ kube_etcd_cacert_file }}
certFile: {{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}
@@ -104,16 +102,13 @@ etcd:
{% endfor %}
{% endif %}
dns:
{% if 'addon/coredns' in kubeadm_init_phases_skip %}
disabled: true
{% endif %}
imageRepository: {{ coredns_image_repo | regex_replace('/coredns(?!/coredns).*$', '') }}
imageTag: {{ coredns_image_tag }}
networking:
dnsDomain: {{ dns_domain }}
serviceSubnet: "{{ kube_service_subnets }}"
serviceSubnet: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}"
{% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %}
podSubnet: "{{ kube_pods_subnets }}"
podSubnet: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}"
{% endif %}
{% if kubeadm_feature_gates %}
featureGates:
@@ -121,11 +116,11 @@ featureGates:
{{ feature | replace("=", ": ") }}
{% endfor %}
{% endif %}
kubernetesVersion: v{{ kube_version }}
kubernetesVersion: {{ kube_version }}
{% if kubeadm_config_api_fqdn is defined %}
controlPlaneEndpoint: "{{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}"
controlPlaneEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
{% else %}
controlPlaneEndpoint: "{{ main_ip | ansible.utils.ipwrap }}:{{ kube_apiserver_port }}"
controlPlaneEndpoint: {{ ip | default(fallback_ip) }}:{{ kube_apiserver_port }}
{% endif %}
certificatesDir: {{ kube_cert_dir }}
imageRepository: {{ kube_image_repo }}
@@ -149,7 +144,7 @@ apiServer:
{% endif %}
{% if kube_apiserver_use_authorization_config_file %}
- name: authorization-config
value: "{{ kube_config_dir }}/apiserver-authorization-config-{{ kube_apiserver_authorization_config_api_version }}.yaml"
value: "{{ kube_config_dir }}/apiserver-authorization-config.yaml"
{% else %}
- name: authorization-mode
value: "{{ authorization_modes | join(',') }}"
@@ -179,7 +174,7 @@ apiServer:
- name: service-node-port-range
value: "{{ kube_apiserver_node_port_range }}"
- name: service-cluster-ip-range
value: "{{ kube_service_subnets }}"
value: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}"
- name: kubelet-preferred-address-types
value: "{{ kubelet_preferred_address_types }}"
- name: profiling
@@ -311,8 +306,8 @@ apiServer:
{% endif %}
{% if kube_apiserver_use_authorization_config_file %}
- name: authorization-config
hostPath: {{ kube_config_dir }}/apiserver-authorization-config-{{ kube_apiserver_authorization_config_api_version }}.yaml
mountPath: {{ kube_config_dir }}/apiserver-authorization-config-{{ kube_apiserver_authorization_config_api_version }}.yaml
hostPath: {{ kube_config_dir }}/apiserver-authorization-config.yaml
mountPath: {{ kube_config_dir }}/apiserver-authorization-config.yaml
{% endif %}
{% if kubernetes_audit or kubernetes_audit_webhook %}
- name: {{ audit_policy_name }}
@@ -356,7 +351,7 @@ apiServer:
{% endif %}
certSANs:
{% for san in apiserver_sans %}
- {{ san }}
- "{{ san }}"
{% endfor %}
controllerManager:
extraArgs:
@@ -366,21 +361,22 @@ controllerManager:
value: "{{ kube_controller_node_monitor_period }}"
{% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %}
- name: cluster-cidr
value: "{{ kube_pods_subnets }}"
value: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}"
{% endif %}
- name: service-cluster-ip-range
value: "{{ kube_service_subnets }}"
value: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks else '' }}"
{% if kube_network_plugin is defined and kube_network_plugin == "calico" and not calico_ipam_host_local %}
- name: allocate-node-cidrs
value: "false"
{% else %}
{% if ipv4_stack %}
{% if enable_dual_stack_networks %}
- name: node-cidr-mask-size-ipv4
value: "{{ kube_network_node_prefix }}"
{% endif %}
{% if ipv6_stack %}
- name: node-cidr-mask-size-ipv6
value: "{{ kube_network_node_prefix_ipv6 }}"
{% else %}
- name: node-cidr-mask-size
value: "{{ kube_network_node_prefix }}"
{% endif %}
{% endif %}
- name: profiling
@@ -482,45 +478,9 @@ scheduler:
{% endfor %}
{% endif %}
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: UpgradeConfiguration
apply:
kubernetesVersion: v{{ kube_version }}
allowExperimentalUpgrades: true
certificateRenewal: {{ kubeadm_upgrade_auto_cert_renewal | lower }}
etcdUpgrade: {{ (etcd_deployment_type == "kubeadm") | lower }}
forceUpgrade: true
{% if kubeadm_ignore_preflight_errors | length > 0 %}
ignorePreflightErrors:
{% for ignore_error in kubeadm_ignore_preflight_errors %}
- "{{ ignore_error }}"
{% endfor %}
{% endif %}
{% if kubeadm_patches | length > 0 %}
patches:
directory: {{ kubeadm_patches_dir }}
{% endif %}
imagePullPolicy: {{ k8s_image_pull_policy }}
imagePullSerial: {{ kubeadm_image_pull_serial | lower }}
node:
certificateRenewal: {{ kubeadm_upgrade_auto_cert_renewal | lower }}
etcdUpgrade: {{ (etcd_deployment_type == "kubeadm") | lower }}
{% if kubeadm_ignore_preflight_errors | length > 0 %}
ignorePreflightErrors:
{% for ignore_error in kubeadm_ignore_preflight_errors %}
- "{{ ignore_error }}"
{% endfor %}
{% endif %}
{% if kubeadm_patches | length > 0 %}
patches:
directory: {{ kubeadm_patches_dir }}
{% endif %}
imagePullPolicy: {{ k8s_image_pull_policy }}
imagePullSerial: {{ kubeadm_image_pull_serial | lower }}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
bindAddress: "{{ kube_proxy_bind_address }}"
bindAddress: {{ kube_proxy_bind_address }}
clientConnection:
acceptContentTypes: {{ kube_proxy_client_accept_content_types }}
burst: {{ kube_proxy_client_burst }}
@@ -528,7 +488,7 @@ clientConnection:
kubeconfig: {{ kube_proxy_client_kubeconfig }}
qps: {{ kube_proxy_client_qps }}
{% if kube_network_plugin is defined and kube_network_plugin not in ["kube-ovn"] %}
clusterCIDR: "{{ kube_pods_subnets }}"
clusterCIDR: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks else '' }}"
{% endif %}
configSyncPeriod: {{ kube_proxy_config_sync_period }}
conntrack:
@@ -537,7 +497,7 @@ conntrack:
tcpCloseWaitTimeout: {{ kube_proxy_conntrack_tcp_close_wait_timeout }}
tcpEstablishedTimeout: {{ kube_proxy_conntrack_tcp_established_timeout }}
enableProfiling: {{ kube_proxy_enable_profiling }}
healthzBindAddress: "{{ kube_proxy_healthz_bind_address }}"
healthzBindAddress: {{ kube_proxy_healthz_bind_address }}
hostnameOverride: "{{ kube_override_hostname }}"
iptables:
masqueradeAll: {{ kube_proxy_masquerade_all }}
@@ -553,7 +513,7 @@ ipvs:
tcpTimeout: {{ kube_proxy_tcp_timeout }}
tcpFinTimeout: {{ kube_proxy_tcp_fin_timeout }}
udpTimeout: {{ kube_proxy_udp_timeout }}
metricsBindAddress: "{{ kube_proxy_metrics_bind_address }}"
metricsBindAddress: {{ kube_proxy_metrics_bind_address }}
mode: {{ kube_proxy_mode }}
nodePortAddresses: {{ kube_proxy_nodeport_addresses }}
oomScoreAdj: {{ kube_proxy_oom_score_adj }}

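The UpgradeConfiguration document removed in the hunk above only exists in kubeadm.k8s.io/v1beta4. A sketch of how it might render, assuming kube_version evaluates to 1.32.0, automatic certificate renewal left enabled, and an etcd_deployment_type other than "kubeadm" (all assumptions for illustration):

  apiVersion: kubeadm.k8s.io/v1beta4
  kind: UpgradeConfiguration
  apply:
    kubernetesVersion: v1.32.0
    allowExperimentalUpgrades: true
    certificateRenewal: true
    etcdUpgrade: false
    forceUpgrade: true
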
@@ -9,7 +9,7 @@ discovery:
{% if kubeadm_config_api_fqdn is defined %}
apiServerEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
{% else %}
apiServerEndpoint: "{{ kubeadm_discovery_address }}"
apiServerEndpoint: {{ kubeadm_discovery_address }}
{% endif %}
token: {{ kubeadm_token }}
unsafeSkipCAVerification: true
@@ -24,7 +24,7 @@ timeouts:
{% endif %}
controlPlane:
localAPIEndpoint:
advertiseAddress: "{{ kube_apiserver_address }}"
advertiseAddress: {{ kube_apiserver_address }}
bindPort: {{ kube_apiserver_port }}
certificateKey: {{ kubeadm_certificate_key }}
nodeRegistration:

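A recurring pattern in these hunks is wrapping an address with ansible.utils.ipwrap before joining it with a port, so that IPv6 literals stay valid in host:port strings while IPv4 addresses and hostnames pass through unchanged; roughly:

  {{ '10.233.0.10' | ansible.utils.ipwrap }}    # -> 10.233.0.10
  {{ 'fd85:ee78::10' | ansible.utils.ipwrap }}  # -> [fd85:ee78::10]
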
@@ -9,7 +9,7 @@
src: "kubeadm-client.conf.j2"
dest: "{{ kube_config_dir }}/kubeadm-cert-controlplane.conf"
mode: "0640"
validate: "{{ kubeadm_config_validate_enabled | ternary(bin_dir + '/kubeadm config validate --config %s', omit) }}"
validate: "{{ bin_dir }}/kubeadm config validate --config %s"
vars:
kubeadm_cert_controlplane: true


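The validate line above switches between the two behaviours in the hunk with Jinja's ternary filter plus Ansible's special omit variable; a minimal sketch, assuming kubeadm_config_validate_enabled is a boolean:

  validate: "{{ kubeadm_config_validate_enabled | ternary(bin_dir + '/kubeadm config validate --config %s', omit) }}"
  # true  -> the rendered file is checked with: <bin_dir>/kubeadm config validate --config %s
  # false -> the validate parameter is dropped entirely, so no validation runs
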
@@ -4,7 +4,7 @@
# noqa: jinja[spacing]
kubeadm_discovery_address: >-
{%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
{{ first_kube_control_plane_address | ansible.utils.ipwrap }}:{{ kube_apiserver_port }}
{{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}
{%- else -%}
{{ kube_apiserver_endpoint | replace("https://", "") }}
{%- endif %}
@@ -75,7 +75,7 @@
dest: "{{ kube_config_dir }}/kubeadm-client.conf"
backup: true
mode: "0640"
validate: "{{ kubeadm_config_validate_enabled | ternary(bin_dir + '/kubeadm config validate --config %s', omit) }}"
validate: "{{ bin_dir }}/kubeadm config validate --config %s"
when: ('kube_control_plane' not in group_names)

- name: Join to cluster if needed

@@ -8,9 +8,9 @@ discovery:
{% else %}
bootstrapToken:
{% if kubeadm_config_api_fqdn is defined %}
apiServerEndpoint: "{{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}"
apiServerEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
{% else %}
apiServerEndpoint: "{{ kubeadm_discovery_address }}"
apiServerEndpoint: {{ kubeadm_discovery_address }}
{% endif %}
token: {{ kubeadm_token }}
{% if ca_cert_content is defined %}
@@ -32,7 +32,7 @@ caCertPath: {{ kube_cert_dir }}/ca.crt
{% if kubeadm_cert_controlplane is defined and kubeadm_cert_controlplane %}
controlPlane:
localAPIEndpoint:
advertiseAddress: "{{ kube_apiserver_address }}"
advertiseAddress: {{ kube_apiserver_address }}
bindPort: {{ kube_apiserver_port }}
certificateKey: {{ kubeadm_certificate_key }}
{% endif %}

@@ -1,10 +1,9 @@
---
# advertised host IP for kubelet. This affects network plugin config. Take caution
# add ipv6 manually for dualstack mode because ipv4 has priority in main_ip for dualstack
kubelet_address: "{{ main_ips | join(',') }}"
kubelet_address: "{{ ip | default(fallback_ip) }}{{ (',' + ip6) if enable_dual_stack_networks and ip6 is defined else '' }}"

# bind address for kubelet. Set to :: to listen on all interfaces
kubelet_bind_address: "{{ main_ip | default('::') }}"
# bind address for kubelet. Set to 0.0.0.0 to listen on all interfaces
kubelet_bind_address: "{{ ip | default('0.0.0.0') }}"

# resolv.conf to base dns config
kube_resolv_conf: "/etc/resolv.conf"
@@ -28,12 +27,11 @@ kubelet_systemd_hardening: false
kubelet_systemd_wants_dependencies: []

# List of secure IPs for kubelet
# don't forget ipv6 addresses for dualstack (because "main_ip" prioritizes ipv4)
kube_node_addresses: >-
{%- for host in (groups['k8s_cluster'] | union(groups['etcd'])) -%}
{{ hostvars[host]['main_ips'] | join(' ') }}{{ ' ' if not loop.last else '' }}
{{ hostvars[host]['ip'] | default(hostvars[host]['fallback_ip']) }}{{ ' ' if not loop.last else '' }}
{%- endfor -%}
kubelet_secure_addresses: "localhost link-local {{ kube_pods_subnets | regex_replace(',', ' ') }} {{ kube_node_addresses }}"
kubelet_secure_addresses: "localhost link-local {{ kube_pods_subnet }} {{ kube_node_addresses }}"

# Reserve this space for kube resources
# Whether to run kubelet and container-engine daemons in a dedicated cgroup. (Not required for resource reservations).
@@ -61,7 +59,7 @@ eviction_hard_control_plane: {}
kubelet_status_update_frequency: 10s

# kube-vip
kube_vip_version: 0.8.0
kube_vip_version: v0.8.0

kube_vip_arp_enabled: false
kube_vip_interface:
@@ -192,7 +190,7 @@ conntrack_modules:

## Enable distributed tracing for kubelet
kubelet_tracing: false
kubelet_tracing_endpoint: "[::]:4317"
kubelet_tracing_endpoint: 0.0.0.0:4317
kubelet_tracing_sampling_rate_per_million: 100

# The maximum number of image pulls in parallel. Set it to an integer greater than 1 to enable image pulling in parallel.

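The kubelet_address hunk above replaces the explicit ip/ip6 concatenation with the main_ips list, which already carries the per-stack addresses in order; conceptually (the addresses are illustrative):

  # old: "{{ ip | default(fallback_ip) }}{{ (',' + ip6) if enable_dual_stack_networks and ip6 is defined else '' }}"
  # new: "{{ main_ips | join(',') }}"
  # e.g. main_ips: ['10.0.0.5', 'fd85:ee78::5']  ->  "10.0.0.5,fd85:ee78::5"
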
@@ -27,7 +27,7 @@
- name: Install nginx-proxy
import_tasks: loadbalancer/nginx-proxy.yml
when:
- ('kube_control_plane' not in group_names) or (kube_apiserver_bind_address != '::')
- ('kube_control_plane' not in group_names) or (kube_apiserver_bind_address != '0.0.0.0')
- loadbalancer_apiserver_localhost
- loadbalancer_apiserver_type == 'nginx'
tags:
@@ -36,7 +36,7 @@
- name: Install haproxy
import_tasks: loadbalancer/haproxy.yml
when:
- ('kube_control_plane' not in group_names) or (kube_apiserver_bind_address != '::')
- ('kube_control_plane' not in group_names) or (kube_apiserver_bind_address != '0.0.0.0')
- loadbalancer_apiserver_localhost
- loadbalancer_apiserver_type == 'haproxy'
tags:

@@ -29,10 +29,10 @@ containerLogMaxSize: {{ kubelet_logfiles_max_size }}
containerRuntimeEndpoint: {{ cri_socket }}
maxPods: {{ kubelet_max_pods }}
podPidsLimit: {{ kubelet_pod_pids_limit }}
address: "{{ kubelet_bind_address }}"
address: {{ kubelet_bind_address }}
readOnlyPort: {{ kube_read_only_port }}
healthzPort: {{ kubelet_healthz_port }}
healthzBindAddress: "{{ kubelet_healthz_bind_address }}"
healthzBindAddress: {{ kubelet_healthz_bind_address }}
kubeletCgroups: {{ kubelet_kubelet_cgroups }}
clusterDomain: {{ dns_domain }}
{% if kubelet_protect_kernel_defaults | bool %}
@@ -130,7 +130,7 @@ topologyManagerScope: {{ kubelet_topology_manager_scope }}
{% endif %}
{% if kubelet_tracing %}
tracing:
endpoint: "{{ kubelet_tracing_endpoint }}"
endpoint: {{ kubelet_tracing_endpoint }}
samplingRatePerMillion: {{ kubelet_tracing_sampling_rate_per_million }}
{% endif %}
maxParallelImagePulls: {{ kubelet_max_parallel_image_pulls }}

@@ -22,7 +22,7 @@ defaults
{% if loadbalancer_apiserver_healthcheck_port is defined -%}
frontend healthz
bind 0.0.0.0:{{ loadbalancer_apiserver_healthcheck_port }}
{% if ipv6_stack -%}
{% if enable_dual_stack_networks -%}
bind :::{{ loadbalancer_apiserver_healthcheck_port }}
{% endif -%}
mode http
@@ -31,7 +31,7 @@ frontend healthz

frontend kube_api_frontend
bind 127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }}
{% if ipv6_stack -%}
{% if enable_dual_stack_networks -%}
bind [::1]:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }};
{% endif -%}
mode tcp
@@ -45,5 +45,5 @@ backend kube_api_backend
option httpchk GET /healthz
http-check expect status 200
{% for host in groups['kube_control_plane'] -%}
server {{ host }} {{ hostvars[host]['main_access_ip'] | ansible.utils.ipwrap }}:{{ kube_apiserver_port }} check check-ssl verify none
server {{ host }} {{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(hostvars[host]['fallback_ip'])) }}:{{ kube_apiserver_port }} check check-ssl verify none
{% endfor -%}

@@ -14,13 +14,13 @@ stream {
upstream kube_apiserver {
least_conn;
{% for host in groups['kube_control_plane'] -%}
server {{ hostvars[host]['main_access_ip'] | ansible.utils.ipwrap }}:{{ kube_apiserver_port }};
server {{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(hostvars[host]['fallback_ip'])) }}:{{ kube_apiserver_port }};
{% endfor -%}
}

server {
listen 127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }};
{% if ipv6_stack -%}
{% if enable_dual_stack_networks -%}
listen [::1]:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }};
{% endif -%}
proxy_pass kube_apiserver;
@@ -44,7 +44,7 @@ http {
{% if loadbalancer_apiserver_healthcheck_port is defined -%}
server {
listen {{ loadbalancer_apiserver_healthcheck_port }};
{% if ipv6_stack -%}
{% if enable_dual_stack_networks -%}
listen [::]:{{ loadbalancer_apiserver_healthcheck_port }};
{% endif -%}
location /healthz {

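In both load balancer templates, one side of each hunk gates the extra IPv6 listener on ipv6_stack and the other on the deprecated enable_dual_stack_networks; whichever gate is true, the rendered nginx server block ends up with both loopback listeners, roughly (assuming the default kube_apiserver_port of 6443):

  server {
      listen 127.0.0.1:6443;
      listen [::1]:6443;
      proxy_pass kube_apiserver;
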
@@ -5,7 +5,7 @@ clusters:
- name: local
cluster:
certificate-authority: {{ kube_cert_dir }}/ca.pem
server: "{{ kube_apiserver_endpoint }}"
server: {{ kube_apiserver_endpoint }}
users:
- name: kubelet
user:

@@ -1,62 +1,7 @@
---
- name: Stop if some versions have a 'v' left at the start
# TODO: drop this task after 2.28.0 is released
# The 'not defined' tests are an exception for applications whose version is not defined
# in kubespray-defaults, only in their own roles.
assert:
msg: |
All version strings used in kubespray have been normalized to not use a leading 'v'.
This check will be dropped in the next minor release.
that:
- argocd_version is not defined or not argocd_version.startswith('v')
- not aws_ebs_csi_plugin_version.startswith('v')
- not azure_csi_plugin_version.startswith('v')
- not calico_version.startswith('v')
- not calico_apiserver_version.startswith('v')
- not calico_ctl_version.startswith('v')
- not calico_typha_version.startswith('v')
- not cephfs_provisioner_version.startswith('v')
- not cert_manager_version.startswith('v')
- not cilium_cli_version.startswith('v')
- not cilium_version.startswith('v')
- not cinder_csi_plugin_version.startswith('v')
- not cni_version.startswith('v')
- not dnsautoscaler_version.startswith('v')
- not flannel_cni_version.startswith('v')
- not flannel_version.startswith('v')
- gateway_api_version is not defined or not gateway_api_version.startswith('v')
- not gcp_pd_csi_plugin_version.startswith('v')
- not helm_version.startswith('v')
- not kube_ovn_version.startswith('v')
- not kube_router_version.startswith('v')
- not kube_version.startswith('v')
- kube_vip_version is not defined or not kube_vip_version.startswith('v')
- not local_path_provisioner_version.startswith('v')
- not local_volume_provisioner_version.startswith('v')
- not metallb_version.startswith('v')
- not metrics_server_version.startswith('v')
- not multus_version.startswith('v')
- not netcheck_version.startswith('v')
- not rbd_provisioner_version.startswith('v')
- not runc_version.startswith('v')
- not skopeo_version.startswith('v')
- not yq_version.startswith('v')

- name: Stop if some derived versions have a 'v' left at the start
# TODO: drop this task after 2.28.0 is released
# The 'not defined' tests are an exception for applications whose version is not defined
# in kubespray-defaults, only in their own roles.
assert:
msg: |
All version strings used in kubespray have been normalized to not use a leading 'v'.
This check will be dropped in the next minor release.
that:
- not etcd_version.startswith('v')
- not pod_infra_version.startswith('v')

- name: Stop if any host not in '--limit' does not have a fact cache
vars:
uncached_hosts: "{{ hostvars | dict2items | selectattr('value.ansible_default_ipv6', 'undefined') | selectattr('value.ansible_default_ipv4', 'undefined') | map(attribute='key') }}"
uncached_hosts: "{{ hostvars | dict2items | selectattr('value.ansible_default_ipv4', 'undefined') | map(attribute='key') }}"
excluded_hosts: "{{ groups['k8s_cluster'] | difference(query('inventory_hostnames', ansible_limit)) }}"
assert:
that: uncached_hosts | intersect(excluded_hosts) == []
@@ -160,7 +105,6 @@
- not ignore_assert_errors
- ('k8s_cluster' in group_names)
- kube_network_plugin not in ['calico', 'none']
- ipv4_stack | bool

- name: Stop if ip var does not match local ips
assert:
@@ -181,16 +125,16 @@
{%- endif -%}
state: present
when:
- main_access_ip is defined
- access_ip is defined
- not ignore_assert_errors
- ping_access_ip
- not is_fedora_coreos
- not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]

- name: Stop if access_ip is not pingable
command: ping -c1 {{ main_access_ip }}
command: ping -c1 {{ access_ip }}
when:
- main_access_ip is defined
- access_ip is defined
- not ignore_assert_errors
- ping_access_ip
changed_when: false
@@ -235,19 +179,12 @@
- cloud-provider
- facts

- name: Warn if `enable_dual_stack_networks` is set
debug:
msg: "WARNING! => `enable_dual_stack_networks` deprecation. Please switch to using ipv4_stack and ipv6_stack."
when:
- enable_dual_stack_networks is defined

- name: "Check that kube_service_addresses is a network range"
assert:
that:
- kube_service_addresses | ansible.utils.ipaddr('net')
msg: "kube_service_addresses = '{{ kube_service_addresses }}' is not a valid network range"
run_once: true
when: ipv4_stack | bool

- name: "Check that kube_pods_subnet is a network range"
assert:
@@ -255,7 +192,6 @@
- kube_pods_subnet | ansible.utils.ipaddr('net')
msg: "kube_pods_subnet = '{{ kube_pods_subnet }}' is not a valid network range"
run_once: true
when: ipv4_stack | bool

- name: "Check that kube_pods_subnet does not collide with kube_service_addresses"
assert:
@@ -263,50 +199,13 @@
- kube_pods_subnet | ansible.utils.ipaddr(kube_service_addresses) | string == 'None'
msg: "kube_pods_subnet cannot be the same network segment as kube_service_addresses"
run_once: true
when: ipv4_stack | bool

- name: "Check that ipv4 IP range is enough for the nodes"
- name: "Check that IP range is enough for the nodes"
assert:
that:
- 2 ** (kube_network_node_prefix - kube_pods_subnet | ansible.utils.ipaddr('prefix')) >= groups['k8s_cluster'] | length
msg: "Not enough ipv4 IPs are available for the desired node count."
when:
- ipv4_stack | bool
- kube_network_plugin != 'calico'
run_once: true

- name: "Check that kube_service_addresses_ipv6 is a network range"
assert:
that:
- kube_service_addresses_ipv6 | ansible.utils.ipaddr('net')
msg: "kube_service_addresses_ipv6 = '{{ kube_service_addresses_ipv6 }}' is not a valid network range"
run_once: true
when: ipv6_stack | bool

- name: "Check that kube_pods_subnet_ipv6 is a network range"
assert:
that:
- kube_pods_subnet_ipv6 | ansible.utils.ipaddr('net')
msg: "kube_pods_subnet_ipv6 = '{{ kube_pods_subnet_ipv6 }}' is not a valid network range"
run_once: true
when: ipv6_stack | bool

- name: "Check that kube_pods_subnet_ipv6 does not collide with kube_service_addresses_ipv6"
assert:
that:
- kube_pods_subnet_ipv6 | ansible.utils.ipaddr(kube_service_addresses_ipv6) | string == 'None'
msg: "kube_pods_subnet_ipv6 cannot be the same network segment as kube_service_addresses_ipv6"
run_once: true
when: ipv6_stack | bool

- name: "Check that ipv6 IP range is enough for the nodes"
assert:
that:
- 2 ** (kube_network_node_prefix_ipv6 - kube_pods_subnet_ipv6 | ansible.utils.ipaddr('prefix')) >= groups['k8s_cluster'] | length
msg: "Not enough ipv6 IPs are available for the desired node count."
when:
- ipv6_stack | bool
- kube_network_plugin != 'calico'
msg: "Not enough IPs are available for the desired node count."
when: kube_network_plugin != 'calico'
run_once: true

- name: Stop if unsupported options selected

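The capacity assertions above compare the number of per-node CIDR blocks that fit into the pod subnet against the cluster size. As a worked example with the defaults that appear later in this diff:

  # kube_pods_subnet: 10.233.64.0/18, kube_network_node_prefix: 24
  # 2 ** (24 - 18) = 64 node CIDRs -> supports up to 64 nodes
  # kube_pods_subnet_ipv6: /112, kube_network_node_prefix_ipv6: 120
  # 2 ** (120 - 112) = 256 node CIDRs -> supports up to 256 nodes
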
@@ -76,7 +76,6 @@
value: "1"
state: present
reload: true
when: ipv4_stack | bool

- name: Enable ipv6 forwarding
ansible.posix.sysctl:
@@ -85,7 +84,7 @@
value: "1"
state: present
reload: true
when: ipv6_stack | bool
when: enable_dual_stack_networks | bool

- name: Check if we need to set fs.may_detach_mounts
stat:

@@ -2,10 +2,11 @@
- name: Hosts | create hosts list from inventory
set_fact:
etc_hosts_inventory_block: |-
{% for item in (groups['k8s_cluster'] + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique %}
{{ hostvars[item]['main_access_ip'] }} {{ hostvars[item]['ansible_hostname'] | default(item) }}.{{ dns_domain }} {{ hostvars[item]['ansible_hostname'] | default(item) }}
{% if ipv4_stack and ipv6_stack %}
{{ hostvars[item]['access_ip6'] | default(hostvars[item]['ip6'] | default(hostvars[item]['ansible_default_ipv6']['address'])) }} {{ hostvars[item]['ansible_hostname'] | default(item) }}.{{ dns_domain }} {{ hostvars[item]['ansible_hostname'] | default(item) }}
{% for item in (groups['k8s_cluster'] + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique -%}
{% if 'access_ip' in hostvars[item] or 'ip' in hostvars[item] or 'ansible_default_ipv4' in hostvars[item] -%}
{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}
{%- if ('ansible_hostname' in hostvars[item] and item != hostvars[item]['ansible_hostname']) %} {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }} {{ hostvars[item]['ansible_hostname'] }} {% else %} {{ item }}.{{ dns_domain }} {{ item }} {% endif %}

{% endif %}
{% endfor %}
delegate_to: localhost

@@ -4,11 +4,7 @@
# 1 is the 2nd item of a tuple in items()
block: |-
{% for key, val in dhclient_supersede.items() | rejectattr(1, '==', []) -%}
{% if key == "domain-name-servers" -%}
supersede {{ key }} {{ val | join(',') }};
{% else -%}
supersede {{ key }} "{{ val | join('","') }}";
{% endif -%}
{% endfor %}
path: "{{ dhclientconffile }}"
create: true

File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -18,21 +18,17 @@ kubelet_fail_swap_on: true
kubelet_swap_behavior: LimitedSwap

## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: "{{ (kubelet_checksums['amd64'] | dict2items)[0].key }}"
kube_version: v1.32.0

## The minimum version working
kube_version_min_required: "{{ (kubelet_checksums['amd64'] | dict2items)[-1].key }}"
kube_version_min_required: v1.30.0

## Kube Proxy mode. One of ['iptables', 'ipvs']
kube_proxy_mode: ipvs

# Kubeadm config api version
# If kube_version is v1.31 or higher, it will be v1beta4, otherwise it will be v1beta3.
kubeadm_config_api_version: "{{ 'v1beta4' if kube_version is version('1.31.0', '>=') else 'v1beta3' }}"

# Debugging option for the kubeadm config validate command
# Set to false only for development and testing scenarios where validation is expected to fail (pre-release Kubernetes versions, etc.)
kubeadm_config_validate_enabled: true
kubeadm_config_api_version: "{{ 'v1beta4' if kube_version is version('v1.31.0', '>=') else 'v1beta3' }}"

## The timeout for init first control-plane
kubeadm_init_timeout: 300s
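
The two kubeadm_config_api_version variants above track the kube_version spelling change: once kube_version drops its leading 'v', the literal it is compared against must drop it too, because the version test compares component-wise and mixed spellings are unreliable (which is exactly what the 'v'-prefix assertions earlier in this diff guard against). A sketch:

  # normalized scheme:  '1.32.0'  is version('1.31.0', '>=')   -> True
  # legacy scheme:      'v1.32.0' is version('v1.31.0', '>=')  -> True
  # mixing '1.32.0' with 'v1.31.0' is what the assertions forbid
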
@@ -139,8 +135,8 @@ resolvconf_mode: host_resolvconf
# Deploy netchecker app to verify DNS resolve as an HTTP service
deploy_netchecker: false
# IP address of the kubernetes DNS service (called skydns for historical reasons)
skydns_server: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(3) | ansible.utils.ipaddr('address') }}"
skydns_server_secondary: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(4) | ansible.utils.ipaddr('address') }}"
skydns_server: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(3) | ansible.utils.ipaddr('address') }}"
skydns_server_secondary: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(4) | ansible.utils.ipaddr('address') }}"
dns_domain: "{{ cluster_name }}"
docker_dns_search_domains:
- 'default.svc.{{ dns_domain }}'
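
The skydns addresses are carved deterministically out of the service network with the ipaddr filter chain; a sketch of what that works out to, assuming the stock kube_service_addresses of 10.233.0.0/18:

  # ... | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(3) | ansible.utils.ipaddr('address')
  # index 3 -> 10.233.0.3  (skydns_server)
  # index 4 -> 10.233.0.4  (skydns_server_secondary)
  # index 1 -> 10.233.0.1  (kube_apiserver_ip, later in this file)
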
@@ -234,39 +230,33 @@ kube_pods_subnet: 10.233.64.0/18
kube_network_node_prefix: 24

# Configure Dual Stack networking (i.e. both IPv4 and IPv6)
# enable_dual_stack_networks: false # deprecated

# Configure IPv4 Stack networking
ipv4_stack: true
# Configure IPv6 Stack networking
ipv6_stack: "{{ enable_dual_stack_networks | default(false) }}"
enable_dual_stack_networks: false

# Kubernetes internal network for IPv6 services, unused block of space.
# This is only used if ipv6_stack is set to true
# This is only used if enable_dual_stack_networks is set to true
# This provides 4096 IPv6 IPs
kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116

# Internal network. When used, it will assign IPv6 addresses from this range to individual pods.
# This network must not already be in your network infrastructure!
# This is only used if ipv6_stack is set to true.
# This is only used if enable_dual_stack_networks is set to true.
# This provides room for 256 nodes with 254 pods per node.
kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112

# IPv6 subnet size allocated to each node for pods.
# This is only used if ipv6_stack is set to true
# This is only used if enable_dual_stack_networks is set to true
# This provides room for 254 pods per node.
kube_network_node_prefix_ipv6: 120


# The virtual cluster IP, real host IPs and ports the API Server will be
# listening on.
# NOTE: loadbalancer_apiserver_localhost somewhat alters the final API endpoint
# access IP value (automatically evaluated below)
kube_apiserver_ip: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}"
kube_apiserver_ip: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}"

# NOTE: If you specify an address/interface and use loadbalancer_apiserver_localhost,
# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on control plane nodes on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too.
kube_apiserver_bind_address: "::"
kube_apiserver_bind_address: 0.0.0.0

# https
kube_apiserver_port: 6443
@@ -506,7 +496,6 @@ authorization_modes: ['Node', 'RBAC']
## Examples: https://kubernetes.io/blog/2024/04/26/multi-webhook-and-modular-authorization-made-much-easier/
## KEP: https://github.com/kubernetes/enhancements/tree/master/keps/sig-auth/3221-structured-authorization-configuration
kube_apiserver_use_authorization_config_file: false
kube_apiserver_authorization_config_api_version: "{{ 'v1alpha1' if kube_version is version('1.30.0', '<') else 'v1beta1' if kube_version is version('1.32.0', '<') else 'v1' }}"
kube_apiserver_authorization_config_authorizers:
- type: Node
name: node
@@ -619,9 +608,9 @@ ssl_ca_dirs: |-

# Vars for pointing to kubernetes api endpoints
kube_apiserver_count: "{{ groups['kube_control_plane'] | length }}"
kube_apiserver_address: "{{ hostvars[inventory_hostname]['main_ip'] }}"
kube_apiserver_access_address: "{{ hostvars[inventory_hostname]['main_access_ip'] }}"
first_kube_control_plane_address: "{{ hostvars[groups['kube_control_plane'][0]]['main_access_ip'] }}"
kube_apiserver_address: "{{ ip | default(hostvars[inventory_hostname]['fallback_ip']) }}"
kube_apiserver_access_address: "{{ access_ip | default(kube_apiserver_address) }}"
first_kube_control_plane_address: "{{ hostvars[groups['kube_control_plane'][0]]['access_ip'] | default(hostvars[groups['kube_control_plane'][0]]['ip'] | default(hostvars[groups['kube_control_plane'][0]]['fallback_ip'])) }}"
loadbalancer_apiserver_localhost: "{{ loadbalancer_apiserver is not defined }}"
loadbalancer_apiserver_type: "nginx"
# applied if only external loadbalancer_apiserver is defined, otherwise ignored
@@ -632,7 +621,7 @@ kube_apiserver_global_endpoint: |-
{%- elif loadbalancer_apiserver_localhost and (loadbalancer_apiserver_port is not defined or loadbalancer_apiserver_port == kube_apiserver_port) -%}
https://localhost:{{ kube_apiserver_port }}
{%- else -%}
https://{{ first_kube_control_plane_address | ansible.utils.ipwrap }}:{{ kube_apiserver_port }}
https://{{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}
{%- endif %}
kube_apiserver_endpoint: |-
{% if loadbalancer_apiserver is defined -%}
@@ -640,9 +629,9 @@ kube_apiserver_endpoint: |-
{%- elif ('kube_control_plane' not in group_names) and loadbalancer_apiserver_localhost -%}
https://localhost:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }}
{%- elif 'kube_control_plane' in group_names -%}
https://{{ kube_apiserver_bind_address | regex_replace('::', '127.0.0.1') | ansible.utils.ipwrap }}:{{ kube_apiserver_port }}
https://{{ kube_apiserver_bind_address | regex_replace('0\.0\.0\.0', '127.0.0.1') }}:{{ kube_apiserver_port }}
{%- else -%}
https://{{ first_kube_control_plane_address | ansible.utils.ipwrap }}:{{ kube_apiserver_port }}
https://{{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}
{%- endif %}
kube_apiserver_client_cert: "{{ kube_cert_dir }}/ca.crt"
kube_apiserver_client_key: "{{ kube_cert_dir }}/ca.key"
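
Taken together, the endpoint logic above means that on a control plane node the rendered URL always points at the local apiserver over loopback: the wildcard bind address ('::' in one variant, 0.0.0.0 in the other) is rewritten to 127.0.0.1 before being joined with the port. Roughly, with the default port of 6443:

  # kube_apiserver_bind_address: "::"     -> https://127.0.0.1:6443
  # kube_apiserver_bind_address: 0.0.0.0  -> https://127.0.0.1:6443
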
@@ -654,46 +643,46 @@ etcd_events_cluster_enabled: false
etcd_hosts: "{{ groups['etcd'] | default(groups['kube_control_plane']) }}"

# Vars for pointing to etcd endpoints
etcd_address: "{{ hostvars[inventory_hostname]['main_ip'] }}"
etcd_access_address: "{{ hostvars[inventory_hostname]['main_access_ip'] }}"
etcd_events_access_address: "{{ hostvars[inventory_hostname]['main_access_ip'] }}"
etcd_peer_url: "https://{{ etcd_access_address | ansible.utils.ipwrap }}:2380"
etcd_client_url: "https://{{ etcd_access_address | ansible.utils.ipwrap }}:2379"
etcd_events_peer_url: "https://{{ etcd_events_access_address | ansible.utils.ipwrap }}:2382"
etcd_events_client_url: "https://{{ etcd_events_access_address | ansible.utils.ipwrap }}:2383"
etcd_address: "{{ ip | default(fallback_ip) }}"
etcd_access_address: "{{ access_ip | default(etcd_address) }}"
etcd_events_access_address: "{{ access_ip | default(etcd_address) }}"
etcd_peer_url: "https://{{ etcd_access_address }}:2380"
etcd_client_url: "https://{{ etcd_access_address }}:2379"
etcd_events_peer_url: "https://{{ etcd_events_access_address }}:2382"
etcd_events_client_url: "https://{{ etcd_events_access_address }}:2383"
etcd_access_addresses: |-
{% for item in etcd_hosts -%}
https://{{ hostvars[item]['main_access_ip'] | ansible.utils.ipwrap }}:2379{% if not loop.last %},{% endif %}
https://{{ hostvars[item]['etcd_access_address'] | default(hostvars[item]['ip'] | default(hostvars[item]['fallback_ip'])) }}:2379{% if not loop.last %},{% endif %}
{%- endfor %}
etcd_events_access_addresses_list: |-
[
{% for item in etcd_hosts -%}
'https://{{ hostvars[item].main_access_ip | ansible.utils.ipwrap }}:2383'{% if not loop.last %},{% endif %}
'https://{{ hostvars[item]['etcd_events_access_address'] | default(hostvars[item]['ip'] | default(hostvars[item]['fallback_ip'])) }}:2383'{% if not loop.last %},{% endif %}
{%- endfor %}
]
etcd_metrics_addresses: |-
{% for item in etcd_hosts -%}
https://{{ hostvars[item]['main_access_ip'] | ansible.utils.ipwrap }}:{{ etcd_metrics_port | default(2381) }}{% if not loop.last %},{% endif %}
https://{{ hostvars[item]['etcd_access_address'] | default(hostvars[item]['ip'] | default(hostvars[item]['fallback_ip'])) }}:{{ etcd_metrics_port | default(2381) }}{% if not loop.last %},{% endif %}
{%- endfor %}
etcd_events_access_addresses: "{{ etcd_events_access_addresses_list | join(',') }}"
etcd_events_access_addresses_semicolon: "{{ etcd_events_access_addresses_list | join(';') }}"
# user should set etcd_member_name in inventory/mycluster/hosts.ini
etcd_member_name: |-
{% for host in groups['etcd'] %}
{% if inventory_hostname == host %}{{ hostvars[host].etcd_member_name | default("etcd" + loop.index | string) }}{% endif %}
{% if inventory_hostname == host %}{{ hostvars[host].etcd_member_name | default("etcd" + loop.index | string) }}{% endif %}
{% endfor %}
etcd_peer_addresses: |-
{% for item in groups['etcd'] -%}
{{ hostvars[item].etcd_member_name | default("etcd" + loop.index | string) }}=https://{{ hostvars[item]['main_access_ip'] | ansible.utils.ipwrap }}:2380{% if not loop.last %},{% endif %}
{{ hostvars[item].etcd_member_name | default("etcd" + loop.index | string) }}=https://{{ hostvars[item].etcd_access_address | default(hostvars[item].ip | default(hostvars[item]['fallback_ip'])) }}:2380{% if not loop.last %},{% endif %}
{%- endfor %}
etcd_events_peer_addresses: |-
{% for item in groups['etcd'] -%}
{{ hostvars[item].etcd_member_name | default("etcd" + loop.index | string) }}-events=https://{{ hostvars[item]['main_access_ip'] | ansible.utils.ipwrap }}:2382{% if not loop.last %},{% endif %}
{{ hostvars[item].etcd_member_name | default("etcd" + loop.index | string) }}-events=https://{{ hostvars[item].etcd_events_access_address | default(hostvars[item].ip | default(hostvars[item]['fallback_ip'])) }}:2382{% if not loop.last %},{% endif %}
{%- endfor %}

etcd_heartbeat_interval: "250"
etcd_election_timeout: "5000"
etcd_snapshot_count: "100000"
etcd_snapshot_count: "10000"

certificates_key_size: 2048
certificates_duration: 36500

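As a worked example of the etcd endpoint lists above: for three etcd hosts whose main_access_ip values are 10.0.0.1 through 10.0.0.3 (illustrative addresses) and default member names, the templates render comma-joined strings along these lines:

  # etcd_access_addresses:
  #   https://10.0.0.1:2379,https://10.0.0.2:2379,https://10.0.0.3:2379
  # etcd_peer_addresses:
  #   etcd1=https://10.0.0.1:2380,etcd2=https://10.0.0.2:2380,etcd3=https://10.0.0.3:2380
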
Some files were not shown because too many files have changed in this diff