Mirror of https://github.com/kubernetes-sigs/kubespray.git, synced 2026-03-22 16:18:46 +03:00
Compare commits: 124 commits, range e89d509c55...component_
| SHA1 |
|---|
| f4a14af2c9 |
| 2f2e0c6163 |
| ae8c2a44ac |
| 78e3f64527 |
| 166bf4e329 |
| c8b1d271a3 |
| 4d45cb0f74 |
| 7b0e730624 |
| a0a164375d |
| 32d990d1e4 |
| b6aa645f5e |
| e3737592f5 |
| 22fb8f8c98 |
| bf18b142d0 |
| 558764dac6 |
| 37e321c8bf |
| f4ccdb5e72 |
| fcecaf6943 |
| 37f7a86014 |
| fff7f10a85 |
| dc09298f7e |
| 680db0c921 |
| 9977d4dc10 |
| 1b6129566b |
| c3404c3685 |
| fba8708486 |
| 8dacb9cd16 |
| df3f0a2341 |
| 62e90b3122 |
| 6b5cc5bdfb |
| a277cfdee7 |
| bc5528f585 |
| 2740c13c0c |
| 52b68bccad |
| 82c4c0afdf |
| 63a43cf6db |
| 666a3a9500 |
| 28f9c126bf |
| d41b629be3 |
| 851abbc2e3 |
| 17c72367bc |
| d91c7d7576 |
| 14b20ad2a2 |
| 72cb1356ef |
| 51304d57e2 |
| a0d7bef90e |
| a1ec88e290 |
| c9ff62944e |
| 20ab9179af |
| 5be35c811a |
| ad522d4aab |
| 9c511069cc |
| ed270fcab4 |
| 0615929727 |
| 48c25d9ebf |
| 0bffcacbe7 |
| c857252225 |
| a0f00761ac |
| 3a3e5d6954 |
| 2d6e508084 |
| 6d850a0dc5 |
| 6a517e165e |
| aaaf82f308 |
| e80087df93 |
| b7491b957b |
| 5cf8f3eefc |
| 1cbccf40a5 |
| bcdd702e19 |
| 20693afe82 |
| 1bbcfd8dd6 |
| 8d948f918f |
| 4d8d1b8aff |
| d80318301d |
| 31cce09fbc |
| 9a90c9d6c8 |
| b9e1e8577f |
| 5d1dd83b07 |
| b203586d6b |
| 88df61357b |
| 2edf176294 |
| 39744146b4 |
| 118b2dce02 |
| 4c5eda9f1e |
| 2512e0c50c |
| 633d39448e |
| 4d87ac1032 |
| 2342d0cd57 |
| e6a5266bad |
| 57f7c44718 |
| 5789dc839c |
| 3de6fa7220 |
| 9a9e8814e6 |
| 87a4f61d76 |
| 9975b5d525 |
| 9d06ce1a8d |
| bce107ce3d |
| 7d7a42d931 |
| 5183679a89 |
| b4fe577203 |
| bde51ebddf |
| 381426d6d5 |
| b3ee6d6b75 |
| 7436d63faa |
| 6138c6a1a2 |
| 6115eba3c3 |
| 1c008d79b1 |
| b4bbec6772 |
| 5c6ee4852a |
| 8190f952c1 |
| 3edc3d7a36 |
| 2f3f1d7e65 |
| 71c69ec12c |
| dab0947150 |
| 5488e7d805 |
| ca9873cfcb |
| 65f33c3ef0 |
| 5eccf9ea6c |
| db599b3475 |
| 47140083dc |
| 2d179879a0 |
| 61b8e4ce84 |
| 97a3776d8e |
| 990695de7b |
| 4059c699dc |
.ansible-lint

```diff
@@ -1,5 +1,4 @@
 ---
-parseable: true
 skip_list:
   # see https://docs.ansible.com/ansible-lint/rules/default_rules.html for a list of all default rules

@@ -34,6 +33,8 @@ skip_list:
   # Disable run-once check with free strategy
   # (Disabled in June 2023 after ansible upgrade; FIXME)
   - 'run-once[task]'
+
+  - 'jinja[spacing]'
 exclude_paths:
   # Generated files
   - tests/files/custom_cni/cilium.yaml
```
.github/workflows/auto-label-os.yml (6 changes, vendored)
```diff
@@ -13,16 +13,16 @@ jobs:
       issues: write

     steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd

       - name: Parse issue form
-        uses: stefanbuck/github-issue-parser@2ea9b35a8c584529ed00891a8f7e41dc46d0441e
+        uses: stefanbuck/github-issue-parser@10dcc54158ba4c137713d9d69d70a2da63b6bda3
         id: issue-parser
         with:
           template-path: .github/ISSUE_TEMPLATE/bug-report.yaml

       - name: Set labels based on OS field
-        uses: redhat-plumbers-in-action/advanced-issue-labeler@e38e6809c5420d038eed380d49ee9a6ca7c92dbf
+        uses: redhat-plumbers-in-action/advanced-issue-labeler@b80ae64e3e156e9c111b075bfa04b295d54e8e2e
         with:
           issue-form: ${{ steps.issue-parser.outputs.jsonString }}
           section: os
```
```diff
@@ -13,14 +13,14 @@ jobs:
     outputs:
       branches: ${{ steps.get-branches.outputs.data }}
     steps:
-      - uses: octokit/graphql-action@8ad880e4d437783ea2ab17010324de1075228110
+      - uses: octokit/graphql-action@ddde8ebb2493e79f390e6449c725c21663a67505
         id: get-branches
         with:
           query: |
             query get_release_branches($owner:String!, $name:String!) {
               repository(owner:$owner, name:$name) {
                 refs(refPrefix: "refs/heads/",
-                     first: 1, # TODO increment once we have release branch with the new checksums format
+                     first: 2, # TODO increment once we have release branch with the new checksums format
                      query: "release-",
                      orderBy: {
                        field: ALPHABETICAL,
```
.github/workflows/upgrade-patch-versions.yml (6 changes, vendored)
```diff
@@ -11,7 +11,7 @@ jobs:
   update-patch-versions:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd
         with:
           ref: ${{ inputs.branch }}
       - uses: actions/setup-python@v6
@@ -22,14 +22,14 @@ jobs:
       - run: update-hashes
         env:
           API_KEY: ${{ secrets.GITHUB_TOKEN }}
-      - uses: actions/cache@v4
+      - uses: actions/cache@v5
        with:
          key: pre-commit-hook-propagate
          path: |
            ~/.cache/pre-commit
       - run: pre-commit run --all-files propagate-ansible-variables
         continue-on-error: true
-      - uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e
+      - uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0
         with:
           commit-message: Patch versions updates
           title: Patch versions updates - ${{ inputs.branch }}
```
```diff
@@ -24,7 +24,7 @@ variables:
   ANSIBLE_REMOTE_USER: kubespray
   ANSIBLE_PRIVATE_KEY_FILE: /tmp/id_rsa
   ANSIBLE_INVENTORY: /tmp/inventory
-  ANSIBLE_STDOUT_CALLBACK: "debug"
+  ANSIBLE_STDOUT_CALLBACK: "default"
   RESET_CHECK: "false"
   REMOVE_NODE_CHECK: "false"
   UPGRADE_TEST: "false"
```
```diff
@@ -4,7 +4,7 @@
   interruptible: true
   script:
     - ansible-playbook tests/cloud_playbooks/create-kubevirt.yml
-      -c local -e @"tests/files/${TESTCASE}.yml"
+      -e @"tests/files/${TESTCASE}.yml"
     - ./tests/scripts/testcases_run.sh
   variables:
     ANSIBLE_TIMEOUT: "120"
@@ -43,6 +43,7 @@ pr:
   - fedora39-kube-router
   - openeuler24-calico
   - rockylinux9-cilium
+  - rockylinux10-cilium
   - ubuntu22-calico-all-in-one
   - ubuntu22-calico-all-in-one-upgrade
   - ubuntu24-calico-etcd-datastore
@@ -127,6 +128,7 @@ pr_extended:
   - debian12-docker
   - debian13-calico
   - rockylinux9-calico
+  - rockylinux10-calico
   - ubuntu22-all-in-one-docker
   - ubuntu24-all-in-one-docker
   - ubuntu24-calico-all-in-one
```
```diff
@@ -37,7 +37,6 @@ terraform_validate:
     - hetzner
     - vsphere
     - upcloud
-    - nifcloud

 .terraform_apply:
   extends: .terraform_install
@@ -89,11 +88,10 @@ tf-elastx_cleanup:
     - ./scripts/openstack-cleanup/main.py
   allow_failure: true

-tf-elastx_ubuntu20-calico:
+tf-elastx_ubuntu24-calico:
   extends: .terraform_apply
   stage: deploy-part1
   when: on_success
   allow_failure: true
   variables:
     <<: *elastx_variables
     PROVIDER: openstack
@@ -116,5 +114,5 @@ tf-elastx_ubuntu20-calico:
     TF_VAR_az_list_node: '["sto1"]'
     TF_VAR_flavor_k8s_master: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
     TF_VAR_flavor_k8s_node: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
-    TF_VAR_image: ubuntu-20.04-server-latest
+    TF_VAR_image: ubuntu-24.04-server-latest
     TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
```
```diff
@@ -36,7 +36,7 @@ vagrant:
     policy: pull-push # TODO: change to "pull" when not on main
   stage: deploy-extended
   rules:
-    - if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
+    - if: $PR_LABELS =~ /.*ci-full.*/
       when: on_success
     - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci"
       when: on_success
```
.pre-commit-config.yaml

```diff
@@ -1,7 +1,7 @@
 ---
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v5.0.0
+    rev: v6.0.0
     hooks:
       - id: check-added-large-files
       - id: check-case-conflict
@@ -15,13 +15,13 @@ repos:
       - id: trailing-whitespace

   - repo: https://github.com/adrienverge/yamllint.git
-    rev: v1.35.1
+    rev: v1.37.1
     hooks:
       - id: yamllint
         args: [--strict]

   - repo: https://github.com/shellcheck-py/shellcheck-py
-    rev: v0.10.0.1
+    rev: v0.11.0.1
     hooks:
       - id: shellcheck
         args: ["--severity=error"]
@@ -29,7 +29,7 @@ repos:
         files: "\\.sh$"

   - repo: https://github.com/ansible/ansible-lint
-    rev: v25.1.1
+    rev: v25.11.0
     hooks:
       - id: ansible-lint
         additional_dependencies:
@@ -38,7 +38,7 @@ repos:
           - distlib

   - repo: https://github.com/golangci/misspell
-    rev: v0.6.0
+    rev: v0.7.0
     hooks:
       - id: misspell
         exclude: "OWNERS_ALIASES$"
```
Dockerfile

```diff
@@ -35,8 +35,8 @@ RUN --mount=type=bind,source=requirements.txt,target=requirements.txt \
 SHELL ["/bin/bash", "-o", "pipefail", "-c"]

 RUN OS_ARCHITECTURE=$(dpkg --print-architecture) \
-    && curl -L "https://dl.k8s.io/release/v1.34.1/bin/linux/${OS_ARCHITECTURE}/kubectl" -o /usr/local/bin/kubectl \
-    && echo "$(curl -L "https://dl.k8s.io/release/v1.34.1/bin/linux/${OS_ARCHITECTURE}/kubectl.sha256")" /usr/local/bin/kubectl | sha256sum --check \
+    && curl -L "https://dl.k8s.io/release/v1.34.5/bin/linux/${OS_ARCHITECTURE}/kubectl" -o /usr/local/bin/kubectl \
+    && echo "$(curl -L "https://dl.k8s.io/release/v1.34.5/bin/linux/${OS_ARCHITECTURE}/kubectl.sha256")" /usr/local/bin/kubectl | sha256sum --check \
     && chmod a+x /usr/local/bin/kubectl

 COPY *.yml ./
```
README.md (24 changes)
````diff
@@ -22,7 +22,7 @@ Ensure you have installed Docker then

 ```ShellSession
 docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
   --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
-  quay.io/kubespray/kubespray:v2.29.0 bash
+  quay.io/kubespray/kubespray:v2.30.0 bash
 # Inside the container you may now run the kubespray playbooks:
 ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
 ```
@@ -89,13 +89,13 @@ vagrant up
 - **Flatcar Container Linux by Kinvolk**
 - **Debian** Bookworm, Bullseye, Trixie
 - **Ubuntu** 22.04, 24.04
-- **CentOS/RHEL** [8, 9](docs/operating_systems/rhel.md#rhel-8)
+- **CentOS Stream / RHEL** [9, 10](docs/operating_systems/rhel.md#rhel-8)
 - **Fedora** 39, 40
 - **Fedora CoreOS** (see [fcos Note](docs/operating_systems/fcos.md))
 - **openSUSE** Leap 15.x/Tumbleweed
-- **Oracle Linux** [8, 9](docs/operating_systems/rhel.md#rhel-8)
-- **Alma Linux** [8, 9](docs/operating_systems/rhel.md#rhel-8)
-- **Rocky Linux** [8, 9](docs/operating_systems/rhel.md#rhel-8)
+- **Oracle Linux** [9, 10](docs/operating_systems/rhel.md#rhel-8)
+- **Alma Linux** [9, 10](docs/operating_systems/rhel.md#rhel-8)
+- **Rocky Linux** [9, 10](docs/operating_systems/rhel.md#rhel-8) (experimental in 10: see [Rocky Linux 10 notes](docs/operating_systems/rhel.md#rocky-linux-10))
 - **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/operating_systems/kylinlinux.md))
 - **Amazon Linux 2** (experimental: see [amazon linux notes](docs/operating_systems/amazonlinux.md))
 - **UOS Linux** (experimental: see [uos linux notes](docs/operating_systems/uoslinux.md))
@@ -111,20 +111,20 @@ Note:
 <!-- BEGIN ANSIBLE MANAGED BLOCK -->

 - Core
-  - [kubernetes](https://github.com/kubernetes/kubernetes) 1.34.1
-  - [etcd](https://github.com/etcd-io/etcd) 3.5.23
+  - [kubernetes](https://github.com/kubernetes/kubernetes) 1.34.5
+  - [etcd](https://github.com/etcd-io/etcd) 3.5.27
   - [docker](https://www.docker.com/) 28.3
-  - [containerd](https://containerd.io/) 2.1.4
-  - [cri-o](http://cri-o.io/) 1.34.1 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). Only on fedora, ubuntu and centos based OS)
+  - [containerd](https://containerd.io/) 2.2.2
+  - [cri-o](http://cri-o.io/) 1.34.6 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). Only on fedora, ubuntu and centos based OS)
 - Network Plugin
   - [cni-plugins](https://github.com/containernetworking/plugins) 1.8.0
-  - [calico](https://github.com/projectcalico/calico) 3.30.3
-  - [cilium](https://github.com/cilium/cilium) 1.18.3
+  - [calico](https://github.com/projectcalico/calico) 3.30.6
+  - [cilium](https://github.com/cilium/cilium) 1.18.6
   - [flannel](https://github.com/flannel-io/flannel) 0.27.3
   - [kube-ovn](https://github.com/alauda/kube-ovn) 1.12.21
   - [kube-router](https://github.com/cloudnativelabs/kube-router) 2.1.1
   - [multus](https://github.com/k8snetworkplumbingwg/multus-cni) 4.2.2
-  - [kube-vip](https://github.com/kube-vip/kube-vip) 0.8.0
+  - [kube-vip](https://github.com/kube-vip/kube-vip) 1.0.3
 - Application
   - [cert-manager](https://github.com/jetstack/cert-manager) 1.15.3
   - [coredns](https://github.com/coredns/coredns) 1.12.1
````
RELEASE.md

```diff
@@ -15,7 +15,7 @@ The Kubespray Project is released on an as-needed basis. The process is as follo
 1. The release issue is closed
 1. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
 1. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
-1. Create/Update Issue for upgradeing kubernetes and [k8s-conformance](https://github.com/cncf/k8s-conformance)
+1. Create/Update Issue for upgrading kubernetes and [k8s-conformance](https://github.com/cncf/k8s-conformance)

 ## Major/minor releases and milestones
```
contrib/collection.sh (9 changes, new executable file)
```diff
@@ -0,0 +1,9 @@
+#!/bin/bash -eux
+# Install collection from source assuming dependencies are present.
+# Run in SemaphoreUI this bash script can install Kubespray from the repo
+NAMESPACE=kubernetes_sigs
+COLLECTION=kubespray
+MY_VER=$(grep '^version:' galaxy.yml|cut -d: -f2|sed 's/ //')
+
+ansible-galaxy collection build --force --output-path .
+ansible-galaxy collection install --offline --force $NAMESPACE-$COLLECTION-$MY_VER.tar.gz
```
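The script derives the collection version from `galaxy.yml` (bumped to 2.30.1 elsewhere in this comparison). For illustration only, the built tarball could also be consumed declaratively through a requirements file; the file path and tarball name below are assumptions based on that version, not part of the repository:

```yaml
# requirements.yml — illustrative sketch; the tarball name assumes
# galaxy.yml version 2.30.1 and a build run from the repository root.
collections:
  - name: ./kubernetes_sigs-kubespray-2.30.1.tar.gz
    type: file
```

Installed either way, the collection is then addressed as `kubernetes_sigs.kubespray` from playbooks.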
```diff
@@ -20,7 +20,6 @@ function create_container_image_tar() {

   kubectl describe cronjobs,jobs,pods --all-namespaces | grep " Image:" | awk '{print $2}' | sort | uniq > "${IMAGES}"
   # NOTE: etcd and pause cannot be seen as pods.
-  # The pause image is used for --pod-infra-container-image option of kubelet.
   kubectl cluster-info dump | grep -E "quay.io/coreos/etcd:|registry.k8s.io/pause:" | sed s@\"@@g >> "${IMAGES}"
 else
   echo "Getting images from file \"${IMAGES_FROM_FILE}\""
```
contrib/terraform/nifcloud/.gitignore (5 changes, vendored, deleted)
```diff
@@ -1,5 +0,0 @@
-*.tfstate*
-.terraform.lock.hcl
-.terraform
-
-sample-inventory/inventory.ini
```
contrib/terraform/nifcloud/README.md (deleted)

````diff
@@ -1,138 +0,0 @@
-# Kubernetes on NIFCLOUD with Terraform
-
-Provision a Kubernetes cluster on [NIFCLOUD](https://pfs.nifcloud.com/) using Terraform and Kubespray
-
-## Overview
-
-The setup looks like following
-
-```text
-                       Kubernetes cluster
-                      +----------------------------+
-+---------------+     |   +--------------------+   |
-|               |     |   | +--------------------+ |
-| API server LB +-------->| |                    | |
-|               |     |   | | Control Plane/etcd | |
-+---------------+     |   | | node(s)            | |
-                      |   +-+                    | |
-                      |     +--------------------+ |
-                      |              ^             |
-                      |              |             |
-                      |              v             |
-                      |   +--------------------+   |
-                      |   | +--------------------+ |
-                      |   | |                    | |
-                      |   | | Worker             | |
-                      |   | | node(s)            | |
-                      |   +-+                    | |
-                      |     +--------------------+ |
-                      +----------------------------+
-```
-
-## Requirements
-
-* Terraform 1.3.7
-
-## Quickstart
-
-### Export Variables
-
-* Your NIFCLOUD credentials:
-
-  ```bash
-  export NIFCLOUD_ACCESS_KEY_ID=<YOUR ACCESS KEY>
-  export NIFCLOUD_SECRET_ACCESS_KEY=<YOUR SECRET ACCESS KEY>
-  ```
-
-* The SSH KEY used to connect to the instance:
-  * FYI: [Cloud Help(SSH Key)](https://pfs.nifcloud.com/help/ssh.htm)
-
-  ```bash
-  export TF_VAR_SSHKEY_NAME=<YOUR SSHKEY NAME>
-  ```
-
-* The IP address to connect to bastion server:
-
-  ```bash
-  export TF_VAR_working_instance_ip=$(curl ifconfig.me)
-  ```
-
-### Create The Infrastructure
-
-* Run terraform:
-
-  ```bash
-  terraform init
-  terraform apply -var-file ./sample-inventory/cluster.tfvars
-  ```
-
-### Setup The Kubernetes
-
-* Generate cluster configuration file:
-
-  ```bash
-  ./generate-inventory.sh > sample-inventory/inventory.ini
-  ```
-
-* Export Variables:
-
-  ```bash
-  BASTION_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.bastion_info | to_entries[].value.public_ip')
-  API_LB_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.control_plane_lb')
-  CP01_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.control_plane_info | to_entries[0].value.private_ip')
-  export ANSIBLE_SSH_ARGS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ProxyCommand=\"ssh root@${BASTION_IP} -W %h:%p\""
-  ```
-
-* Set ssh-agent"
-
-  ```bash
-  eval `ssh-agent`
-  ssh-add <THE PATH TO YOUR SSH KEY>
-  ```
-
-* Run cluster.yml playbook:
-
-  ```bash
-  cd ./../../../
-  ansible-playbook -i contrib/terraform/nifcloud/inventory/inventory.ini cluster.yml
-  ```
-
-### Connecting to Kubernetes
-
-* [Install kubectl](https://kubernetes.io/docs/tasks/tools/) on the localhost
-* Fetching kubeconfig file:
-
-  ```bash
-  mkdir -p ~/.kube
-  scp -o ProxyCommand="ssh root@${BASTION_IP} -W %h:%p" root@${CP01_IP}:/etc/kubernetes/admin.conf ~/.kube/config
-  ```
-
-* Rewrite /etc/hosts
-
-  ```bash
-  sudo echo "${API_LB_IP} lb-apiserver.kubernetes.local" >> /etc/hosts
-  ```
-
-* Run kubectl
-
-  ```bash
-  kubectl get node
-  ```
-
-## Variables
-
-* `region`: Region where to run the cluster
-* `az`: Availability zone where to run the cluster
-* `private_ip_bn`: Private ip address of bastion server
-* `private_network_cidr`: Subnet of private network
-* `instances_cp`: Machine to provision as Control Plane. Key of this object will be used as part of the machine' name
-  * `private_ip`: private ip address of machine
-* `instances_wk`: Machine to provision as Worker Node. Key of this object will be used as part of the machine' name
-  * `private_ip`: private ip address of machine
-* `instance_key_name`: The key name of the Key Pair to use for the instance
-* `instance_type_bn`: The instance type of bastion server
-* `instance_type_wk`: The instance type of worker node
-* `instance_type_cp`: The instance type of control plane
-* `image_name`: OS image used for the instance
-* `working_instance_ip`: The IP address to connect to bastion server
-* `accounting_type`: Accounting type. (1: monthly, 2: pay per use)
````
contrib/terraform/nifcloud/generate-inventory.sh (deleted)

```diff
@@ -1,64 +0,0 @@
-#!/bin/bash
-
-#
-# Generates a inventory file based on the terraform output.
-# After provisioning a cluster, simply run this command and supply the terraform state file
-# Default state file is terraform.tfstate
-#
-
-set -e
-
-TF_OUT=$(terraform output -json)
-
-CONTROL_PLANES=$(jq -r '.kubernetes_cluster.value.control_plane_info | to_entries[]' <(echo "${TF_OUT}"))
-WORKERS=$(jq -r '.kubernetes_cluster.value.worker_info | to_entries[]' <(echo "${TF_OUT}"))
-mapfile -t CONTROL_PLANE_NAMES < <(jq -r '.key' <(echo "${CONTROL_PLANES}"))
-mapfile -t WORKER_NAMES < <(jq -r '.key' <(echo "${WORKERS}"))
-
-API_LB=$(jq -r '.kubernetes_cluster.value.control_plane_lb' <(echo "${TF_OUT}"))
-
-echo "[all]"
-# Generate control plane hosts
-i=1
-for name in "${CONTROL_PLANE_NAMES[@]}"; do
-  private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${CONTROL_PLANES}"))
-  echo "${name} ansible_user=root ansible_host=${private_ip} access_ip=${private_ip} ip=${private_ip} etcd_member_name=etcd${i}"
-  i=$(( i + 1 ))
-done
-
-# Generate worker hosts
-for name in "${WORKER_NAMES[@]}"; do
-  private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${WORKERS}"))
-  echo "${name} ansible_user=root ansible_host=${private_ip} access_ip=${private_ip} ip=${private_ip}"
-done
-
-API_LB=$(jq -r '.kubernetes_cluster.value.control_plane_lb' <(echo "${TF_OUT}"))
-
-echo ""
-echo "[all:vars]"
-echo "upstream_dns_servers=['8.8.8.8','8.8.4.4']"
-echo "loadbalancer_apiserver={'address':'${API_LB}','port':'6443'}"
-
-
-echo ""
-echo "[kube_control_plane]"
-for name in "${CONTROL_PLANE_NAMES[@]}"; do
-  echo "${name}"
-done
-
-echo ""
-echo "[etcd]"
-for name in "${CONTROL_PLANE_NAMES[@]}"; do
-  echo "${name}"
-done
-
-echo ""
-echo "[kube_node]"
-for name in "${WORKER_NAMES[@]}"; do
-  echo "${name}"
-done
-
-echo ""
-echo "[k8s_cluster:children]"
-echo "kube_control_plane"
-echo "kube_node"
```
contrib/terraform/nifcloud/main.tf (deleted)

```diff
@@ -1,36 +0,0 @@
-provider "nifcloud" {
-  region = var.region
-}
-
-module "kubernetes_cluster" {
-  source = "./modules/kubernetes-cluster"
-
-  availability_zone = var.az
-  prefix            = "dev"
-
-  private_network_cidr = var.private_network_cidr
-
-  instance_key_name = var.instance_key_name
-  instances_cp      = var.instances_cp
-  instances_wk      = var.instances_wk
-  image_name        = var.image_name
-
-  instance_type_bn = var.instance_type_bn
-  instance_type_cp = var.instance_type_cp
-  instance_type_wk = var.instance_type_wk
-
-  private_ip_bn = var.private_ip_bn
-
-  additional_lb_filter = [var.working_instance_ip]
-}
-
-resource "nifcloud_security_group_rule" "ssh_from_bastion" {
-  security_group_names = [
-    module.kubernetes_cluster.security_group_name.bastion
-  ]
-  type      = "IN"
-  from_port = 22
-  to_port   = 22
-  protocol  = "TCP"
-  cidr_ip   = var.working_instance_ip
-}
```
contrib/terraform/nifcloud/modules/kubernetes-cluster/main.tf (deleted)

```diff
@@ -1,301 +0,0 @@
-#################################################
-##
-## Local variables
-##
-locals {
-  # e.g. east-11 is 11
-  az_num = reverse(split("-", var.availability_zone))[0]
-  # e.g. east-11 is e11
-  az_short_name = "${substr(reverse(split("-", var.availability_zone))[1], 0, 1)}${local.az_num}"
-
-  # Port used by the protocol
-  port_ssh     = 22
-  port_kubectl = 6443
-  port_kubelet = 10250
-
-  # calico: https://docs.tigera.io/calico/latest/getting-started/kubernetes/requirements#network-requirements
-  port_bgp   = 179
-  port_vxlan = 4789
-  port_etcd  = 2379
-}
-
-#################################################
-##
-## General
-##
-
-# data
-data "nifcloud_image" "this" {
-  image_name = var.image_name
-}
-
-# private lan
-resource "nifcloud_private_lan" "this" {
-  private_lan_name  = "${var.prefix}lan"
-  availability_zone = var.availability_zone
-  cidr_block        = var.private_network_cidr
-  accounting_type   = var.accounting_type
-}
-
-#################################################
-##
-## Bastion
-##
-resource "nifcloud_security_group" "bn" {
-  group_name        = "${var.prefix}bn"
-  description       = "${var.prefix} bastion"
-  availability_zone = var.availability_zone
-}
-
-resource "nifcloud_instance" "bn" {
-
-  instance_id    = "${local.az_short_name}${var.prefix}bn01"
-  security_group = nifcloud_security_group.bn.group_name
-  instance_type  = var.instance_type_bn
-
-  user_data = templatefile("${path.module}/templates/userdata.tftpl", {
-    private_ip_address = var.private_ip_bn
-    ssh_port           = local.port_ssh
-    hostname           = "${local.az_short_name}${var.prefix}bn01"
-  })
-
-  availability_zone = var.availability_zone
-  accounting_type   = var.accounting_type
-  image_id          = data.nifcloud_image.this.image_id
-  key_name          = var.instance_key_name
-
-  network_interface {
-    network_id = "net-COMMON_GLOBAL"
-  }
-  network_interface {
-    network_id = nifcloud_private_lan.this.network_id
-    ip_address = "static"
-  }
-
-  # The image_id changes when the OS image type is demoted from standard to public.
-  lifecycle {
-    ignore_changes = [
-      image_id,
-      user_data,
-    ]
-  }
-}
-
-#################################################
-##
-## Control Plane
-##
-resource "nifcloud_security_group" "cp" {
-  group_name        = "${var.prefix}cp"
-  description       = "${var.prefix} control plane"
-  availability_zone = var.availability_zone
-}
-
-resource "nifcloud_instance" "cp" {
-  for_each = var.instances_cp
-
-  instance_id    = "${local.az_short_name}${var.prefix}${each.key}"
-  security_group = nifcloud_security_group.cp.group_name
-  instance_type  = var.instance_type_cp
-  user_data = templatefile("${path.module}/templates/userdata.tftpl", {
-    private_ip_address = each.value.private_ip
-    ssh_port           = local.port_ssh
-    hostname           = "${local.az_short_name}${var.prefix}${each.key}"
-  })
-
-  availability_zone = var.availability_zone
-  accounting_type   = var.accounting_type
-  image_id          = data.nifcloud_image.this.image_id
-  key_name          = var.instance_key_name
-
-  network_interface {
-    network_id = "net-COMMON_GLOBAL"
-  }
-  network_interface {
-    network_id = nifcloud_private_lan.this.network_id
-    ip_address = "static"
-  }
-
-  # The image_id changes when the OS image type is demoted from standard to public.
-  lifecycle {
-    ignore_changes = [
-      image_id,
-      user_data,
-    ]
-  }
-}
-
-resource "nifcloud_load_balancer" "this" {
-  load_balancer_name = "${local.az_short_name}${var.prefix}cp"
-  accounting_type    = var.accounting_type
-  balancing_type     = 1 // Round-Robin
-  load_balancer_port = local.port_kubectl
-  instance_port      = local.port_kubectl
-  instances          = [for v in nifcloud_instance.cp : v.instance_id]
-  filter = concat(
-    [for k, v in nifcloud_instance.cp : v.public_ip],
-    [for k, v in nifcloud_instance.wk : v.public_ip],
-    var.additional_lb_filter,
-  )
-  filter_type = 1 // Allow
-}
-
-#################################################
-##
-## Worker
-##
-resource "nifcloud_security_group" "wk" {
-  group_name        = "${var.prefix}wk"
-  description       = "${var.prefix} worker"
-  availability_zone = var.availability_zone
-}
-
-resource "nifcloud_instance" "wk" {
-  for_each = var.instances_wk
-
-  instance_id    = "${local.az_short_name}${var.prefix}${each.key}"
-  security_group = nifcloud_security_group.wk.group_name
-  instance_type  = var.instance_type_wk
-  user_data = templatefile("${path.module}/templates/userdata.tftpl", {
-    private_ip_address = each.value.private_ip
-    ssh_port           = local.port_ssh
-    hostname           = "${local.az_short_name}${var.prefix}${each.key}"
-  })
-
-  availability_zone = var.availability_zone
-  accounting_type   = var.accounting_type
-  image_id          = data.nifcloud_image.this.image_id
-  key_name          = var.instance_key_name
-
-  network_interface {
-    network_id = "net-COMMON_GLOBAL"
-  }
-  network_interface {
-    network_id = nifcloud_private_lan.this.network_id
-    ip_address = "static"
-  }
-
-  # The image_id changes when the OS image type is demoted from standard to public.
-  lifecycle {
-    ignore_changes = [
-      image_id,
-      user_data,
-    ]
-  }
-}
-
-#################################################
-##
-## Security Group Rule: Kubernetes
-##
-
-# ssh
-resource "nifcloud_security_group_rule" "ssh_from_bastion" {
-  security_group_names = [
-    nifcloud_security_group.wk.group_name,
-    nifcloud_security_group.cp.group_name,
-  ]
-  type                       = "IN"
-  from_port                  = local.port_ssh
-  to_port                    = local.port_ssh
-  protocol                   = "TCP"
-  source_security_group_name = nifcloud_security_group.bn.group_name
-}
-
-# kubectl
-resource "nifcloud_security_group_rule" "kubectl_from_worker" {
-  security_group_names = [
-    nifcloud_security_group.cp.group_name,
-  ]
-  type                       = "IN"
-  from_port                  = local.port_kubectl
-  to_port                    = local.port_kubectl
-  protocol                   = "TCP"
-  source_security_group_name = nifcloud_security_group.wk.group_name
-}
-
-# kubelet
-resource "nifcloud_security_group_rule" "kubelet_from_worker" {
-  security_group_names = [
-    nifcloud_security_group.cp.group_name,
-  ]
-  type                       = "IN"
-  from_port                  = local.port_kubelet
-  to_port                    = local.port_kubelet
-  protocol                   = "TCP"
-  source_security_group_name = nifcloud_security_group.wk.group_name
-}
-
-resource "nifcloud_security_group_rule" "kubelet_from_control_plane" {
-  security_group_names = [
-    nifcloud_security_group.wk.group_name,
-  ]
-  type                       = "IN"
-  from_port                  = local.port_kubelet
-  to_port                    = local.port_kubelet
-  protocol                   = "TCP"
-  source_security_group_name = nifcloud_security_group.cp.group_name
-}
-
-#################################################
-##
-## Security Group Rule: calico
-##
-
-# vslan
-resource "nifcloud_security_group_rule" "vxlan_from_control_plane" {
-  security_group_names = [
-    nifcloud_security_group.wk.group_name,
-  ]
-  type                       = "IN"
-  from_port                  = local.port_vxlan
-  to_port                    = local.port_vxlan
-  protocol                   = "UDP"
-  source_security_group_name = nifcloud_security_group.cp.group_name
-}
-
-resource "nifcloud_security_group_rule" "vxlan_from_worker" {
-  security_group_names = [
-    nifcloud_security_group.cp.group_name,
-  ]
-  type                       = "IN"
-  from_port                  = local.port_vxlan
-  to_port                    = local.port_vxlan
-  protocol                   = "UDP"
-  source_security_group_name = nifcloud_security_group.wk.group_name
-}
-
-# bgp
-resource "nifcloud_security_group_rule" "bgp_from_control_plane" {
-  security_group_names = [
-    nifcloud_security_group.wk.group_name,
-  ]
-  type                       = "IN"
-  from_port                  = local.port_bgp
-  to_port                    = local.port_bgp
-  protocol                   = "TCP"
-  source_security_group_name = nifcloud_security_group.cp.group_name
-}
-
-resource "nifcloud_security_group_rule" "bgp_from_worker" {
-  security_group_names = [
-    nifcloud_security_group.cp.group_name,
-  ]
-  type                       = "IN"
-  from_port                  = local.port_bgp
-  to_port                    = local.port_bgp
-  protocol                   = "TCP"
-  source_security_group_name = nifcloud_security_group.wk.group_name
-}
-
-# etcd
-resource "nifcloud_security_group_rule" "etcd_from_worker" {
-  security_group_names = [
-    nifcloud_security_group.cp.group_name,
-  ]
-  type                       = "IN"
-  from_port                  = local.port_etcd
-  to_port                    = local.port_etcd
-  protocol                   = "TCP"
-  source_security_group_name = nifcloud_security_group.wk.group_name
-}
```
contrib/terraform/nifcloud/modules/kubernetes-cluster/outputs.tf (deleted)

```diff
@@ -1,48 +0,0 @@
-output "control_plane_lb" {
-  description = "The DNS name of LB for control plane"
-  value       = nifcloud_load_balancer.this.dns_name
-}
-
-output "security_group_name" {
-  description = "The security group used in the cluster"
-  value = {
-    bastion       = nifcloud_security_group.bn.group_name,
-    control_plane = nifcloud_security_group.cp.group_name,
-    worker        = nifcloud_security_group.wk.group_name,
-  }
-}
-
-output "private_network_id" {
-  description = "The private network used in the cluster"
-  value       = nifcloud_private_lan.this.id
-}
-
-output "bastion_info" {
-  description = "The basion information in cluster"
-  value = { (nifcloud_instance.bn.instance_id) : {
-    instance_id = nifcloud_instance.bn.instance_id,
-    unique_id   = nifcloud_instance.bn.unique_id,
-    private_ip  = nifcloud_instance.bn.private_ip,
-    public_ip   = nifcloud_instance.bn.public_ip,
-  } }
-}
-
-output "worker_info" {
-  description = "The worker information in cluster"
-  value = { for v in nifcloud_instance.wk : v.instance_id => {
-    instance_id = v.instance_id,
-    unique_id   = v.unique_id,
-    private_ip  = v.private_ip,
-    public_ip   = v.public_ip,
-  } }
-}
-
-output "control_plane_info" {
-  description = "The control plane information in cluster"
-  value = { for v in nifcloud_instance.cp : v.instance_id => {
-    instance_id = v.instance_id,
-    unique_id   = v.unique_id,
-    private_ip  = v.private_ip,
-    public_ip   = v.public_ip,
-  } }
-}
```
contrib/terraform/nifcloud/modules/kubernetes-cluster/templates/userdata.tftpl (deleted)

```diff
@@ -1,45 +0,0 @@
-#!/bin/bash
-
-#################################################
-##
-## IP Address
-##
-configure_private_ip_address () {
-  cat << EOS > /etc/netplan/01-netcfg.yaml
-network:
-  version: 2
-  renderer: networkd
-  ethernets:
-    ens192:
-      dhcp4: yes
-      dhcp6: yes
-      dhcp-identifier: mac
-    ens224:
-      dhcp4: no
-      dhcp6: no
-      addresses: [${private_ip_address}]
-EOS
-  netplan apply
-}
-configure_private_ip_address
-
-#################################################
-##
-## SSH
-##
-configure_ssh_port () {
-  sed -i 's/^#*Port [0-9]*/Port ${ssh_port}/' /etc/ssh/sshd_config
-}
-configure_ssh_port
-
-#################################################
-##
-## Hostname
-##
-hostnamectl set-hostname ${hostname}
-
-#################################################
-##
-## Disable swap files genereated by systemd-gpt-auto-generator
-##
-systemctl mask "dev-sda3.swap"
```
contrib/terraform/nifcloud/modules/kubernetes-cluster/versions.tf (deleted)

```diff
@@ -1,9 +0,0 @@
-terraform {
-  required_version = ">=1.3.7"
-  required_providers {
-    nifcloud = {
-      source  = "nifcloud/nifcloud"
-      version = ">= 1.8.0, < 2.0.0"
-    }
-  }
-}
```
contrib/terraform/nifcloud/modules/kubernetes-cluster/variables.tf (deleted)

```diff
@@ -1,81 +0,0 @@
-variable "availability_zone" {
-  description = "The availability zone"
-  type        = string
-}
-
-variable "prefix" {
-  description = "The prefix for the entire cluster"
-  type        = string
-  validation {
-    condition     = length(var.prefix) <= 5
-    error_message = "Must be a less than 5 character long."
-  }
-}
-
-variable "private_network_cidr" {
-  description = "The subnet of private network"
-  type        = string
-  validation {
-    condition     = can(cidrnetmask(var.private_network_cidr))
-    error_message = "Must be a valid IPv4 CIDR block address."
-  }
-}
-
-variable "private_ip_bn" {
-  description = "Private IP of bastion server"
-  type        = string
-}
-
-variable "instances_cp" {
-  type = map(object({
-    private_ip = string
-  }))
-}
-
-variable "instances_wk" {
-  type = map(object({
-    private_ip = string
-  }))
-}
-
-variable "instance_key_name" {
-  description = "The key name of the Key Pair to use for the instance"
-  type        = string
-}
-
-variable "instance_type_bn" {
-  description = "The instance type of bastion server"
-  type        = string
-}
-
-variable "instance_type_wk" {
-  description = "The instance type of worker"
-  type        = string
-}
-
-variable "instance_type_cp" {
-  description = "The instance type of control plane"
-  type        = string
-}
-
-variable "image_name" {
-  description = "The name of image"
-  type        = string
-}
-
-variable "additional_lb_filter" {
-  description = "Additional LB filter"
-  type        = list(string)
-}
-
-variable "accounting_type" {
-  type    = string
-  default = "1"
-  validation {
-    condition = anytrue([
-      var.accounting_type == "1", // Monthly
-      var.accounting_type == "2", // Pay per use
-    ])
-    error_message = "Must be a 1 or 2."
-  }
-}
```
contrib/terraform/nifcloud/output.tf (deleted)

```diff
@@ -1,3 +0,0 @@
-output "kubernetes_cluster" {
-  value = module.kubernetes_cluster
-}
```
contrib/terraform/nifcloud/sample-inventory/cluster.tfvars (deleted)

```diff
@@ -1,22 +0,0 @@
-region = "jp-west-1"
-az     = "west-11"
-
-instance_key_name = "deployerkey"
-
-instance_type_bn = "e-medium"
-instance_type_cp = "e-medium"
-instance_type_wk = "e-medium"
-
-private_network_cidr = "192.168.30.0/24"
-instances_cp = {
-  "cp01" : { private_ip : "192.168.30.11/24" }
-  "cp02" : { private_ip : "192.168.30.12/24" }
-  "cp03" : { private_ip : "192.168.30.13/24" }
-}
-instances_wk = {
-  "wk01" : { private_ip : "192.168.30.21/24" }
-  "wk02" : { private_ip : "192.168.30.22/24" }
-}
-private_ip_bn = "192.168.30.10/24"
-
-image_name = "Ubuntu Server 22.04 LTS"
```
contrib/terraform/nifcloud/sample-inventory/group_vars (deleted symlink)

```diff
@@ -1 +0,0 @@
-../../../../inventory/sample/group_vars
```
contrib/terraform/nifcloud/versions.tf (deleted)

```diff
@@ -1,9 +0,0 @@
-terraform {
-  required_version = ">=1.3.7"
-  required_providers {
-    nifcloud = {
-      source  = "nifcloud/nifcloud"
-      version = "1.8.0"
-    }
-  }
-}
```
contrib/terraform/nifcloud/variables.tf (deleted)

```diff
@@ -1,77 +0,0 @@
-variable "region" {
-  description = "The region"
-  type        = string
-}
-
-variable "az" {
-  description = "The availability zone"
-  type        = string
-}
-
-variable "private_ip_bn" {
-  description = "Private IP of bastion server"
-  type        = string
-}
-
-variable "private_network_cidr" {
-  description = "The subnet of private network"
-  type        = string
-  validation {
-    condition     = can(cidrnetmask(var.private_network_cidr))
-    error_message = "Must be a valid IPv4 CIDR block address."
-  }
-}
-
-variable "instances_cp" {
-  type = map(object({
-    private_ip = string
-  }))
-}
-
-variable "instances_wk" {
-  type = map(object({
-    private_ip = string
-  }))
-}
-
-variable "instance_key_name" {
-  description = "The key name of the Key Pair to use for the instance"
-  type        = string
-}
-
-variable "instance_type_bn" {
-  description = "The instance type of bastion server"
-  type        = string
-}
-
-variable "instance_type_wk" {
-  description = "The instance type of worker"
-  type        = string
-}
-
-variable "instance_type_cp" {
-  description = "The instance type of control plane"
-  type        = string
-}
-
-variable "image_name" {
-  description = "The name of image"
-  type        = string
-}
-
-variable "working_instance_ip" {
-  description = "The IP address to connect to bastion server."
-  type        = string
-}
-
-variable "accounting_type" {
-  type    = string
-  default = "2"
-  validation {
-    condition = anytrue([
-      var.accounting_type == "1", // Monthly
-      var.accounting_type == "2", // Pay per use
-    ])
-    error_message = "Must be a 1 or 2."
-  }
-}
```
contrib/terraform/openstack/README.md

```diff
@@ -281,9 +281,9 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
 |`k8s_allowed_remote_ips_ipv6` | List of IPv6 CIDR allowed to initiate a SSH connection, empty by default |
 |`k8s_allowed_egress_ipv6_ips` | List of IPv6 CIDRs allowed for egress traffic, `["::/0"]` by default |
 |`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
-|`worker_allowed_ports_ipv6` | List of ports to open on worker nodes for IPv6 CIDR blocks, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "::/0"}]` by default |
+|`worker_allowed_ports_ipv6` | List of ports to open on worker nodes for IPv6 CIDR blocks, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "::/0"}, { "protocol" = "ipv6-icmp", "port_range_min" = 0, "port_range_max" = 0, "remote_ip_prefix" = "::/0"}]` by default |
 |`master_allowed_ports` | List of ports to open on master nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default |
-|`master_allowed_ports_ipv6` | List of ports to open on master nodes for IPv6 CIDR blocks, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "::/0"}]`, empty by default |
+|`master_allowed_ports_ipv6` | List of ports to open on master nodes for IPv6 CIDR blocks, `[{ "protocol" = "ipv6-icmp", "port_range_min" = 0, "port_range_max" = 0, "remote_ip_prefix" = "::/0"}]` by default |
 |`node_root_volume_size_in_gb` | Size of the root volume for nodes, 0 to use ephemeral storage |
 |`master_root_volume_size_in_gb` | Size of the root volume for masters, 0 to use ephemeral storage |
 |`master_volume_type` | Volume type of the root volume for control_plane, 'Default' by default |
```
```diff
@@ -271,7 +271,14 @@ variable "master_allowed_ports" {
 variable "master_allowed_ports_ipv6" {
   type = list(any)

-  default = []
+  default = [
+    {
+      "protocol"         = "ipv6-icmp"
+      "port_range_min"   = 0
+      "port_range_max"   = 0
+      "remote_ip_prefix" = "::/0"
+    },
+  ]
 }

 variable "worker_allowed_ports" {
@@ -297,6 +304,12 @@ variable "worker_allowed_ports_ipv6" {
       "port_range_max"   = 32767
       "remote_ip_prefix" = "::/0"
     },
+    {
+      "protocol"         = "ipv6-icmp"
+      "port_range_min"   = 0
+      "port_range_max"   = 0
+      "remote_ip_prefix" = "::/0"
+    },
   ]
 }
```
docs/CNI/cilium.md

````diff
@@ -1,5 +1,13 @@
 # Cilium

+## Unprivileged agent configuration
+
+By default, Cilium is installed with `securityContext.privileged: false`. You need to set the `kube_owner` variable to `root` in the inventory:
+
+```yml
+kube_owner: root
+```
+
 ## IP Address Management (IPAM)

 IP Address Management (IPAM) is responsible for the allocation and management of IP addresses used by network endpoints (container and others) managed by Cilium. The default mode is "Cluster Scope".
@@ -237,7 +245,7 @@ cilium_operator_extra_volume_mounts:
 ## Choose Cilium version

 ```yml
-cilium_version: "1.18.3"
+cilium_version: "1.18.6"
 ```

 ## Add variable to config
````
```diff
@@ -65,9 +65,8 @@ In kubespray, the default runtime name is "runc", and it can be configured with
 containerd_runc_runtime:
   name: runc
   type: "io.containerd.runc.v2"
-  engine: ""
-  root: ""
   options:
+    Root: ""
     SystemdCgroup: "false"
     BinaryName: /usr/local/bin/my-runc
   base_runtime_spec: cri-base.json
```
```diff
@@ -45,10 +45,7 @@ Kubespray expects users to use one of the following variables sources for settin
 | - inventory host_vars | host specific vars overrides, group_vars is usually more practical |
 | **extra vars** (always win precedence) | override with ``ansible-playbook -e @foo.yml`` |

-[!IMPORTANT]
-Extra vars are best used to override kubespray internal variables, for instances, roles/vars/.
-Those vars are usually **not expected** (by Kubespray developers) to be modified by end users, and not part of Kubespray
-interface. Thus they can change, disappear, or break stuff unexpectedly.
+> Extra vars are best used to override kubespray internal variables, for instances, roles/vars/. Those vars are usually **not expected** (by Kubespray developers) to be modified by end users, and not part of Kubespray interface. Thus they can change, disappear, or break stuff unexpectedly.

 ## Ansible tags
```
````diff
@@ -196,11 +193,11 @@ You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mou
 to access the inventory and SSH key in the container, like this:

 ```ShellSession
-git checkout v2.29.0
-docker pull quay.io/kubespray/kubespray:v2.29.0
+git checkout v2.30.0
+docker pull quay.io/kubespray/kubespray:v2.30.0
 docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
   --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
-  quay.io/kubespray/kubespray:v2.29.0 bash
+  quay.io/kubespray/kubespray:v2.30.0 bash
 # Inside the container you may now run the kubespray playbooks:
 ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
 ```
````
```diff
@@ -15,8 +15,8 @@ fedora39 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x
 fedora40 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 flatcar4081 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
 openeuler24 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux10 | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
 rockylinux9 | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
 ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
 ubuntu22 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
 ubuntu24 | :white_check_mark: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: |
@@ -33,8 +33,8 @@ fedora39 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
 fedora40 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
 flatcar4081 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 openeuler24 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 ubuntu22 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
 ubuntu24 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
@@ -51,7 +51,7 @@ fedora39 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 fedora40 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
 flatcar4081 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 openeuler24 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 ubuntu22 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
 ubuntu24 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
```
````diff
@@ -21,6 +21,12 @@ metallb_enabled: true
 metallb_speaker_enabled: true
 ```

+By default, MetalLB resources are deployed into the `metallb-system` namespace. You can override this namespace using a variable.
+
+```yaml
+metallb_namespace: woodenlb-system
+```
+
 By default only the MetalLB BGP speaker is allowed to run on control plane nodes. If you have a single node cluster or a cluster where control plane are also worker nodes you may need to enable tolerations for the MetalLB controller:

 ```yaml
````
docs/operating_systems/rhel.md

```diff
@@ -38,3 +38,11 @@ you need to ensure they are using iptables-nft.
 An example how k8s do the autodetection can be found [in this PR](https://github.com/kubernetes/kubernetes/pull/82966)

 The kernel version is lower than the kubernetes 1.32 system validation, please refer to the [kernel requirements](../operations/kernel-requirements.md).
+
+## Rocky Linux 10
+
+(Experimental in Kubespray CI)
+
+The official Rocky Linux 10 cloud image does not include `kernel-module-extra`. Both Kube Proxy and CNI rely on this package, and since it relates to kernel version compatibility (which may require VM reboots, etc.), we haven't found an ideal solution.
+
+However, some users report that it doesn't affect them (minimal version). Therefore, the Kubespray CI Rocky Linux 10 image is built by Kubespray maintainers using `diskimage-builder`. For detailed methods, please refer to [the comments](https://github.com/kubernetes-sigs/kubespray/pull/12355#issuecomment-3705400093).
```
docs/operations/nodes.md

```diff
@@ -31,6 +31,8 @@ That's it.

 Append the new host to the inventory and run `cluster.yml`. You can NOT use `scale.yml` for that.

+**Note:** When adding new control plane nodes, always append them to the end of the `kube_control_plane` group in your inventory. Adding control plane nodes in the first position is not supported and will cause the playbook to fail.
+
 ### 2) Restart kube-system/nginx-proxy

 In all hosts, restart nginx-proxy pod. This pod is a local proxy for the apiserver. Kubespray will update its static config, but it needs to be restarted in order to reload.
```
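A minimal YAML inventory fragment honoring that ordering rule might look like the following sketch; the host names are hypothetical and not from the repository:

```yaml
# Hypothetical inventory fragment: node4 is the newly added control
# plane host and is appended last; putting it first is unsupported.
kube_control_plane:
  hosts:
    node1:
    node2:
    node4:
```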
galaxy.yml

```diff
@@ -2,7 +2,7 @@
 namespace: kubernetes_sigs
 description: Deploy a production ready Kubernetes cluster
 name: kubespray
-version: 2.30.0
+version: 2.30.1
 readme: README.md
 authors:
   - The Kubespray maintainers (https://kubernetes.slack.com/channels/kubespray)
```
```diff
@@ -38,6 +38,7 @@
       loadSidebar: 'docs/_sidebar.md',
       repo: 'https://github.com/kubernetes-sigs/kubespray',
       auto2top: true,
+      noCompileLinks: ['.*\.ini'],
       logo: '/logo/logo-clear.png'
     }
   </script>
```
```diff
@@ -11,15 +11,15 @@
 # containerd_runc_runtime:
 #   name: runc
 #   type: "io.containerd.runc.v2"
-#   engine: ""
-#   root: ""
+#   options:
+#     Root: ""

 # containerd_additional_runtimes:
 # Example for Kata Containers as additional runtime:
 #   - name: kata
 #     type: "io.containerd.kata.v2"
-#     engine: ""
-#     root: ""
+#     options:
+#       Root: ""

 # containerd_grpc_max_recv_message_size: 16777216
 # containerd_grpc_max_send_message_size: 16777216
```
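Uncommented, the kata sample above would follow the new `options:` schema. A sketch, assuming a Kata Containers shim is actually installed on the nodes:

```yaml
# Sketch of the commented sample in active form; requires the
# kata-containers runtime to be present on the nodes.
containerd_additional_runtimes:
  - name: kata
    type: "io.containerd.kata.v2"
    options:
      Root: ""
```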
```diff
@@ -22,7 +22,8 @@ local_release_dir: "/tmp/releases"
 # Random shifts for retrying failed ops like pushing/downloading
 retry_stagger: 5

-# This is the user that owns tha cluster installation.
+# This is the user that owns the cluster installation.
+# Note: cilium needs to set kube_owner to root https://kubespray.io/#/docs/CNI/cilium?id=unprivileged-agent-configuration
 kube_owner: kube

 # This is the group that the cert creation scripts chgrp the
```
```diff
@@ -56,8 +56,8 @@ cilium_l2announcements: false
 #
 # Only effective when monitor aggregation is set to "medium" or higher.
 # cilium_monitor_aggregation_flags: "all"
-# Kube Proxy Replacement mode (strict/partial)
-# cilium_kube_proxy_replacement: partial
+# Kube Proxy Replacement mode (true/false)
+# cilium_kube_proxy_replacement: false

 # If upgrading from Cilium < 1.5, you may want to override some of these options
 # to prevent service disruptions. See also:
```
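Recent Cilium releases accept only a boolean for kube-proxy replacement, hence the comment change above. A minimal group_vars sketch turning it on, using only the variable documented in that file, could be:

```yaml
# Sketch only: enables Cilium's eBPF kube-proxy replacement via the
# variable shown in the defaults above; true/false are the accepted values.
cilium_kube_proxy_replacement: true
```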
```diff
@@ -47,8 +47,8 @@ RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1 \
     && pip install --no-compile --no-cache-dir pip -U \
     && pip install --no-compile --no-cache-dir -r tests/requirements.txt \
     && pip install --no-compile --no-cache-dir -r requirements.txt \
-    && curl -L https://dl.k8s.io/release/v1.34.1/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \
-    && echo $(curl -L https://dl.k8s.io/release/v1.34.1/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \
+    && curl -L https://dl.k8s.io/release/v1.34.5/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \
+    && echo $(curl -L https://dl.k8s.io/release/v1.34.5/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \
     && chmod a+x /usr/local/bin/kubectl \
     # Install Vagrant
     && curl -LO https://releases.hashicorp.com/vagrant/${VAGRANT_VERSION}/vagrant_${VAGRANT_VERSION}-1_$(dpkg --print-architecture).deb \
```
```diff
@@ -16,6 +16,8 @@
     - name: Gather and compute network facts
       import_role:
         name: network_facts
+      tags:
+        - always
     - name: Gather minimal facts
       setup:
         gather_subset: '!all'
```
```diff
@@ -55,7 +55,7 @@
     - { role: kubernetes-apps/kubelet-csr-approver, tags: kubelet-csr-approver }
     - { role: container-engine, tags: "container-engine", when: deploy_container_engine }
     - { role: kubernetes/node, tags: node }
-    - { role: kubernetes/control-plane, tags: master, upgrade_cluster_setup: true }
+    - { role: kubernetes/control-plane, tags: control-plane, upgrade_cluster_setup: true }
     - { role: kubernetes/client, tags: client }
     - { role: kubernetes/node-label, tags: node-label }
     - { role: kubernetes/node-taint, tags: node-taint }
@@ -100,7 +100,7 @@
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray_defaults }
-    - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
+    - { role: win_nodes/kubernetes_patch, tags: ["control-plane", "win_nodes"] }

 - name: Install Calico Route Reflector
   hosts: calico_rr
```
requirements.txt

```diff
@@ -2,6 +2,6 @@ ansible==10.7.0
 # Needed for community.crypto module
 cryptography==46.0.3
 # Needed for jinja2 json_query templating
-jmespath==1.0.1
+jmespath==1.1.0
 # Needed for ansible.utils.ipaddr
 netaddr==1.3.0
```
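The jmespath pin backs the `json_query` filter referenced in the comment. An illustrative, self-contained task showing the kind of templating it enables; the pod list and names here are fabricated for the example:

```yaml
# Illustrative playbook demonstrating the json_query filter that the
# jmespath requirement supports; the data is made up.
- hosts: localhost
  gather_facts: false
  vars:
    pods:
      - { name: coredns, namespace: kube-system }
      - { name: nginx, namespace: default }
  tasks:
    - name: Select pod names in kube-system with a JMESPath expression
      debug:
        msg: "{{ pods | community.general.json_query('[?namespace==`kube-system`].name') }}"
```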
```diff
@@ -9,6 +9,8 @@ platforms:
     vm_memory: 512
 provisioner:
   name: ansible
+  env:
+    ANSIBLE_ROLES_PATH: ../../../
   config_options:
     defaults:
       callbacks_enabled: profile_tasks
```
```diff
@@ -9,6 +9,8 @@ platforms:
     vm_memory: 512
 provisioner:
   name: ansible
+  env:
+    ANSIBLE_ROLES_PATH: ../../../
   config_options:
     defaults:
       callbacks_enabled: profile_tasks
```
@@ -37,8 +37,3 @@ override_system_hostname: true
|
||||
is_fedora_coreos: false
|
||||
|
||||
skip_http_proxy_on_os_packages: false
|
||||
|
||||
# If this is true, debug information will be displayed but
|
||||
# may contain some private data, so it is recommended to set it to false
|
||||
# in the production environment.
|
||||
unsafe_show_logs: false
|
||||
|
||||
@@ -21,6 +21,8 @@ platforms:
|
||||
vm_memory: 512
|
||||
provisioner:
|
||||
name: ansible
|
||||
env:
|
||||
ANSIBLE_ROLES_PATH: ../../../
|
||||
config_options:
|
||||
defaults:
|
||||
callbacks_enabled: profile_tasks
|
||||
|
||||
@@ -13,10 +13,9 @@ containerd_snapshotter: "overlayfs"
containerd_runc_runtime:
  name: runc
  type: "io.containerd.runc.v2"
  engine: ""
  root: ""
  base_runtime_spec: cri-base.json
  options:
    Root: ""
    SystemdCgroup: "{{ containerd_use_systemd_cgroup | ternary('true', 'false') }}"
    BinaryName: "{{ bin_dir }}/runc"
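For orientation, a sketch of what these defaults render to in the containerd config template further down in this diff, assuming bin_dir is /usr/local/bin, containerd_cfg_dir is /etc/containerd, and containerd_use_systemd_cgroup is true (all three values are assumptions, not taken from this diff):

# sketch of the rendered TOML, values assumed as noted above
[plugins."io.containerd.cri.v1.runtime".containerd.runtimes.runc]
  runtime_type = "io.containerd.runc.v2"
  runtime_engine = ""
  runtime_root = ""
  base_runtime_spec = "/etc/containerd/cri-base.json"
  [plugins."io.containerd.cri.v1.runtime".containerd.runtimes.runc.options]
    Root = ""
    SystemdCgroup = "true"
    BinaryName = "/usr/local/bin/runc"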
@@ -24,8 +23,8 @@ containerd_additional_runtimes: []
# Example for Kata Containers as additional runtime:
# - name: kata
#   type: "io.containerd.kata.v2"
#   engine: ""
#   root: ""
#   options:
#     Root: ""

containerd_base_runtime_spec_rlimit_nofile: 65535

@@ -36,8 +35,8 @@ containerd_default_base_runtime_spec_patch:
      hard: "{{ containerd_base_runtime_spec_rlimit_nofile }}"
      soft: "{{ containerd_base_runtime_spec_rlimit_nofile }}"

# Can help reduce disk usage
# https://github.com/containerd/containerd/discussions/6295
# Only for containerd < 2.1; discard unpacked layers to save disk space
# https://github.com/containerd/containerd/blob/release/2.1/docs/cri/config.md#image-pull-configuration-since-containerd-v21
containerd_discard_unpacked_layers: true

containerd_base_runtime_specs:
@@ -34,8 +34,6 @@
  with_items:
    - "{{ containerd_systemd_dir }}"
    - "{{ containerd_cfg_dir }}"
    - "{{ containerd_storage_dir }}"
    - "{{ containerd_state_dir }}"

- name: Containerd | Write containerd proxy drop-in
  template:
@@ -52,8 +52,6 @@ oom_score = {{ containerd_oom_score }}
{% for runtime in [containerd_runc_runtime] + containerd_additional_runtimes %}
[plugins."io.containerd.cri.v1.runtime".containerd.runtimes.{{ runtime.name }}]
  runtime_type = "{{ runtime.type }}"
  runtime_engine = "{{ runtime.engine }}"
  runtime_root = "{{ runtime.root }}"
{% if runtime.base_runtime_spec is defined %}
  base_runtime_spec = "{{ containerd_cfg_dir }}/{{ runtime.base_runtime_spec }}"
{% endif %}

@@ -78,7 +76,9 @@ oom_score = {{ containerd_oom_score }}

[plugins."io.containerd.cri.v1.images"]
  snapshotter = "{{ containerd_snapshotter }}"
{% if containerd_discard_unpacked_layers and containerd_version is version('2.1.0', '<') %}
  discard_unpacked_layers = {{ containerd_discard_unpacked_layers | lower }}
{% endif %}
  image_pull_progress_timeout = "{{ containerd_image_pull_progress_timeout }}"
[plugins."io.containerd.cri.v1.images".pinned_images]
  sandbox = "{{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}"
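The new version guard keeps discard_unpacked_layers out of configs for containerd >= 2.1, where image pull behaviour is configured differently (see the 2.1 docs linked above). A sketch of the rendered images section on containerd 2.0.x, using the snapshotter default from this diff and an assumed pull timeout of "5m":

[plugins."io.containerd.cri.v1.images"]
  snapshotter = "overlayfs"
  discard_unpacked_layers = true
  image_pull_progress_timeout = "5m"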
@@ -25,6 +25,8 @@ provisioner:
  group_vars:
    all:
      become: true
    k8s_cluster:
      container_manager: docker
playbooks:
  create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
  prepare: ../../../molecule/prepare.yml
@@ -32,6 +32,8 @@ crio_registry_auth: []
crio_seccomp_profile: ""
crio_selinux: "{{ (preinstall_selinux_state == 'enforcing') | lower }}"
crio_signature_policy: "{% if ansible_os_family == 'ClearLinux' %}/usr/share/defaults/crio/policy.json{% endif %}"
# Set the pull progress timeout
crio_pull_progress_timeout: "10s"

# Override system default for storage driver
# crio_storage_driver: "overlay"
@@ -2,8 +2,6 @@
- name: Converge
  hosts: all
  become: true
  vars:
    container_manager: crio
  roles:
    - role: kubespray_defaults
    - role: container-engine/cri-o
@@ -41,6 +41,10 @@ provisioner:
    defaults:
      callbacks_enabled: profile_tasks
      timeout: 120
  inventory:
    group_vars:
      k8s_cluster:
        container_manager: crio
  playbooks:
    create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
    prepare: ../../../molecule/prepare.yml

@@ -2,7 +2,6 @@
- name: Test CRI-O cri
  import_playbook: ../../../molecule/test_cri.yml
  vars:
    container_manager: crio
    cri_socket: unix:///var/run/crio/crio.sock
    cri_name: cri-o
- name: Test running a container with crun
@@ -1,16 +1,16 @@
{% if crio_registry_auth is defined and crio_registry_auth|length %}
{
{% for reg in crio_registry_auth %}
  "auths": {
{% for reg in crio_registry_auth %}
    "{{ reg.registry }}": {
      "auth": "{{ (reg.username + ':' + reg.password) | string | b64encode }}"
    }
{% if not loop.last %}
    },
  },
{% else %}
    }
  }
{% endif %}
{% endfor %}
  }
}
{% else %}
{}
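For illustration, with a hypothetical inventory entry

crio_registry_auth:
  - registry: registry.example.com
    username: alice
    password: s3cret

the corrected template renders the auth file as (base64 of "alice:s3cret"):

{
  "auths": {
    "registry.example.com": {
      "auth": "YWxpY2U6czNjcmV0"
    }
  }
}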
@@ -348,6 +348,12 @@ signature_policy = "{{ crio_signature_policy }}"
# ignore; the latter will ignore volumes entirely.
image_volumes = "mkdir"

# The timeout for an image pull to make progress until the pull operation gets
# canceled. This value will be also used for calculating the pull progress interval
# to pull_progress_timeout / 10. Can be set to 0 to disable the timeout as well as
# the progress output.
pull_progress_timeout = "{{ crio_pull_progress_timeout }}"

# The crio.network table contains settings pertaining to the management of
# CNI plugins.
[crio.network]
@@ -1,22 +0,0 @@
---
- name: Crictl | Download crictl
  include_tasks: "../../../download/tasks/download_file.yml"
  vars:
    download: "{{ download_defaults | combine(downloads.crictl) }}"

- name: Install crictl config
  template:
    src: crictl.yaml.j2
    dest: /etc/crictl.yaml
    owner: root
    mode: "0644"

- name: Copy crictl binary from download dir
  copy:
    src: "{{ local_release_dir }}/crictl"
    dest: "{{ bin_dir }}/crictl"
    mode: "0755"
    remote_src: true
  notify:
    - Get crictl completion
    - Install crictl completion

@@ -1,3 +1,22 @@
---
- name: Install crictl
  include_tasks: crictl.yml
- name: Crictl | Download crictl
  include_tasks: "../../../download/tasks/download_file.yml"
  vars:
    download: "{{ download_defaults | combine(downloads.crictl) }}"

- name: Install crictl config
  template:
    src: crictl.yaml.j2
    dest: /etc/crictl.yaml
    owner: root
    mode: "0644"

- name: Copy crictl binary from download dir
  copy:
    src: "{{ local_release_dir }}/crictl"
    dest: "{{ bin_dir }}/crictl"
    mode: "0755"
    remote_src: true
  notify:
    - Get crictl completion
    - Install crictl completion
@@ -21,6 +21,11 @@ provisioner:
    defaults:
      callbacks_enabled: profile_tasks
      timeout: 120
  inventory:
    group_vars:
      k8s_cluster:
        gvisor_enabled: true
        container_manager: containerd
  playbooks:
    create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
    prepare: ../../../molecule/prepare.yml

@@ -21,6 +21,11 @@ provisioner:
    defaults:
      callbacks_enabled: profile_tasks
      timeout: 120
  inventory:
    group_vars:
      k8s_cluster:
        youki_enabled: true
        container_manager: crio
  playbooks:
    create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
    prepare: ../../../molecule/prepare.yml
@@ -5,8 +5,7 @@
    group: "{{ etcd_cert_group }}"
    state: directory
    owner: "{{ etcd_owner }}"
    mode: "{{ etcd_cert_dir_mode }}"
    recurse: true
    mode: "0700"

- name: "Gen_certs | create etcd script dir (on {{ groups['etcd'][0] }})"
  file:

@@ -145,15 +144,6 @@
    - ('k8s_cluster' in group_names) and
      sync_certs | default(false) and inventory_hostname not in groups['etcd']

- name: Gen_certs | check certificate permissions
  file:
    path: "{{ etcd_cert_dir }}"
    group: "{{ etcd_cert_group }}"
    state: directory
    owner: "{{ etcd_owner }}"
    mode: "{{ etcd_cert_dir_mode }}"
    recurse: true

# This is a hack around the fact that kubeadm expects the same certs path on all kube_control_plane
# TODO: fix certs generation to have the same file everywhere
# OR work with kubeadm on node-specific config
@@ -32,23 +32,16 @@ DNS.{{ counter["dns"] }} = {{ hostvars[host]['etcd_access_address'] }}{{ increment(counter, 'dns') }}
{# This will always expand to inventory_hostname, which can be a completely arbitrary name, that etcd will not know or care about, hence this line is (probably) redundant. #}
DNS.{{ counter["dns"] }} = {{ host }}{{ increment(counter, 'dns') }}
{% endfor %}
{% if apiserver_loadbalancer_domain_name is defined %}
DNS.{{ counter["dns"] }} = {{ apiserver_loadbalancer_domain_name }}{{ increment(counter, 'dns') }}
{% endif %}
{% for etcd_alt_name in etcd_cert_alt_names %}
DNS.{{ counter["dns"] }} = {{ etcd_alt_name }}{{ increment(counter, 'dns') }}
{% endfor %}
{% for host in groups['etcd'] %}
{% if hostvars[host]['access_ip'] is defined %}
IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip'] }}{{ increment(counter, 'ip') }}
{% endif %}
{% if hostvars[host]['access_ip6'] is defined %}
IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip6'] }}{{ increment(counter, 'ip') }}
{% endif %}
{% if ipv6_stack %}
IP.{{ counter["ip"] }} = {{ hostvars[host]['ip6'] | default(hostvars[host]['fallback_ip6']) }}{{ increment(counter, 'ip') }}
{% endif %}
IP.{{ counter["ip"] }} = {{ hostvars[host]['main_ip'] }}{{ increment(counter, 'ip') }}
{% for address in hostvars[host]['main_access_ips'] %}
IP.{{ counter["ip"] }} = {{ address }}{{ increment(counter, 'ip') }}
{% endfor %}
{% for address in hostvars[host]['main_ips'] %}
IP.{{ counter["ip"] }} = {{ address }}{{ increment(counter, 'ip') }}
{% endfor %}
{% endfor %}
{% for cert_alt_ip in etcd_cert_alt_ips %}
IP.{{ counter["ip"] }} = {{ cert_alt_ip }}{{ increment(counter, 'ip') }}
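The per-address-family conditionals give way to loops over the precomputed main_access_ips and main_ips facts from the new network_facts role. For illustration (addresses assumed): an etcd host with main_access_ips ['10.0.0.21', 'fd00::21'] contributes

IP.3 = 10.0.0.21
IP.4 = fd00::21

with the counter macro incrementing once per emitted entry, so dual-stack hosts no longer need separate access_ip/access_ip6 branches.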
@@ -18,7 +18,6 @@ etcd_backup_retention_count: -1
force_etcd_cert_refresh: true
etcd_config_dir: /etc/ssl/etcd
etcd_cert_dir: "{{ etcd_config_dir }}/ssl"
etcd_cert_dir_mode: "0700"
etcd_cert_group: root
# Note: This does not set up DNS entries. It simply adds the following DNS
# entries to the certificate

@@ -117,11 +116,6 @@ etcd_retries: 4
# https://groups.google.com/a/kubernetes.io/g/dev/c/B7gJs88XtQc/m/rSgNOzV2BwAJ?utm_medium=email&utm_source=footer
etcd_experimental_initial_corrupt_check: true

# If this is true, debug information will be displayed but
# may contain some private data, so it is recommended to set it to false
# in the production environment.
unsafe_show_logs: false

# Enable distributed tracing
# https://etcd.io/docs/v3.5/op-guide/monitoring/#distributed-tracing
etcd_experimental_enable_distributed_tracing: false
@@ -1,6 +1,5 @@
---
gateway_api_enabled: false
gateway_api_version: 1.2.1

# `gateway_api_channel` default is "standard".
# "standard" release channel includes all resources that have graduated to GA or beta, including GatewayClass, Gateway, HTTPRoute, and ReferenceGrant.

@@ -27,11 +27,6 @@ vsphere_csi_aggressive_node_not_ready_timeout: 300

vsphere_csi_node_affinity: {}

# If this is true, debug information will be displayed but
# may contain some private data, so it is recommended to set it to false
# in the production environment.
unsafe_show_logs: false

# https://github.com/kubernetes-sigs/vsphere-csi-driver/blob/master/docs/book/features/volume_snapshot.md#how-to-enable-volume-snapshot--restore-feature-in-vsphere-csi-
# according to the above link, we can control the block-volume-snapshot parameter
vsphere_csi_block_volume_snapshot: false
@@ -1,6 +1,7 @@
---
metallb_enabled: false
metallb_log_level: info
metallb_namespace: "metallb-system"
metallb_port: "7472"
metallb_memberlist_port: "7946"
metallb_speaker_enabled: "{{ metallb_enabled }}"

@@ -26,6 +26,16 @@ rules:
  verbs:
    - watch
    - list
# Services are monitored for service LoadBalancer IP allocation
- apiGroups: [""]
  resources:
    - services
    - services/status
  verbs:
    - get
    - list
    - update
    - watch
{% elif calico_datastore == "kdd" %}
# Nodes are watched to monitor for deletions.
- apiGroups: [""]

@@ -104,4 +114,14 @@ rules:
    - update
    # watch for changes
    - watch
# Services are monitored for service LoadBalancer IP allocation
- apiGroups: [""]
  resources:
    - services
    - services/status
  verbs:
    - get
    - list
    - update
    - watch
{% endif %}
||||
@@ -2,6 +2,9 @@
# disable upgrade cluster
upgrade_cluster_setup: false

# Number of retries (with 5 seconds interval) to check that new control plane nodes
# are in Ready condition after joining
control_plane_node_become_ready_tries: 24
# By default the external API listens on all interfaces, this can be changed to
# listen on a specific address/interface.
# NOTE: If you specify an address/interface and use loadbalancer_apiserver_localhost

@@ -240,6 +243,10 @@ auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:00:00"
# we can opt out from the default behavior by setting kubeadm_upgrade_auto_cert_renewal to false
kubeadm_upgrade_auto_cert_renewal: true

# Add Subject Alternative Names to the Kubernetes apiserver certificates.
# Useful if you access the API from multiple load balancers, for instance.
supplementary_addresses_in_ssl_keys: []

# Bash alias for kubectl, to make interacting with the Kubernetes cluster easier
# kubectl_alias: k
@@ -1,7 +1,7 @@
---
- name: Kubeadm | Check api is up
  uri:
    url: "https://{{ ip | default(fallback_ip) }}:{{ kube_apiserver_port }}/healthz"
    url: "https://{{ main_ip | ansible.utils.ipwrap }}:{{ kube_apiserver_port }}/healthz"
    validate_certs: false
  when: ('kube_control_plane' in group_names)
  register: _result
@@ -1,19 +0,0 @@
---

- name: Check which kube-control nodes are already members of the cluster
  command: "{{ bin_dir }}/kubectl get nodes --selector=node-role.kubernetes.io/control-plane -o json"
  register: kube_control_planes_raw
  ignore_errors: true
  changed_when: false

- name: Set fact joined_control_planes
  set_fact:
    joined_control_planes: "{{ ((kube_control_planes_raw.stdout | from_json)['items']) | default([]) | map(attribute='metadata') | map(attribute='name') | list }}"
  delegate_to: "{{ item }}"
  loop: "{{ groups['kube_control_plane'] }}"
  when: kube_control_planes_raw is succeeded
  run_once: true

- name: Set fact first_kube_control_plane
  set_fact:
    first_kube_control_plane: "{{ joined_control_planes | default([]) | first | default(groups['kube_control_plane'] | first) }}"
@@ -11,24 +11,23 @@
  tags:
    - facts

- name: Upload certificates so they are fresh and not expired
  command: >-
    {{ bin_dir }}/kubeadm init phase
    --config {{ kube_config_dir }}/kubeadm-config.yaml
    upload-certs
    --upload-certs
  register: kubeadm_upload_cert
- name: Obtain kubeadm certificate key for joining control plane nodes
  when:
    - inventory_hostname == first_kube_control_plane
    - not kube_external_ca_mode

- name: Parse certificate key if not set
  set_fact:
    kubeadm_certificate_key: "{{ hostvars[first_kube_control_plane]['kubeadm_upload_cert'].stdout_lines[-1] | trim }}"
  run_once: true
  when:
    - hostvars[first_kube_control_plane]['kubeadm_upload_cert'] is defined
    - hostvars[first_kube_control_plane]['kubeadm_upload_cert'] is not skipped
  block:
    - name: Upload certificates so they are fresh and not expired
      command: >-
        {{ bin_dir }}/kubeadm init phase
        --config {{ kube_config_dir }}/kubeadm-config.yaml
        upload-certs
        --upload-certs
      register: kubeadm_upload_cert
      delegate_to: "{{ first_kube_control_plane }}"

    - name: Parse certificate key if not set
      set_fact:
        kubeadm_certificate_key: "{{ kubeadm_upload_cert.stdout_lines[-1] | trim }}"

- name: Wait for k8s apiserver
  wait_for:

@@ -99,3 +98,18 @@
  when:
    - inventory_hostname != first_kube_control_plane
    - kubeadm_already_run is not defined or not kubeadm_already_run.stat.exists

- name: Wait for new control plane nodes to be Ready
  when: kubeadm_already_run.stat.exists
  run_once: true
  command: >
    {{ kubectl }} get nodes --selector node-role.kubernetes.io/control-plane
    -o jsonpath-as-json="{.items[*].status.conditions[?(@.type == 'Ready')]}"
  register: control_plane_node_ready_conditions
  retries: "{{ control_plane_node_become_ready_tries }}"
  delay: 5
  delegate_to: "{{ groups['kube_control_plane'][0] }}"
  until: >
    control_plane_node_ready_conditions.stdout
    | from_json | selectattr('status', '==', 'True')
    | length == (groups['kube_control_plane'] | length)
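For illustration of the readiness gate: with two control plane nodes both Ready, the kubectl command above prints JSON along the lines of

[
  {"type": "Ready", "status": "True", "reason": "KubeletReady"},
  {"type": "Ready", "status": "True", "reason": "KubeletReady"}
]

(fields abbreviated, values assumed). selectattr('status', '==', 'True') keeps both entries, so the length comparison against the kube_control_plane group size succeeds and the retry loop ends.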
@@ -25,9 +25,9 @@

- name: Kubeadm | aggregate all SANs
  set_fact:
    apiserver_sans: "{{ (sans_base + groups['kube_control_plane'] + sans_lb + sans_lb_ip + sans_supp + sans_access_ip + sans_ip + sans_ipv4_address + sans_ipv6_address + sans_override + sans_hostname + sans_fqdn + sans_kube_vip_address) | unique }}"
    apiserver_sans: "{{ _apiserver_sans | flatten | select | unique }}"
  vars:
    sans_base:
    _apiserver_sans:
      - "kubernetes"
      - "kubernetes.default"
      - "kubernetes.default.svc"

@@ -36,17 +36,17 @@
      - "localhost"
      - "127.0.0.1"
      - "::1"
    sans_lb: "{{ [apiserver_loadbalancer_domain_name] if apiserver_loadbalancer_domain_name is defined else [] }}"
    sans_lb_ip: "{{ [loadbalancer_apiserver.address] if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined else [] }}"
    sans_supp: "{{ supplementary_addresses_in_ssl_keys if supplementary_addresses_in_ssl_keys is defined else [] }}"
    sans_access_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'main_access_ip') | list | select('defined') | list }}"
    sans_ip: "{{ groups['kube_control_plane'] | map('extract', hostvars, 'main_ip') | list | select('defined') | list }}"
    sans_ipv4_address: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | list | select('defined') | list }}"
    sans_ipv6_address: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_default_ipv6', 'address']) | list | select('defined') | list }}"
    sans_override: "{{ [kube_override_hostname] if kube_override_hostname else [] }}"
    sans_hostname: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_hostname']) | list | select('defined') | list }}"
    sans_fqdn: "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_fqdn']) | list | select('defined') | list }}"
    sans_kube_vip_address: "{{ [kube_vip_address] if kube_vip_address is defined and kube_vip_address else [] }}"
      - "{{ apiserver_loadbalancer_domain_name | d('') }}"
      - "{{ loadbalancer_apiserver.address | d('') }}"
      - "{{ supplementary_addresses_in_ssl_keys }}"
      - "{{ groups['kube_control_plane'] | map('extract', hostvars, 'main_access_ip') }}"
      - "{{ groups['kube_control_plane'] | map('extract', hostvars, 'main_ip') }}"
      - "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | select('defined') }}"
      - "{{ groups['kube_control_plane'] | map('extract', hostvars, ['ansible_default_ipv6', 'address']) | select('defined') }}"
      - "{{ groups['kube_control_plane'] | map('extract', hostvars, 'ansible_hostname') }}"
      - "{{ groups['kube_control_plane'] | map('extract', hostvars, 'ansible_fqdn') }}"
      - "{{ kube_override_hostname }}"
      - "{{ kube_vip_address }}"
  tags: facts

- name: Create audit-policy directory
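The `flatten | select | unique` pipeline is what lets the new list mix plain strings, per-host sublists, and possibly-empty optional values. A sketch of the evaluation (all values assumed):

_apiserver_sans:
  - "kubernetes"
  - ""                              # apiserver_loadbalancer_domain_name unset -> d('')
  - ["10.0.0.10", "10.0.0.11"]      # main_ip of each control plane node
  - ""                              # kube_override_hostname empty

# "{{ _apiserver_sans | flatten | select | unique }}"
#   -> ["kubernetes", "10.0.0.10", "10.0.0.11"]

flatten merges the sublists, select with no test drops falsy entries such as empty strings, and unique removes duplicates, replacing a dozen sans_* helper variables with one expression.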
@@ -90,7 +90,7 @@
# Nginx LB(default), If kubeadm_config_api_fqdn is defined, use other LB by kubeadm controlPlaneEndpoint.
- name: Set kubeadm_config_api_fqdn define
  set_fact:
    kubeadm_config_api_fqdn: "{{ apiserver_loadbalancer_domain_name | default('lb-apiserver.kubernetes.local') }}"
    kubeadm_config_api_fqdn: "{{ apiserver_loadbalancer_domain_name }}"
  when: loadbalancer_apiserver is defined

- name: Kubeadm | Create kubeadm config
@@ -179,9 +179,10 @@
    timeout -k {{ kubeadm_init_timeout }} {{ kubeadm_init_timeout }}
    {{ bin_dir }}/kubeadm init
    --config={{ kube_config_dir }}/kubeadm-config.yaml
    --ignore-preflight-errors={{ kubeadm_ignore_preflight_errors | join(',') }}
    --ignore-preflight-errors={{ _ignore_errors | flatten | join(',') }}
    --skip-phases={{ kubeadm_init_phases_skip | join(',') }}
    {{ kube_external_ca_mode | ternary('', '--upload-certs') }}
    _ignore_errors: "{{ kubeadm_ignore_preflight_errors }}"
  environment:
    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
  notify: Control plane | restart kubelet

@@ -195,6 +196,15 @@
# This retry task is separated from 1st task to show log of failure of 1st task.
- name: Kubeadm | Initialize first control plane node (retry)
  command: "{{ kubeadm_init_first_control_plane_cmd }}"
  vars:
    _errors_from_first_try:
      - 'FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml'
      - 'FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml'
      - 'FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml'
      - 'Port-10250'
    _ignore_errors:
      - "{{ kubeadm_ignore_preflight_errors }}"
      - "{{ _errors_from_first_try if 'all' not in kubeadm_ignore_preflight_errors else [] }}"
  register: kubeadm_init
  retries: 2
  until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr
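For the retry, _ignore_errors flattens to the user-supplied list plus the manifest and port errors that a failed first attempt legitimately leaves behind, so (values assumed, with kubeadm_ignore_preflight_errors empty) the rendered flag becomes roughly:

--ignore-preflight-errors=FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,Port-10250

When the user already passes 'all', the extra entries are skipped since they would be redundant.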
@@ -92,9 +92,6 @@
    - upgrade
  ignore_errors: true  # noqa ignore-errors

- name: Define nodes already joined to existing cluster and first_kube_control_plane
  import_tasks: define-first-kube-control.yml

- name: Include kubeadm setup
  import_tasks: kubeadm-setup.yml
@@ -5,7 +5,6 @@ echo "## Check Expiration before renewal ##"
{{ bin_dir }}/kubeadm certs check-expiration

days_buffer=7 # set a time margin, because we should not renew at the last moment
calendar={{ auto_renew_certificates_systemd_calendar }}
next_time=$(systemctl show k8s-certs-renew.timer -p NextElapseUSecRealtime --value)

if [ "${next_time}" == "" ]; then
@@ -3,9 +3,19 @@
  file:
    path: "{{ kubeadm_patches_dir }}"
    state: directory
    mode: "0640"
    mode: "0750"
  when: kubeadm_patches | length > 0

- name: Kubeadm | List existing kubeadm patches
  find:
    paths:
      - "{{ kubeadm_patches_dir }}"
    file_type: file
    use_regex: true
    patterns:
      - '^(kube-apiserver|kube-controller-manager|kube-scheduler|etcd|kubeletconfiguration)[0-9]+\+(strategic|json|merge).yaml$'
  register: existing_kubeadm_patches

- name: Kubeadm | Copy kubeadm patches from inventory files
  copy:
    content: "{{ item.patch | to_yaml }}"

@@ -15,3 +25,13 @@
  loop: "{{ kubeadm_patches }}"
  loop_control:
    index_var: suffix
  register: current_kubeadm_patches

- name: Kubeadm | Delete old patches
  loop: "{{ existing_kubeadm_patches.files | map(attribute='path') |
            difference(
              current_kubeadm_patches.results | map(attribute='dest')
            ) }}"
  file:
    state: absent
    path: "{{ item }}"
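To make the cleanup concrete: per the find pattern above, patch files are named like kube-apiserver0+strategic.yaml (target, loop index, patch type; file names here are illustrative). A run that renders kube-apiserver0+strategic.yaml while kube-scheduler0+json.yaml is left over from an earlier inventory computes

existing paths:  [.../kube-apiserver0+strategic.yaml, .../kube-scheduler0+json.yaml]
current dests:   [.../kube-apiserver0+strategic.yaml]
difference:      [.../kube-scheduler0+json.yaml]   -> deleted

so only stale patches are removed.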
@@ -61,8 +61,6 @@ eviction_hard_control_plane: {}
kubelet_status_update_frequency: 10s

# kube-vip
kube_vip_version: 0.8.0

kube_vip_arp_enabled: false
kube_vip_interface:
kube_vip_services_interface:

@@ -80,7 +78,6 @@ kube_vip_bgp_peeraddress:
kube_vip_bgp_peerpass:
kube_vip_bgp_peeras: 65000
kube_vip_bgppeers:
kube_vip_address:
kube_vip_enableServicesElection: false
kube_vip_lb_enable: false
kube_vip_leasename: plndr-cp-lock
@@ -32,7 +32,7 @@ frontend healthz
frontend kube_api_frontend
    bind 127.0.0.1:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }}
{% if ipv6_stack -%}
    bind [::1]:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }};
    bind [::1]:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }}
{% endif -%}
    mode tcp
    option tcplog
@@ -1,4 +1,4 @@
# Inspired by https://github.com/kube-vip/kube-vip/blob/v0.8.0/pkg/kubevip/config_generator.go#L103
# Inspired by https://github.com/kube-vip/kube-vip/blob/v1.0.3/pkg/kubevip/config_generator.go#L103
apiVersion: v1
kind: Pod
metadata:

@@ -27,7 +27,7 @@ spec:
          value: {{ kube_vip_services_interface | string | to_json }}
{% endif %}
{% if kube_vip_cidr %}
        - name: vip_cidr
        - name: vip_{{ "subnet" if kube_vip_version is version('0.9.0', '>=') else "cidr" }}
          value: {{ kube_vip_cidr | string | to_json }}
{% endif %}
{% if kube_vip_dns_mode %}
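The version switch encodes kube-vip's rename of this environment variable from vip_cidr to vip_subnet as of its 0.9.x line. With kube_vip_version 1.0.3 (the new default in this diff) and an assumed kube_vip_cidr of "32", the static pod manifest renders:

        - name: vip_subnet
          value: "32"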
@@ -113,6 +113,8 @@ spec:
            add:
              - NET_ADMIN
              - NET_RAW
            drop:
              - ALL
{% endif %}
      volumeMounts:
        - mountPath: /etc/kubernetes/admin.conf
@@ -74,8 +74,34 @@
    - not is_fedora_coreos
    - not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]

- name: Set timezone
- name: Gather selinux facts
  ansible.builtin.setup:
    gather_subset: selinux
  when:
    - ntp_timezone
    - ansible_os_family == "RedHat"

- name: Put SELinux in permissive mode, logging actions that would be blocked.
  ansible.posix.selinux:
    policy: targeted
    state: permissive
  when:
    - ntp_timezone
    - ansible_os_family == "RedHat"
    - ansible_facts.selinux.status == 'enabled'
    - ansible_facts.selinux.mode == 'enforcing'

- name: Set ntp_timezone
  community.general.timezone:
    name: "{{ ntp_timezone }}"
  when:
    - ntp_timezone

- name: Re-enable SELinux
  ansible.posix.selinux:
    policy: targeted
    state: "{{ preinstall_selinux_state }}"
  when:
    - ntp_timezone
    - ansible_os_family == "RedHat"
    - ansible_facts.selinux.status == 'enabled'
@@ -5,7 +5,9 @@ download_cache_dir: /tmp/kubespray_cache
# If this is true, debug information will be displayed but
# may contain some private data, so it is recommended to set it to false
# in the production environment.
unsafe_show_logs: false
# false by default, unless we're running in CI (CI_PROJECT_URL should be globally unique even if kubespray happens to run
# in gitlab-ci in other contexts)
unsafe_show_logs: "{{ lookup('env', 'CI_PROJECT_URL') == 'https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray' }}"

# do not delete remote cache files after using them
# NOTE: Setting this parameter to TRUE is only really useful when developing kubespray
@@ -114,7 +116,7 @@ flannel_version: 0.27.3
flannel_cni_version: 1.7.1-flannel1
cni_version: "{{ (cni_binary_checksums['amd64'] | dict2items)[0].key }}"

cilium_version: "1.18.3"
cilium_version: "1.18.6"
cilium_cli_version: "{{ (ciliumcli_binary_checksums['amd64'] | dict2items)[0].key }}"
cilium_enable_hubble: false

@@ -140,7 +142,7 @@ scheduler_plugins_version: "{{ scheduler_plugins_supported_versions[kube_major_v

yq_version: "{{ (yq_checksums['amd64'] | dict2items)[0].key }}"

gateway_api_version: "1.2.1"
gateway_api_version: "{{ (gateway_api_standard_crds_checksums.no_arch | dict2items)[0].key }}"
gateway_api_channel: "standard"

prometheus_operator_crds_version: "{{ (prometheus_operator_crds_checksums.no_arch | dict2items)[0].key }}"

@@ -249,7 +251,7 @@ cilium_hubble_ui_image_tag: "v0.13.3"
cilium_hubble_ui_backend_image_repo: "{{ quay_image_repo }}/cilium/hubble-ui-backend"
cilium_hubble_ui_backend_image_tag: "v0.13.3"
cilium_hubble_envoy_image_repo: "{{ quay_image_repo }}/cilium/cilium-envoy"
cilium_hubble_envoy_image_tag: "v1.34.7-1757592137-1a52bb680a956879722f48c591a2ca90f7791324"
cilium_hubble_envoy_image_tag: "v1.34.10-1762597008-ff7ae7d623be00078865cff1b0672cc5d9bfc6d5"
kube_ovn_container_image_repo: "{{ docker_image_repo }}/kubeovn/kube-ovn"
kube_ovn_container_image_tag: "v{{ kube_ovn_version }}"
kube_ovn_vpc_container_image_repo: "{{ docker_image_repo }}/kubeovn/vpc-nat-gateway"

@@ -263,8 +265,9 @@ multus_image_tag: "v{{ multus_version }}"
external_openstack_cloud_controller_image_repo: "{{ kube_image_repo }}/provider-os/openstack-cloud-controller-manager"
external_openstack_cloud_controller_image_tag: "v1.32.0"

kube_vip_version: 1.0.3
kube_vip_image_repo: "{{ github_image_repo }}/kube-vip/kube-vip{{ '-iptables' if kube_vip_lb_fwdmethod == 'masquerade' else '' }}"
kube_vip_image_tag: v0.8.9
kube_vip_image_tag: "v{{ kube_vip_version }}"
nginx_image_repo: "{{ docker_image_repo }}/library/nginx"
nginx_image_tag: 1.28.0-alpine
haproxy_image_repo: "{{ docker_image_repo }}/library/haproxy"

@@ -784,9 +787,9 @@ downloads:
      url: "{{ calico_crds_download_url }}"
      unarchive: true
      unarchive_extra_opts:
        - "{{ '--strip=6' if (calico_version is version('3.22.3', '<')) else '--strip=3' }}"
        - "--strip=3"
        - "--wildcards"
        - "{{ '*/_includes/charts/calico/crds/kdd/' if (calico_version is version('3.22.3', '<')) else '*/libcalico-go/config/crd/' }}"
        - "*/libcalico-go/config/crd/"
      owner: "root"
      mode: "0755"
  groups:
@@ -96,6 +96,7 @@ ignore_assert_errors: false
# kube-vip
kube_vip_enabled: false
kube_vip_lb_fwdmethod: local
kube_vip_address:

# nginx-proxy configure
nginx_config_dir: "/etc/nginx"
@@ -632,6 +633,8 @@ ssl_ca_dirs: |-
  {% endif -%}
  ]

# used for delegating tasks on a working control plane node
first_kube_control_plane: "{{ groups['kube_control_plane'] | first }}"
# Vars for pointing to kubernetes api endpoints
kube_apiserver_count: "{{ groups['kube_control_plane'] | length }}"
kube_apiserver_address: "{{ hostvars[inventory_hostname]['main_ip'] }}"

@@ -640,18 +643,18 @@ first_kube_control_plane_address: "{{ hostvars[groups['kube_control_plane'][0]][
loadbalancer_apiserver_localhost: "{{ loadbalancer_apiserver is not defined }}"
loadbalancer_apiserver_type: "nginx"
# applied only if an external loadbalancer_apiserver is defined, otherwise ignored
apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
apiserver_loadbalancer_domain_name: "{{ 'localhost' if loadbalancer_apiserver_localhost else (loadbalancer_apiserver.address | d(undef())) }}"
kube_apiserver_global_endpoint: |-
  {% if loadbalancer_apiserver is defined -%}
  https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
  {%- elif loadbalancer_apiserver_localhost and (loadbalancer_apiserver_port is not defined or loadbalancer_apiserver_port == kube_apiserver_port) -%}
  https://localhost:{{ kube_apiserver_port }}
  https://{{ apiserver_loadbalancer_domain_name | ansible.utils.ipwrap }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
  {%- elif loadbalancer_apiserver_localhost -%}
  https://localhost:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }}
  {%- else -%}
  https://{{ first_kube_control_plane_address | ansible.utils.ipwrap }}:{{ kube_apiserver_port }}
  {%- endif %}
kube_apiserver_endpoint: |-
  {% if loadbalancer_apiserver is defined -%}
  https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
  https://{{ apiserver_loadbalancer_domain_name | ansible.utils.ipwrap }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
  {%- elif ('kube_control_plane' not in group_names) and loadbalancer_apiserver_localhost -%}
  https://localhost:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }}
  {%- elif 'kube_control_plane' in group_names -%}
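A worked example of the endpoint logic (all values assumed, default kube_apiserver_port 6443):

- No external LB: loadbalancer_apiserver is undefined, so loadbalancer_apiserver_localhost evaluates to true, apiserver_loadbalancer_domain_name becomes "localhost", and kube_apiserver_global_endpoint renders as https://localhost:6443.
- External LB at address 192.0.2.10, port 8383: apiserver_loadbalancer_domain_name becomes "192.0.2.10" and the endpoint renders as https://192.0.2.10:8383.
- External LB on IPv6 2001:db8::10: ansible.utils.ipwrap brackets the address, giving https://[2001:db8::10]:6443, which is the main reason the filter appears in this hunk.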
(One file's diff was suppressed because it is too large.)
@@ -7,7 +7,7 @@ kube_next: "{{ ((kube_version | split('.'))[1] | int) + 1 }}"
kube_major_next_version: "1.{{ kube_next }}"

pod_infra_supported_versions:
  '1.34': '3.10'
  '1.34': '3.10.1'
  '1.33': '3.10'
  '1.32': '3.10'

roles/network_facts/defaults/main.yml (new file)
@@ -0,0 +1,5 @@
---
# Additional hosts (comma-separated string) to inject into NO_PROXY
additional_no_proxy: ""
additional_no_proxy_list: "{{ additional_no_proxy | split(',') }}"
no_proxy_exclude_workers: false
@@ -1,59 +1,63 @@
---
- name: Set facts variables
  tags:
    - always
  block:
    - name: Gather ansible_default_ipv4
      setup:
        gather_subset: '!all,network'
        filter: "ansible_default_ipv4"
      when: ansible_default_ipv4 is not defined
      ignore_unreachable: true
    # Set 127.0.0.1 as fallback IP if we do not have host facts for host
    # ansible_default_ipv4 isn't what you think.
    # https://medium.com/opsops/ansible-default-ipv4-is-not-what-you-think-edb8ab154b10
    # TODO: discard this and update all the locations relying on it in "looping on hostvars" templates
    - name: Set fallback_ip
      set_fact:
        fallback_ip: "{{ ansible_default_ipv4.address | d('127.0.0.1') }}"
      when: fallback_ip is not defined
    - name: Gather node IPs
      setup:
        gather_subset: '!all,!min,network'
        filter: "ansible_default_ip*"
      when: ansible_default_ipv4 is not defined or ansible_default_ipv6 is not defined
      ignore_unreachable: true

    - name: Gather ansible_default_ipv6
      setup:
        gather_subset: '!all,network'
        filter: "ansible_default_ipv6"
      when: ansible_default_ipv6 is not defined
      ignore_unreachable: true
    - name: Set fallback_ip6
      set_fact:
        fallback_ip6: "{{ ansible_default_ipv6.address | d('::1') }}"
      when: fallback_ip6 is not defined
    - name: Set computed IPs variables
      vars:
        fallback_ip: "{{ ansible_default_ipv4.address | d('127.0.0.1') }}"
        fallback_ip6: "{{ ansible_default_ipv6.address | d('::1') }}"
        # Set 127.0.0.1 as fallback IP if we do not have host facts for host
        # ansible_default_ipv4 isn't what you think.
        _ipv4: "{{ ip | default(fallback_ip) }}"
        _access_ipv4: "{{ access_ip | default(_ipv4) }}"
        _ipv6: "{{ ip6 | default(fallback_ip6) }}"
        _access_ipv6: "{{ access_ip6 | default(_ipv6) }}"
        _access_ips:
          - "{{ _access_ipv4 if ipv4_stack }}"
          - "{{ _access_ipv6 if ipv6_stack }}"
        _ips:
          - "{{ _ipv4 if ipv4_stack }}"
          - "{{ _ipv6 if ipv6_stack }}"
      set_fact:
        cacheable: true
        main_access_ip: "{{ _access_ipv4 if ipv4_stack else _access_ipv6 }}"
        main_ip: "{{ _ipv4 if ipv4_stack else _ipv6 }}"
        # Mixed IPs - for dualstack
        main_access_ips: "{{ _access_ips | select }}"
        main_ips: "{{ _ips | select }}"

    - name: Set main access ip (access_ip based on ipv4_stack/ipv6_stack options)
      set_fact:
        cacheable: true
        main_access_ip: >-
          {%- if ipv4_stack -%}
          {{ access_ip | default(ip | default(fallback_ip)) }}
          {%- else -%}
          {{ access_ip6 | default(ip6 | default(fallback_ip6)) }}
          {%- endif -%}

    - name: Set main ip (ip based on ipv4_stack/ipv6_stack options)
      set_fact:
        cacheable: true
        main_ip: "{{ (ip | default(fallback_ip)) if ipv4_stack else (ip6 | default(fallback_ip6)) }}"

    - name: Set main access ips (mixed ips for dualstack)
      set_fact:
        main_access_ips: ["{{ (main_access_ip + ',' + (access_ip6 | default(ip6 | default(fallback_ip6)))) if (ipv4_stack and ipv6_stack) else main_access_ip }}"]

    - name: Set main ips (mixed ips for dualstack)
      set_fact:
        main_ips: ["{{ (main_ip + ',' + (ip6 | default(fallback_ip6))) if (ipv4_stack and ipv6_stack) else main_ip }}"]

- name: Set no_proxy
  import_tasks: no_proxy.yml
  when:
    - http_proxy is defined or https_proxy is defined
    - no_proxy is not defined
- name: Set no_proxy to all assigned cluster IPs and hostnames
  when:
    - http_proxy is defined or https_proxy is defined
    - no_proxy is not defined
  vars:
    groups_with_no_proxy:
      - kube_control_plane
      - "{{ '' if no_proxy_exclude_workers else 'kube_node' }}" # TODO: exclude by a boolean in inventory rather than global variable
      - etcd
      - calico_rr
    hosts_with_no_proxy: "{{ groups_with_no_proxy | select | map('extract', groups) | select('defined') | flatten }}"
    _hostnames: "{{ (hosts_with_no_proxy +
                    (hosts_with_no_proxy | map('extract', hostvars, morekeys=['ansible_hostname'])
                     | select('defined')))
                   | unique }}"
    no_proxy_prepare:
      - "{{ apiserver_loadbalancer_domain_name | d('') }}"
      - "{{ loadbalancer_apiserver.address if loadbalancer_apiserver is defined else '' }}"
      - "{{ hosts_with_no_proxy | map('extract', hostvars, morekeys=['main_access_ip']) }}"
      - "{{ _hostnames }}"
      - "{{ _hostnames | map('regex_replace', '$', '.' + dns_domain ) }}"
      - "{{ additional_no_proxy_list }}"
      - 127.0.0.1
      - localhost
      - "{{ kube_service_subnets }}"
      - "{{ kube_pods_subnets }}"
      - svc
      - "svc.{{ dns_domain }}"
  set_fact:
    no_proxy: "{{ no_proxy_prepare | select | flatten | unique | join(',') }}"
  run_once: true
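A worked example of the computed facts (all addresses assumed): a dual-stack host with inventory vars ip: 10.0.0.5 and ip6: fd00::5 and no access_ip overrides ends up with

main_ip:          10.0.0.5
main_access_ip:   10.0.0.5
main_ips:         ['10.0.0.5', 'fd00::5']
main_access_ips:  ['10.0.0.5', 'fd00::5']

On an IPv4-only cluster (ipv6_stack false) the `if ipv6_stack` entries render as empty strings, which the trailing `| select` drops, so the list facts collapse to the IPv4 address only.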
@@ -1,40 +0,0 @@
---
- name: Set no_proxy to all assigned cluster IPs and hostnames
  set_fact:
    # noqa: jinja[spacing]
    no_proxy_prepare: >-
      {%- if loadbalancer_apiserver is defined -%}
      {{ apiserver_loadbalancer_domain_name | default('') }},
      {{ loadbalancer_apiserver.address | default('') }},
      {%- endif -%}
      {%- if no_proxy_exclude_workers | default(false) -%}
      {% set cluster_or_control_plane = 'kube_control_plane' %}
      {%- else -%}
      {% set cluster_or_control_plane = 'k8s_cluster' %}
      {%- endif -%}
      {%- for item in (groups[cluster_or_control_plane] + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique -%}
      {{ hostvars[item]['main_access_ip'] }},
      {%- if item != hostvars[item].get('ansible_hostname', '') -%}
      {{ hostvars[item]['ansible_hostname'] }},
      {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }},
      {%- endif -%}
      {{ item }},{{ item }}.{{ dns_domain }},
      {%- endfor -%}
      {%- if additional_no_proxy is defined -%}
      {{ additional_no_proxy }},
      {%- endif -%}
      127.0.0.1,localhost,{{ kube_service_subnets }},{{ kube_pods_subnets }},svc,svc.{{ dns_domain }}
  delegate_to: localhost
  connection: local
  delegate_facts: true
  become: false
  run_once: true

- name: Populate no_proxy to all hosts
  set_fact:
    no_proxy: "{{ hostvars.localhost.no_proxy_prepare }}"
    # noqa: jinja[spacing]
    proxy_env: "{{ proxy_env | combine({
      'no_proxy': hostvars.localhost.no_proxy_prepare,
      'NO_PROXY': hostvars.localhost.no_proxy_prepare
    }) }}"
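For comparison, both the removed Jinja string-builder and the new list-based task produce a comma-joined value. With an assumed one-node inventory (host cp0, main_access_ip 10.0.0.5, dns_domain cluster.local, default kubespray subnets), the new task yields something like:

no_proxy: "10.0.0.5,cp0,cp0.cluster.local,127.0.0.1,localhost,10.233.0.0/18,10.233.64.0/18,svc,svc.cluster.local"

The list form lets `select | flatten | unique` deduplicate entries and drop empties, which the old string concatenation could not do.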
@@ -61,6 +61,7 @@
    executable: /bin/bash
  register: calico_version_on_server
  changed_when: false
  check_mode: false

- name: Assert that current calico version is enough for upgrade
  assert:
@@ -126,23 +126,9 @@
    - ('kube_control_plane' in group_names)
    - calico_datastore == "kdd"
  block:
    - name: Calico | Check if extra directory is needed
      stat:
        path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds/{{ 'kdd' if (calico_version is version('3.22.3', '<')) else 'crd' }}"
      register: kdd_path
    - name: Calico | Set kdd path when calico < v3.22.3
      set_fact:
        calico_kdd_path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds{{ '/kdd' if kdd_path.stat.exists is defined and kdd_path.stat.exists }}"
      when:
        - calico_version is version('3.22.3', '<')
    - name: Calico | Set kdd path when calico > 3.22.2
      set_fact:
        calico_kdd_path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds{{ '/crd' if kdd_path.stat.exists is defined and kdd_path.stat.exists }}"
      when:
        - calico_version is version('3.22.2', '>')
    - name: Calico | Create calico manifests for kdd
      assemble:
        src: "{{ calico_kdd_path }}"
        src: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds/crd/"
        dest: "{{ kube_config_dir }}/kdd-crds.yml"
        mode: "0644"
        delimiter: "---\n"
@@ -235,6 +235,8 @@ rules:
  resources:
    - mutatingwebhookconfigurations
    - validatingwebhookconfigurations
    - validatingadmissionpolicies # Required for Kubernetes 1.33+
    - validatingadmissionpolicybindings # Required for Kubernetes 1.33+
  verbs:
    - get
    - list
@@ -215,3 +215,17 @@ rules:
      - calico-cni-plugin
  verbs:
    - create
{% if calico_version is version('3.29.0', '>=') %}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-tier-getter
rules:
  - apiGroups:
      - "projectcalico.org"
    resources:
      - "tiers"
    verbs:
      - "get"
{% endif %}

@@ -26,3 +26,18 @@ subjects:
  - kind: ServiceAccount
    name: calico-cni-plugin
    namespace: kube-system
{% if calico_version is version('3.29.0', '>=') %}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: calico-tier-getter
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-tier-getter
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: system:kube-controller-manager
{% endif %}
@@ -5,7 +5,7 @@ metadata:
  namespace: kube-system
  name: kubernetes-services-endpoint
data:
{% if calico_bpf_enabled %}
{% if calico_bpf_enabled or loadbalancer_apiserver_localhost %}
  KUBERNETES_SERVICE_HOST: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}"
  KUBERNETES_SERVICE_PORT: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}"
{% endif %}
@@ -1,8 +1,6 @@
---
cilium_min_version_required: "1.15"

# remove migrate after 2.29 released
cilium_remove_old_resources: false
# Log-level
cilium_debug: false
@@ -30,13 +30,6 @@
  when:
    - cilium_identity_allocation_mode == "kvstore"

- name: Cilium | Enable portmap addon
  template:
    src: 000-cilium-portmap.conflist.j2
    dest: /etc/cni/net.d/000-cilium-portmap.conflist
    mode: "0644"
  when: cilium_enable_portmap

- name: Cilium | Render values
  template:
    src: values.yaml.j2
@@ -5,10 +5,5 @@
- name: Cilium install
  include_tasks: install.yml

# Remove after 2.29 released
- name: Cilium remove old resources
  when: cilium_remove_old_resources
  include_tasks: remove_old_resources.yml

- name: Cilium apply
  include_tasks: apply.yml
@@ -1,45 +0,0 @@
---
# Remove after 2.29 released
- name: Cilium | Delete Old Resource
  command: |
    {{ kubectl }} delete {{ item.kind | lower }} {{ item.name }} \
    {{ '-n kube-system' if item.kind not in ['ClusterRole', 'ClusterRoleBinding'] else '' }} \
  loop:
    - { kind: ServiceAccount, name: cilium }
    - { kind: ServiceAccount, name: cilium-operator }
    - { kind: ServiceAccount, name: hubble-generate-certs }
    - { kind: ServiceAccount, name: hubble-relay }
    - { kind: ServiceAccount, name: hubble-ui }
    - { kind: Service, name: hubble-metrics }
    - { kind: Service, name: hubble-relay-metrics }
    - { kind: Service, name: hubble-relay }
    - { kind: Service, name: hubble-ui }
    - { kind: Service, name: hubble-peer }
    - { kind: Deployment, name: cilium-operator }
    - { kind: Deployment, name: hubble-relay }
    - { kind: Deployment, name: hubble-ui }
    - { kind: DaemonSet, name: cilium }
    - { kind: CronJob, name: hubble-generate-certs }
    - { kind: Job, name: hubble-generate-certs }
    - { kind: ConfigMap, name: cilium-config }
    - { kind: ConfigMap, name: ip-masq-agent }
    - { kind: ConfigMap, name: hubble-relay-config }
    - { kind: ConfigMap, name: hubble-ui-nginx }
    - { kind: ClusterRole, name: cilium }
    - { kind: ClusterRole, name: cilium-operator }
    - { kind: ClusterRole, name: hubble-generate-certs }
    - { kind: ClusterRole, name: hubble-relay }
    - { kind: ClusterRole, name: hubble-ui }
    - { kind: ClusterRoleBinding, name: cilium }
    - { kind: ClusterRoleBinding, name: cilium-operator }
    - { kind: ClusterRoleBinding, name: hubble-generate-certs }
    - { kind: ClusterRoleBinding, name: hubble-relay }
    - { kind: ClusterRoleBinding, name: hubble-ui }
    - { kind: Secret, name: hubble-ca-secret }
    - { kind: Secret, name: hubble-relay-client-certs }
    - { kind: Secret, name: hubble-server-certs }
  register: patch_result
  when: inventory_hostname == groups['kube_control_plane'][0]
  failed_when:
    - patch_result.rc != 0
    - "'not found' not in patch_result.stderr"
(Some files were not shown because too many files have changed in this diff.)