Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2025-12-14 13:54:37 +03:00)
Compare commits
11 Commits
| SHA1 |
|---|
| 10679ebb5d |
| 8775dcf92f |
| bd382a9c39 |
| ffacfe3ede |
| 7dcc22fe8c |
| 47ed2b115d |
| b9fc4ec43e |
| 7bd757da5f |
| 9dc2092042 |
| c7cfd32c40 |
| a4b0656d9b |
@@ -5,4 +5,4 @@ roles/kubernetes/control-plane/defaults/main/main.yml jinja[spacing]
roles/kubernetes/kubeadm/defaults/main.yml jinja[spacing]
roles/kubernetes/node/defaults/main.yml jinja[spacing]
roles/kubernetes/preinstall/defaults/main.yml jinja[spacing]
roles/kubespray-defaults/defaults/main/main.yml jinja[spacing]
roles/kubespray-defaults/defaults/main.yaml jinja[spacing]

44 .github/ISSUE_TEMPLATE/bug-report.md vendored Normal file
@@ -0,0 +1,44 @@
---
name: Bug Report
about: Report a bug encountered while operating Kubernetes
labels: kind/bug

---
<!--
Please, be ready for followup questions, and please respond in a timely
manner. If we can't reproduce a bug or think a feature already exists, we
might close your issue. If we're wrong, PLEASE feel free to reopen it and
explain why.
-->

**Environment**:
- **Cloud provider or hardware configuration:**

- **OS (`printf "$(uname -srm)\n$(cat /etc/os-release)\n"`):**

- **Version of Ansible** (`ansible --version`):

- **Version of Python** (`python --version`):


**Kubespray version (commit) (`git rev-parse --short HEAD`):**


**Network plugin used**:


**Full inventory with variables (`ansible -i inventory/sample/inventory.ini all -m debug -a "var=hostvars[inventory_hostname]"`):**
<!-- We recommend using snippets services like https://gist.github.com/ etc. -->

**Command used to invoke ansible**:


**Output of ansible run**:
<!-- We recommend using snippets services like https://gist.github.com/ etc. -->

**Anything else do we need to know**:
<!-- By running scripts/collect-info.yaml you can get a lot of useful informations.
Script can be started by:
ansible-playbook -i <inventory_file_path> -u <ssh_user> -e ansible_ssh_user=<ssh_user> -b --become-user=root -e dir=`pwd` scripts/collect-info.yaml
(If you using CoreOS remember to add '-e ansible_python_interpreter=/opt/bin/python').
After running this command you can find logs in `pwd`/logs.tar.gz. You can even upload somewhere entire file and paste link here.-->

117 .github/ISSUE_TEMPLATE/bug-report.yaml vendored
@@ -1,117 +0,0 @@
---
name: Bug Report
description: Report a bug encountered while using Kubespray
labels: kind/bug
body:
- type: markdown
attributes:
value: |
Please, be ready for followup questions, and please respond in a timely
manner. If we can't reproduce a bug or think a feature already exists, we
might close your issue. If we're wrong, PLEASE feel free to reopen it and
explain why.
- type: textarea
id: problem
attributes:
label: What happened?
description: |
Please provide as much info as possible. Not doing so may result in your bug not being addressed in a timely manner.
validations:
required: true
- type: textarea
id: expected
attributes:
label: What did you expect to happen?
validations:
required: true

- type: textarea
id: repro
attributes:
label: How can we reproduce it (as minimally and precisely as possible)?
validations:
required: true

- type: markdown
attributes:
value: '### Environment'

- type: textarea
id: os
attributes:
label: OS
placeholder: 'printf "$(uname -srm)\n$(cat /etc/os-release)\n"'
validations:
required: true

- type: textarea
id: ansible_version
attributes:
label: Version of Ansible
placeholder: 'ansible --version'
validations:
required: true

- type: input
id: python_version
attributes:
label: Version of Python
placeholder: 'python --version'
validations:
required: true

- type: input
id: kubespray_version
attributes:
label: Version of Kubespray (commit)
placeholder: 'git rev-parse --short HEAD'
validations:
required: true

- type: dropdown
id: network_plugin
attributes:
label: Network plugin used
options:
- calico
- cilium
- cni
- custom_cni
- flannel
- kube-ovn
- kube-router
- macvlan
- meta
- multus
- ovn4nfv
- weave
validations:
required: true

- type: textarea
id: inventory
attributes:
label: Full inventory with variables
placeholder: 'ansible -i inventory/sample/inventory.ini all -m debug -a "var=hostvars[inventory_hostname]"'

- type: input
id: ansible_command
attributes:
label: Command used to invoke ansible

- type: textarea
id: ansible_output
attributes:
label: Output of ansible run
description: We recommend using snippets services like https://gist.github.com/ etc.

- type: textarea
id: anything_else
attributes:
label: Anything else we need to know
description: |
By running scripts/collect-info.yaml you can get a lot of useful informations.
Script can be started by:
ansible-playbook -i <inventory_file_path> -u <ssh_user> -e ansible_ssh_user=<ssh_user> -b --become-user=root -e dir=`pwd` scripts/collect-info.yaml
(If you using CoreOS remember to add '-e ansible_python_interpreter=/opt/bin/python').
After running this command you can find logs in `pwd`/logs.tar.gz. You can even upload somewhere entire file and paste link here

5 .github/ISSUE_TEMPLATE/config.yml vendored
@@ -1,5 +0,0 @@
---
contact_links:
- name: Support Request
url: https://kubernetes.slack.com/channels/kubespray
about: Support request or question relating to Kubernetes

11 .github/ISSUE_TEMPLATE/enhancement.md vendored Normal file
@@ -0,0 +1,11 @@
---
name: Enhancement Request
about: Suggest an enhancement to the Kubespray project
labels: kind/feature

---
<!-- Please only use this template for submitting enhancement requests -->

**What would you like to be added**:

**Why is this needed**:

20 .github/ISSUE_TEMPLATE/enhancement.yaml vendored
@@ -1,20 +0,0 @@
---
name: Enhancement Request
description: Suggest an enhancement to the Kubespray project
labels: kind/feature
body:
- type: markdown
attributes:
value: Please only use this template for submitting enhancement requests
- type: textarea
id: what
attributes:
label: What would you like to be added
validations:
required: true
- type: textarea
id: why
attributes:
label: Why is this needed
validations:
required: true

20 .github/ISSUE_TEMPLATE/failing-test.md vendored Normal file
@@ -0,0 +1,20 @@
---
name: Failing Test
about: Report test failures in Kubespray CI jobs
labels: kind/failing-test

---

<!-- Please only use this template for submitting reports about failing tests in Kubespray CI jobs -->

**Which jobs are failing**:

**Which test(s) are failing**:

**Since when has it been failing**:

**Testgrid link**:

**Reason for failure**:

**Anything else we need to know**:

41 .github/ISSUE_TEMPLATE/failing-test.yaml vendored
@@ -1,41 +0,0 @@
---
name: Failing Test
description: Report test failures in Kubespray CI jobs
labels: kind/failing-test
body:
- type: markdown
attributes:
value: Please only use this template for submitting reports about failing tests in Kubespray CI jobs
- type: textarea
id: failing_jobs
attributes:
label: Which jobs are failing ?
validations:
required: true

- type: textarea
id: failing_tests
attributes:
label: Which tests are failing ?
validations:
required: true

- type: input
id: since_when
attributes:
label: Since when has it been failing ?
validations:
required: true

- type: textarea
id: failure_reason
attributes:
label: Reason for failure
description: If you don't know and have no guess, just put "Unknown"
validations:
required: true

- type: textarea
id: anything_else
attributes:
label: Anything else we need to know

18 .github/ISSUE_TEMPLATE/support.md vendored Normal file
@@ -0,0 +1,18 @@
---
name: Support Request
about: Support request or question relating to Kubespray
labels: kind/support

---

<!--
STOP -- PLEASE READ!

GitHub is not the right place for support requests.

If you're looking for help, check [Stack Overflow](https://stackoverflow.com/questions/tagged/kubespray) and the [troubleshooting guide](https://kubernetes.io/docs/tasks/debug-application-cluster/troubleshooting/).

You can also post your question on the [Kubernetes Slack](http://slack.k8s.io/) or the [Discuss Kubernetes](https://discuss.kubernetes.io/) forum.

If the matter is security related, please disclose it privately via https://kubernetes.io/security/.
-->

@@ -9,7 +9,7 @@ stages:
- deploy-special

variables:
KUBESPRAY_VERSION: v2.23.2
KUBESPRAY_VERSION: v2.22.1
FAILFASTCI_NAMESPACE: 'kargo-ci'
GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
ANSIBLE_FORCE_COLOR: "true"

@@ -61,7 +61,7 @@ before_script:
script:
- ./tests/scripts/testcases_run.sh
after_script:
- ./tests/scripts/testcases_cleanup.sh
- chronic ./tests/scripts/testcases_cleanup.sh

# For failfast, at least 1 job must be defined in .gitlab-ci.yml
# Premoderated with manual actions

@@ -14,7 +14,7 @@ vagrant-validate:
stage: unit-tests
tags: [light]
variables:
VAGRANT_VERSION: 2.3.7
VAGRANT_VERSION: 2.3.4
script:
- ./tests/scripts/vagrant-validate.sh
except: ['triggers', 'master']

@@ -27,14 +27,6 @@ ansible-lint:
- ansible-lint -v
except: ['triggers', 'master']

jinja-syntax-check:
extends: .job
stage: unit-tests
tags: [light]
script:
- "find -name '*.j2' -exec tests/scripts/check-templates.py {} +"
except: ['triggers', 'master']

syntax-check:
extends: .job
stage: unit-tests

@@ -1,40 +1,30 @@
---

.molecule:
tags: [ffci-vm-med]
tags: [c3.small.x86]
only: [/^pr-.*$/]
except: ['triggers']
image: quay.io/kubespray/vm-kubespray-ci:v6
image: $PIPELINE_IMAGE
services: []
stage: deploy-part1
variables:
VAGRANT_DEFAULT_PROVIDER: "libvirt"
before_script:
- groups
- python3 -m venv citest
- source citest/bin/activate
- vagrant plugin expunge --reinstall --force --no-tty
- vagrant plugin install vagrant-libvirt
- pip install --no-compile --no-cache-dir pip -U
- pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/requirements.txt
- pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/tests/requirements.txt
- ./tests/scripts/rebase.sh
- ./tests/scripts/vagrant_clean.sh
- tests/scripts/rebase.sh
- ./tests/scripts/vagrant_clean.sh
script:
- ./tests/scripts/molecule_run.sh
- ./tests/scripts/molecule_run.sh
after_script:
- ./tests/scripts/molecule_logs.sh
- chronic ./tests/scripts/molecule_logs.sh
artifacts:
when: always
paths:
- molecule_logs/
- molecule_logs/

# CI template for periodic CI jobs
# Enabled when PERIODIC_CI_ENABLED var is set

.molecule_periodic:
only:
variables:
- $PERIODIC_CI_ENABLED
- $PERIODIC_CI_ENABLED
allow_failure: true
extends: .molecule

@@ -44,26 +34,26 @@ molecule_full:
molecule_no_container_engines:
extends: .molecule
script:
- ./tests/scripts/molecule_run.sh -e container-engine
- ./tests/scripts/molecule_run.sh -e container-engine
when: on_success

molecule_docker:
extends: .molecule
script:
- ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
- ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
when: on_success

molecule_containerd:
extends: .molecule
script:
- ./tests/scripts/molecule_run.sh -i container-engine/containerd
- ./tests/scripts/molecule_run.sh -i container-engine/containerd
when: on_success

molecule_cri-o:
extends: .molecule
stage: deploy-part2
script:
- ./tests/scripts/molecule_run.sh -i container-engine/cri-o
- ./tests/scripts/molecule_run.sh -i container-engine/cri-o
allow_failure: true
when: on_success

@@ -73,7 +63,7 @@ molecule_kata:
stage: deploy-part3
allow_failure: true
script:
- ./tests/scripts/molecule_run.sh -i container-engine/kata-containers
- ./tests/scripts/molecule_run.sh -i container-engine/kata-containers
when: on_success

molecule_gvisor:
@@ -81,7 +71,7 @@ molecule_gvisor:
stage: deploy-part3
allow_failure: true
script:
- ./tests/scripts/molecule_run.sh -i container-engine/gvisor
- ./tests/scripts/molecule_run.sh -i container-engine/gvisor
when: on_success

molecule_youki:
@@ -89,5 +79,5 @@ molecule_youki:
stage: deploy-part3
allow_failure: true
script:
- ./tests/scripts/molecule_run.sh -i container-engine/youki
- ./tests/scripts/molecule_run.sh -i container-engine/youki
when: on_success

@@ -31,8 +31,8 @@ packet_cleanup_old:
- make cleanup-packet
after_script: []

# The ubuntu20-calico-all-in-one jobs are meant as early stages to prevent running the full CI if something is horribly broken
packet_ubuntu20-calico-all-in-one:
# The ubuntu20-calico-aio jobs are meant as early stages to prevent running the full CI if something is horribly broken
packet_ubuntu20-calico-aio:
stage: deploy-part1
extends: .packet_pr
when: on_success
@@ -41,27 +41,22 @@ packet_ubuntu20-calico-all-in-one:

# ### PR JOBS PART2

packet_ubuntu20-all-in-one-docker:
packet_ubuntu20-aio-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_ubuntu20-calico-all-in-one-hardening:
packet_ubuntu20-calico-aio-hardening:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_ubuntu22-all-in-one-docker:
packet_ubuntu22-aio-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_ubuntu22-calico-all-in-one:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_ubuntu22-calico-etcd-datastore:
packet_ubuntu22-calico-aio:
stage: deploy-part2
extends: .packet_pr
when: on_success
@@ -240,7 +235,7 @@ packet_fedora37-calico-swap-selinux:
extends: .packet_pr
when: manual

packet_amazon-linux-2-all-in-one:
packet_amazon-linux-2-aio:
stage: deploy-part2
extends: .packet_pr
when: manual
@@ -265,11 +260,6 @@ packet_debian11-kubelet-csr-approver:
extends: .packet_pr
when: manual

packet_debian12-custom-cni-helm:
stage: deploy-part2
extends: .packet_pr
when: manual

# ### PR JOBS PART3
# Long jobs (45min+)

@@ -51,7 +51,7 @@
- tests/scripts/testcases_run.sh
after_script:
# Cleanup regardless of exit code
- ./tests/scripts/testcases_cleanup.sh
- chronic ./tests/scripts/testcases_cleanup.sh

tf-validate-openstack:
extends: .terraform_validate
@@ -155,7 +155,6 @@ tf-elastx_cleanup:
- pip install -r scripts/openstack-cleanup/requirements.txt
script:
- ./scripts/openstack-cleanup/main.py
allow_failure: true

tf-elastx_ubuntu20-calico:
extends: .terraform_apply

@@ -1,4 +1,5 @@
---

.vagrant:
extends: .testcases
variables:
@@ -6,31 +7,23 @@
SSH_USER: "vagrant"
VAGRANT_DEFAULT_PROVIDER: "libvirt"
KUBESPRAY_VAGRANT_CONFIG: tests/files/${CI_JOB_NAME}.rb
DOCKER_NAME: vagrant
VAGRANT_ANSIBLE_TAGS: facts
tags: [ffci-vm-large]
# only: [/^pr-.*$/]
# except: ['triggers']
image: quay.io/kubespray/vm-kubespray-ci:v6
tags: [c3.small.x86]
only: [/^pr-.*$/]
except: ['triggers']
image: $PIPELINE_IMAGE
services: []
before_script:
- echo $USER
- python3 -m venv citest
- source citest/bin/activate
- vagrant plugin expunge --reinstall --force --no-tty
- vagrant plugin install vagrant-libvirt
- pip install --no-compile --no-cache-dir pip -U
- pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/requirements.txt
- pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/tests/requirements.txt
- ./tests/scripts/vagrant_clean.sh
script:
- ./tests/scripts/testcases_run.sh
after_script:
- chronic ./tests/scripts/testcases_cleanup.sh
allow_failure: true

vagrant_ubuntu20-calico-dual-stack:
stage: deploy-part2
extends: .vagrant
when: manual
# FIXME: this test if broken (perma-failing)
when: on_success

vagrant_ubuntu20-weave-medium:
stage: deploy-part2
@@ -46,7 +39,7 @@ vagrant_ubuntu20-flannel:
vagrant_ubuntu20-flannel-collection:
stage: deploy-part2
extends: .vagrant
when: manual
when: on_success

vagrant_ubuntu20-kube-router-sep:
stage: deploy-part2
@@ -58,3 +51,13 @@ vagrant_ubuntu20-kube-router-svc-proxy:
stage: deploy-part2
extends: .vagrant
when: manual

vagrant_fedora37-kube-router:
stage: deploy-part2
extends: .vagrant
when: on_success

vagrant_centos7-kube-router:
stage: deploy-part2
extends: .vagrant
when: manual

@@ -69,12 +69,3 @@ repos:
entry: tests/scripts/md-table/test.sh
language: script
pass_filenames: false

- id: jinja-syntax-check
name: jinja-syntax-check
entry: tests/scripts/check-templates.py
language: python
types:
- jinja
additional_dependencies:
- Jinja2

61 Dockerfile
@@ -1,8 +1,5 @@
# syntax=docker/dockerfile:1

# Use imutable image tags rather than mutable tags (like ubuntu:22.04)
FROM ubuntu:22.04@sha256:149d67e29f765f4db62aa52161009e99e389544e25a8f43c8c89d4a445a7ca37

FROM ubuntu:jammy-20230308
# Some tools like yamllint need this
# Pip needs this as well at the moment to install ansible
# (and potentially other packages)
@@ -10,37 +7,7 @@ FROM ubuntu:22.04@sha256:149d67e29f765f4db62aa52161009e99e389544e25a8f43c8c89d4a
ENV LANG=C.UTF-8 \
DEBIAN_FRONTEND=noninteractive \
PYTHONDONTWRITEBYTECODE=1

WORKDIR /kubespray

# hadolint ignore=DL3008
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
apt-get update -q \
&& apt-get install -yq --no-install-recommends \
curl \
python3 \
python3-pip \
sshpass \
vim \
rsync \
openssh-client \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/log/*

RUN --mount=type=bind,source=requirements.txt,target=requirements.txt \
--mount=type=cache,sharing=locked,id=pipcache,mode=0777,target=/root/.cache/pip \
pip install --no-compile --no-cache-dir -r requirements.txt \
&& find /usr -type d -name '*__pycache__' -prune -exec rm -rf {} \;

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

RUN --mount=type=bind,source=roles/kubespray-defaults/defaults/main/main.yml,target=roles/kubespray-defaults/defaults/main/main.yml \
KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main/main.yml) \
OS_ARCHITECTURE=$(dpkg --print-architecture) \
&& curl -L "https://dl.k8s.io/release/${KUBE_VERSION}/bin/linux/${OS_ARCHITECTURE}/kubectl" -o /usr/local/bin/kubectl \
&& echo "$(curl -L "https://dl.k8s.io/release/${KUBE_VERSION}/bin/linux/${OS_ARCHITECTURE}/kubectl.sha256")" /usr/local/bin/kubectl | sha256sum --check \
&& chmod a+x /usr/local/bin/kubectl

COPY *.yml ./
COPY *.cfg ./
COPY roles ./roles
@@ -50,3 +17,29 @@ COPY library ./library
COPY extra_playbooks ./extra_playbooks
COPY playbooks ./playbooks
COPY plugins ./plugins

RUN apt update -q \
&& apt install -yq --no-install-recommends \
curl \
python3 \
python3-pip \
sshpass \
vim \
rsync \
openssh-client \
&& pip install --no-compile --no-cache-dir \
ansible==7.6.0 \
ansible-core==2.14.6 \
cryptography==41.0.1 \
jinja2==3.1.2 \
netaddr==0.8.0 \
jmespath==1.0.1 \
MarkupSafe==2.1.3 \
ruamel.yaml==0.17.21 \
passlib==1.7.4 \
&& KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
&& curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \
&& echo $(curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \
&& chmod a+x /usr/local/bin/kubectl \
&& rm -rf /var/lib/apt/lists/* /var/log/* \
&& find /usr -type d -name '*__pycache__' -prune -exec rm -rf {} \;

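Both RUN variants in this Dockerfile pin kubectl to the `kube_version` parsed out of kubespray-defaults and verify the downloaded binary against the published sha256 before marking it executable. The same verification pattern can be reproduced outside the image build; a minimal sketch, where the version value is an assumption taken from the README diff below, not from this Dockerfile:

```ShellSession
# KUBE_VERSION is an assumed example; the Dockerfile reads the real value with sed.
KUBE_VERSION=v1.28.14
ARCH=$(dpkg --print-architecture)
curl -L "https://dl.k8s.io/release/${KUBE_VERSION}/bin/linux/${ARCH}/kubectl" -o kubectl
# Compare against the upstream checksum; sha256sum exits non-zero on mismatch.
echo "$(curl -L "https://dl.k8s.io/release/${KUBE_VERSION}/bin/linux/${ARCH}/kubectl.sha256") kubectl" | sha256sum --check
chmod a+x kubectl
```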
@@ -24,7 +24,6 @@ aliases:
- mzaian
- mrfreezeex
- erikjiang
- vannten
kubespray-emeritus_approvers:
- riverzhang
- atoms

28 README.md
@@ -75,11 +75,11 @@ You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/)
to access the inventory and SSH key in the container, like this:

```ShellSession
git checkout v2.24.2
docker pull quay.io/kubespray/kubespray:v2.24.2
git checkout v2.22.1
docker pull quay.io/kubespray/kubespray:v2.22.1
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
--mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
quay.io/kubespray/kubespray:v2.23.2 bash
quay.io/kubespray/kubespray:v2.22.1 bash
# Inside the container you may now run the kubespray playbooks:
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
```
@@ -161,28 +161,28 @@ Note: Upstart/SysV init based OS types are not supported.
## Supported Components

- Core
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.28.14
- [etcd](https://github.com/etcd-io/etcd) v3.5.16
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.27.7
- [etcd](https://github.com/etcd-io/etcd) v3.5.9
- [docker](https://www.docker.com/) v20.10 (see note)
- [containerd](https://containerd.io/) v1.7.22
- [cri-o](http://cri-o.io/) v1.28 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
- [containerd](https://containerd.io/) v1.7.5
- [cri-o](http://cri-o.io/) v1.27 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
- Network Plugin
- [cni-plugins](https://github.com/containernetworking/plugins) v1.2.0
- [calico](https://github.com/projectcalico/calico) v3.26.4
- [calico](https://github.com/projectcalico/calico) v3.25.2
- [cilium](https://github.com/cilium/cilium) v1.13.4
- [flannel](https://github.com/flannel-io/flannel) v0.22.0
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.11.5
- [kube-router](https://github.com/cloudnativelabs/kube-router) v2.0.0
- [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1
- [multus](https://github.com/k8snetworkplumbingwg/multus-cni) v3.8
- [weave](https://github.com/weaveworks/weave) v2.8.1
- [kube-vip](https://github.com/kube-vip/kube-vip) v0.5.12
- Application
- [cert-manager](https://github.com/jetstack/cert-manager) v1.13.2
- [cert-manager](https://github.com/jetstack/cert-manager) v1.11.1
- [coredns](https://github.com/coredns/coredns) v1.10.1
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.9.4
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.8.1
- [krew](https://github.com/kubernetes-sigs/krew) v0.4.4
- [argocd](https://argoproj.github.io/) v2.8.4
- [helm](https://helm.sh/) v3.13.1
- [argocd](https://argoproj.github.io/) v2.8.0
- [helm](https://helm.sh/) v3.12.3
- [metallb](https://metallb.universe.tf/) v0.13.9
- [registry](https://github.com/distribution/distribution) v2.8.1
- Storage Plugin
@@ -202,7 +202,7 @@ Note: Upstart/SysV init based OS types are not supported.

## Requirements

- **Minimum required version of Kubernetes is v1.26**
- **Minimum required version of Kubernetes is v1.25**
- **Ansible v2.14+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
- The target servers are configured to allow **IPv4 forwarding**.

5 Vagrantfile vendored
@@ -255,9 +255,7 @@ Vagrant.configure("2") do |config|
"kubectl_localhost": "True",
"local_path_provisioner_enabled": "#{$local_path_provisioner_enabled}",
"local_path_provisioner_claim_root": "#{$local_path_provisioner_claim_root}",
"ansible_ssh_user": SUPPORTED_OS[$os][:user],
"ansible_ssh_private_key_file": File.join(Dir.home, ".vagrant.d", "insecure_private_key"),
"unsafe_show_logs": "True"
"ansible_ssh_user": SUPPORTED_OS[$os][:user]
}

# Only execute the Ansible provisioner once, when all the machines are up and ready.
@@ -265,7 +263,6 @@ Vagrant.configure("2") do |config|
if i == $num_instances
node.vm.provision "ansible" do |ansible|
ansible.playbook = $playbook
ansible.compatibility_mode = "2.0"
ansible.verbose = $ansible_verbosity
$ansible_inventory_path = File.join( $inventory, "hosts.ini")
if File.exist?($ansible_inventory_path)

@@ -5,9 +5,7 @@
Container image collecting script for offline deployment

This script has two features:

(1) Get container images from an environment which is deployed online.

(2) Deploy local container registry and register the container images to the registry.

Step(1) should be done online site as a preparation, then we bring the gotten images
@@ -29,7 +27,7 @@ manage-offline-container-images.sh register

## generate_list.sh

This script generates the list of downloaded files and the list of container images by `roles/kubespray-defaults/defaults/main/download.yml` file.
This script generates the list of downloaded files and the list of container images by `roles/download/defaults/main/main.yml` file.

Run this script will execute `generate_list.yml` playbook in kubespray root directory and generate four files,
all downloaded files url in files.list, all container images in images.list, jinja2 templates in *.template.

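Since generate_list.sh initializes `DOWNLOAD_YML` with a `: ${DOWNLOAD_YML:=...}` default (next hunk), the path difference between the two branches can also be bridged per invocation by overriding the variable in the environment. A usage sketch, assuming the script is run from its own directory:

```ShellSession
cd contrib/offline
./generate_list.sh                                                     # uses the branch's default path
DOWNLOAD_YML=roles/download/defaults/main/main.yml ./generate_list.sh  # explicit override for the old layout
```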
@@ -5,7 +5,7 @@ CURRENT_DIR=$(cd $(dirname $0); pwd)
TEMP_DIR="${CURRENT_DIR}/temp"
REPO_ROOT_DIR="${CURRENT_DIR%/contrib/offline}"

: ${DOWNLOAD_YML:="roles/kubespray-defaults/defaults/main/download.yml"}
: ${DOWNLOAD_YML:="roles/download/defaults/main/main.yml"}

mkdir -p ${TEMP_DIR}

@@ -19,7 +19,7 @@ sed -n '/^downloads:/,/download_defaults:/p' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
| sed 'N;s#\n# #g' | tr ' ' ':' | sed 's/\"//g' > ${TEMP_DIR}/images.list.template

# add kube-* images to images list template
# Those container images are downloaded by kubeadm, then roles/kubespray-defaults/defaults/main/download.yml
# Those container images are downloaded by kubeadm, then roles/download/defaults/main/main.yml
# doesn't contain those images. That is reason why here needs to put those images into the
# list separately.
KUBE_IMAGES="kube-apiserver kube-controller-manager kube-scheduler kube-proxy"

@@ -23,8 +23,8 @@ function create_container_image_tar() {
mkdir ${IMAGE_DIR}
cd ${IMAGE_DIR}

sudo ${runtime} pull registry:latest
sudo ${runtime} save -o registry-latest.tar registry:latest
sudo docker pull registry:latest
sudo docker save -o registry-latest.tar registry:latest

for image in ${IMAGES}
do
@@ -32,7 +32,7 @@ function create_container_image_tar() {
set +e
for step in $(seq 1 ${RETRY_COUNT})
do
sudo ${runtime} pull ${image}
sudo docker pull ${image}
if [ $? -eq 0 ]; then
break
fi
@@ -42,7 +42,7 @@
fi
done
set -e
sudo ${runtime} save -o ${FILE_NAME} ${image}
sudo docker save -o ${FILE_NAME} ${image}

# NOTE: Here removes the following repo parts from each image
# so that these parts will be replaced with Kubespray.
@@ -95,16 +95,16 @@ function register_container_images() {
sed -i s@"HOSTNAME"@"${LOCALHOST_NAME}"@ ${TEMP_DIR}/registries.conf
sudo cp ${TEMP_DIR}/registries.conf /etc/containers/registries.conf
else
echo "runtime package(docker-ce, podman, nerctl, etc.) should be installed"
echo "docker package(docker-ce, etc.) should be installed"
exit 1
fi

tar -zxvf ${IMAGE_TAR_FILE}
sudo ${runtime} load -i ${IMAGE_DIR}/registry-latest.tar
sudo docker load -i ${IMAGE_DIR}/registry-latest.tar
set +e
sudo ${runtime} container inspect registry >/dev/null 2>&1
sudo docker container inspect registry >/dev/null 2>&1
if [ $? -ne 0 ]; then
sudo ${runtime} run --restart=always -d -p 5000:5000 --name registry registry:latest
sudo docker run --restart=always -d -p 5000:5000 --name registry registry:latest
fi
set -e

@@ -112,8 +112,8 @@ function register_container_images() {
file_name=$(echo ${line} | awk '{print $1}')
raw_image=$(echo ${line} | awk '{print $2}')
new_image="${LOCALHOST_NAME}:5000/${raw_image}"
org_image=$(sudo ${runtime} load -i ${IMAGE_DIR}/${file_name} | head -n1 | awk '{print $3}')
image_id=$(sudo ${runtime} image inspect ${org_image} | grep "\"Id\":" | awk -F: '{print $3}'| sed s/'\",'//)
org_image=$(sudo docker load -i ${IMAGE_DIR}/${file_name} | head -n1 | awk '{print $3}')
image_id=$(sudo docker image inspect ${org_image} | grep "\"Id\":" | awk -F: '{print $3}'| sed s/'\",'//)
if [ -z "${file_name}" ]; then
echo "Failed to get file_name for line ${line}"
exit 1
@@ -130,9 +130,9 @@ function register_container_images() {
echo "Failed to get image_id for file ${file_name}"
exit 1
fi
sudo ${runtime} load -i ${IMAGE_DIR}/${file_name}
sudo ${runtime} tag ${image_id} ${new_image}
sudo ${runtime} push ${new_image}
sudo docker load -i ${IMAGE_DIR}/${file_name}
sudo docker tag ${image_id} ${new_image}
sudo docker push ${new_image}
done <<< "$(cat ${IMAGE_LIST})"

echo "Succeeded to register container images to local registry."
@@ -143,18 +143,6 @@ function register_container_images() {
echo "- quay_image_repo"
}

# get runtime command
if command -v nerdctl 1>/dev/null 2>&1; then
runtime="nerdctl"
elif command -v podman 1>/dev/null 2>&1; then
runtime="podman"
elif command -v docker 1>/dev/null 2>&1; then
runtime="docker"
else
echo "No supported container runtime found"
exit 1
fi

if [ "${OPTION}" == "create" ]; then
create_container_image_tar
elif [ "${OPTION}" == "register" ]; then

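The detection order above (nerdctl, then podman, then docker) only sets `${runtime}`; the script's two entry points are unchanged, so invocation is the same whichever runtime is found. Based on the options visible in this diff:

```ShellSession
# on the online host: pull the images and archive them
./manage-offline-container-images.sh create
# on the offline host: load the archive and push everything to the local registry
./manage-offline-container-images.sh register
```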
@@ -38,7 +38,7 @@ sudo "${runtime}" container inspect nginx >/dev/null 2>&1
if [ $? -ne 0 ]; then
sudo "${runtime}" run \
--restart=always -d -p ${NGINX_PORT}:80 \
--volume "${OFFLINE_FILES_DIR}":/usr/share/nginx/html/download \
--volume "${OFFLINE_FILES_DIR}:/usr/share/nginx/html/download" \
--volume "${CURRENT_DIR}"/nginx.conf:/etc/nginx/nginx.conf \
--name nginx nginx:alpine
fi

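The change in this hunk only moves the quotes around the volume argument; either way the container serves ${OFFLINE_FILES_DIR} under /download. A quick smoke test once it is running — the port value here is an assumption, use whatever ${NGINX_PORT} was set to:

```ShellSession
NGINX_PORT=8080   # assumed example value
curl "http://localhost:${NGINX_PORT}/download/"
```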
@@ -50,32 +50,70 @@ Example (this one assumes you are using Ubuntu)
ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=ubuntu -b --become-user=root --flush-cache
```

## Using other distrib than Ubuntu***
***Using other distrib than Ubuntu***
If you want to use another distribution than Ubuntu 18.04 (Bionic) LTS, you can modify the search filters of the 'data "aws_ami" "distro"' in variables.tf.

To leverage a Linux distribution other than Ubuntu 18.04 (Bionic) LTS for your Terraform configurations, you can adjust the AMI search filters within the 'data "aws_ami" "distro"' block by utilizing variables in your `terraform.tfvars` file. This approach ensures a flexible configuration that adapts to various Linux distributions without directly modifying the core Terraform files.
For example, to use:

### Example Usages
- Debian Jessie, replace 'data "aws_ami" "distro"' in variables.tf with

- **Debian Jessie**: To configure the usage of Debian Jessie, insert the subsequent lines into your `terraform.tfvars`:
```ini
data "aws_ami" "distro" {
most_recent = true

```hcl
ami_name_pattern = "debian-jessie-amd64-hvm-*"
ami_owners = ["379101102735"]
```
filter {
name = "name"
values = ["debian-jessie-amd64-hvm-*"]
}

- **Ubuntu 16.04**: To utilize Ubuntu 16.04 instead, apply the following configuration in your `terraform.tfvars`:
filter {
name = "virtualization-type"
values = ["hvm"]
}

```hcl
ami_name_pattern = "ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-*"
ami_owners = ["099720109477"]
```
owners = ["379101102735"]
}
```

- **Centos 7**: For employing Centos 7, incorporate these lines into your `terraform.tfvars`:
- Ubuntu 16.04, replace 'data "aws_ami" "distro"' in variables.tf with

```hcl
ami_name_pattern = "dcos-centos7-*"
ami_owners = ["688023202711"]
```
```ini
data "aws_ami" "distro" {
most_recent = true

filter {
name = "name"
values = ["ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-*"]
}

filter {
name = "virtualization-type"
values = ["hvm"]
}

owners = ["099720109477"]
}
```

- Centos 7, replace 'data "aws_ami" "distro"' in variables.tf with

```ini
data "aws_ami" "distro" {
most_recent = true

filter {
name = "name"
values = ["dcos-centos7-*"]
}

filter {
name = "virtualization-type"
values = ["hvm"]
}

owners = ["688023202711"]
}
```

## Connecting to Kubernetes

@@ -20,38 +20,20 @@ variable "aws_cluster_name" {
description = "Name of AWS Cluster"
}

variable "ami_name_pattern" {
description = "The name pattern to use for AMI lookup"
type = string
default = "debian-10-amd64-*"
}

variable "ami_virtualization_type" {
description = "The virtualization type to use for AMI lookup"
type = string
default = "hvm"
}

variable "ami_owners" {
description = "The owners to use for AMI lookup"
type = list(string)
default = ["136693071363"]
}

data "aws_ami" "distro" {
most_recent = true

filter {
name = "name"
values = [var.ami_name_pattern]
values = ["debian-10-amd64-*"]
}

filter {
name = "virtualization-type"
values = [var.ami_virtualization_type]
values = ["hvm"]
}

owners = var.ami_owners
owners = ["136693071363"] # Debian-10
}

//AWS VPC Variables

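With the three lookup variables in place, the AMI can be switched without editing variables.tf at all — either through terraform.tfvars (as the README above shows) or on the command line. A sketch reusing the CentOS 7 values from the README examples:

```ShellSession
terraform plan \
  -var 'ami_name_pattern=dcos-centos7-*' \
  -var 'ami_owners=["688023202711"]'
```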
@@ -7,7 +7,7 @@ terraform {
required_providers {
equinix = {
source = "equinix/equinix"
version = "1.24.0"
version = "~> 1.14"
}
}
}

@@ -12,7 +12,7 @@ ssh_public_keys = [
machines = {
"master-0" : {
"node_type" : "master",
"size" : "standard.medium",
"size" : "Medium",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,
@@ -22,7 +22,7 @@ machines = {
},
"worker-0" : {
"node_type" : "worker",
"size" : "standard.large",
"size" : "Large",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,
@@ -32,7 +32,7 @@ machines = {
},
"worker-1" : {
"node_type" : "worker",
"size" : "standard.large",
"size" : "Large",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,
@@ -42,7 +42,7 @@ machines = {
},
"worker-2" : {
"node_type" : "worker",
"size" : "standard.large",
"size" : "Large",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,

@@ -1,25 +1,29 @@
data "exoscale_template" "os_image" {
data "exoscale_compute_template" "os_image" {
for_each = var.machines

zone = var.zone
name = each.value.boot_disk.image_name
}

data "exoscale_compute_instance" "master_nodes" {
for_each = exoscale_compute_instance.master
data "exoscale_compute" "master_nodes" {
for_each = exoscale_compute.master

id = each.value.id
zone = var.zone
id = each.value.id

# Since private IP address is not assigned until the nics are created we need this
depends_on = [exoscale_nic.master_private_network_nic]
}

data "exoscale_compute_instance" "worker_nodes" {
for_each = exoscale_compute_instance.worker
data "exoscale_compute" "worker_nodes" {
for_each = exoscale_compute.worker

id = each.value.id
zone = var.zone
id = each.value.id

# Since private IP address is not assigned until the nics are created we need this
depends_on = [exoscale_nic.worker_private_network_nic]
}

resource "exoscale_private_network" "private_network" {
resource "exoscale_network" "private_network" {
zone = var.zone
name = "${var.prefix}-network"

@@ -30,29 +34,25 @@ resource "exoscale_private_network" "private_network" {
netmask = cidrnetmask(var.private_network_cidr)
}

resource "exoscale_compute_instance" "master" {
resource "exoscale_compute" "master" {
for_each = {
for name, machine in var.machines :
name => machine
if machine.node_type == "master"
}

name = "${var.prefix}-${each.key}"
template_id = data.exoscale_template.os_image[each.key].id
type = each.value.size
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
state = "Running"
zone = var.zone
security_group_ids = [exoscale_security_group.master_sg.id]
network_interface {
network_id = exoscale_private_network.private_network.id
}
elastic_ip_ids = [exoscale_elastic_ip.control_plane_lb.id]
display_name = "${var.prefix}-${each.key}"
template_id = data.exoscale_compute_template.os_image[each.key].id
size = each.value.size
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
state = "Running"
zone = var.zone
security_groups = [exoscale_security_group.master_sg.name]

user_data = templatefile(
"${path.module}/templates/cloud-init.tmpl",
{
eip_ip_address = exoscale_elastic_ip.ingress_controller_lb.ip_address
eip_ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
node_local_partition_size = each.value.boot_disk.node_local_partition_size
ceph_partition_size = each.value.boot_disk.ceph_partition_size
root_partition_size = each.value.boot_disk.root_partition_size
@@ -62,29 +62,25 @@ resource "exoscale_compute_instance" "master" {
)
}

resource "exoscale_compute_instance" "worker" {
resource "exoscale_compute" "worker" {
for_each = {
for name, machine in var.machines :
name => machine
if machine.node_type == "worker"
}

name = "${var.prefix}-${each.key}"
template_id = data.exoscale_template.os_image[each.key].id
type = each.value.size
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
state = "Running"
zone = var.zone
security_group_ids = [exoscale_security_group.worker_sg.id]
network_interface {
network_id = exoscale_private_network.private_network.id
}
elastic_ip_ids = [exoscale_elastic_ip.ingress_controller_lb.id]
display_name = "${var.prefix}-${each.key}"
template_id = data.exoscale_compute_template.os_image[each.key].id
size = each.value.size
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
state = "Running"
zone = var.zone
security_groups = [exoscale_security_group.worker_sg.name]

user_data = templatefile(
"${path.module}/templates/cloud-init.tmpl",
{
eip_ip_address = exoscale_elastic_ip.ingress_controller_lb.ip_address
eip_ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
node_local_partition_size = each.value.boot_disk.node_local_partition_size
ceph_partition_size = each.value.boot_disk.ceph_partition_size
root_partition_size = each.value.boot_disk.root_partition_size
@@ -94,33 +90,41 @@ resource "exoscale_compute_instance" "worker" {
)
}

resource "exoscale_nic" "master_private_network_nic" {
for_each = exoscale_compute.master

compute_id = each.value.id
network_id = exoscale_network.private_network.id
}

resource "exoscale_nic" "worker_private_network_nic" {
for_each = exoscale_compute.worker

compute_id = each.value.id
network_id = exoscale_network.private_network.id
}

resource "exoscale_security_group" "master_sg" {
name = "${var.prefix}-master-sg"
description = "Security group for Kubernetes masters"
}

resource "exoscale_security_group_rule" "master_sg_rule_ssh" {
resource "exoscale_security_group_rules" "master_sg_rules" {
security_group_id = exoscale_security_group.master_sg.id

for_each = toset(var.ssh_whitelist)
# SSH
type = "INGRESS"
start_port = 22
end_port = 22
protocol = "TCP"
cidr = each.value
}
ingress {
protocol = "TCP"
cidr_list = var.ssh_whitelist
ports = ["22"]
}

resource "exoscale_security_group_rule" "master_sg_rule_k8s_api" {
security_group_id = exoscale_security_group.master_sg.id

for_each = toset(var.api_server_whitelist)
# Kubernetes API
type = "INGRESS"
start_port = 6443
end_port = 6443
protocol = "TCP"
cidr = each.value
ingress {
protocol = "TCP"
cidr_list = var.api_server_whitelist
ports = ["6443"]
}
}

resource "exoscale_security_group" "worker_sg" {
@@ -128,64 +132,62 @@ resource "exoscale_security_group" "worker_sg" {
description = "security group for kubernetes worker nodes"
}

resource "exoscale_security_group_rule" "worker_sg_rule_ssh" {
resource "exoscale_security_group_rules" "worker_sg_rules" {
security_group_id = exoscale_security_group.worker_sg.id

# SSH
for_each = toset(var.ssh_whitelist)
type = "INGRESS"
start_port = 22
end_port = 22
protocol = "TCP"
cidr = each.value
}

resource "exoscale_security_group_rule" "worker_sg_rule_http" {
security_group_id = exoscale_security_group.worker_sg.id
ingress {
protocol = "TCP"
cidr_list = var.ssh_whitelist
ports = ["22"]
}

# HTTP(S)
for_each = toset(["80", "443"])
type = "INGRESS"
start_port = each.value
end_port = each.value
protocol = "TCP"
cidr = "0.0.0.0/0"
}
ingress {
protocol = "TCP"
cidr_list = ["0.0.0.0/0"]
ports = ["80", "443"]
}


resource "exoscale_security_group_rule" "worker_sg_rule_nodeport" {
security_group_id = exoscale_security_group.worker_sg.id

# HTTP(S)
for_each = toset(var.nodeport_whitelist)
type = "INGRESS"
start_port = 30000
end_port = 32767
protocol = "TCP"
cidr = each.value
}

resource "exoscale_elastic_ip" "ingress_controller_lb" {
zone = var.zone
healthcheck {
mode = "http"
port = 80
uri = "/healthz"
interval = 10
timeout = 2
strikes_ok = 2
strikes_fail = 3
# Kubernetes Nodeport
ingress {
protocol = "TCP"
cidr_list = var.nodeport_whitelist
ports = ["30000-32767"]
}
}

resource "exoscale_elastic_ip" "control_plane_lb" {
zone = var.zone
healthcheck {
mode = "tcp"
port = 6443
interval = 10
timeout = 2
strikes_ok = 2
strikes_fail = 3
}
resource "exoscale_ipaddress" "ingress_controller_lb" {
zone = var.zone
healthcheck_mode = "http"
healthcheck_port = 80
healthcheck_path = "/healthz"
healthcheck_interval = 10
healthcheck_timeout = 2
healthcheck_strikes_ok = 2
healthcheck_strikes_fail = 3
}

resource "exoscale_secondary_ipaddress" "ingress_controller_lb" {
for_each = exoscale_compute.worker

compute_id = each.value.id
ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
}

resource "exoscale_ipaddress" "control_plane_lb" {
zone = var.zone
healthcheck_mode = "tcp"
healthcheck_port = 6443
healthcheck_interval = 10
healthcheck_timeout = 2
healthcheck_strikes_ok = 2
healthcheck_strikes_fail = 3
}

resource "exoscale_secondary_ipaddress" "control_plane_lb" {
for_each = exoscale_compute.master

compute_id = each.value.id
ip_address = exoscale_ipaddress.control_plane_lb.ip_address
}

@@ -1,19 +1,19 @@
output "master_ip_addresses" {
value = {
for key, instance in exoscale_compute_instance.master :
for key, instance in exoscale_compute.master :
instance.name => {
"private_ip" = contains(keys(data.exoscale_compute_instance.master_nodes), key) ? data.exoscale_compute_instance.master_nodes[key].private_network_ip_addresses[0] : ""
"public_ip" = exoscale_compute_instance.master[key].ip_address
"private_ip" = contains(keys(data.exoscale_compute.master_nodes), key) ? data.exoscale_compute.master_nodes[key].private_network_ip_addresses[0] : ""
"public_ip" = exoscale_compute.master[key].ip_address
}
}
}

output "worker_ip_addresses" {
value = {
for key, instance in exoscale_compute_instance.worker :
for key, instance in exoscale_compute.worker :
instance.name => {
"private_ip" = contains(keys(data.exoscale_compute_instance.worker_nodes), key) ? data.exoscale_compute_instance.worker_nodes[key].private_network_ip_addresses[0] : ""
"public_ip" = exoscale_compute_instance.worker[key].ip_address
"private_ip" = contains(keys(data.exoscale_compute.worker_nodes), key) ? data.exoscale_compute.worker_nodes[key].private_network_ip_addresses[0] : ""
"public_ip" = exoscale_compute.worker[key].ip_address
}
}
}
@@ -23,9 +23,9 @@ output "cluster_private_network_cidr" {
}

output "ingress_controller_lb_ip_address" {
value = exoscale_elastic_ip.ingress_controller_lb.ip_address
value = exoscale_ipaddress.ingress_controller_lb.ip_address
}

output "control_plane_lb_ip_address" {
value = exoscale_elastic_ip.control_plane_lb.ip_address
value = exoscale_ipaddress.control_plane_lb.ip_address
}

@@ -1,7 +1,7 @@
terraform {
required_providers {
exoscale = {
source = "exoscale/exoscale"
source = "exoscale/exoscale"
version = ">= 0.21"
}
}

2 contrib/terraform/openstack/.gitignore vendored
@@ -1,5 +1,5 @@
.terraform
*.tfvars
!sample-inventory/cluster.tfvars
!sample-inventory\/cluster.tfvars
*.tfstate
*.tfstate.backup

@@ -318,7 +318,6 @@ k8s_nodes:
mount_path: string # Path to where the partition should be mounted
partition_start: string # Where the partition should start (e.g. 10GB ). Note, if you set the partition_start to 0 there will be no space left for the root partition
partition_end: string # Where the partition should end (e.g. 10GB or -1 for end of volume)
netplan_critical_dhcp_interface: string # Name of interface to set the dhcp flag critical = true, to circumvent [this issue](https://bugs.launchpad.net/ubuntu/+source/systemd/+bug/1776013).
```

For example:

@@ -19,8 +19,8 @@ data "cloudinit_config" "cloudinit" {
part {
content_type = "text/cloud-config"
content = templatefile("${path.module}/templates/cloudinit.yaml.tmpl", {
extra_partitions = [],
netplan_critical_dhcp_interface = ""
# template_file doesn't support lists
extra_partitions = ""
})
}
}
@@ -821,8 +821,7 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
flavor_id = each.value.flavor
key_pair = openstack_compute_keypair_v2.k8s.name
user_data = each.value.cloudinit != null ? templatefile("${path.module}/templates/cloudinit.yaml.tmpl", {
extra_partitions = each.value.cloudinit.extra_partitions,
netplan_critical_dhcp_interface = each.value.cloudinit.netplan_critical_dhcp_interface,
extra_partitions = each.value.cloudinit.extra_partitions
}) : data.cloudinit_config.cloudinit.rendered

dynamic "block_device" {

@@ -1,4 +1,4 @@
%{~ if length(extra_partitions) > 0 || netplan_critical_dhcp_interface != "" }
%{~ if length(extra_partitions) > 0 }
#cloud-config
bootcmd:
%{~ for idx, partition in extra_partitions }
@@ -8,26 +8,11 @@ bootcmd:
%{~ endfor }

runcmd:
%{~ if netplan_critical_dhcp_interface != "" }
- netplan apply
%{~ endif }
%{~ for idx, partition in extra_partitions }
- mkdir -p ${partition.mount_path}
- chown nobody:nogroup ${partition.mount_path}
- mount ${partition.partition_path} ${partition.mount_path}
%{~ endfor ~}

%{~ if netplan_critical_dhcp_interface != "" }
write_files:
- path: /etc/netplan/90-critical-dhcp.yaml
content: |
network:
version: 2
ethernets:
${ netplan_critical_dhcp_interface }:
dhcp4: true
critical: true
%{~ endif }
%{~ endfor }

mounts:
%{~ for idx, partition in extra_partitions }

@@ -142,14 +142,13 @@ variable "k8s_nodes" {
additional_server_groups = optional(list(string))
server_group = optional(string)
cloudinit = optional(object({
extra_partitions = optional(list(object({
extra_partitions = list(object({
volume_path = string
partition_path = string
partition_start = string
partition_end = string
mount_path = string
})), [])
netplan_critical_dhcp_interface = optional(string, "")
}))
}))
}))
}

@@ -140,4 +140,4 @@ terraform destroy --var-file cluster-settings.tfvars \
* `backend_servers`: List of servers that traffic to the port should be forwarded to.
* `server_groups`: Group servers together
* `servers`: The servers that should be included in the group.
* `anti_affinity_policy`: Defines if a server group is an anti-affinity group. Setting this to "strict" or yes" will result in all servers in the group being placed on separate compute hosts. The value can be "strict", "yes" or "no". "strict" refers to strict policy doesn't allow servers in the same server group to be on the same host. "yes" refers to best-effort policy and tries to put servers on different hosts, but this is not guaranteed.
* `anti_affinity`: If anti-affinity should be enabled, try to spread the VMs out on separate nodes.

@@ -18,7 +18,7 @@ ssh_public_keys = [

# check list of available plan https://developers.upcloud.com/1.3/7-plans/
machines = {
"control-plane-0" : {
"master-0" : {
"node_type" : "master",
# plan to use instead of custom cpu/mem
"plan" : null,
@@ -133,9 +133,9 @@ loadbalancers = {
server_groups = {
# "control-plane" = {
#   servers = [
#     "control-plane-0"
#     "master-0"
#   ]
#   anti_affinity_policy = "strict"
#   anti_affinity = true
# },
# "workers" = {
#   servers = [
@@ -143,6 +143,6 @@ server_groups = {
#     "worker-1",
#     "worker-2"
#   ]
#   anti_affinity_policy = "yes"
#   anti_affinity = true
# }
}
@@ -3,7 +3,7 @@ locals {
disks = flatten([
for node_name, machine in var.machines : [
for disk_name, disk in machine.additional_disks : {
disk = disk
disk = disk
disk_name = disk_name
node_name = node_name
}
@@ -13,8 +13,8 @@ locals {
lb_backend_servers = flatten([
for lb_name, loadbalancer in var.loadbalancers : [
for backend_server in loadbalancer.backend_servers : {
port = loadbalancer.target_port
lb_name = lb_name
port = loadbalancer.target_port
lb_name = lb_name
server_name = backend_server
}
]
@@ -22,7 +22,7 @@ locals {

# If prefix is set, all resources will be prefixed with "${var.prefix}-"
# Else don't prefix with anything
resource-prefix = "%{if var.prefix != ""}${var.prefix}-%{endif}"
resource-prefix = "%{ if var.prefix != ""}${var.prefix}-%{ endif }"
}
resource "upcloud_network" "private" {
@@ -38,7 +38,7 @@ resource "upcloud_network" "private" {

resource "upcloud_storage" "additional_disks" {
for_each = {
for disk in local.disks : "${disk.node_name}_${disk.disk_name}" => disk.disk
for disk in local.disks: "${disk.node_name}_${disk.disk_name}" => disk.disk
}

size = each.value.size
@@ -61,8 +61,8 @@ resource "upcloud_server" "master" {
zone = var.zone

template {
storage = var.template_name
size = each.value.disk_size
storage = var.template_name
size = each.value.disk_size
}

# Public network interface
@@ -81,14 +81,14 @@ resource "upcloud_server" "master" {
ignore_changes = [storage_devices]
}

firewall = var.firewall_enabled
firewall = var.firewall_enabled

dynamic "storage_devices" {
for_each = {
for disk_key_name, disk in upcloud_storage.additional_disks :
disk_key_name => disk
# Only add the disk if it matches the node name in the start of its name
if length(regexall("^${each.key}_.+", disk_key_name)) > 0
disk_key_name => disk
# Only add the disk if it matches the node name in the start of its name
if length(regexall("^${each.key}_.+", disk_key_name)) > 0
}

content {
@@ -138,14 +138,14 @@ resource "upcloud_server" "worker" {
ignore_changes = [storage_devices]
}

firewall = var.firewall_enabled
firewall = var.firewall_enabled

dynamic "storage_devices" {
for_each = {
for disk_key_name, disk in upcloud_storage.additional_disks :
disk_key_name => disk
# Only add the disk if it matches the node name in the start of its name
if length(regexall("^${each.key}_.+", disk_key_name)) > 0
disk_key_name => disk
# Only add the disk if it matches the node name in the start of its name
if length(regexall("^${each.key}_.+", disk_key_name)) > 0
}

content {
@@ -162,10 +162,10 @@ resource "upcloud_server" "worker" {
}
resource "upcloud_firewall_rules" "master" {
|
||||
for_each = upcloud_server.master
|
||||
for_each = upcloud_server.master
|
||||
server_id = each.value.id
|
||||
|
||||
dynamic "firewall_rule" {
|
||||
dynamic firewall_rule {
|
||||
for_each = var.master_allowed_remote_ips
|
||||
|
||||
content {
|
||||
@@ -181,7 +181,7 @@ resource "upcloud_firewall_rules" "master" {
|
||||
}
|
||||
}
|
||||
|
||||
dynamic "firewall_rule" {
|
||||
dynamic firewall_rule {
|
||||
for_each = length(var.master_allowed_remote_ips) > 0 ? [1] : []
|
||||
|
||||
content {
|
||||
@@ -197,7 +197,7 @@ resource "upcloud_firewall_rules" "master" {
|
||||
}
|
||||
}
|
||||
|
||||
dynamic "firewall_rule" {
|
||||
dynamic firewall_rule {
|
||||
for_each = var.k8s_allowed_remote_ips
|
||||
|
||||
content {
|
||||
@@ -213,7 +213,7 @@ resource "upcloud_firewall_rules" "master" {
|
||||
}
|
||||
}
|
||||
|
||||
dynamic "firewall_rule" {
|
||||
dynamic firewall_rule {
|
||||
for_each = length(var.k8s_allowed_remote_ips) > 0 ? [1] : []
|
||||
|
||||
content {
|
||||
@@ -229,7 +229,7 @@ resource "upcloud_firewall_rules" "master" {
|
||||
}
|
||||
}
|
||||
|
||||
dynamic "firewall_rule" {
|
||||
dynamic firewall_rule {
|
||||
for_each = var.master_allowed_ports
|
||||
|
||||
content {
|
||||
@@ -245,97 +245,97 @@ resource "upcloud_firewall_rules" "master" {
|
||||
}
|
||||
}
|
||||
|
||||
dynamic "firewall_rule" {
|
||||
dynamic firewall_rule {
|
||||
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
|
||||
|
||||
content {
|
||||
action = "accept"
|
||||
comment = "UpCloud DNS"
|
||||
source_port_end = "53"
|
||||
source_port_start = "53"
|
||||
direction = "in"
|
||||
family = "IPv4"
|
||||
protocol = firewall_rule.value
|
||||
source_address_end = "94.237.40.9"
|
||||
source_address_start = "94.237.40.9"
|
||||
action = "accept"
|
||||
comment = "UpCloud DNS"
|
||||
source_port_end = "53"
|
||||
source_port_start = "53"
|
||||
direction = "in"
|
||||
family = "IPv4"
|
||||
protocol = firewall_rule.value
|
||||
source_address_end = "94.237.40.9"
|
||||
source_address_start = "94.237.40.9"
|
||||
}
|
||||
}
|
||||
|
||||
dynamic "firewall_rule" {
|
||||
dynamic firewall_rule {
|
||||
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
|
||||
|
||||
content {
|
||||
action = "accept"
|
||||
comment = "UpCloud DNS"
|
||||
source_port_end = "53"
|
||||
source_port_start = "53"
|
||||
direction = "in"
|
||||
family = "IPv4"
|
||||
protocol = firewall_rule.value
|
||||
source_address_end = "94.237.127.9"
|
||||
source_address_start = "94.237.127.9"
|
||||
action = "accept"
|
||||
comment = "UpCloud DNS"
|
||||
source_port_end = "53"
|
||||
source_port_start = "53"
|
||||
direction = "in"
|
||||
family = "IPv4"
|
||||
protocol = firewall_rule.value
|
||||
source_address_end = "94.237.127.9"
|
||||
source_address_start = "94.237.127.9"
|
||||
}
|
||||
}
|
||||
|
||||
dynamic "firewall_rule" {
|
||||
dynamic firewall_rule {
|
||||
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
|
||||
|
||||
content {
|
||||
action = "accept"
|
||||
comment = "UpCloud DNS"
|
||||
source_port_end = "53"
|
||||
source_port_start = "53"
|
||||
direction = "in"
|
||||
family = "IPv6"
|
||||
protocol = firewall_rule.value
|
||||
source_address_end = "2a04:3540:53::1"
|
||||
source_address_start = "2a04:3540:53::1"
|
||||
action = "accept"
|
||||
comment = "UpCloud DNS"
|
||||
source_port_end = "53"
|
||||
source_port_start = "53"
|
||||
direction = "in"
|
||||
family = "IPv6"
|
||||
protocol = firewall_rule.value
|
||||
source_address_end = "2a04:3540:53::1"
|
||||
source_address_start = "2a04:3540:53::1"
|
||||
}
|
||||
}
|
||||
|
||||
dynamic "firewall_rule" {
|
||||
dynamic firewall_rule {
|
||||
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
|
||||
|
||||
content {
|
||||
action = "accept"
|
||||
comment = "UpCloud DNS"
|
||||
source_port_end = "53"
|
||||
source_port_start = "53"
|
||||
direction = "in"
|
||||
family = "IPv6"
|
||||
protocol = firewall_rule.value
|
||||
source_address_end = "2a04:3544:53::1"
|
||||
source_address_start = "2a04:3544:53::1"
|
||||
action = "accept"
|
||||
comment = "UpCloud DNS"
|
||||
source_port_end = "53"
|
||||
source_port_start = "53"
|
||||
direction = "in"
|
||||
family = "IPv6"
|
||||
protocol = firewall_rule.value
|
||||
source_address_end = "2a04:3544:53::1"
|
||||
source_address_start = "2a04:3544:53::1"
|
||||
}
|
||||
}
|
||||
|
||||
dynamic "firewall_rule" {
|
||||
dynamic firewall_rule {
|
||||
for_each = var.firewall_default_deny_in ? ["udp"] : []
|
||||
|
||||
content {
|
||||
action = "accept"
|
||||
comment = "NTP Port"
|
||||
source_port_end = "123"
|
||||
source_port_start = "123"
|
||||
direction = "in"
|
||||
family = "IPv4"
|
||||
protocol = firewall_rule.value
|
||||
source_address_end = "255.255.255.255"
|
||||
source_address_start = "0.0.0.0"
|
||||
action = "accept"
|
||||
comment = "NTP Port"
|
||||
source_port_end = "123"
|
||||
source_port_start = "123"
|
||||
direction = "in"
|
||||
family = "IPv4"
|
||||
protocol = firewall_rule.value
|
||||
source_address_end = "255.255.255.255"
|
||||
source_address_start = "0.0.0.0"
|
||||
}
|
||||
}
|
||||
|
||||
dynamic "firewall_rule" {
|
||||
dynamic firewall_rule {
|
||||
for_each = var.firewall_default_deny_in ? ["udp"] : []
|
||||
|
||||
content {
|
||||
action = "accept"
|
||||
comment = "NTP Port"
|
||||
source_port_end = "123"
|
||||
source_port_start = "123"
|
||||
direction = "in"
|
||||
family = "IPv6"
|
||||
protocol = firewall_rule.value
|
||||
action = "accept"
|
||||
comment = "NTP Port"
|
||||
source_port_end = "123"
|
||||
source_port_start = "123"
|
||||
direction = "in"
|
||||
family = "IPv6"
|
||||
protocol = firewall_rule.value
|
||||
}
|
||||
}
|
||||
|
||||
@@ -351,10 +351,10 @@ resource "upcloud_firewall_rules" "master" {
}

resource "upcloud_firewall_rules" "k8s" {
for_each = upcloud_server.worker
for_each = upcloud_server.worker
server_id = each.value.id

dynamic "firewall_rule" {
dynamic firewall_rule {
for_each = var.k8s_allowed_remote_ips

content {
@@ -370,7 +370,7 @@ resource "upcloud_firewall_rules" "k8s" {
}
}

dynamic "firewall_rule" {
dynamic firewall_rule {
for_each = length(var.k8s_allowed_remote_ips) > 0 ? [1] : []

content {
@@ -386,7 +386,7 @@ resource "upcloud_firewall_rules" "k8s" {
}
}

dynamic "firewall_rule" {
dynamic firewall_rule {
for_each = var.worker_allowed_ports

content {
@@ -402,97 +402,97 @@ resource "upcloud_firewall_rules" "k8s" {
}
}

dynamic "firewall_rule" {
dynamic firewall_rule {
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []

content {
action = "accept"
comment = "UpCloud DNS"
source_port_end = "53"
source_port_start = "53"
direction = "in"
family = "IPv4"
protocol = firewall_rule.value
source_address_end = "94.237.40.9"
source_address_start = "94.237.40.9"
action = "accept"
comment = "UpCloud DNS"
source_port_end = "53"
source_port_start = "53"
direction = "in"
family = "IPv4"
protocol = firewall_rule.value
source_address_end = "94.237.40.9"
source_address_start = "94.237.40.9"
}
}

dynamic "firewall_rule" {
dynamic firewall_rule {
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []

content {
action = "accept"
comment = "UpCloud DNS"
source_port_end = "53"
source_port_start = "53"
direction = "in"
family = "IPv4"
protocol = firewall_rule.value
source_address_end = "94.237.127.9"
source_address_start = "94.237.127.9"
action = "accept"
comment = "UpCloud DNS"
source_port_end = "53"
source_port_start = "53"
direction = "in"
family = "IPv4"
protocol = firewall_rule.value
source_address_end = "94.237.127.9"
source_address_start = "94.237.127.9"
}
}

dynamic "firewall_rule" {
dynamic firewall_rule {
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []

content {
action = "accept"
comment = "UpCloud DNS"
source_port_end = "53"
source_port_start = "53"
direction = "in"
family = "IPv6"
protocol = firewall_rule.value
source_address_end = "2a04:3540:53::1"
source_address_start = "2a04:3540:53::1"
action = "accept"
comment = "UpCloud DNS"
source_port_end = "53"
source_port_start = "53"
direction = "in"
family = "IPv6"
protocol = firewall_rule.value
source_address_end = "2a04:3540:53::1"
source_address_start = "2a04:3540:53::1"
}
}

dynamic "firewall_rule" {
dynamic firewall_rule {
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []

content {
action = "accept"
comment = "UpCloud DNS"
source_port_end = "53"
source_port_start = "53"
direction = "in"
family = "IPv6"
protocol = firewall_rule.value
source_address_end = "2a04:3544:53::1"
source_address_start = "2a04:3544:53::1"
action = "accept"
comment = "UpCloud DNS"
source_port_end = "53"
source_port_start = "53"
direction = "in"
family = "IPv6"
protocol = firewall_rule.value
source_address_end = "2a04:3544:53::1"
source_address_start = "2a04:3544:53::1"
}
}

dynamic "firewall_rule" {
dynamic firewall_rule {
for_each = var.firewall_default_deny_in ? ["udp"] : []

content {
action = "accept"
comment = "NTP Port"
source_port_end = "123"
source_port_start = "123"
direction = "in"
family = "IPv4"
protocol = firewall_rule.value
source_address_end = "255.255.255.255"
source_address_start = "0.0.0.0"
action = "accept"
comment = "NTP Port"
source_port_end = "123"
source_port_start = "123"
direction = "in"
family = "IPv4"
protocol = firewall_rule.value
source_address_end = "255.255.255.255"
source_address_start = "0.0.0.0"
}
}

dynamic "firewall_rule" {
dynamic firewall_rule {
for_each = var.firewall_default_deny_in ? ["udp"] : []

content {
action = "accept"
comment = "NTP Port"
source_port_end = "123"
source_port_start = "123"
direction = "in"
family = "IPv6"
protocol = firewall_rule.value
action = "accept"
comment = "NTP Port"
source_port_end = "123"
source_port_start = "123"
direction = "in"
family = "IPv6"
protocol = firewall_rule.value
}
}
@@ -535,9 +535,9 @@ resource "upcloud_loadbalancer_frontend" "lb_frontend" {

resource "upcloud_loadbalancer_static_backend_member" "lb_backend_member" {
for_each = {
for be_server in local.lb_backend_servers :
"${be_server.server_name}-lb-backend-${be_server.lb_name}" => be_server
if var.loadbalancer_enabled
for be_server in local.lb_backend_servers:
"${be_server.server_name}-lb-backend-${be_server.lb_name}" => be_server
if var.loadbalancer_enabled
}

backend = upcloud_loadbalancer_backend.lb_backend[each.value.lb_name].id
@@ -550,9 +550,9 @@ resource "upcloud_loadbalancer_static_backend_member" "lb_backend_member" {
}

resource "upcloud_server_group" "server_groups" {
for_each = var.server_groups
title = each.key
anti_affinity_policy = each.value.anti_affinity_policy
labels = {}
members = [for server in each.value.servers : merge(upcloud_server.master, upcloud_server.worker)[server].id]
for_each = var.server_groups
title = each.key
anti_affinity = each.value.anti_affinity
labels = {}
members = [for server in each.value.servers : merge(upcloud_server.master, upcloud_server.worker)[server].id]
}
@@ -3,8 +3,8 @@ output "master_ip" {
value = {
for instance in upcloud_server.master :
instance.hostname => {
"public_ip" : instance.network_interface[0].ip_address
"private_ip" : instance.network_interface[1].ip_address
"public_ip": instance.network_interface[0].ip_address
"private_ip": instance.network_interface[1].ip_address
}
}
}
@@ -13,8 +13,8 @@ output "worker_ip" {
value = {
for instance in upcloud_server.worker :
instance.hostname => {
"public_ip" : instance.network_interface[0].ip_address
"private_ip" : instance.network_interface[1].ip_address
"public_ip": instance.network_interface[0].ip_address
"private_ip": instance.network_interface[1].ip_address
}
}
}
@@ -15,11 +15,11 @@ variable "private_network_cidr" {}
variable "machines" {
description = "Cluster machines"
type = map(object({
node_type = string
plan = string
cpu = string
mem = string
disk_size = number
node_type = string
plan = string
cpu = string
mem = string
disk_size = number
additional_disks = map(object({
size = number
tier = string
@@ -99,7 +99,7 @@ variable "server_groups" {
description = "Server groups"

type = map(object({
anti_affinity_policy = string
servers = list(string)
anti_affinity = bool
servers = list(string)
}))
}
@@ -2,8 +2,8 @@
terraform {
required_providers {
upcloud = {
source = "UpCloudLtd/upcloud"
version = "~>2.12.0"
source = "UpCloudLtd/upcloud"
version = "~>2.7.1"
}
}
required_version = ">= 0.13"
@@ -18,7 +18,7 @@ ssh_public_keys = [

# check list of available plan https://developers.upcloud.com/1.3/7-plans/
machines = {
"control-plane-0" : {
"master-0" : {
"node_type" : "master",
# plan to use instead of custom cpu/mem
"plan" : null,
@@ -28,7 +28,7 @@ machines = {
"mem" : "4096"
# The size of the storage in GB
"disk_size" : 250
"additional_disks" : {}
"additional_disks": {}
},
"worker-0" : {
"node_type" : "worker",
@@ -40,7 +40,7 @@ machines = {
"mem" : "4096"
# The size of the storage in GB
"disk_size" : 250
"additional_disks" : {
"additional_disks": {
# "some-disk-name-1": {
# "size": 100,
# "tier": "maxiops",
@@ -61,7 +61,7 @@ machines = {
"mem" : "4096"
# The size of the storage in GB
"disk_size" : 250
"additional_disks" : {
"additional_disks": {
# "some-disk-name-1": {
# "size": 100,
# "tier": "maxiops",
@@ -82,7 +82,7 @@ machines = {
"mem" : "4096"
# The size of the storage in GB
"disk_size" : 250
"additional_disks" : {
"additional_disks": {
# "some-disk-name-1": {
# "size": 100,
# "tier": "maxiops",
@@ -118,7 +118,7 @@ master_allowed_ports = []
worker_allowed_ports = []

loadbalancer_enabled = false
loadbalancer_plan = "development"
loadbalancer_plan = "development"
loadbalancers = {
# "http" : {
#   "port" : 80,
@@ -134,9 +134,9 @@ loadbalancers = {
server_groups = {
# "control-plane" = {
#   servers = [
#     "control-plane-0"
#     "master-0"
#   ]
#   anti_affinity_policy = "strict"
#   anti_affinity = true
# },
# "workers" = {
#   servers = [
@@ -144,6 +144,6 @@ server_groups = {
#     "worker-1",
#     "worker-2"
#   ]
#   anti_affinity_policy = "yes"
#   anti_affinity = true
# }
}
@@ -136,8 +136,8 @@ variable "server_groups" {
description = "Server groups"

type = map(object({
anti_affinity_policy = string
servers = list(string)
anti_affinity = bool
servers = list(string)
}))

default = {}

@@ -3,7 +3,7 @@ terraform {
required_providers {
upcloud = {
source = "UpCloudLtd/upcloud"
version = "~>2.12.0"
version = "~>2.7.1"
}
}
required_version = ">= 0.13"

@@ -13,7 +13,6 @@
* CNI
  * [Calico](docs/calico.md)
  * [Flannel](docs/flannel.md)
  * [Cilium](docs/cilium.md)
  * [Kube Router](docs/kube-router.md)
  * [Kube OVN](docs/kube-ovn.md)
  * [Weave](docs/weave.md)
@@ -30,6 +29,7 @@
  * [Equinix Metal](/docs/equinix-metal.md)
  * [vSphere](/docs/vsphere.md)
* [Operating Systems](docs/bootstrap-os.md)
  * [Debian](docs/debian.md)
  * [Flatcar Container Linux](docs/flatcar.md)
  * [Fedora CoreOS](docs/fcos.md)
  * [OpenSUSE](docs/opensuse.md)

@@ -32,7 +32,7 @@ Based on the table below and the available python version for your ansible host

| Ansible Version | Python Version |
|-----------------|----------------|
| >= 2.15.5       | 3.9-3.11       |
| 2.14            | 3.9-3.11       |

## Inventory

@@ -15,7 +15,7 @@ Kubespray can be installed as an [Ansible collection](https://docs.ansible.com/a
collections:
  - name: https://github.com/kubernetes-sigs/kubespray
    type: git
    version: master # use the appropriate tag or branch for the version you need
    version: v2.22.1
```
2. Install your collection
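A minimal sketch of this step, assuming the requirements file above was saved as `requirements.yml` (this is the standard `ansible-galaxy` invocation):

```ShellSession
ansible-galaxy collection install -r requirements.yml
```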
@@ -7,6 +7,10 @@ Kubespray supports multiple ansible versions but only the default (5.x) gets wid

## CentOS 8

CentOS 8 / Oracle Linux 8,9 / AlmaLinux 8,9 / Rocky Linux 8,9 ship only with iptables-nft (i.e. without iptables-legacy, similar to RHEL 8).
The only tested configuration for now is using the Calico CNI.
You need to add `calico_iptables_backend: "NFT"` to your configuration.

If you have containers that are using iptables in the host network namespace (`hostNetwork=true`),
you need to ensure they are using iptables-nft.
An example of how k8s does the autodetection can be found [in this PR](https://github.com/kubernetes/kubernetes/pull/82966)
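As a minimal sketch, the setting can live in any group_vars file that applies to the cluster (the path below is illustrative):

```yaml
# inventory/mycluster/group_vars/k8s_cluster/k8s-net-calico.yml (illustrative path)
calico_iptables_backend: "NFT"
```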
@@ -11,7 +11,7 @@ amazon | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: |
debian10 | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: |
debian11 | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
debian12 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
debian12 | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora37 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: |
fedora38 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |

@@ -44,8 +44,6 @@ containerd_registries_mirrors:
image_command_tool: crictl
```

The `containerd_registries` and `containerd_insecure_registries` configs are deprecated.

### Containerd Runtimes

Containerd supports multiple runtime configurations that can be used with

@@ -42,22 +42,6 @@ crio_registries:

[CRI-O]: https://cri-o.io/

The following is a method to enable insecure registries.

```yaml
crio_insecure_registries:
  - 10.0.0.2:5000
```

And you can configure authentication for these registries after `crio_insecure_registries`.

```yaml
crio_registry_auth:
  - registry: 10.0.0.2:5000
    username: user
    password: pass
```

## Note about user namespaces

CRI-O has support for user namespaces. This feature is optional and can be enabled by setting the following two variables.
docs/debian.md (new file, 41 lines)
@@ -0,0 +1,41 @@
# Debian Jessie

Debian Jessie installation notes:

- Add

  ```ini
  GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"
  ```

  to `/etc/default/grub`. Then update with

  ```ShellSession
  sudo update-grub
  sudo update-grub2
  sudo reboot
  ```

- Add the [backports](https://backports.debian.org/Instructions/) which contain Systemd 2.30 and update Systemd.

  ```ShellSession
  apt-get -t jessie-backports install systemd
  ```

  (Necessary because the default Systemd version (2.15) does not support the "Delegate" directive in service files)

- Add the Ansible repository and install Ansible to get a proper version

  ```ShellSession
  sudo add-apt-repository ppa:ansible/ansible
  sudo apt-get update
  sudo apt-get install ansible
  ```

- Install Jinja2 and Python-Netaddr

  ```ShellSession
  sudo apt-get install python-jinja2=2.8-1~bpo8+1 python-netaddr
  ```

Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment)
@@ -97,9 +97,3 @@ Adding extra options to pass to the docker daemon:
## This string should be exactly as you wish it to appear.
docker_options: ""
```

For Debian based distributions, set the path to store the GPG key to avoid using the default one used by the `apt_key` module (e.g. /etc/apt/trusted.gpg):

```yaml
docker_repo_key_keyring: /etc/apt/trusted.gpg.d/docker.gpg
```
@@ -54,11 +54,6 @@ kube_apiserver_enable_admission_plugins:
  - PodNodeSelector
  - PodSecurity
kube_apiserver_admission_control_config_file: true
# Creates config file for PodNodeSelector
# kube_apiserver_admission_plugins_needs_configuration: [PodNodeSelector]
# Define the default node selector, by default all the workloads will be scheduled on nodes
# with label network=srv1
# kube_apiserver_admission_plugins_podnodeselector_default_node_selector: "network=srv1"
# EventRateLimit plugin configuration
kube_apiserver_admission_event_rate_limits:
  limit_1:
@@ -120,7 +115,7 @@ kube_pod_security_default_enforce: restricted
Let's take a deep look at the resulting **kubernetes** configuration:

* The `anonymous-auth` (on `kube-apiserver`) is set to `true` by default. This is fine, because it is considered safe if you enable `RBAC` for the `authorization-mode`.
* The `enable-admission-plugins` includes `PodSecurity` (for more details, please take a look here: <https://kubernetes.io/docs/concepts/security/pod-security-admission/>). Then, we set the `EventRateLimit` plugin, providing additional configuration files (that are automatically created under the hood and mounted inside the `kube-apiserver` container) to make it work.
* The `enable-admission-plugins` does not include the `PodSecurityPolicy` admission plugin. This is because it is going to be definitely removed from **kubernetes** `v1.25`. For this reason we decided to use the newer `PodSecurity` (for more details, please take a look here: <https://kubernetes.io/docs/concepts/security/pod-security-admission/>). Then, we set the `EventRateLimit` plugin, providing additional configuration files (that are automatically created under the hood and mounted inside the `kube-apiserver` container) to make it work.
* The `encryption-provider-config` provides encryption at rest. This means that the `kube-apiserver` encrypts data before it is stored, so the data is completely unreadable from `etcd` (in case an attacker is able to exploit it).
* The `rotateCertificates` in `KubeletConfiguration` is set to `true` along with `serverTLSBootstrap`. This can be used as an alternative to the `tlsCertFile` and `tlsPrivateKeyFile` parameters. Additionally it automatically generates certificates by itself. By default the CSRs are approved automatically via [kubelet-csr-approver](https://github.com/postfinance/kubelet-csr-approver). You can customize approval configuration by modifying Helm values via `kubelet_csr_approver_values`.
See <https://kubernetes.io/docs/reference/access-authn-authz/kubelet-tls-bootstrapping/> for more information on the subject.
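For reference, the two kubelet fields mentioned above map onto the upstream `KubeletConfiguration` API roughly as follows; this is a minimal sketch of the upstream schema, not the exact file Kubespray renders:

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# Request serving certificates from the cluster CA via CSRs ...
serverTLSBootstrap: true
# ... and keep rotating certificates before they expire
rotateCertificates: true
```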
@@ -1,6 +1,6 @@
# AWS ALB Ingress Controller

**NOTE:** The current image version is `v1.1.9`. Please file any issues you find and note the version used.
**NOTE:** The current image version is `v1.1.6`. Please file any issues you find and note the version used.

The AWS ALB Ingress Controller satisfies Kubernetes [ingress resources](https://kubernetes.io/docs/concepts/services-networking/ingress/) by provisioning [Application Load Balancers](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/introduction.html).

@@ -70,9 +70,3 @@ If using [control plane load-balancing](https://kube-vip.io/docs/about/architect
```yaml
kube_vip_lb_enable: true
```

In addition, the [load-balancing method](https://kube-vip.io/docs/installation/flags/#environment-variables) could be changed:

```yaml
kube_vip_lb_fwdmethod: masquerade
```

@@ -29,6 +29,10 @@ metallb_config:
    nodeselector:
      kubernetes.io/os: linux
    tolerations:
      - key: "node-role.kubernetes.io/master"
        operator: "Equal"
        value: ""
        effect: "NoSchedule"
      - key: "node-role.kubernetes.io/control-plane"
        operator: "Equal"
        value: ""
@@ -69,6 +73,7 @@ metallb_config:
    primary:
      ip_range:
        - 192.0.1.0-192.0.1.254
      auto_assign: true

    pool1:
      ip_range:
@@ -77,8 +82,8 @@ metallb_config:

    pool2:
      ip_range:
        - 192.0.3.0/24
      avoid_buggy_ips: true # When set to true, .0 and .255 addresses will be avoided.
        - 192.0.2.2-192.0.2.2
      auto_assign: false
```

## Layer2 Mode
@@ -95,7 +95,7 @@ If you use the settings like the one above, you'll need to define in your invent

* `registry_host`: Container image registry. If you _don't_ use the same repository path for the container images as the ones defined
in [kubesprays-defaults's role defaults](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/kubespray-defaults/defaults/main/download.yml)
in [Download's role defaults](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/download/defaults/main/main.yml)
, you need to override the `*_image_repo` variables for these container images (see the sketch below). If you want to make your life easier, use the
same repository path; then you won't have to override anything else.
* `registry_addr`: Container image registry, but only the [domain or ip]:[port] part.
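A minimal sketch of such overrides in inventory vars, assuming a hypothetical mirror at `myregistry.example.com:5000` (the two `*_image_repo` variables shown are just examples; override the ones your images actually come from):

```yaml
registry_host: "myregistry.example.com:5000"
registry_addr: "myregistry.example.com:5000"
# Needed only if the mirror does not reuse the upstream repository paths:
kube_image_repo: "{{ registry_host }}"
docker_image_repo: "{{ registry_host }}/docker.io"
```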
@@ -29,6 +29,10 @@ If the RHEL 7/8 hosts are already registered to a valid Red Hat support subscrip

## RHEL 8

RHEL 8 ships only with iptables-nft (i.e. without iptables-legacy).
The only tested configuration for now is using the Calico CNI.
You need to use K8s 1.17+ and add `calico_iptables_backend: "NFT"` to your configuration.

If you have containers that are using iptables in the host network namespace (`hostNetwork=true`),
you need to ensure they are using iptables-nft.
An example of how k8s does the autodetection can be found [in this PR](https://github.com/kubernetes/kubernetes/pull/82966)
@@ -1,6 +1,6 @@
# Node Layouts

There are six node layout types: `default`, `separate`, `ha`, `scale`, `all-in-one`, and `node-etcd-client`.
There are four node layout types: `default`, `separate`, `ha`, and `scale`.

`default` is a non-HA two node setup with one separate `kube_node`
and the `etcd` group merged with the `kube_control_plane`.
@@ -16,11 +16,6 @@ in the Ansible inventory. This helps test TLS certificate generation at scale
to prevent regressions and profile certain long-running tasks. These nodes are
never actually deployed, but certificates are generated for them.

The `all-in-one` layout uses a single node with `kube_control_plane`, `etcd` and `kube_node` merged.

The `node-etcd-client` layout consists of a 4-node cluster, all of them in `kube_node`, the first 3 in `etcd` and only one in `kube_control_plane`.
This is necessary to test setups requiring that nodes are etcd clients (use of cilium as `network_plugin`, for instance); a sketch of such an inventory follows.
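A sketch of a matching inventory, with four hypothetical hosts named node1..node4 (which node carries the control plane is an assumption here):

```ini
[kube_control_plane]
node1

[etcd]
node1
node2
node3

[kube_node]
node1
node2
node3
node4
```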
Note, the canal network plugin deploys flannel as well, plus the calico policy controller.

## Test cases
@@ -186,8 +186,6 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m
* *containerd_additional_runtimes* - Sets the additional Containerd runtimes used by the Kubernetes CRI plugin.
[Default config](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/container-engine/containerd/defaults/main.yml) can be overridden in inventory vars.

* *crio_criu_support_enabled* - When set to `true`, enables container checkpoint/restore in CRI-O. It's required to install [CRIU](https://criu.org/Installation) on the host when dumping/restoring checkpoints. And it's recommended to enable the feature gate `ContainerCheckpoint` so that the kubelet gets a higher level API to simplify the operations (**Note**: It's still in an experimental stage, just for container analytics so far). You can follow the [documentation](https://kubernetes.io/blog/2022/12/05/forensic-container-checkpointing-alpha/).

* *http_proxy/https_proxy/no_proxy/no_proxy_exclude_workers/additional_no_proxy* - Proxy variables for deploying behind a
proxy. Note that no_proxy defaults to all internal cluster IPs and hostnames
that correspond to each node.
@@ -254,6 +252,8 @@ node_taints:
- "node.example.com/external=true:NoSchedule"
```

* *podsecuritypolicy_enabled* - When set to `true`, enables the PodSecurityPolicy admission controller and defines two policies `privileged` (applying to all resources in the `kube-system` namespace and kubelet) and `restricted` (applying to all other namespaces).
Addons deployed in the kube-system namespace are handled.
* *kubernetes_audit* - When set to `true`, enables auditing.
The auditing parameters can be tuned via the following variables (whose default values are shown below); see the sketch after this list:
  * `audit_log_path`: /var/log/audit/kube-apiserver-audit.log
@@ -271,7 +271,6 @@ node_taints:
  * `audit_webhook_mode`: batch
  * `audit_webhook_batch_max_size`: 100
  * `audit_webhook_batch_max_wait`: 1s
* *kubectl_alias* - Bash alias of kubectl to interact with the Kubernetes cluster much more easily.
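As the sketch referenced above: enabling auditing in inventory vars, with the log path left at its default (shown explicitly for clarity):

```yaml
kubernetes_audit: true
audit_log_path: /var/log/audit/kube-apiserver-audit.log  # default value
```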
### Custom flags for Kube Components
@@ -12,7 +12,7 @@
  hosts: kube_control_plane[0]
  tasks:
    - name: Include kubespray-default variables
      include_vars: ../roles/kubespray-defaults/defaults/main/main.yml
      include_vars: ../roles/kubespray-defaults/defaults/main.yaml
    - name: Copy get_cinder_pvs.sh to master
      copy:
        src: get_cinder_pvs.sh

@@ -2,15 +2,13 @@
namespace: kubernetes_sigs
description: Deploy a production ready Kubernetes cluster
name: kubespray
version: 2.24.3
version: 2.22.1
readme: README.md
authors:
  - luksi1
tags:
  - infrastructure
repository: https://github.com/kubernetes-sigs/kubespray
dependencies:
  ansible.utils: '>=2.5.0'
build_ignore:
  - .github
  - '*.tar.gz'

@@ -57,7 +57,7 @@ loadbalancer_apiserver_healthcheck_port: 8081
# https_proxy: ""
# https_proxy_cert_file: ""

## Refer to roles/kubespray-defaults/defaults/main/main.yml before modifying no_proxy
## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
# no_proxy: ""

## Some problems may occur when downloading files over https proxy due to ansible bug

@@ -1,8 +1,5 @@
# Registries defined within cri-o.
# crio_insecure_registries:
#   - 10.0.0.2:5000

# Auth config for the registries
# crio_registry_auth:
#   - registry: 10.0.0.2:5000
#     username: user

@@ -24,12 +24,3 @@
### ETCD: disable peer client cert authentication.
# This affects ETCD_PEER_CLIENT_CERT_AUTH variable
# etcd_peer_client_auth: true

## Enable distributed tracing
## To enable this experimental feature, set the etcd_experimental_enable_distributed_tracing: true, along with the
## etcd_experimental_distributed_tracing_sample_rate to choose how many samples to collect per million spans,
## the default sampling rate is 0 https://etcd.io/docs/v3.5/op-guide/monitoring/#distributed-tracing
# etcd_experimental_enable_distributed_tracing: false
# etcd_experimental_distributed_tracing_sample_rate: 100
# etcd_experimental_distributed_tracing_address: "localhost:4317"
# etcd_experimental_distributed_tracing_service_name: etcd
@@ -99,13 +99,14 @@ rbd_provisioner_enabled: false
# Nginx ingress controller deployment
ingress_nginx_enabled: false
# ingress_nginx_host_network: false
# ingress_nginx_service_type: LoadBalancer
# ingress_nginx_service_nodeport_http: 30080
# ingress_nginx_service_nodeport_https: 30081
ingress_publish_status_address: ""
# ingress_nginx_nodeselector:
#   kubernetes.io/os: "linux"
# ingress_nginx_tolerations:
#   - key: "node-role.kubernetes.io/master"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
#   - key: "node-role.kubernetes.io/control-plane"
#     operator: "Equal"
#     value: ""
@@ -139,6 +140,8 @@ ingress_alb_enabled: false
cert_manager_enabled: false
# cert_manager_namespace: "cert-manager"
# cert_manager_tolerations:
#   - key: node-role.kubernetes.io/master
#     effect: NoSchedule
#   - key: node-role.kubernetes.io/control-plane
#     effect: NoSchedule
# cert_manager_affinity:
@@ -173,27 +176,33 @@ cert_manager_enabled: false
# MetalLB deployment
metallb_enabled: false
metallb_speaker_enabled: "{{ metallb_enabled }}"
# metallb_speaker_nodeselector:
#   kubernetes.io/os: "linux"
# metallb_controller_nodeselector:
#   kubernetes.io/os: "linux"
# metallb_speaker_tolerations:
#   - key: "node-role.kubernetes.io/master"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
#   - key: "node-role.kubernetes.io/control-plane"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
# metallb_controller_tolerations:
#   - key: "node-role.kubernetes.io/master"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
#   - key: "node-role.kubernetes.io/control-plane"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
# metallb_version: v0.13.9
# metallb_protocol: "layer2"
# metallb_port: "7472"
# metallb_memberlist_port: "7946"
# metallb_config:
#   speaker:
#     nodeselector:
#       kubernetes.io/os: "linux"
#     tollerations:
#       - key: "node-role.kubernetes.io/control-plane"
#         operator: "Equal"
#         value: ""
#         effect: "NoSchedule"
#   controller:
#     nodeselector:
#       kubernetes.io/os: "linux"
#     tolerations:
#       - key: "node-role.kubernetes.io/control-plane"
#         operator: "Equal"
#         value: ""
#         effect: "NoSchedule"
#   address_pools:
#     primary:
#       ip_range:
@@ -235,7 +244,7 @@ metallb_speaker_enabled: "{{ metallb_enabled }}"
#   - pool2

argocd_enabled: false
# argocd_version: v2.8.4
# argocd_version: v2.8.0
# argocd_namespace: argocd
# Default password:
#   - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli
@@ -250,14 +259,3 @@ argocd_enabled: false
# The plugin manager for kubectl
krew_enabled: false
krew_root_dir: "/usr/local/krew"

# Kube VIP
kube_vip_enabled: false
# kube_vip_arp_enabled: true
# kube_vip_controlplane_enabled: true
# kube_vip_address: 192.168.56.120
# loadbalancer_apiserver:
#   address: "{{ kube_vip_address }}"
#   port: 6443
# kube_vip_interface: eth0
# kube_vip_services_enabled: false

@@ -17,7 +17,7 @@ kube_token_dir: "{{ kube_config_dir }}/tokens"
kube_api_anonymous_auth: true

## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.28.10
kube_version: v1.27.7

# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
@@ -243,6 +243,15 @@ kubernetes_audit: false
# kubelet_config_dir:
default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"

# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
podsecuritypolicy_enabled: false

# Custom PodSecurityPolicySpec for restricted policy
# podsecuritypolicy_restricted_spec: {}

# Custom PodSecurityPolicySpec for privileged policy
# podsecuritypolicy_privileged_spec: {}

# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
# kubeconfig_localhost: false
# Use ansible_host as external api ip when copying over kubeconfig.
@@ -1,51 +0,0 @@
---
# custom_cni network plugin configuration
# There are two deployment options to choose from, select one

## OPTION 1 - Static manifest files
## With this option, referred manifest file will be deployed
## as if the `kubectl apply -f` method was used with it.
#
## List of Kubernetes resource manifest files
## See tests/files/custom_cni/README.md for example
# custom_cni_manifests: []

## OPTION 1 EXAMPLE - Cilium static manifests in Kubespray tree
# custom_cni_manifests:
#   - "{{ playbook_dir }}/../tests/files/custom_cni/cilium.yaml"

## OPTION 2 - Helm chart application
## This allows the CNI backend to be deployed to Kubespray cluster
## as common Helm application.
#
## Helm release name - how the local instance of deployed chart will be named
# custom_cni_chart_release_name: ""
#
## Kubernetes namespace to deploy into
# custom_cni_chart_namespace: "kube-system"
#
## Helm repository name - how the local record of Helm repository will be named
# custom_cni_chart_repository_name: ""
#
## Helm repository URL
# custom_cni_chart_repository_url: ""
#
## Helm chart reference - path to the chart in the repository
# custom_cni_chart_ref: ""
#
## Helm chart version
# custom_cni_chart_version: ""
#
## Custom Helm values to be used for deployment
# custom_cni_chart_values: {}

## OPTION 2 EXAMPLE - Cilium deployed from official public Helm chart
# custom_cni_chart_namespace: kube-system
# custom_cni_chart_release_name: cilium
# custom_cni_chart_repository_name: cilium
# custom_cni_chart_repository_url: https://helm.cilium.io
# custom_cni_chart_ref: cilium/cilium
# custom_cni_chart_version: 1.14.3
# custom_cni_chart_values:
#   cluster:
#     name: "cilium-demo"
@@ -1,10 +1,4 @@
# See roles/network_plugin/kube-router/defaults/main.yml

# Kube router version
# Default to v2
# kube_router_version: "v2.0.0"
# Uncomment to use v1 (Deprecated)
# kube_router_version: "v1.6.0"
# See roles/network_plugin/kube-router//defaults/main.yml

# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP
# kube_router_run_router: true
@@ -25,9 +19,6 @@
# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers.
# kube_router_advertise_loadbalancer_ip: false

# Enables BGP graceful restarts
# kube_router_bgp_graceful_restart: true

# Adjust manifest of kube-router daemonset template with DSR needed changes
# kube_router_enable_dsr: false
|
||||
---
|
||||
requires_ansible: '>=2.15.5'
|
||||
requires_ansible: '>=2.14.0'
|
||||
|
||||
@@ -4,7 +4,7 @@ FROM ubuntu:jammy-20230308
|
||||
# Pip needs this as well at the moment to install ansible
|
||||
# (and potentially other packages)
|
||||
# See: https://github.com/pypa/pip/issues/10219
|
||||
ENV VAGRANT_VERSION=2.4.1 \
|
||||
ENV VAGRANT_VERSION=2.3.4 \
|
||||
VAGRANT_DEFAULT_PROVIDER=libvirt \
|
||||
VAGRANT_ANSIBLE_TAGS=facts \
|
||||
LANG=C.UTF-8 \
|
||||
@@ -30,9 +30,6 @@ RUN apt update -q \
|
||||
software-properties-common \
|
||||
unzip \
|
||||
libvirt-clients \
|
||||
qemu-utils \
|
||||
qemu-kvm \
|
||||
dnsmasq \
|
||||
&& curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
|
||||
&& add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" \
|
||||
&& apt update -q \
|
||||
@@ -43,12 +40,11 @@ WORKDIR /kubespray
|
||||
|
||||
RUN --mount=type=bind,target=./requirements.txt,src=./requirements.txt \
|
||||
--mount=type=bind,target=./tests/requirements.txt,src=./tests/requirements.txt \
|
||||
--mount=type=bind,target=./roles/kubespray-defaults/defaults/main/main.yml,src=./roles/kubespray-defaults/defaults/main/main.yml \
|
||||
--mount=type=bind,target=./roles/kubespray-defaults/defaults/main.yaml,src=./roles/kubespray-defaults/defaults/main.yaml \
|
||||
update-alternatives --install /usr/bin/python python /usr/bin/python3 1 \
|
||||
&& pip install --no-compile --no-cache-dir pip -U \
|
||||
&& pip install --no-compile --no-cache-dir -r tests/requirements.txt \
|
||||
&& pip install --no-compile --no-cache-dir -r requirements.txt \
|
||||
&& KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main/main.yml) \
|
||||
&& KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
|
||||
&& curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \
|
||||
&& echo $(curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \
|
||||
&& chmod a+x /usr/local/bin/kubectl \
|
||||
|
||||
@@ -4,8 +4,8 @@
|
||||
gather_facts: false
|
||||
become: no
|
||||
vars:
|
||||
minimal_ansible_version: 2.15.5 # 2.15 versions before 2.15.5 are known to be buggy for kubespray
|
||||
maximal_ansible_version: 2.17.0
|
||||
minimal_ansible_version: 2.14.0
|
||||
maximal_ansible_version: 2.15.0
|
||||
ansible_connection: local
|
||||
tags: always
|
||||
tasks:
|
||||
|
||||
@@ -1,8 +1,20 @@
|
||||
---
|
||||
- name: Common tasks for every playbooks
|
||||
import_playbook: boilerplate.yml
|
||||
- name: Check ansible version
|
||||
import_playbook: ansible_version.yml
|
||||
|
||||
- name: Ensure compatibility with old groups
|
||||
import_playbook: legacy_groups.yml
|
||||
|
||||
- name: Install bastion ssh config
|
||||
hosts: bastion[0]
|
||||
gather_facts: False
|
||||
environment: "{{ proxy_disable_env }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults }
|
||||
- { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
|
||||
|
||||
- name: Gather facts
|
||||
tags: always
|
||||
import_playbook: facts.yml
|
||||
|
||||
- name: Prepare for etcd install
|
||||
@@ -17,7 +29,35 @@
|
||||
- { role: download, tags: download, when: "not skip_downloads" }
|
||||
|
||||
- name: Install etcd
|
||||
import_playbook: install_etcd.yml
|
||||
hosts: etcd:kube_control_plane
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
environment: "{{ proxy_disable_env }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults }
|
||||
- role: etcd
|
||||
tags: etcd
|
||||
vars:
|
||||
etcd_cluster_setup: true
|
||||
etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
|
||||
when: etcd_deployment_type != "kubeadm"
|
||||
|
||||
- name: Install etcd certs on nodes if required
|
||||
hosts: k8s_cluster
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
environment: "{{ proxy_disable_env }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults }
|
||||
- role: etcd
|
||||
tags: etcd
|
||||
vars:
|
||||
etcd_cluster_setup: false
|
||||
etcd_events_cluster_setup: false
|
||||
when:
|
||||
- etcd_deployment_type != "kubeadm"
|
||||
- kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
|
||||
- kube_network_plugin != "calico" or calico_datastore == "etcd"
|
||||
|
||||
- name: Install Kubernetes nodes
|
||||
hosts: k8s_cluster
|
||||
|
||||
@@ -1,29 +0,0 @@
|
||||
---
|
||||
- name: Add worker nodes to the etcd play if needed
|
||||
hosts: kube_node
|
||||
roles:
|
||||
- { role: kubespray-defaults }
|
||||
tasks:
|
||||
- name: Check if nodes needs etcd client certs (depends on network_plugin)
|
||||
group_by:
|
||||
key: "_kubespray_needs_etcd"
|
||||
when:
|
||||
- kube_network_plugin in ["flannel", "canal", "cilium"] or
|
||||
(cilium_deploy_additionally | default(false)) or
|
||||
(kube_network_plugin == "calico" and calico_datastore == "etcd")
|
||||
- etcd_deployment_type != "kubeadm"
|
||||
tags: etcd
|
||||
|
||||
- name: Install etcd
|
||||
hosts: etcd:kube_control_plane:_kubespray_needs_etcd
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
environment: "{{ proxy_disable_env }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults }
|
||||
- role: etcd
|
||||
tags: etcd
|
||||
vars:
|
||||
etcd_cluster_setup: true
|
||||
etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
|
||||
when: etcd_deployment_type != "kubeadm"
|
||||
@@ -1,8 +1,5 @@
|
||||
---
|
||||
- name: Check ansible version
|
||||
import_playbook: ansible_version.yml
|
||||
|
||||
# These are inventory compatibility tasks to ensure we keep compatibility with old style group names
|
||||
# This is an inventory compatibility playbook to ensure we keep compatibility with old style group names
|
||||
|
||||
- name: Add kube-master nodes to kube_control_plane
|
||||
hosts: kube-master
|
||||
@@ -48,11 +45,3 @@
|
||||
- name: Add nodes to no-floating group
|
||||
group_by:
|
||||
key: 'no_floating'
|
||||
|
||||
- name: Install bastion ssh config
|
||||
hosts: bastion[0]
|
||||
gather_facts: False
|
||||
environment: "{{ proxy_disable_env }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults }
|
||||
- { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
|
||||
@@ -1,6 +1,17 @@
|
||||
---
|
||||
- name: Common tasks for every playbooks
|
||||
import_playbook: boilerplate.yml
|
||||
- name: Check ansible version
|
||||
import_playbook: ansible_version.yml
|
||||
|
||||
- name: Ensure compatibility with old groups
|
||||
import_playbook: legacy_groups.yml
|
||||
|
||||
- name: Install bastion ssh config
|
||||
hosts: bastion[0]
|
||||
gather_facts: False
|
||||
environment: "{{ proxy_disable_env }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
|
||||
|
||||
- name: Recover etcd
|
||||
hosts: etcd[0]
|
||||
|
||||
@@ -1,6 +1,17 @@
|
||||
---
|
||||
- name: Common tasks for every playbooks
|
||||
import_playbook: boilerplate.yml
|
||||
- name: Check ansible version
|
||||
import_playbook: ansible_version.yml
|
||||
|
||||
- name: Ensure compatibility with old groups
|
||||
import_playbook: legacy_groups.yml
|
||||
|
||||
- name: Install bastion ssh config
|
||||
hosts: bastion[0]
|
||||
gather_facts: False
|
||||
environment: "{{ proxy_disable_env }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults }
|
||||
- { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
|
||||
|
||||
- name: Confirm node removal
|
||||
hosts: "{{ node | default('etcd:k8s_cluster:calico_rr') }}"
|
||||
|
||||
@@ -1,6 +1,17 @@
|
||||
---
|
||||
- name: Common tasks for every playbooks
|
||||
import_playbook: boilerplate.yml
|
||||
- name: Check ansible version
|
||||
import_playbook: ansible_version.yml
|
||||
|
||||
- name: Ensure compatibility with old groups
|
||||
import_playbook: legacy_groups.yml
|
||||
|
||||
- name: Install bastion ssh config
|
||||
hosts: bastion[0]
|
||||
gather_facts: False
|
||||
environment: "{{ proxy_disable_env }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
|
||||
|
||||
- name: Gather facts
|
||||
import_playbook: facts.yml
|
||||
|
||||
@@ -1,8 +1,20 @@
|
||||
---
|
||||
- name: Common tasks for every playbooks
|
||||
import_playbook: boilerplate.yml
|
||||
- name: Check ansible version
|
||||
import_playbook: ansible_version.yml
|
||||
|
||||
- name: Ensure compatibility with old groups
|
||||
import_playbook: legacy_groups.yml
|
||||
|
||||
- name: Install bastion ssh config
|
||||
hosts: bastion[0]
|
||||
gather_facts: False
|
||||
environment: "{{ proxy_disable_env }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults }
|
||||
- { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
|
||||
|
||||
- name: Gather facts
|
||||
tags: always
|
||||
import_playbook: facts.yml
|
||||
|
||||
- name: Generate the etcd certificates beforehand
|
||||
|
||||
@@ -1,8 +1,20 @@
|
||||
---
|
||||
- name: Common tasks for every playbooks
|
||||
import_playbook: boilerplate.yml
|
||||
- name: Check ansible version
|
||||
import_playbook: ansible_version.yml
|
||||
|
||||
- name: Ensure compatibility with old groups
|
||||
import_playbook: legacy_groups.yml
|
||||
|
||||
- name: Install bastion ssh config
|
||||
hosts: bastion[0]
|
||||
gather_facts: False
|
||||
environment: "{{ proxy_disable_env }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults }
|
||||
- { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
|
||||
|
||||
- name: Gather facts
|
||||
tags: always
|
||||
import_playbook: facts.yml
|
||||
|
||||
- name: Download images to ansible host cache via first kube_control_plane node
|
||||
@@ -36,7 +48,35 @@
|
||||
- { role: container-engine, tags: "container-engine", when: deploy_container_engine }
|
||||
|
||||
- name: Install etcd
|
||||
import_playbook: install_etcd.yml
|
||||
hosts: etcd:kube_control_plane
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
environment: "{{ proxy_disable_env }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults }
|
||||
- role: etcd
|
||||
tags: etcd
|
||||
vars:
|
||||
etcd_cluster_setup: true
|
||||
etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
|
||||
when: etcd_deployment_type != "kubeadm"
|
||||
|
||||
- name: Install etcd certs on nodes if required
|
||||
hosts: k8s_cluster
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
environment: "{{ proxy_disable_env }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults }
|
||||
- role: etcd
|
||||
tags: etcd
|
||||
vars:
|
||||
etcd_cluster_setup: false
|
||||
etcd_events_cluster_setup: false
|
||||
when:
|
||||
- etcd_deployment_type != "kubeadm"
|
||||
- kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
|
||||
- kube_network_plugin != "calico" or calico_datastore == "etcd"
|
||||
|
||||
- name: Handle upgrades to master components first to maintain backwards compat.
|
||||
gather_facts: False
|
||||
|
||||
@@ -1,9 +1,9 @@
ansible==8.5.0
cryptography==41.0.4
ansible==7.6.0
cryptography==41.0.1
jinja2==3.1.2
jmespath==1.0.1
MarkupSafe==2.1.3
netaddr==0.9.0
netaddr==0.8.0
pbr==5.11.1
ruamel.yaml==0.17.35
ruamel.yaml.clib==0.2.8
ruamel.yaml==0.17.31
ruamel.yaml.clib==0.2.7
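This hunk pins the Ansible toolchain, bumping ansible 7.6.0 to 8.5.0, cryptography 41.0.1 to 41.0.4, netaddr 0.8.0 to 0.9.0, and the ruamel.yaml pair. As a minimal sketch of consuming the file from a playbook (not part of the diff; the checkout path and virtualenv location are both hypothetical):

- name: Install pinned Kubespray Python dependencies (illustrative sketch)
  ansible.builtin.pip:
    requirements: /opt/kubespray/requirements.txt  # hypothetical checkout path
    virtualenv: /opt/kubespray-venv                # hypothetical virtualenv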
43 roles/adduser/molecule/default/tests/test_default.py Normal file
@@ -0,0 +1,43 @@
import os
from pathlib import Path

import testinfra.utils.ansible_runner
import yaml
from ansible.cli.playbook import PlaybookCLI
from ansible.playbook import Playbook

testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ["MOLECULE_INVENTORY_FILE"]
).get_hosts("all")


def read_playbook(playbook):
    cli_args = [os.path.realpath(playbook), testinfra_hosts]
    cli = PlaybookCLI(cli_args)
    cli.parse()
    loader, inventory, variable_manager = cli._play_prereqs()

    pb = Playbook.load(cli.args[0], variable_manager, loader)

    for play in pb.get_plays():
        yield variable_manager.get_vars(play)


def get_playbook():
    playbooks_path = Path(__file__).parent.parent
    with open(os.path.join(playbooks_path, "molecule.yml"), "r") as yamlfile:
        data = yaml.load(yamlfile, Loader=yaml.FullLoader)
        if "playbooks" in data["provisioner"].keys():
            if "converge" in data["provisioner"]["playbooks"].keys():
                return data["provisioner"]["playbooks"]["converge"]
        else:
            return os.path.join(playbooks_path, "converge.yml")


def test_user(host):
    for vars in read_playbook(get_playbook()):
        assert host.user(vars["user"]["name"]).exists
        if "group" in vars["user"].keys():
            assert host.group(vars["user"]["group"]).exists
        else:
            assert host.group(vars["user"]["name"]).exists
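The get_playbook() helper above resolves the converge playbook from molecule.yml, preferring provisioner.playbooks.converge and falling back to converge.yml next to the scenario. A minimal molecule.yml fragment that would exercise the first branch (scenario contents otherwise omitted, values illustrative):

provisioner:
  name: ansible
  playbooks:
    converge: converge.yml  # the key get_playbook() looks up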
@@ -0,0 +1,40 @@
import os
from pathlib import Path

import testinfra.utils.ansible_runner
import yaml
from ansible.cli.playbook import PlaybookCLI
from ansible.playbook import Playbook

testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ["MOLECULE_INVENTORY_FILE"]
).get_hosts("all")


def read_playbook(playbook):
    cli_args = [os.path.realpath(playbook), testinfra_hosts]
    cli = PlaybookCLI(cli_args)
    cli.parse()
    loader, inventory, variable_manager = cli._play_prereqs()

    pb = Playbook.load(cli.args[0], variable_manager, loader)

    for play in pb.get_plays():
        yield variable_manager.get_vars(play)


def get_playbook():
    playbooks_path = Path(__file__).parent.parent
    with open(os.path.join(playbooks_path, "molecule.yml"), "r") as yamlfile:
        data = yaml.load(yamlfile, Loader=yaml.FullLoader)
        if "playbooks" in data["provisioner"].keys():
            if "converge" in data["provisioner"]["playbooks"].keys():
                return data["provisioner"]["playbooks"]["converge"]
        else:
            return os.path.join(playbooks_path, "converge.yml")


def test_ssh_config(host):
    for vars in read_playbook(get_playbook()):
        assert host.file(vars["ssh_bastion_confing__name"]).exists
        assert host.file(vars["ssh_bastion_confing__name"]).is_file
@@ -80,40 +80,13 @@
    - { option: "name", value: "CentOS-{{ ansible_distribution_major_version }} - Extras" }
    - { option: "enabled", value: "1" }
    - { option: "gpgcheck", value: "0" }
    - { option: "baseurl", value: "http://vault.centos.org/{{ 'altarch' if (ansible_distribution_major_version | int) <= 7 and ansible_architecture == 'aarch64' else 'centos' }}/{{ ansible_distribution_major_version }}/extras/$basearch/{% if ansible_distribution_major_version | int > 7 %}os/{% endif %}" }
    - { option: "baseurl", value: "http://mirror.centos.org/{{ 'altarch' if (ansible_distribution_major_version | int) <= 7 and ansible_architecture == 'aarch64' else 'centos' }}/{{ ansible_distribution_major_version }}/extras/$basearch/{% if ansible_distribution_major_version | int > 7 %}os/{% endif %}" }
  when:
    - use_oracle_public_repo | default(true)
    - '''ID="ol"'' in os_release.stdout_lines'
    - (ansible_distribution_version | float) >= 7.6
    - (ansible_distribution_version | float) < 9

# CentOS 7 EOL at July 1, 2024.
- name: Check CentOS-Base.repo exists for CentOS 7
  stat:
    path: /etc/yum.repos.d/CentOS-Base.repo
  register: centos_base_repo_stat
  when:
    - ansible_distribution_major_version == "7"

# CentOS 7 EOL at July 1, 2024.
- name: Update CentOS 7 CentOS-Base.repo
  when:
    - ansible_distribution_major_version == "7"
    - centos_base_repo_stat.stat.exists
  become: true
  block:
    - name: Disable CentOS 7 mirrorlist in CentOS-Base.repo
      replace:
        path: "{{ centos_base_repo_stat.stat.path }}"
        regexp: '^mirrorlist='
        replace: '#mirrorlist='

    - name: Update CentOS 7 baseurl in CentOS-Base.repo
      replace:
        path: "{{ centos_base_repo_stat.stat.path }}"
        regexp: '^#baseurl=http:\/\/mirror.centos.org'
        replace: 'baseurl=http:\/\/vault.centos.org'

# CentOS ships with python installed

- name: Check presence of fastestmirror.conf
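The two replace tasks switch an end-of-life CentOS 7 host from the retired mirror network to the vault archive. A before/after sketch of the lines they touch in a stock CentOS-Base.repo (contents abbreviated; exact URLs depend on the installed repo file):

# before
#   mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os
#   #baseurl=http://mirror.centos.org/centos/$releasever/os/$basearch/
# after
#   #mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os
#   baseurl=http://vault.centos.org/centos/$releasever/os/$basearch/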
@@ -18,7 +18,6 @@ containerd_runc_runtime:
  base_runtime_spec: cri-base.json
  options:
    systemdCgroup: "{{ containerd_use_systemd_cgroup | ternary('true', 'false') }}"
    binaryName: "{{ bin_dir }}/runc"

containerd_additional_runtimes: []
# Example for Kata Containers as additional runtime:
@@ -48,6 +47,9 @@ containerd_metrics_address: ""

containerd_metrics_grpc_histogram: false

containerd_registries:
  "docker.io": "https://registry-1.docker.io"

containerd_registries_mirrors:
  - prefix: docker.io
    mirrors:
@@ -102,6 +104,3 @@ containerd_supported_distributions:
  - "UnionTech"
  - "UniontechOS"
  - "openEuler"

# Enable container device interface
enable_cdi: false
@@ -1,4 +1,10 @@
---
- name: Restart containerd
  command: /bin/true
  notify:
    - Containerd | restart containerd
    - Containerd | wait for containerd

- name: Containerd | restart containerd
  systemd:
    name: containerd
@@ -6,7 +12,6 @@
    enabled: yes
    daemon-reload: yes
    masked: no
  listen: Restart containerd

- name: Containerd | wait for containerd
  command: "{{ containerd_bin_dir }}/ctr images ls -q"
@@ -14,4 +19,3 @@
  retries: 8
  delay: 4
  until: containerd_ready.rc == 0
  listen: Restart containerd
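This refactor ties the real handlers to the "Restart containerd" name via listen, replacing chaining through a /bin/true no-op, so a single notify fans out to the restart and the readiness wait. A minimal sketch of a task that triggers the group (task name and file names assumed, not taken from the diff):

- name: Write containerd configuration (sketch)
  template:
    src: config.toml.j2
    dest: /etc/containerd/config.toml
  notify: Restart containerd  # runs every handler listening on this topic, in file order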
@@ -61,9 +61,6 @@
    src: containerd.service.j2
    dest: /etc/systemd/system/containerd.service
    mode: 0644
    validate: "sh -c '[ -f /usr/bin/systemd/system/factory-reset.target ] || exit 0 && systemd-analyze verify %s:containerd.service'"
    # FIXME: check that systemd version >= 250 (factory-reset.target was introduced in that release)
    # Remove once we drop support for systemd < 250
  notify: Restart containerd

- name: Containerd | Ensure containerd directories exist
@@ -20,10 +20,6 @@ oom_score = {{ containerd_oom_score }}
  max_container_log_line_size = {{ containerd_max_container_log_line_size }}
  enable_unprivileged_ports = {{ containerd_enable_unprivileged_ports | default(false) | lower }}
  enable_unprivileged_icmp = {{ containerd_enable_unprivileged_icmp | default(false) | lower }}
{% if enable_cdi %}
  enable_cdi = true
  cdi_spec_dirs = ["/etc/cdi", "/var/run/cdi"]
{% endif %}
  [plugins."io.containerd.grpc.v1.cri".containerd]
    default_runtime_name = "{{ containerd_default_runtime | default('runc') }}"
    snapshotter = "{{ containerd_snapshotter | default('overlayfs') }}"
@@ -39,11 +35,7 @@ oom_score = {{ containerd_oom_score }}

      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.{{ runtime.name }}.options]
{% for key, value in runtime.options.items() %}
{% if value | string != "true" and value | string != "false" %}
        {{ key }} = "{{ value }}"
{% else %}
        {{ key }} = {{ value }}
{% endif %}
{% endfor %}
{% endfor %}
{% if kata_containers_enabled %}
@@ -86,7 +78,7 @@ oom_score = {{ containerd_oom_score }}
{% endif %}
{% endfor %}

{% if nri_enabled and containerd_version is version('1.7.0', '>=') %}
{% if nri_enabled and containerd_version >= 1.7.0 %}
  [plugins."io.containerd.nri.v1.nri"]
    disable = false
{% endif %}
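The NRI guard at the end of this hunk is the substantive fix: a raw `containerd_version >= 1.7.0` is not a meaningful comparison in Jinja (strings compare lexicographically, and 1.7.0 is not even a well-formed literal), while the `version` test compares release segments numerically. A self-contained illustration with throwaway values:

- name: Show why the "version" test matters (illustrative)
  ansible.builtin.assert:
    that:
      - "'1.10.0' is version('1.7.0', '>=')"  # numeric segment comparison: true
      - "not ('1.10.0' >= '1.7.0')"           # lexicographic string comparison: wrongly false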
@@ -2,6 +2,7 @@ server = "https://{{ item.prefix }}"
{% for mirror in item.mirrors %}
[host."{{ mirror.host }}"]
  capabilities = ["{{ ([ mirror.capabilities ] | flatten ) | join('","') }}"]
{% if mirror.skip_verify is defined %}
  skip_verify = {{ mirror.skip_verify | default('false') | string | lower }}
  override_path = {{ mirror.override_path | default('false') | string | lower }}
{% endif %}
{% endfor %}
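This template renders one hosts.toml per entry of containerd_registries_mirrors, the list introduced in the containerd defaults hunk earlier. A hedged example of the data shape it expects; the mirror endpoint is hypothetical:

containerd_registries_mirrors:
  - prefix: docker.io
    mirrors:
      - host: https://mirror.example.com  # hypothetical mirror endpoint
        capabilities: ["pull", "resolve"]
        skip_verify: false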
@@ -1,31 +1,35 @@
---
- name: Restart and enable cri-dockerd
  command: /bin/true
  notify:
    - Cri-dockerd | reload systemd
    - Cri-dockerd | restart docker.service
    - Cri-dockerd | reload cri-dockerd.socket
    - Cri-dockerd | reload cri-dockerd.service
    - Cri-dockerd | enable cri-dockerd service

- name: Cri-dockerd | reload systemd
  systemd:
    name: cri-dockerd
    daemon_reload: true
    masked: no
  listen: Restart and enable cri-dockerd

- name: Cri-dockerd | restart docker.service
  service:
    name: docker.service
    state: restarted
  listen: Restart and enable cri-dockerd

- name: Cri-dockerd | reload cri-dockerd.socket
  service:
    name: cri-dockerd.socket
    state: restarted
  listen: Restart and enable cri-dockerd

- name: Cri-dockerd | reload cri-dockerd.service
  service:
    name: cri-dockerd.service
    state: restarted
  listen: Restart and enable cri-dockerd

- name: Cri-dockerd | enable cri-dockerd service
  service:
    name: cri-dockerd.service
    enabled: yes
  listen: Restart and enable cri-dockerd
@@ -18,9 +18,6 @@
    src: "{{ item }}.j2"
    dest: "/etc/systemd/system/{{ item }}"
    mode: 0644
    validate: "sh -c '[ -f /usr/bin/systemd/system/factory-reset.target ] || exit 0 && systemd-analyze verify %s:{{ item }}'"
    # FIXME: check that systemd version >= 250 (factory-reset.target was introduced in that release)
    # Remove once we drop support for systemd < 250
  with_items:
    - cri-dockerd.service
    - cri-dockerd.socket
@@ -69,6 +69,10 @@ youki_runtime:
  type: oci
  root: /run/youki

# TODO(cristicalin): remove this after 2.21
crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable"
crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/"

# Reserve 16M uids and gids for user namespaces (256 pods * 65536 uids/gids)
# at the end of the uid/gid space
crio_remap_enable: false
@@ -93,6 +97,3 @@ crio_man_files:
  8:
    - crio
    - crio-status

# If set to true, it will enable the CRIU support in cri-o
crio_criu_support_enabled: false
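The remap comment above is plain arithmetic: 256 pods * 65536 uids/gids per pod = 16,777,216 ids, the "16M" reserved at the end of the uid/gid space when crio_remap_enable is set. Spelled out (the helper name below is hypothetical, not a role variable):

# 256 * 65536 = 16777216 (~16M) ids reserved for user-namespace remapping
crio_remap_ids_reserved: 16777216  # hypothetical, for illustration only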
@@ -1,12 +1,16 @@
---
- name: Restart crio
  command: /bin/true
  notify:
    - CRI-O | reload systemd
    - CRI-O | reload crio

- name: CRI-O | reload systemd
  systemd:
    daemon_reload: true
  listen: Restart crio

- name: CRI-O | reload crio
  service:
    name: crio
    state: restarted
    enabled: yes
  listen: Restart crio
120 roles/container-engine/cri-o/tasks/cleanup.yaml Normal file
@@ -0,0 +1,120 @@
---
# TODO(cristicalin): drop this file after 2.21
- name: CRI-O kubic repo name for debian os family
  set_fact:
    crio_kubic_debian_repo_name: "{{ ((ansible_distribution == 'Ubuntu') | ternary('x', '')) ~ ansible_distribution ~ '_' ~ ansible_distribution_version }}"
  when: ansible_os_family == "Debian"

- name: Remove legacy CRI-O kubic apt repo key
  apt_key:
    url: "https://{{ crio_download_base }}/{{ crio_kubic_debian_repo_name }}/Release.key"
    state: absent
  environment: "{{ proxy_env }}"
  when: crio_kubic_debian_repo_name is defined

- name: Remove legacy CRI-O kubic apt repo
  apt_repository:
    repo: "deb http://{{ crio_download_base }}/{{ crio_kubic_debian_repo_name }}/ /"
    state: absent
    filename: devel-kubic-libcontainers-stable
  when: crio_kubic_debian_repo_name is defined

- name: Remove legacy CRI-O kubic cri-o apt repo
  apt_repository:
    repo: "deb {{ crio_download_crio }}{{ crio_version }}/{{ crio_kubic_debian_repo_name }}/ /"
    state: absent
    filename: devel-kubic-libcontainers-stable-cri-o
  when: crio_kubic_debian_repo_name is defined

- name: Remove legacy CRI-O kubic yum repo
  yum_repository:
    name: devel_kubic_libcontainers_stable
    description: Stable Releases of Upstream github.com/containers packages (CentOS_$releasever)
    baseurl: http://{{ crio_download_base }}/CentOS_{{ ansible_distribution_major_version }}/
    state: absent
  when:
    - ansible_os_family == "RedHat"
    - ansible_distribution not in ["Amazon", "Fedora"]

- name: Remove legacy CRI-O kubic yum repo
  yum_repository:
    name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}"
    description: "CRI-O {{ crio_version }} (CentOS_$releasever)"
    baseurl: "{{ crio_download_crio }}{{ crio_version }}/CentOS_{{ ansible_distribution_major_version }}/"
    state: absent
  when:
    - ansible_os_family == "RedHat"
    - ansible_distribution not in ["Amazon", "Fedora"]

- name: Remove legacy CRI-O kubic yum repo
  yum_repository:
    name: devel_kubic_libcontainers_stable
    description: Stable Releases of Upstream github.com/containers packages
    baseurl: http://{{ crio_download_base }}/Fedora_{{ ansible_distribution_major_version }}/
    state: absent
  when:
    - ansible_distribution in ["Fedora"]
    - not is_ostree

- name: Remove legacy CRI-O kubic yum repo
  yum_repository:
    name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}"
    description: "CRI-O {{ crio_version }}"
    baseurl: "{{ crio_download_crio }}{{ crio_version }}/Fedora_{{ ansible_distribution_major_version }}/"
    state: absent
  when:
    - ansible_distribution in ["Fedora"]
    - not is_ostree

- name: Remove legacy CRI-O kubic yum repo
  yum_repository:
    name: devel_kubic_libcontainers_stable
    description: Stable Releases of Upstream github.com/containers packages
    baseurl: http://{{ crio_download_base }}/CentOS_7/
    state: absent
  when: ansible_distribution in ["Amazon"]

- name: Remove legacy CRI-O kubic yum repo
  yum_repository:
    name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}"
    description: "CRI-O {{ crio_version }}"
    baseurl: "{{ crio_download_crio }}{{ crio_version }}/CentOS_7/"
    state: absent
  when: ansible_distribution in ["Amazon"]

- name: Disable modular repos for CRI-O
  community.general.ini_file:
    path: "/etc/yum.repos.d/{{ item.repo }}.repo"
    section: "{{ item.section }}"
    option: enabled
    value: 0
    mode: 0644
  become: true
  when: is_ostree
  loop:
    - repo: "fedora-updates-modular"
      section: "updates-modular"
    - repo: "fedora-modular"
      section: "fedora-modular"

# Disable any older module version if we enabled them before
- name: Disable CRI-O ex module
  command: "rpm-ostree ex module disable cri-o:{{ item }}"
  become: true
  when:
    - is_ostree
    - ostree_version is defined and ostree_version.stdout is version('2021.9', '>=')
  with_items:
    - 1.22
    - 1.23
    - 1.24

- name: Cri-o | remove installed packages
  package:
    name: "{{ item }}"
    state: absent
  when: not is_ostree
  with_items:
    - cri-o
    - cri-o-runc
    - oci-systemd-hook
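The set_fact at the top of cleanup.yaml rebuilds the kubic repo path segment: Ubuntu hosts get the kubic-style "x" prefix (as in xUbuntu_22.04), other distributions use their bare name. A worked illustration of the expression, with example fact values:

# ansible_distribution=Ubuntu, ansible_distribution_version=22.04 -> "xUbuntu_22.04"
# ansible_distribution=Debian, ansible_distribution_version=11    -> "Debian_11"
- name: Show the kubic repo name expression (illustrative)
  ansible.builtin.debug:
    msg: "{{ ((ansible_distribution == 'Ubuntu') | ternary('x', '')) ~ ansible_distribution ~ '_' ~ ansible_distribution_version }}"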
@@ -27,6 +27,9 @@
  import_tasks: "setup-amazon.yaml"
  when: ansible_distribution in ["Amazon"]

- name: Cri-o | clean up legacy repos
  import_tasks: "cleanup.yaml"

- name: Cri-o | build a list of crio runtimes with Katacontainers runtimes
  set_fact:
    crio_runtimes: "{{ crio_runtimes + kata_runtimes }}"
@@ -17,7 +17,7 @@
- name: CRI-O | Remove cri-o apt repo
  apt_repository:
    repo: "deb {{ crio_download_crio }}{{ crio_version }}/{{ crio_kubic_debian_repo_name }}/ /"
    state: absent
    state: present
    filename: devel-kubic-libcontainers-stable-cri-o
  when: crio_kubic_debian_repo_name is defined
  tags:
@@ -273,11 +273,6 @@ pinns_path = ""
pinns_path = "{{ bin_dir }}/pinns"
{% endif %}

{% if crio_criu_support_enabled %}
# Enable CRIU integration, requires that the criu binary is available in $PATH.
enable_criu_support = true
{% endif %}

# The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes.
# The runtime to use is picked based on the runtime_handler provided by the CRI.
# If no runtime_handler is provided, the runtime will be picked based on the level
@@ -382,7 +377,7 @@ enable_metrics = {{ crio_enable_metrics | bool | lower }}
# The port on which the metrics server will listen.
metrics_port = {{ crio_metrics_port }}

{% if nri_enabled and crio_version is version('v1.26.0', operator='>=') %}
{% if nri_enabled and crio_version >= v1.26.0 %}
[crio.nri]

enable_nri=true
@@ -5,9 +5,6 @@ docker_cli_version: "{{ docker_version }}"
docker_package_info:
  pkgs:

# Path where to store repo key
# docker_repo_key_keyring: /etc/apt/trusted.gpg.d/docker.gpg

docker_repo_key_info:
  repo_keys:
Some files were not shown because too many files have changed in this diff.