Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2025-12-14 13:54:37 +03:00)

Compare commits
16 commits:

12a65c45e9
1f4ca14029
e14ab338bf
d8a8fb03a2
a0c142f2e7
774d824d0b
336b323954
7dcbe415f8
d65e4e61a7
f2a364e375
2cf23e3104
200e0d54a2
fc28bfc336
733ac8ffa9
70450a4882
71349c9a17
@@ -7,32 +7,34 @@ skip_list:

# These rules are intentionally skipped:
#
# [E204]: "Lines should be no longer than 160 chars"
# This could be re-enabled with a major rewrite in the future.
# For now, there's not enough value gain from strictly limiting line length.
# (Disabled in May 2019)
- '204'

# [E701]: "meta/main.yml should contain relevant info"
# Roles in Kubespray are not intended to be used/imported by Ansible Galaxy.
# While it can be useful to have these metadata available, they are also available in the existing documentation.
# (Disabled in May 2019)
- '701'

# [role-name] "meta/main.yml" Role name role-name does not match ``^+$`` pattern
# Meta roles in Kubespray don't need proper names
# (Disabled in June 2021)
- 'role-name'

- 'experimental'
# [var-naming] "defaults/main.yml" File defines variable 'apiVersion' that violates variable naming standards
# In Kubespray we use variables that use camelCase to match their k8s counterparts
# (Disabled in June 2021)
- 'var-naming'
- 'var-spacing'

# [fqcn-builtins]
# Roles in kubespray don't need fully qualified collection names
# (Disabled in Feb 2023)
- 'fqcn-builtins'

# We use template in names
- 'name[template]'

# No changed-when on commands
# (Disabled in June 2023 after ansible upgrade; FIXME)
- 'no-changed-when'

# Disable run-once check with free strategy
# (Disabled in June 2023 after ansible upgrade; FIXME)
- 'run-once[task]'
exclude_paths:
# Generated files
- tests/files/custom_cni/cilium.yaml
- venv
@@ -1,8 +0,0 @@
# This file contains ignores rule violations for ansible-lint
inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml jinja[spacing]
roles/kubernetes/control-plane/defaults/main/kube-proxy.yml jinja[spacing]
roles/kubernetes/control-plane/defaults/main/main.yml jinja[spacing]
roles/kubernetes/kubeadm/defaults/main.yml jinja[spacing]
roles/kubernetes/node/defaults/main.yml jinja[spacing]
roles/kubernetes/preinstall/defaults/main.yml jinja[spacing]
roles/kubespray-defaults/defaults/main/main.yml jinja[spacing]

.github/ISSUE_TEMPLATE/bug-report.md (vendored, new file, 44 lines)
@@ -0,0 +1,44 @@
---
name: Bug Report
about: Report a bug encountered while operating Kubernetes
labels: kind/bug

---
<!--
Please, be ready for followup questions, and please respond in a timely
manner. If we can't reproduce a bug or think a feature already exists, we
might close your issue. If we're wrong, PLEASE feel free to reopen it and
explain why.
-->

**Environment**:
- **Cloud provider or hardware configuration:**

- **OS (`printf "$(uname -srm)\n$(cat /etc/os-release)\n"`):**

- **Version of Ansible** (`ansible --version`):

- **Version of Python** (`python --version`):


**Kubespray version (commit) (`git rev-parse --short HEAD`):**


**Network plugin used**:


**Full inventory with variables (`ansible -i inventory/sample/inventory.ini all -m debug -a "var=hostvars[inventory_hostname]"`):**
<!-- We recommend using snippets services like https://gist.github.com/ etc. -->

**Command used to invoke ansible**:


**Output of ansible run**:
<!-- We recommend using snippets services like https://gist.github.com/ etc. -->

**Anything else we need to know**:
<!-- By running scripts/collect-info.yaml you can get a lot of useful information.
The script can be started by:
ansible-playbook -i <inventory_file_path> -u <ssh_user> -e ansible_ssh_user=<ssh_user> -b --become-user=root -e dir=`pwd` scripts/collect-info.yaml
(If you are using CoreOS, remember to add '-e ansible_python_interpreter=/opt/bin/python').
After running this command you can find logs in `pwd`/logs.tar.gz. You can even upload the entire file somewhere and paste the link here.-->

.github/ISSUE_TEMPLATE/bug-report.yaml (vendored, deleted, 117 lines)
@@ -1,117 +0,0 @@
---
name: Bug Report
description: Report a bug encountered while using Kubespray
labels: kind/bug
body:
- type: markdown
attributes:
value: |
Please, be ready for followup questions, and please respond in a timely
manner. If we can't reproduce a bug or think a feature already exists, we
might close your issue. If we're wrong, PLEASE feel free to reopen it and
explain why.
- type: textarea
id: problem
attributes:
label: What happened?
description: |
Please provide as much info as possible. Not doing so may result in your bug not being addressed in a timely manner.
validations:
required: true
- type: textarea
id: expected
attributes:
label: What did you expect to happen?
validations:
required: true

- type: textarea
id: repro
attributes:
label: How can we reproduce it (as minimally and precisely as possible)?
validations:
required: true

- type: markdown
attributes:
value: '### Environment'

- type: textarea
id: os
attributes:
label: OS
placeholder: 'printf "$(uname -srm)\n$(cat /etc/os-release)\n"'
validations:
required: true

- type: textarea
id: ansible_version
attributes:
label: Version of Ansible
placeholder: 'ansible --version'
validations:
required: true

- type: input
id: python_version
attributes:
label: Version of Python
placeholder: 'python --version'
validations:
required: true

- type: input
id: kubespray_version
attributes:
label: Version of Kubespray (commit)
placeholder: 'git rev-parse --short HEAD'
validations:
required: true

- type: dropdown
id: network_plugin
attributes:
label: Network plugin used
options:
- calico
- cilium
- cni
- custom_cni
- flannel
- kube-ovn
- kube-router
- macvlan
- meta
- multus
- ovn4nfv
- weave
validations:
required: true

- type: textarea
id: inventory
attributes:
label: Full inventory with variables
placeholder: 'ansible -i inventory/sample/inventory.ini all -m debug -a "var=hostvars[inventory_hostname]"'

- type: input
id: ansible_command
attributes:
label: Command used to invoke ansible

- type: textarea
id: ansible_output
attributes:
label: Output of ansible run
description: We recommend using snippets services like https://gist.github.com/ etc.

- type: textarea
id: anything_else
attributes:
label: Anything else we need to know
description: |
By running scripts/collect-info.yaml you can get a lot of useful information.
The script can be started by:
ansible-playbook -i <inventory_file_path> -u <ssh_user> -e ansible_ssh_user=<ssh_user> -b --become-user=root -e dir=`pwd` scripts/collect-info.yaml
(If you are using CoreOS, remember to add '-e ansible_python_interpreter=/opt/bin/python').
After running this command you can find logs in `pwd`/logs.tar.gz. You can even upload the entire file somewhere and paste the link here

.github/ISSUE_TEMPLATE/config.yml (vendored, deleted, 5 lines)
@@ -1,5 +0,0 @@
---
contact_links:
- name: Support Request
url: https://kubernetes.slack.com/channels/kubespray
about: Support request or question relating to Kubernetes

.github/ISSUE_TEMPLATE/enhancement.md (vendored, new file, 11 lines)
@@ -0,0 +1,11 @@
---
name: Enhancement Request
about: Suggest an enhancement to the Kubespray project
labels: kind/feature

---
<!-- Please only use this template for submitting enhancement requests -->

**What would you like to be added**:

**Why is this needed**:

.github/ISSUE_TEMPLATE/enhancement.yaml (vendored, deleted, 20 lines)
@@ -1,20 +0,0 @@
---
name: Enhancement Request
description: Suggest an enhancement to the Kubespray project
labels: kind/feature
body:
- type: markdown
attributes:
value: Please only use this template for submitting enhancement requests
- type: textarea
id: what
attributes:
label: What would you like to be added
validations:
required: true
- type: textarea
id: why
attributes:
label: Why is this needed
validations:
required: true

.github/ISSUE_TEMPLATE/failing-test.md (vendored, new file, 20 lines)
@@ -0,0 +1,20 @@
---
name: Failing Test
about: Report test failures in Kubespray CI jobs
labels: kind/failing-test

---

<!-- Please only use this template for submitting reports about failing tests in Kubespray CI jobs -->

**Which jobs are failing**:

**Which test(s) are failing**:

**Since when has it been failing**:

**Testgrid link**:

**Reason for failure**:

**Anything else we need to know**:

.github/ISSUE_TEMPLATE/failing-test.yaml (vendored, deleted, 41 lines)
@@ -1,41 +0,0 @@
---
name: Failing Test
description: Report test failures in Kubespray CI jobs
labels: kind/failing-test
body:
- type: markdown
attributes:
value: Please only use this template for submitting reports about failing tests in Kubespray CI jobs
- type: textarea
id: failing_jobs
attributes:
label: Which jobs are failing ?
validations:
required: true

- type: textarea
id: failing_tests
attributes:
label: Which tests are failing ?
validations:
required: true

- type: input
id: since_when
attributes:
label: Since when has it been failing ?
validations:
required: true

- type: textarea
id: failure_reason
attributes:
label: Reason for failure
description: If you don't know and have no guess, just put "Unknown"
validations:
required: true

- type: textarea
id: anything_else
attributes:
label: Anything else we need to know

.github/ISSUE_TEMPLATE/support.md (vendored, new file, 18 lines)
@@ -0,0 +1,18 @@
---
name: Support Request
about: Support request or question relating to Kubespray
labels: kind/support

---

<!--
STOP -- PLEASE READ!

GitHub is not the right place for support requests.

If you're looking for help, check [Stack Overflow](https://stackoverflow.com/questions/tagged/kubespray) and the [troubleshooting guide](https://kubernetes.io/docs/tasks/debug-application-cluster/troubleshooting/).

You can also post your question on the [Kubernetes Slack](http://slack.k8s.io/) or the [Discuss Kubernetes](https://discuss.kubernetes.io/) forum.

If the matter is security related, please disclose it privately via https://kubernetes.io/security/.
-->

.gitignore (vendored, 2 changed lines)
@@ -11,7 +11,7 @@ contrib/offline/offline-files.tar.gz
.cache
*.bak
*.tfstate
*.tfstate*backup
*.tfstate.backup
*.lock.hcl
.terraform/
contrib/terraform/aws/credentials.tfvars

@@ -9,7 +9,7 @@ stages:
- deploy-special

variables:
KUBESPRAY_VERSION: v2.23.2
KUBESPRAY_VERSION: v2.21.0
FAILFASTCI_NAMESPACE: 'kargo-ci'
GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
ANSIBLE_FORCE_COLOR: "true"
@@ -33,12 +33,16 @@ variables:
MITOGEN_ENABLE: "false"
ANSIBLE_LOG_LEVEL: "-vv"
RECOVER_CONTROL_PLANE_TEST: "false"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
TERRAFORM_VERSION: 1.3.7
ANSIBLE_MAJOR_VERSION: "2.11"
PIPELINE_IMAGE: "$CI_REGISTRY_IMAGE/pipeline:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}"

before_script:
- ./tests/scripts/rebase.sh
- update-alternatives --install /usr/bin/python python /usr/bin/python3 1
- python -m pip uninstall -y ansible ansible-base ansible-core
- PIP_CONSTRAINT=tests/constraints.txt python -m pip install -r tests/requirements-${ANSIBLE_MAJOR_VERSION}.txt
- mkdir -p /.ssh

.job: &job
@@ -53,7 +57,6 @@ before_script:
.testcases: &testcases
<<: *job
retry: 1
interruptible: true
before_script:
- update-alternatives --install /usr/bin/python python /usr/bin/python3 1
- ./tests/scripts/rebase.sh

@@ -27,14 +27,6 @@ ansible-lint:
- ansible-lint -v
except: ['triggers', 'master']

jinja-syntax-check:
extends: .job
stage: unit-tests
tags: [light]
script:
- "find -name '*.j2' -exec tests/scripts/check-templates.py {} +"
except: ['triggers', 'master']

syntax-check:
extends: .job
stage: unit-tests
@@ -75,6 +67,10 @@ tox-inventory-builder:
extends: .job
before_script:
- ./tests/scripts/rebase.sh
- apt-get update && apt-get install -y python3-pip
- update-alternatives --install /usr/bin/python python /usr/bin/python3 10
- python -m pip uninstall -y ansible ansible-base ansible-core
- PIP_CONSTRAINT=tests/constraints.txt python -m pip install -r tests/requirements.txt
script:
- pip3 install tox
- cd contrib/inventory_builder && tox

@@ -9,6 +9,10 @@
stage: deploy-part1
before_script:
- tests/scripts/rebase.sh
- apt-get update && apt-get install -y python3-pip
- update-alternatives --install /usr/bin/python python /usr/bin/python3 10
- python -m pip uninstall -y ansible ansible-base ansible-core
- PIP_CONSTRAINT=tests/constraints.txt python -m pip install -r tests/requirements.txt
- ./tests/scripts/vagrant_clean.sh
script:
- ./tests/scripts/molecule_run.sh
@@ -54,7 +58,6 @@ molecule_cri-o:
stage: deploy-part2
script:
- ./tests/scripts/molecule_run.sh -i container-engine/cri-o
allow_failure: true
when: on_success

# Stage 3 container engines don't get as much attention so allow them to fail

@@ -23,45 +23,50 @@
allow_failure: true
extends: .packet

packet_cleanup_old:
stage: deploy-part1
extends: .packet_periodic
script:
- cd tests
- make cleanup-packet
after_script: []

# The ubuntu20-calico-all-in-one jobs are meant as early stages to prevent running the full CI if something is horribly broken
packet_ubuntu20-calico-all-in-one:
# The ubuntu20-calico-aio jobs are meant as early stages to prevent running the full CI if something is horribly broken
packet_ubuntu20-calico-aio:
stage: deploy-part1
extends: .packet_pr
when: on_success
variables:
RESET_CHECK: "true"

packet_ubuntu20-calico-aio-ansible-2_11:
stage: deploy-part1
extends: .packet_periodic
when: on_success
variables:
ANSIBLE_MAJOR_VERSION: "2.11"
RESET_CHECK: "true"

# ### PR JOBS PART2

packet_ubuntu20-all-in-one-docker:
packet_ubuntu18-aio-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_ubuntu20-calico-all-in-one-hardening:
packet_ubuntu20-aio-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_ubuntu22-all-in-one-docker:
packet_ubuntu20-calico-aio-hardening:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_ubuntu22-calico-all-in-one:
packet_ubuntu18-calico-aio:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_ubuntu22-calico-etcd-datastore:
packet_ubuntu22-aio-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_ubuntu22-calico-aio:
stage: deploy-part2
extends: .packet_pr
when: on_success
@@ -75,9 +80,8 @@ packet_almalinux8-crio:
extends: .packet_pr
stage: deploy-part2
when: on_success
allow_failure: true

packet_ubuntu20-crio:
packet_ubuntu18-crio:
extends: .packet_pr
stage: deploy-part2
when: manual
@@ -87,7 +91,7 @@ packet_fedora37-crio:
stage: deploy-part2
when: manual

packet_ubuntu20-flannel-ha:
packet_ubuntu16-flannel-ha:
stage: deploy-part2
extends: .packet_pr
when: manual
@@ -117,21 +121,6 @@ packet_debian11-docker:
extends: .packet_pr
when: on_success

packet_debian12-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_debian12-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_debian12-cilium:
stage: deploy-part2
extends: .packet_periodic
when: on_success

packet_centos7-calico-ha-once-localhost:
stage: deploy-part2
extends: .packet_pr
@@ -144,7 +133,7 @@ packet_centos7-calico-ha-once-localhost:

packet_almalinux8-kube-ovn:
stage: deploy-part2
extends: .packet_pr
extends: .packet_periodic
when: on_success

packet_almalinux8-calico:
@@ -187,17 +176,22 @@ packet_opensuse-docker-cilium:

# ### MANUAL JOBS

packet_ubuntu20-docker-weave-sep:
packet_ubuntu16-docker-weave-sep:
stage: deploy-part2
extends: .packet_pr
when: manual

packet_ubuntu20-cilium-sep:
packet_ubuntu18-cilium-sep:
stage: deploy-special
extends: .packet_pr
when: manual

packet_ubuntu20-flannel-ha-once:
packet_ubuntu18-flannel-ha:
stage: deploy-part2
extends: .packet_pr
when: manual

packet_ubuntu18-flannel-ha-once:
stage: deploy-part2
extends: .packet_pr
when: manual
@@ -240,7 +234,7 @@ packet_fedora37-calico-swap-selinux:
extends: .packet_pr
when: manual

packet_amazon-linux-2-all-in-one:
packet_amazon-linux-2-aio:
stage: deploy-part2
extends: .packet_pr
when: manual
@@ -265,11 +259,6 @@ packet_debian11-kubelet-csr-approver:
extends: .packet_pr
when: manual

packet_debian12-custom-cni-helm:
stage: deploy-part2
extends: .packet_pr
when: manual

# ### PR JOBS PART3
# Long jobs (45min+)

@@ -320,18 +309,18 @@ packet_debian11-calico-upgrade-once:
variables:
UPGRADE_TEST: graceful

packet_ubuntu20-calico-ha-recover:
packet_ubuntu18-calico-ha-recover:
stage: deploy-part3
extends: .packet_periodic
when: on_success
variables:
RECOVER_CONTROL_PLANE_TEST: "true"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"

packet_ubuntu20-calico-ha-recover-noquorum:
packet_ubuntu18-calico-ha-recover-noquorum:
stage: deploy-part3
extends: .packet_periodic
when: on_success
variables:
RECOVER_CONTROL_PLANE_TEST: "true"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:]:kube_control_plane[1:]"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube_control_plane[1:]"

@@ -100,13 +100,21 @@ tf-validate-upcloud:
PROVIDER: upcloud
CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-nifcloud:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: nifcloud

# tf-packet-ubuntu20-default:
# tf-packet-ubuntu16-default:
#   extends: .terraform_apply
#   variables:
#     TF_VERSION: $TERRAFORM_VERSION
#     PROVIDER: packet
#     CLUSTER: $CI_COMMIT_REF_NAME
#     TF_VAR_number_of_k8s_masters: "1"
#     TF_VAR_number_of_k8s_nodes: "1"
#     TF_VAR_plan_k8s_masters: t1.small.x86
#     TF_VAR_plan_k8s_nodes: t1.small.x86
#     TF_VAR_metro: ny
#     TF_VAR_public_key_path: ""
#     TF_VAR_operating_system: ubuntu_16_04
#
# tf-packet-ubuntu18-default:
#   extends: .terraform_apply
#   variables:
#     TF_VERSION: $TERRAFORM_VERSION
@@ -118,7 +126,7 @@ tf-validate-nifcloud:
#     TF_VAR_plan_k8s_nodes: t1.small.x86
#     TF_VAR_metro: am
#     TF_VAR_public_key_path: ""
#     TF_VAR_operating_system: ubuntu_20_04
#     TF_VAR_operating_system: ubuntu_18_04

.ovh_variables: &ovh_variables
OS_AUTH_URL: https://auth.cloud.ovh.net/v3
@@ -156,7 +164,7 @@ tf-elastx_cleanup:
script:
- ./scripts/openstack-cleanup/main.py

tf-elastx_ubuntu20-calico:
tf-elastx_ubuntu18-calico:
extends: .terraform_apply
stage: deploy-part3
when: on_success
@@ -186,7 +194,7 @@ tf-elastx_ubuntu20-calico:
TF_VAR_az_list_node: '["sto1"]'
TF_VAR_flavor_k8s_master: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
TF_VAR_flavor_k8s_node: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
TF_VAR_image: ubuntu-20.04-server-latest
TF_VAR_image: ubuntu-18.04-server-latest
TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

# OVH voucher expired, commenting job until things are sorted out
@@ -203,7 +211,7 @@ tf-elastx_ubuntu20-calico:
#   script:
#     - ./scripts/openstack-cleanup/main.py

# tf-ovh_ubuntu20-calico:
# tf-ovh_ubuntu18-calico:
#   extends: .terraform_apply
#   when: on_success
#   environment: ovh
@@ -229,5 +237,5 @@ tf-elastx_ubuntu20-calico:
#     TF_VAR_network_name: "Ext-Net"
#     TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
#     TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
#     TF_VAR_image: "Ubuntu 20.04"
#     TF_VAR_image: "Ubuntu 18.04"
#     TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

@@ -13,6 +13,10 @@
image: $PIPELINE_IMAGE
services: []
before_script:
- apt-get update && apt-get install -y python3-pip
- update-alternatives --install /usr/bin/python python /usr/bin/python3 10
- python -m pip uninstall -y ansible ansible-base ansible-core
- PIP_CONSTRAINT=tests/constraints.txt python -m pip install -r tests/requirements.txt
- ./tests/scripts/vagrant_clean.sh
script:
- ./tests/scripts/testcases_run.sh
@@ -20,12 +24,17 @@
- chronic ./tests/scripts/testcases_cleanup.sh
allow_failure: true

vagrant_ubuntu20-calico-dual-stack:
vagrant_ubuntu18-calico-dual-stack:
stage: deploy-part2
extends: .vagrant
when: on_success

vagrant_ubuntu20-weave-medium:
vagrant_ubuntu18-flannel:
stage: deploy-part2
extends: .vagrant
when: on_success

vagrant_ubuntu18-weave-medium:
stage: deploy-part2
extends: .vagrant
when: manual
@@ -41,13 +50,13 @@ vagrant_ubuntu20-flannel-collection:
extends: .vagrant
when: on_success

vagrant_ubuntu20-kube-router-sep:
vagrant_ubuntu16-kube-router-sep:
stage: deploy-part2
extends: .vagrant
when: manual

# Service proxy test fails connectivity testing
vagrant_ubuntu20-kube-router-svc-proxy:
vagrant_ubuntu16-kube-router-svc-proxy:
stage: deploy-part2
extends: .vagrant
when: manual

@@ -69,12 +69,3 @@ repos:
entry: tests/scripts/md-table/test.sh
language: script
pass_filenames: false

- id: jinja-syntax-check
name: jinja-syntax-check
entry: tests/scripts/check-templates.py
language: python
types:
- jinja
additional_dependencies:
- Jinja2

@@ -1 +0,0 @@
# See our release notes on [GitHub](https://github.com/kubernetes-sigs/kubespray/releases)
@@ -12,7 +12,6 @@ To install development dependencies you can set up a python virtual env with the
virtualenv venv
source venv/bin/activate
pip install -r tests/requirements.txt
ansible-galaxy install -r tests/requirements.yml
```

#### Linting


Dockerfile (60 changed lines)
@@ -1,8 +1,5 @@
# syntax=docker/dockerfile:1

# Use immutable image tags rather than mutable tags (like ubuntu:22.04)
FROM ubuntu:22.04@sha256:149d67e29f765f4db62aa52161009e99e389544e25a8f43c8c89d4a445a7ca37

FROM ubuntu:jammy-20230308
# Some tools like yamllint need this
# Pip needs this as well at the moment to install ansible
# (and potentially other packages)
@@ -10,37 +7,7 @@ FROM ubuntu:22.04@sha256:149d67e29f765f4db62aa52161009e99e389544e25a8f43c8c89d4a
ENV LANG=C.UTF-8 \
DEBIAN_FRONTEND=noninteractive \
PYTHONDONTWRITEBYTECODE=1

WORKDIR /kubespray

# hadolint ignore=DL3008
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
apt-get update -q \
&& apt-get install -yq --no-install-recommends \
curl \
python3 \
python3-pip \
sshpass \
vim \
rsync \
openssh-client \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/log/*

RUN --mount=type=bind,source=requirements.txt,target=requirements.txt \
--mount=type=cache,sharing=locked,id=pipcache,mode=0777,target=/root/.cache/pip \
pip install --no-compile --no-cache-dir -r requirements.txt \
&& find /usr -type d -name '*__pycache__' -prune -exec rm -rf {} \;

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

RUN --mount=type=bind,source=roles/kubespray-defaults/defaults/main/main.yml,target=roles/kubespray-defaults/defaults/main/main.yml \
KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main/main.yml) \
OS_ARCHITECTURE=$(dpkg --print-architecture) \
&& curl -L "https://dl.k8s.io/release/${KUBE_VERSION}/bin/linux/${OS_ARCHITECTURE}/kubectl" -o /usr/local/bin/kubectl \
&& echo "$(curl -L "https://dl.k8s.io/release/${KUBE_VERSION}/bin/linux/${OS_ARCHITECTURE}/kubectl.sha256")" /usr/local/bin/kubectl | sha256sum --check \
&& chmod a+x /usr/local/bin/kubectl

COPY *.yml ./
COPY *.cfg ./
COPY roles ./roles
@@ -50,3 +17,28 @@ COPY library ./library
COPY extra_playbooks ./extra_playbooks
COPY playbooks ./playbooks
COPY plugins ./plugins

RUN apt update -q \
&& apt install -yq --no-install-recommends \
curl \
python3 \
python3-pip \
sshpass \
vim \
rsync \
openssh-client \
&& pip install --no-compile --no-cache-dir \
ansible==5.7.1 \
ansible-core==2.12.5 \
cryptography==3.4.8 \
jinja2==3.1.2 \
netaddr==0.8.0 \
jmespath==1.0.1 \
MarkupSafe==2.1.2 \
ruamel.yaml==0.17.21 \
&& KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
&& curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \
&& echo $(curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \
&& chmod a+x /usr/local/bin/kubectl \
&& rm -rf /var/lib/apt/lists/* /var/log/* \
&& find /usr -type d -name '*__pycache__' -prune -exec rm -rf {} \;

@@ -23,8 +23,6 @@ aliases:
- cyclinder
- mzaian
- mrfreezeex
- erikjiang
- vannten
kubespray-emeritus_approvers:
- riverzhang
- atoms


README.md (52 changed lines)
@@ -34,7 +34,7 @@ CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inv
cat inventory/mycluster/group_vars/all/all.yml
cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml

# Clean up old Kubernetes cluster with Ansible Playbook - run the playbook as root
# Clean up old Kubernete cluster with Ansible Playbook - run the playbook as root
# The option `--become` is required, as for example cleaning up SSL keys in /etc/,
# uninstalling old packages and interacting with various systemd daemons.
# Without --become the playbook will fail to run!
@@ -75,11 +75,11 @@ You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mou
to access the inventory and SSH key in the container, like this:

```ShellSession
git checkout v2.24.1
docker pull quay.io/kubespray/kubespray:v2.24.1
git checkout v2.22.2
docker pull quay.io/kubespray/kubespray:v2.22.2
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
  --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
  quay.io/kubespray/kubespray:v2.23.2 bash
  quay.io/kubespray/kubespray:v2.22.0 bash
# Inside the container you may now run the kubespray playbooks:
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
```
@@ -142,8 +142,8 @@ vagrant up
## Supported Linux Distributions

- **Flatcar Container Linux by Kinvolk**
- **Debian** Bookworm, Bullseye, Buster
- **Ubuntu** 20.04, 22.04
- **Debian** Bullseye, Buster
- **Ubuntu** 16.04, 18.04, 20.04, 22.04
- **CentOS/RHEL** 7, [8, 9](docs/centos.md#centos-8)
- **Fedora** 37, 38
- **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
@@ -161,28 +161,28 @@ Note: Upstart/SysV init based OS types are not supported.
## Supported Components

- Core
  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.28.10
  - [etcd](https://github.com/etcd-io/etcd) v3.5.10
  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.26.13
  - [etcd](https://github.com/etcd-io/etcd) v3.5.6
  - [docker](https://www.docker.com/) v20.10 (see note)
  - [containerd](https://containerd.io/) v1.7.13
  - [cri-o](http://cri-o.io/) v1.27 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
  - [cri-o](http://cri-o.io/) v1.24 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
- Network Plugin
  - [cni-plugins](https://github.com/containernetworking/plugins) v1.2.0
  - [calico](https://github.com/projectcalico/calico) v3.26.4
  - [cilium](https://github.com/cilium/cilium) v1.13.4
  - [flannel](https://github.com/flannel-io/flannel) v0.22.0
  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.11.5
  - [kube-router](https://github.com/cloudnativelabs/kube-router) v2.0.0
  - [calico](https://github.com/projectcalico/calico) v3.25.1
  - [cilium](https://github.com/cilium/cilium) v1.13.0
  - [flannel](https://github.com/flannel-io/flannel) v0.21.4
  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.10.7
  - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1
  - [multus](https://github.com/k8snetworkplumbingwg/multus-cni) v3.8
  - [weave](https://github.com/weaveworks/weave) v2.8.1
  - [kube-vip](https://github.com/kube-vip/kube-vip) v0.5.12
- Application
  - [cert-manager](https://github.com/jetstack/cert-manager) v1.13.2
  - [coredns](https://github.com/coredns/coredns) v1.10.1
  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.9.4
  - [krew](https://github.com/kubernetes-sigs/krew) v0.4.4
  - [argocd](https://argoproj.github.io/) v2.8.4
  - [helm](https://helm.sh/) v3.13.1
  - [cert-manager](https://github.com/jetstack/cert-manager) v1.11.1
  - [coredns](https://github.com/coredns/coredns) v1.9.3
  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.7.1
  - [krew](https://github.com/kubernetes-sigs/krew) v0.4.3
  - [argocd](https://argoproj.github.io/) v2.7.2
  - [helm](https://helm.sh/) v3.12.0
  - [metallb](https://metallb.universe.tf/) v0.13.9
  - [registry](https://github.com/distribution/distribution) v2.8.1
- Storage Plugin
@@ -191,19 +191,19 @@ Note: Upstart/SysV init based OS types are not supported.
  - [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) v0.5.0
  - [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0
  - [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.22.0
  - [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.9.2
  - [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.24
  - [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.4.0
  - [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.23
  - [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0

## Container Runtime Notes

- Supported Docker versions are 18.09, 19.03, 20.10, 23.0 and 24.0. The *recommended* Docker version is 20.10 (except on Debian Bookworm, which no longer supports 20.10 and below). `Kubelet` might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster, look into e.g. the YUM ``versionlock`` plugin or ``apt pin``.
- Supported Docker versions are 18.09, 19.03 and 20.10. The *recommended* Docker version is 20.10. `Kubelet` might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster, look into e.g. the YUM ``versionlock`` plugin or ``apt pin``.
- The cri-o version should be aligned with the respective kubernetes version (i.e. kube_version=1.20.x, crio_version=1.20)

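The pinning mentioned in the note above can be done with the distribution package manager; a minimal sketch, assuming the upstream Docker package names (these names are not set by Kubespray itself):

```ShellSession
# Debian/Ubuntu: keep apt from upgrading the container runtime packages
sudo apt-mark hold docker-ce docker-ce-cli containerd.io

# RHEL/CentOS: lock the currently installed versions (needs the versionlock plugin)
sudo yum install -y yum-plugin-versionlock
sudo yum versionlock add docker-ce docker-ce-cli containerd.io
```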
## Requirements

- **Minimum required version of Kubernetes is v1.26**
- **Ansible v2.14+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
- **Minimum required version of Kubernetes is v1.24**
- **Ansible v2.11+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
- The target servers are configured to allow **IPv4 forwarding**.
- If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**.
@@ -227,7 +227,7 @@ You can choose among ten network plugins. (default: `calico`, except Vagrant use

- [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.

- [Calico](https://docs.tigera.io/calico/latest/about/) is a networking and network policy provider. Calico supports a flexible set of networking options
- [Calico](https://docs.projectcalico.org/latest/introduction/) is a networking and network policy provider. Calico supports a flexible set of networking options
  designed to give you the most efficient networking across a range of situations, including non-overlay
  and overlay networks, with or without BGP. Calico uses the same engine to enforce network policy for hosts,
  pods, and (if using Istio and Envoy) applications at the service mesh layer.


Vagrantfile (vendored, 11 changed lines)
@@ -19,8 +19,9 @@ SUPPORTED_OS = {
"flatcar-beta" => {box: "flatcar-beta", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["beta"]},
"flatcar-alpha" => {box: "flatcar-alpha", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["alpha"]},
"flatcar-edge" => {box: "flatcar-edge", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["edge"]},
"ubuntu1604" => {box: "generic/ubuntu1604", user: "vagrant"},
"ubuntu1804" => {box: "generic/ubuntu1804", user: "vagrant"},
"ubuntu2004" => {box: "generic/ubuntu2004", user: "vagrant"},
"ubuntu2204" => {box: "generic/ubuntu2204", user: "vagrant"},
"centos" => {box: "centos/7", user: "vagrant"},
"centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
"centos8" => {box: "centos/8", user: "vagrant"},
@@ -52,7 +53,7 @@ $shared_folders ||= {}
$forwarded_ports ||= {}
$subnet ||= "172.18.8"
$subnet_ipv6 ||= "fd3c:b398:0698:0756"
$os ||= "ubuntu2004"
$os ||= "ubuntu1804"
$network_plugin ||= "flannel"
# Setting multi_networking to true will install Multus: https://github.com/k8snetworkplumbingwg/multus-cni
$multi_networking ||= "False"
@@ -218,8 +219,8 @@ Vagrant.configure("2") do |config|
# Disable swap for each vm
node.vm.provision "shell", inline: "swapoff -a"

# ubuntu2004 and ubuntu2204 have IPv6 explicitly disabled. This undoes that.
if ["ubuntu2004", "ubuntu2204"].include? $os
# ubuntu1804 and ubuntu2004 have IPv6 explicitly disabled. This undoes that.
if ["ubuntu1804", "ubuntu2004"].include? $os
node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf"
node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf"
end
@@ -233,7 +234,7 @@ Vagrant.configure("2") do |config|
end

# Disable firewalld on oraclelinux/redhat vms
if ["oraclelinux","oraclelinux8","rhel7","rhel8","rockylinux8"].include? $os
if ["oraclelinux","oraclelinux8","rhel7","rhel8"].include? $os
node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld"
end

@@ -39,7 +39,7 @@ class SearchEC2Tags(object):
hosts[group] = []
tag_key = "kubespray-role"
tag_value = ["*"+group+"*"]
region = os.environ['AWS_REGION']
region = os.environ['REGION']

ec2 = boto3.resource('ec2', region)
filters = [{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}]
@@ -67,11 +67,6 @@ class SearchEC2Tags(object):
if node_labels_tag:
ansible_host['node_labels'] = dict([ label.strip().split('=') for label in node_labels_tag[0]['Value'].split(',') ])

##Set when instance actually has node_taints
node_taints_tag = list(filter(lambda t: t['Key'] == 'kubespray-node-taints', instance.tags))
if node_taints_tag:
ansible_host['node_taints'] = list([ taint.strip() for taint in node_taints_tag[0]['Value'].split(',') ])

hosts[group].append(dns_name)
hosts['_meta']['hostvars'][dns_name] = ansible_host

@@ -1,6 +1,5 @@
---
- name: Generate Azure inventory
hosts: localhost
- hosts: localhost
gather_facts: False
roles:
- generate-inventory

@@ -1,6 +1,5 @@
---
- name: Generate Azure inventory
hosts: localhost
- hosts: localhost
gather_facts: False
roles:
- generate-inventory_2

@@ -1,6 +1,5 @@
---
- name: Generate Azure templates
hosts: localhost
- hosts: localhost
gather_facts: False
roles:
- generate-templates

@@ -1,6 +1,6 @@
---

- name: Query Azure VMs
- name: Query Azure VMs # noqa 301
command: azure vm list-ip-address --json {{ azure_resource_group }}
register: vm_list_cmd

@@ -1,14 +1,14 @@
---

- name: Query Azure VMs IPs
- name: Query Azure VMs IPs # noqa 301
command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
register: vm_ip_list_cmd

- name: Query Azure VMs Roles
- name: Query Azure VMs Roles # noqa 301
command: az vm list -o json --resource-group {{ azure_resource_group }}
register: vm_list_cmd

- name: Query Azure Load Balancer Public IP
- name: Query Azure Load Balancer Public IP # noqa 301
command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
register: lb_pubip_cmd

@@ -24,14 +24,14 @@ bastionIPAddressName: bastion-pubip

disablePasswordAuthentication: true

sshKeyPath: "/home/{{ admin_username }}/.ssh/authorized_keys"
sshKeyPath: "/home/{{admin_username}}/.ssh/authorized_keys"

imageReference:
publisher: "OpenLogic"
offer: "CentOS"
sku: "7.5"
version: "latest"
imageReferenceJson: "{{ imageReference | to_json }}"
imageReferenceJson: "{{imageReference|to_json}}"

storageAccountName: "sa{{ nameSuffix | replace('-', '') }}"
storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}"

@@ -1,11 +1,9 @@
---
- name: Create nodes as docker containers
hosts: localhost
- hosts: localhost
gather_facts: False
roles:
- { role: dind-host }

- name: Customize each node containers
hosts: containers
- hosts: containers
roles:
- { role: dind-cluster }

@@ -1,9 +1,9 @@
---
- name: Set_fact distro_setup
- name: set_fact distro_setup
set_fact:
distro_setup: "{{ distro_settings[node_distro] }}"

- name: Set_fact other distro settings
- name: set_fact other distro settings
set_fact:
distro_user: "{{ distro_setup['user'] }}"
distro_ssh_service: "{{ distro_setup['ssh_service'] }}"
@@ -43,7 +43,7 @@
package:
name: "{{ item }}"
state: present
with_items: "{{ distro_extra_packages + ['rsyslog', 'openssh-server'] }}"
with_items: "{{ distro_extra_packages + [ 'rsyslog', 'openssh-server' ] }}"

- name: Start needed services
service:
@@ -66,8 +66,8 @@
dest: "/etc/sudoers.d/{{ distro_user }}"
mode: 0640

- name: "Add my pubkey to {{ distro_user }} user authorized keys"
ansible.posix.authorized_key:
- name: Add my pubkey to "{{ distro_user }}" user authorized keys
authorized_key:
user: "{{ distro_user }}"
state: present
key: "{{ lookup('file', lookup('env', 'HOME') + '/.ssh/id_rsa.pub') }}"
key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}"

@@ -1,9 +1,9 @@
---
- name: Set_fact distro_setup
- name: set_fact distro_setup
set_fact:
distro_setup: "{{ distro_settings[node_distro] }}"

- name: Set_fact other distro settings
- name: set_fact other distro settings
set_fact:
distro_image: "{{ distro_setup['image'] }}"
distro_init: "{{ distro_setup['init'] }}"
@@ -13,7 +13,7 @@
distro_agetty_svc: "{{ distro_setup['agetty_svc'] }}"

- name: Create dind node containers from "containers" inventory section
community.docker.docker_container:
docker_container:
image: "{{ distro_image }}"
name: "{{ item }}"
state: started
@@ -53,7 +53,7 @@
{{ distro_raw_setup_done }} && echo SKIPPED && exit 0
until [ "$(readlink /proc/1/exe)" = "{{ distro_pid1_exe }}" ] ; do sleep 1; done
{{ distro_raw_setup }}
delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
with_items: "{{ containers.results }}"
register: result
changed_when: result.stdout.find("SKIPPED") < 0
@@ -63,25 +63,26 @@
until test -S /var/run/dbus/system_bus_socket; do sleep 1; done
systemctl disable {{ distro_agetty_svc }}
systemctl stop {{ distro_agetty_svc }}
delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
with_items: "{{ containers.results }}"
changed_when: false

# Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
# handle manually
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave) # noqa 301
raw: |
echo {{ item | hash('sha1') }} > /etc/machine-id.new
mv -b /etc/machine-id.new /etc/machine-id
cmp /etc/machine-id /etc/machine-id~ || true
systemctl daemon-reload
delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
with_items: "{{ containers.results }}"

- name: Early hack image install to adapt for DIND
# noqa 302 - this task uses the raw module intentionally
raw: |
rm -fv /usr/bin/udevadm /usr/sbin/udevadm
delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
with_items: "{{ containers.results }}"
register: result
changed_when: result.stdout.find("removed") >= 0

@@ -1,27 +1,21 @@
[tox]
minversion = 1.6
skipsdist = True
envlist = pep8
envlist = pep8, py33

[testenv]
allowlist_externals = py.test
whitelist_externals = py.test
usedevelop = True
deps =
-r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
setenv = VIRTUAL_ENV={envdir}
passenv =
http_proxy
HTTP_PROXY
https_proxy
HTTPS_PROXY
no_proxy
NO_PROXY
passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
commands = pytest -vv #{posargs:./tests}

[testenv:pep8]
usedevelop = False
allowlist_externals = bash
whitelist_externals = bash
commands =
bash -c "find {toxinidir}/* -type f -name '*.py' -print0 | xargs -0 flake8"

@@ -1,9 +1,8 @@
---
- name: Prepare Hypervisor to later install kubespray VMs
hosts: localhost
- hosts: localhost
gather_facts: False
become: yes
vars:
bootstrap_os: none
- bootstrap_os: none
roles:
- { role: kvm-setup }
- kvm-setup

@@ -22,9 +22,9 @@
- ntp
when: ansible_os_family == "Debian"

- name: Create deployment user if required
include_tasks: user.yml
# Create deployment user if required
- include: user.yml
when: k8s_deployment_user is defined

- name: Set proper sysctl values
import_tasks: sysctl.yml
# Set proper sysctl values
- include: sysctl.yml

@@ -1,6 +1,6 @@
---
- name: Load br_netfilter module
community.general.modprobe:
modprobe:
name: br_netfilter
state: present
register: br_netfilter
@@ -25,7 +25,7 @@

- name: Enable net.ipv4.ip_forward in sysctl
ansible.posix.sysctl:
sysctl:
name: net.ipv4.ip_forward
value: 1
sysctl_file: "{{ sysctl_file_path }}"
@@ -33,7 +33,7 @@
reload: yes

- name: Set bridge-nf-call-{arptables,iptables} to 0
ansible.posix.sysctl:
sysctl:
name: "{{ item }}"
state: present
value: 0

@@ -1,9 +1,8 @@
---
- name: Check ansible version
import_playbook: kubernetes_sigs.kubespray.ansible_version
import_playbook: ansible_version.yml

- name: Install mitogen
hosts: localhost
- hosts: localhost
strategy: linear
vars:
mitogen_version: 0.3.2
@@ -20,25 +19,24 @@
- "{{ playbook_dir }}/plugins/mitogen"
- "{{ playbook_dir }}/dist"

- name: Download mitogen release
- name: download mitogen release
get_url:
url: "{{ mitogen_url }}"
dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
validate_certs: true
mode: 0644

- name: Extract archive
- name: extract archive
unarchive:
src: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
dest: "{{ playbook_dir }}/dist/"

- name: Copy plugin
ansible.posix.synchronize:
- name: copy plugin
synchronize:
src: "{{ playbook_dir }}/dist/mitogen-{{ mitogen_version }}/"
dest: "{{ playbook_dir }}/plugins/mitogen"

- name: Add strategy to ansible.cfg
community.general.ini_file:
- name: add strategy to ansible.cfg
ini_file:
path: ansible.cfg
mode: 0644
section: "{{ item.section | d('defaults') }}"

@@ -1,29 +1,24 @@
---
- name: Bootstrap hosts
hosts: gfs-cluster
- hosts: gfs-cluster
gather_facts: false
vars:
ansible_ssh_pipelining: false
roles:
- { role: bootstrap-os, tags: bootstrap-os}

- name: Gather facts
hosts: all
- hosts: all
gather_facts: true

- name: Install glusterfs server
hosts: gfs-cluster
- hosts: gfs-cluster
vars:
ansible_ssh_pipelining: true
roles:
- { role: glusterfs/server }

- name: Install glusterfs servers
hosts: k8s_cluster
- hosts: k8s_cluster
roles:
- { role: glusterfs/client }

- name: Configure Kubernetes to use glusterfs
hosts: kube_control_plane[0]
- hosts: kube_control_plane[0]
roles:
- { role: kubernetes-pv }

@@ -6,12 +6,12 @@ galaxy_info:
description: GlusterFS installation for Linux.
company: "Midwestern Mac, LLC"
license: "license (BSD, MIT)"
min_ansible_version: "2.0"
min_ansible_version: 2.0
platforms:
- name: EL
versions:
- "6"
- "7"
- 6
- 7
- name: Ubuntu
versions:
- precise

@@ -3,19 +3,14 @@
# hyperkube and needs to be installed as part of the system.

# Setup/install tasks.
- name: Setup RedHat distros for glusterfs
include_tasks: setup-RedHat.yml
- include: setup-RedHat.yml
when: ansible_os_family == 'RedHat' and groups['gfs-cluster'] is defined

- name: Setup Debian distros for glusterfs
include_tasks: setup-Debian.yml
- include: setup-Debian.yml
when: ansible_os_family == 'Debian' and groups['gfs-cluster'] is defined

- name: Ensure Gluster mount directories exist.
file:
path: "{{ item }}"
state: directory
mode: 0775
file: "path={{ item }} state=directory mode=0775"
with_items:
- "{{ gluster_mount_dir }}"
when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined

@@ -7,7 +7,7 @@
register: glusterfs_ppa_added
when: glusterfs_ppa_use

- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa no-handler
- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa 503
apt:
name: "{{ item }}"
state: absent

@@ -1,14 +1,10 @@
---
- name: Install Prerequisites
package:
name: "{{ item }}"
state: present
package: name={{ item }} state=present
with_items:
- "centos-release-gluster{{ glusterfs_default_release }}"

- name: Install Packages
package:
name: "{{ item }}"
state: present
package: name={{ item }} state=present
with_items:
- glusterfs-client

@@ -6,12 +6,12 @@ galaxy_info:
description: GlusterFS installation for Linux.
company: "Midwestern Mac, LLC"
license: "license (BSD, MIT)"
min_ansible_version: "2.0"
min_ansible_version: 2.0
platforms:
- name: EL
versions:
- "6"
- "7"
- 6
- 7
- name: Ubuntu
versions:
- precise

@@ -4,97 +4,78 @@
include_vars: "{{ ansible_os_family }}.yml"

# Install xfs package
- name: Install xfs Debian
apt:
name: xfsprogs
state: present
- name: install xfs Debian
apt: name=xfsprogs state=present
when: ansible_os_family == "Debian"

- name: Install xfs RedHat
package:
name: xfsprogs
state: present
- name: install xfs RedHat
package: name=xfsprogs state=present
when: ansible_os_family == "RedHat"

# Format external volumes in xfs
- name: Format volumes in xfs
community.general.filesystem:
fstype: xfs
dev: "{{ disk_volume_device_1 }}"
filesystem: "fstype=xfs dev={{ disk_volume_device_1 }}"

# Mount external volumes
- name: Mounting new xfs filesystem
ansible.posix.mount:
name: "{{ gluster_volume_node_mount_dir }}"
src: "{{ disk_volume_device_1 }}"
fstype: xfs
state: mounted
- name: mounting new xfs filesystem
mount: "name={{ gluster_volume_node_mount_dir }} src={{ disk_volume_device_1 }} fstype=xfs state=mounted"

# Setup/install tasks.
- name: Setup RedHat distros for glusterfs
include_tasks: setup-RedHat.yml
- include: setup-RedHat.yml
when: ansible_os_family == 'RedHat'

- name: Setup Debian distros for glusterfs
include_tasks: setup-Debian.yml
- include: setup-Debian.yml
when: ansible_os_family == 'Debian'

- name: Ensure GlusterFS is started and enabled at boot.
service:
name: "{{ glusterfs_daemon }}"
state: started
enabled: yes
service: "name={{ glusterfs_daemon }} state=started enabled=yes"

- name: Ensure Gluster brick and mount directories exist.
file:
path: "{{ item }}"
state: directory
mode: 0775
file: "path={{ item }} state=directory mode=0775"
with_items:
- "{{ gluster_brick_dir }}"
- "{{ gluster_mount_dir }}"

- name: Configure Gluster volume with replicas
gluster.gluster.gluster_volume:
gluster_volume:
state: present
name: "{{ gluster_brick_name }}"
brick: "{{ gluster_brick_dir }}"
replicas: "{{ groups['gfs-cluster'] | length }}"
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
host: "{{ inventory_hostname }}"
force: yes
run_once: true
when: groups['gfs-cluster'] | length > 1
when: groups['gfs-cluster']|length > 1

- name: Configure Gluster volume without replicas
gluster.gluster.gluster_volume:
gluster_volume:
state: present
name: "{{ gluster_brick_name }}"
brick: "{{ gluster_brick_dir }}"
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
host: "{{ inventory_hostname }}"
force: yes
run_once: true
when: groups['gfs-cluster'] | length <= 1
when: groups['gfs-cluster']|length <= 1

- name: Mount glusterfs to retrieve disk size
ansible.posix.mount:
mount:
name: "{{ gluster_mount_dir }}"
src: "{{ ip | default(ansible_default_ipv4['address']) }}:/gluster"
src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
fstype: glusterfs
opts: "defaults,_netdev"
state: mounted
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

- name: Get Gluster disk size
setup:
filter: ansible_mounts
setup: filter=ansible_mounts
register: mounts_data
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

- name: Set Gluster disk size to variable
set_fact:
gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024 * 1024 * 1024)) | int }}"
gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024*1024*1024)) | int }}"
|
||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||
|
||||
- name: Create file on GlusterFS
|
||||
@@ -105,9 +86,9 @@
|
||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||
|
||||
- name: Unmount glusterfs
|
||||
ansible.posix.mount:
|
||||
mount:
|
||||
name: "{{ gluster_mount_dir }}"
|
||||
fstype: glusterfs
|
||||
src: "{{ ip | default(ansible_default_ipv4['address']) }}:/gluster"
|
||||
src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
|
||||
state: unmounted
|
||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
register: glusterfs_ppa_added
|
||||
when: glusterfs_ppa_use
|
||||
|
||||
- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa no-handler
|
||||
- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa 503
|
||||
apt:
|
||||
name: "{{ item }}"
|
||||
state: absent
|
||||
|
||||
@@ -1,15 +1,11 @@
|
||||
---
|
||||
- name: Install Prerequisites
|
||||
package:
|
||||
name: "{{ item }}"
|
||||
state: present
|
||||
package: name={{ item }} state=present
|
||||
with_items:
|
||||
- "centos-release-gluster{{ glusterfs_default_release }}"
|
||||
|
||||
- name: Install Packages
|
||||
package:
|
||||
name: "{{ item }}"
|
||||
state: present
|
||||
package: name={{ item }} state=present
|
||||
with_items:
|
||||
- glusterfs-server
|
||||
- glusterfs-client
|
||||
|
||||
@@ -18,6 +18,6 @@
|
||||
kubectl: "{{ bin_dir }}/kubectl"
|
||||
resource: "{{ item.item.type }}"
|
||||
filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
|
||||
state: "{{ item.changed | ternary('latest', 'present') }}"
|
||||
state: "{{ item.changed | ternary('latest','present') }}"
|
||||
with_items: "{{ gluster_pv.results }}"
|
||||
when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined
|
||||
|
||||
@@ -1,11 +1,9 @@
|
||||
---
|
||||
- name: Tear down heketi
|
||||
hosts: kube_control_plane[0]
|
||||
- hosts: kube_control_plane[0]
|
||||
roles:
|
||||
- { role: tear-down }
|
||||
|
||||
- name: Teardown disks in heketi
|
||||
hosts: heketi-node
|
||||
- hosts: heketi-node
|
||||
become: yes
|
||||
roles:
|
||||
- { role: tear-down-disks }
|
||||
|
||||
@@ -1,11 +1,9 @@
|
||||
---
|
||||
- name: Prepare heketi install
|
||||
hosts: heketi-node
|
||||
- hosts: heketi-node
|
||||
roles:
|
||||
- { role: prepare }
|
||||
|
||||
- name: Provision heketi
|
||||
hosts: kube_control_plane[0]
|
||||
- hosts: kube_control_plane[0]
|
||||
tags:
|
||||
- "provision"
|
||||
roles:
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
- "dm_snapshot"
|
||||
- "dm_mirror"
|
||||
- "dm_thin_pool"
|
||||
community.general.modprobe:
|
||||
modprobe:
|
||||
name: "{{ item }}"
|
||||
state: "present"
|
||||
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
---
|
||||
- name: "Stop port forwarding"
|
||||
- name: "stop port forwarding"
|
||||
command: "killall "
|
||||
|
||||
@@ -7,9 +7,9 @@
|
||||
|
||||
- name: "Bootstrap heketi."
|
||||
when:
|
||||
- "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Service']\")) | length == 0"
|
||||
- "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Deployment']\")) | length == 0"
|
||||
- "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Pod']\")) | length == 0"
|
||||
- "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Service']\"))|length == 0"
|
||||
- "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Deployment']\"))|length == 0"
|
||||
- "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod']\"))|length == 0"
|
||||
include_tasks: "bootstrap/deploy.yml"
|
||||
|
||||
# Prepare heketi topology
|
||||
@@ -20,11 +20,11 @@
|
||||
|
||||
- name: "Ensure heketi bootstrap pod is up."
|
||||
assert:
|
||||
that: "(initial_heketi_pod.stdout | from_json | json_query('items[*]')) | length == 1"
|
||||
that: "(initial_heketi_pod.stdout|from_json|json_query('items[*]'))|length == 1"
|
||||
|
||||
- name: Store the initial heketi pod name
|
||||
set_fact:
|
||||
initial_heketi_pod_name: "{{ initial_heketi_pod.stdout | from_json | json_query(\"items[*].metadata.name | [0]\") }}"
|
||||
initial_heketi_pod_name: "{{ initial_heketi_pod.stdout|from_json|json_query(\"items[*].metadata.name|[0]\") }}"
|
||||
|
||||
- name: "Test heketi topology."
|
||||
changed_when: false
|
||||
@@ -32,7 +32,7 @@
|
||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
||||
|
||||
- name: "Load heketi topology."
|
||||
when: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*]\") | flatten | length == 0"
|
||||
when: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*]\")|flatten|length == 0"
|
||||
include_tasks: "bootstrap/topology.yml"
|
||||
|
||||
# Provision heketi database volume
|
||||
@@ -58,7 +58,7 @@
|
||||
service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
|
||||
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
|
||||
when:
|
||||
- "heketi_storage_state.stdout | from_json | json_query(secret_query) | length == 0"
|
||||
- "heketi_storage_state.stdout | from_json | json_query(endpoints_query) | length == 0"
|
||||
- "heketi_storage_state.stdout | from_json | json_query(service_query) | length == 0"
|
||||
- "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 0"
|
||||
- "heketi_storage_state.stdout|from_json|json_query(secret_query)|length == 0"
|
||||
- "heketi_storage_state.stdout|from_json|json_query(endpoints_query)|length == 0"
|
||||
- "heketi_storage_state.stdout|from_json|json_query(service_query)|length == 0"
|
||||
- "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 0"
|
||||
|
||||
@@ -17,11 +17,11 @@
|
||||
register: "initial_heketi_state"
|
||||
vars:
|
||||
initial_heketi_state: { stdout: "{}" }
|
||||
pods_query: "items[?kind=='Pod'].status.conditions | [0][?type=='Ready'].status | [0]"
|
||||
deployments_query: "items[?kind=='Deployment'].status.conditions | [0][?type=='Available'].status | [0]"
|
||||
pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]"
|
||||
deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
|
||||
command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
|
||||
until:
|
||||
- "initial_heketi_state.stdout | from_json | json_query(pods_query) == 'True'"
|
||||
- "initial_heketi_state.stdout | from_json | json_query(deployments_query) == 'True'"
|
||||
- "initial_heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
|
||||
- "initial_heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
|
||||
retries: 60
|
||||
delay: 5
|
||||
|
||||
@@ -15,10 +15,10 @@
|
||||
service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
|
||||
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
|
||||
when:
|
||||
- "heketi_storage_state.stdout | from_json | json_query(secret_query) | length == 0"
|
||||
- "heketi_storage_state.stdout | from_json | json_query(endpoints_query) | length == 0"
|
||||
- "heketi_storage_state.stdout | from_json | json_query(service_query) | length == 0"
|
||||
- "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 0"
|
||||
- "heketi_storage_state.stdout|from_json|json_query(secret_query)|length == 0"
|
||||
- "heketi_storage_state.stdout|from_json|json_query(endpoints_query)|length == 0"
|
||||
- "heketi_storage_state.stdout|from_json|json_query(service_query)|length == 0"
|
||||
- "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 0"
|
||||
register: "heketi_storage_result"
|
||||
- name: "Get state of heketi database copy job."
|
||||
command: "{{ bin_dir }}/kubectl get jobs --output=json"
|
||||
@@ -28,6 +28,6 @@
|
||||
heketi_storage_state: { stdout: "{}" }
|
||||
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job' && status.succeeded==1]"
|
||||
until:
|
||||
- "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 1"
|
||||
- "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 1"
|
||||
retries: 60
|
||||
delay: 5
|
||||
|
||||
@@ -5,10 +5,10 @@
|
||||
changed_when: false
|
||||
- name: "Delete bootstrap Heketi."
|
||||
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
|
||||
when: "heketi_resources.stdout | from_json | json_query('items[*]') | length > 0"
|
||||
- name: "Ensure there is nothing left over."
|
||||
when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0"
|
||||
- name: "Ensure there is nothing left over." # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
|
||||
register: "heketi_result"
|
||||
until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
|
||||
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
|
||||
retries: 60
|
||||
delay: 5
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
- name: "Copy topology configuration into container."
|
||||
changed_when: false
|
||||
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
|
||||
- name: "Load heketi topology." # noqa no-handler
|
||||
- name: "Load heketi topology." # noqa 503
|
||||
when: "render.changed"
|
||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
|
||||
register: "load_heketi"
|
||||
@@ -22,6 +22,6 @@
|
||||
changed_when: false
|
||||
register: "heketi_topology"
|
||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
||||
until: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\") | flatten | length == groups['heketi-node'] | length"
|
||||
until: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\")|flatten|length == groups['heketi-node']|length"
|
||||
retries: 60
|
||||
delay: 5
|
||||
|
||||
@@ -6,19 +6,19 @@
|
||||
- name: "Get heketi volumes."
|
||||
changed_when: false
|
||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
|
||||
with_items: "{{ heketi_volumes.stdout | from_json | json_query(\"volumes[*]\") }}"
|
||||
with_items: "{{ heketi_volumes.stdout|from_json|json_query(\"volumes[*]\") }}"
|
||||
loop_control: { loop_var: "volume_id" }
|
||||
register: "volumes_information"
|
||||
- name: "Test heketi database volume."
|
||||
set_fact: { heketi_database_volume_exists: true }
|
||||
with_items: "{{ volumes_information.results }}"
|
||||
loop_control: { loop_var: "volume_information" }
|
||||
vars: { volume: "{{ volume_information.stdout | from_json }}" }
|
||||
vars: { volume: "{{ volume_information.stdout|from_json }}" }
|
||||
when: "volume.name == 'heketidbstorage'"
|
||||
- name: "Provision database volume."
|
||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
|
||||
when: "heketi_database_volume_exists is undefined"
|
||||
- name: "Copy configuration from pod."
|
||||
- name: "Copy configuration from pod." # noqa 301
|
||||
become: true
|
||||
command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"
|
||||
- name: "Get heketi volume ids."
|
||||
@@ -28,14 +28,14 @@
|
||||
- name: "Get heketi volumes."
|
||||
changed_when: false
|
||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
|
||||
with_items: "{{ heketi_volumes.stdout | from_json | json_query(\"volumes[*]\") }}"
|
||||
with_items: "{{ heketi_volumes.stdout|from_json|json_query(\"volumes[*]\") }}"
|
||||
loop_control: { loop_var: "volume_id" }
|
||||
register: "volumes_information"
|
||||
- name: "Test heketi database volume."
|
||||
set_fact: { heketi_database_volume_created: true }
|
||||
with_items: "{{ volumes_information.results }}"
|
||||
loop_control: { loop_var: "volume_information" }
|
||||
vars: { volume: "{{ volume_information.stdout | from_json }}" }
|
||||
vars: { volume: "{{ volume_information.stdout|from_json }}" }
|
||||
when: "volume.name == 'heketidbstorage'"
|
||||
- name: "Ensure heketi database volume exists."
|
||||
assert: { that: "heketi_database_volume_created is defined", msg: "Heketi database volume does not exist." }
|
||||
|
||||
@@ -23,8 +23,8 @@
|
||||
changed_when: false
|
||||
vars:
|
||||
daemonset_state: { stdout: "{}" }
|
||||
ready: "{{ daemonset_state.stdout | from_json | json_query(\"status.numberReady\") }}"
|
||||
desired: "{{ daemonset_state.stdout | from_json | json_query(\"status.desiredNumberScheduled\") }}"
|
||||
ready: "{{ daemonset_state.stdout|from_json|json_query(\"status.numberReady\") }}"
|
||||
desired: "{{ daemonset_state.stdout|from_json|json_query(\"status.desiredNumberScheduled\") }}"
|
||||
until: "ready | int >= 3"
|
||||
retries: 60
|
||||
delay: 5
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
changed_when: false
|
||||
|
||||
- name: "Assign storage label"
|
||||
when: "label_present.stdout_lines | length == 0"
|
||||
when: "label_present.stdout_lines|length == 0"
|
||||
command: "{{ bin_dir }}/kubectl label node {{ node }} storagenode=glusterfs"
|
||||
|
||||
- name: Get storage nodes again
|
||||
@@ -15,5 +15,5 @@
|
||||
|
||||
- name: Ensure the label has been set
|
||||
assert:
|
||||
that: "label_present | length > 0"
|
||||
that: "label_present|length > 0"
|
||||
msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs."
|
||||
|
||||
@@ -24,11 +24,11 @@
|
||||
deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
|
||||
command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json"
|
||||
until:
|
||||
- "heketi_state.stdout | from_json | json_query(pods_query) == 'True'"
|
||||
- "heketi_state.stdout | from_json | json_query(deployments_query) == 'True'"
|
||||
- "heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
|
||||
- "heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
|
||||
retries: 60
|
||||
delay: 5
|
||||
|
||||
- name: Set the Heketi pod name
|
||||
set_fact:
|
||||
heketi_pod_name: "{{ heketi_state.stdout | from_json | json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"
|
||||
heketi_pod_name: "{{ heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
- name: "Render storage class configuration."
|
||||
become: true
|
||||
vars:
|
||||
endpoint_address: "{{ (heketi_service.stdout | from_json).spec.clusterIP }}"
|
||||
endpoint_address: "{{ (heketi_service.stdout|from_json).spec.clusterIP }}"
|
||||
template:
|
||||
src: "storageclass.yml.j2"
|
||||
dest: "{{ kube_config_dir }}/storageclass.yml"
|
||||
|
||||
@@ -11,16 +11,16 @@
|
||||
src: "topology.json.j2"
|
||||
dest: "{{ kube_config_dir }}/topology.json"
|
||||
mode: 0644
|
||||
- name: "Copy topology configuration into container." # noqa no-handler
|
||||
- name: "Copy topology configuration into container." # noqa 503
|
||||
when: "rendering.changed"
|
||||
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
|
||||
- name: "Load heketi topology." # noqa no-handler
|
||||
- name: "Load heketi topology." # noqa 503
|
||||
when: "rendering.changed"
|
||||
command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
|
||||
- name: "Get heketi topology."
|
||||
register: "heketi_topology"
|
||||
changed_when: false
|
||||
command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
||||
until: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\") | flatten | length == groups['heketi-node'] | length"
|
||||
until: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\")|flatten|length == groups['heketi-node']|length"
|
||||
retries: 60
|
||||
delay: 5
|
||||
|
||||
@@ -22,7 +22,7 @@
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
changed_when: false
|
||||
|
||||
- name: "Remove volume groups."
|
||||
- name: "Remove volume groups." # noqa 301
|
||||
environment:
|
||||
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
|
||||
become: true
|
||||
@@ -30,7 +30,7 @@
|
||||
with_items: "{{ volume_groups.stdout_lines }}"
|
||||
loop_control: { loop_var: "volume_group" }
|
||||
|
||||
- name: "Remove physical volume from cluster disks."
|
||||
- name: "Remove physical volume from cluster disks." # noqa 301
|
||||
environment:
|
||||
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
|
||||
become: true
|
||||
|
||||
@@ -1,43 +1,43 @@
|
||||
---
|
||||
- name: Remove storage class.
|
||||
- name: Remove storage class. # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl delete storageclass gluster"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Tear down heketi.
|
||||
- name: Tear down heketi. # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Tear down heketi.
|
||||
- name: Tear down heketi. # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Tear down bootstrap.
|
||||
include_tasks: "../../provision/tasks/bootstrap/tear-down.yml"
|
||||
- name: Ensure there is nothing left over.
|
||||
- name: Ensure there is nothing left over. # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
|
||||
register: "heketi_result"
|
||||
until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
|
||||
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
|
||||
retries: 60
|
||||
delay: 5
|
||||
- name: Ensure there is nothing left over.
|
||||
- name: Ensure there is nothing left over. # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
|
||||
register: "heketi_result"
|
||||
until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
|
||||
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
|
||||
retries: 60
|
||||
delay: 5
|
||||
- name: Tear down glusterfs.
|
||||
- name: Tear down glusterfs. # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Remove heketi storage service.
|
||||
- name: Remove heketi storage service. # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Remove heketi gluster role binding
|
||||
- name: Remove heketi gluster role binding # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Remove heketi config secret
|
||||
- name: Remove heketi config secret # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Remove heketi db backup
|
||||
- name: Remove heketi db backup # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Remove heketi service account
|
||||
- name: Remove heketi service account # noqa 301
|
||||
command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Get secrets
|
||||
@@ -46,6 +46,6 @@
|
||||
changed_when: false
|
||||
- name: Remove heketi storage secret
|
||||
vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" }
|
||||
command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout | from_json | json_query(storage_query) }}"
|
||||
command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout|from_json|json_query(storage_query) }}"
|
||||
when: "storage_query is defined"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
|
||||
@@ -5,9 +5,7 @@
|
||||
Container image collecting script for offline deployment
|
||||
|
||||
This script has two features:
|
||||
|
||||
(1) Get container images from an environment which is deployed online.
|
||||
|
||||
(2) Deploy local container registry and register the container images to the registry.
|
||||
|
||||
Step (1) should be done at an online site as preparation; then we bring the obtained images
|
||||
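For orientation, a minimal sketch of the intended two-step workflow, assuming the script lives in contrib/offline and that the tarball produced by the create step is copied to the offline host by hand (its exact file name is not shown here):

```bash
# On a host with internet access: pull the images and pack them into a tarball.
./manage-offline-container-images.sh create

# After copying the resulting tarball to the offline environment:
# load the images and push them into the local registry there.
./manage-offline-container-images.sh register
```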
@@ -29,7 +27,7 @@ manage-offline-container-images.sh register
|
||||
|
||||
## generate_list.sh
|
||||
|
||||
This script generates the list of downloaded files and the list of container images from the `roles/kubespray-defaults/defaults/main/download.yml` file.
|
||||
This script generates the list of downloaded files and the list of container images by `roles/download/defaults/main.yml` file.
|
||||
|
||||
Running this script will execute the `generate_list.yml` playbook in the kubespray root directory and generate four files:
all downloaded file URLs in files.list, all container images in images.list, and the jinja2 templates in *.template.
|
||||
|
||||
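A minimal usage sketch, with the output directory assumed from the TEMP_DIR definition in the script changes below:

```bash
# Run the script, then inspect the generated lists and templates.
./contrib/offline/generate_list.sh
ls ./contrib/offline/temp/
# files.list  files.list.template  images.list  images.list.template
```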
@@ -5,7 +5,7 @@ CURRENT_DIR=$(cd $(dirname $0); pwd)
|
||||
TEMP_DIR="${CURRENT_DIR}/temp"
|
||||
REPO_ROOT_DIR="${CURRENT_DIR%/contrib/offline}"
|
||||
|
||||
: ${DOWNLOAD_YML:="roles/kubespray-defaults/defaults/main/download.yml"}
|
||||
: ${DOWNLOAD_YML:="roles/download/defaults/main.yml"}
|
||||
|
||||
mkdir -p ${TEMP_DIR}
|
||||
|
||||
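The `: ${DOWNLOAD_YML:=...}` line above is the usual shell default-value idiom, so the source file can be overridden from the environment without editing the script; a small illustration (the alternate path is hypothetical):

```bash
# Use the built-in default ...
./generate_list.sh

# ... or point the script at another download.yml (hypothetical path).
DOWNLOAD_YML=my-fork/download.yml ./generate_list.sh
```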
@@ -19,7 +19,7 @@ sed -n '/^downloads:/,/download_defaults:/p' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
|
||||
| sed 'N;s#\n# #g' | tr ' ' ':' | sed 's/\"//g' > ${TEMP_DIR}/images.list.template
|
||||
|
||||
# add kube-* images to images list template
|
||||
# Those container images are downloaded by kubeadm, then roles/kubespray-defaults/defaults/main/download.yml
|
||||
# Those container images are downloaded by kubeadm, then roles/download/defaults/main.yml
|
||||
# doesn't contain those images. That is the reason why those images need to be put into the
# list separately.
|
||||
KUBE_IMAGES="kube-apiserver kube-controller-manager kube-scheduler kube-proxy"
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
---
|
||||
- name: Collect container images for offline deployment
|
||||
hosts: localhost
|
||||
- hosts: localhost
|
||||
become: no
|
||||
|
||||
roles:
|
||||
@@ -12,11 +11,9 @@
|
||||
|
||||
tasks:
|
||||
# Generate files.list and images.list files from templates.
|
||||
- name: Collect container images for offline deployment
|
||||
template:
|
||||
- template:
|
||||
src: ./contrib/offline/temp/{{ item }}.list.template
|
||||
dest: ./contrib/offline/temp/{{ item }}.list
|
||||
mode: 0644
|
||||
with_items:
|
||||
- files
|
||||
- images
|
||||
|
||||
@@ -23,8 +23,8 @@ function create_container_image_tar() {
|
||||
mkdir ${IMAGE_DIR}
|
||||
cd ${IMAGE_DIR}
|
||||
|
||||
sudo ${runtime} pull registry:latest
|
||||
sudo ${runtime} save -o registry-latest.tar registry:latest
|
||||
sudo docker pull registry:latest
|
||||
sudo docker save -o registry-latest.tar registry:latest
|
||||
|
||||
for image in ${IMAGES}
|
||||
do
|
||||
@@ -32,7 +32,7 @@ function create_container_image_tar() {
|
||||
set +e
|
||||
for step in $(seq 1 ${RETRY_COUNT})
|
||||
do
|
||||
sudo ${runtime} pull ${image}
|
||||
sudo docker pull ${image}
|
||||
if [ $? -eq 0 ]; then
|
||||
break
|
||||
fi
|
||||
@@ -42,7 +42,7 @@ function create_container_image_tar() {
|
||||
fi
|
||||
done
|
||||
set -e
|
||||
sudo ${runtime} save -o ${FILE_NAME} ${image}
|
||||
sudo docker save -o ${FILE_NAME} ${image}
|
||||
|
||||
# NOTE: Here removes the following repo parts from each image
|
||||
# so that these parts will be replaced with Kubespray.
|
||||
@@ -95,16 +95,16 @@ function register_container_images() {
|
||||
sed -i s@"HOSTNAME"@"${LOCALHOST_NAME}"@ ${TEMP_DIR}/registries.conf
|
||||
sudo cp ${TEMP_DIR}/registries.conf /etc/containers/registries.conf
|
||||
else
|
||||
echo "runtime package(docker-ce, podman, nerctl, etc.) should be installed"
|
||||
echo "docker package(docker-ce, etc.) should be installed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
tar -zxvf ${IMAGE_TAR_FILE}
|
||||
sudo ${runtime} load -i ${IMAGE_DIR}/registry-latest.tar
|
||||
sudo docker load -i ${IMAGE_DIR}/registry-latest.tar
|
||||
set +e
|
||||
sudo ${runtime} container inspect registry >/dev/null 2>&1
|
||||
sudo docker container inspect registry >/dev/null 2>&1
|
||||
if [ $? -ne 0 ]; then
|
||||
sudo ${runtime} run --restart=always -d -p 5000:5000 --name registry registry:latest
|
||||
sudo docker run --restart=always -d -p 5000:5000 --name registry registry:latest
|
||||
fi
|
||||
set -e
|
||||
|
||||
@@ -112,8 +112,8 @@ function register_container_images() {
|
||||
file_name=$(echo ${line} | awk '{print $1}')
|
||||
raw_image=$(echo ${line} | awk '{print $2}')
|
||||
new_image="${LOCALHOST_NAME}:5000/${raw_image}"
|
||||
org_image=$(sudo ${runtime} load -i ${IMAGE_DIR}/${file_name} | head -n1 | awk '{print $3}')
|
||||
image_id=$(sudo ${runtime} image inspect ${org_image} | grep "\"Id\":" | awk -F: '{print $3}'| sed s/'\",'//)
|
||||
org_image=$(sudo docker load -i ${IMAGE_DIR}/${file_name} | head -n1 | awk '{print $3}')
|
||||
image_id=$(sudo docker image inspect ${org_image} | grep "\"Id\":" | awk -F: '{print $3}'| sed s/'\",'//)
|
||||
if [ -z "${file_name}" ]; then
|
||||
echo "Failed to get file_name for line ${line}"
|
||||
exit 1
|
||||
@@ -130,9 +130,9 @@ function register_container_images() {
|
||||
echo "Failed to get image_id for file ${file_name}"
|
||||
exit 1
|
||||
fi
|
||||
sudo ${runtime} load -i ${IMAGE_DIR}/${file_name}
|
||||
sudo ${runtime} tag ${image_id} ${new_image}
|
||||
sudo ${runtime} push ${new_image}
|
||||
sudo docker load -i ${IMAGE_DIR}/${file_name}
|
||||
sudo docker tag ${image_id} ${new_image}
|
||||
sudo docker push ${new_image}
|
||||
done <<< "$(cat ${IMAGE_LIST})"
|
||||
|
||||
echo "Succeeded to register container images to local registry."
|
||||
@@ -143,18 +143,6 @@ function register_container_images() {
|
||||
echo "- quay_image_repo"
|
||||
}
|
||||
|
||||
# get runtime command
|
||||
if command -v nerdctl 1>/dev/null 2>&1; then
|
||||
runtime="nerdctl"
|
||||
elif command -v podman 1>/dev/null 2>&1; then
|
||||
runtime="podman"
|
||||
elif command -v docker 1>/dev/null 2>&1; then
|
||||
runtime="docker"
|
||||
else
|
||||
echo "No supported container runtime found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "${OPTION}" == "create" ]; then
|
||||
create_container_image_tar
|
||||
elif [ "${OPTION}" == "register" ]; then
|
||||
|
||||
@@ -38,7 +38,7 @@ sudo "${runtime}" container inspect nginx >/dev/null 2>&1
|
||||
if [ $? -ne 0 ]; then
|
||||
sudo "${runtime}" run \
|
||||
--restart=always -d -p ${NGINX_PORT}:80 \
|
||||
--volume "${OFFLINE_FILES_DIR}":/usr/share/nginx/html/download \
|
||||
--volume "${CURRENT_DIR}"/nginx.conf:/etc/nginx/nginx.conf \
|
||||
--volume "${OFFLINE_FILES_DIR}:/usr/share/nginx/html/download" \
|
||||
--volume "$(pwd)"/nginx.conf:/etc/nginx/nginx.conf \
|
||||
--name nginx nginx:alpine
|
||||
fi
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
---
|
||||
- name: Disable firewalld/ufw
|
||||
hosts: all
|
||||
- hosts: all
|
||||
roles:
|
||||
- { role: prepare }
|
||||
|
||||
@@ -1,8 +1,5 @@
|
||||
---
|
||||
- name: Disable firewalld and ufw
|
||||
when:
|
||||
- disable_service_firewall is defined and disable_service_firewall
|
||||
block:
|
||||
- block:
|
||||
- name: List services
|
||||
service_facts:
|
||||
|
||||
@@ -12,7 +9,7 @@
|
||||
state: stopped
|
||||
enabled: no
|
||||
when:
|
||||
"'firewalld.service' in services and services['firewalld.service'].status != 'not-found'"
|
||||
"'firewalld.service' in services"
|
||||
|
||||
- name: Disable service ufw
|
||||
systemd:
|
||||
@@ -20,4 +17,7 @@
|
||||
state: stopped
|
||||
enabled: no
|
||||
when:
|
||||
"'ufw.service' in services and services['ufw.service'].status != 'not-found'"
|
||||
"'ufw.service' in services"
|
||||
|
||||
when:
|
||||
- disable_service_firewall is defined and disable_service_firewall
|
||||
|
||||
@@ -50,32 +50,70 @@ Example (this one assumes you are using Ubuntu)
|
||||
ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=ubuntu -b --become-user=root --flush-cache
|
||||
```
|
||||
|
||||
## Using other distributions than Ubuntu
|
||||
***Using other distrib than Ubuntu***
|
||||
If you want to use another distribution than Ubuntu 18.04 (Bionic) LTS, you can modify the search filters of the 'data "aws_ami" "distro"' in variables.tf.
|
||||
|
||||
To leverage a Linux distribution other than Ubuntu 18.04 (Bionic) LTS for your Terraform configurations, you can adjust the AMI search filters within the 'data "aws_ami" "distro"' block by utilizing variables in your `terraform.tfvars` file. This approach ensures a flexible configuration that adapts to various Linux distributions without directly modifying the core Terraform files.
|
||||
For example, to use:
|
||||
|
||||
### Example Usages
|
||||
- Debian Jessie, replace 'data "aws_ami" "distro"' in variables.tf with
|
||||
|
||||
- **Debian Jessie**: To configure the usage of Debian Jessie, insert the subsequent lines into your `terraform.tfvars`:
|
||||
```ini
|
||||
data "aws_ami" "distro" {
|
||||
most_recent = true
|
||||
|
||||
```hcl
|
||||
ami_name_pattern = "debian-jessie-amd64-hvm-*"
|
||||
ami_owners = ["379101102735"]
|
||||
```
|
||||
filter {
|
||||
name = "name"
|
||||
values = ["debian-jessie-amd64-hvm-*"]
|
||||
}
|
||||
|
||||
- **Ubuntu 16.04**: To utilize Ubuntu 16.04 instead, apply the following configuration in your `terraform.tfvars`:
|
||||
filter {
|
||||
name = "virtualization-type"
|
||||
values = ["hvm"]
|
||||
}
|
||||
|
||||
```hcl
|
||||
ami_name_pattern = "ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-*"
|
||||
ami_owners = ["099720109477"]
|
||||
```
|
||||
owners = ["379101102735"]
|
||||
}
|
||||
```
|
||||
|
||||
- **Centos 7**: For employing Centos 7, incorporate these lines into your `terraform.tfvars`:
|
||||
- Ubuntu 16.04, replace 'data "aws_ami" "distro"' in variables.tf with
|
||||
|
||||
```hcl
|
||||
ami_name_pattern = "dcos-centos7-*"
|
||||
ami_owners = ["688023202711"]
|
||||
```
|
||||
```ini
|
||||
data "aws_ami" "distro" {
|
||||
most_recent = true
|
||||
|
||||
filter {
|
||||
name = "name"
|
||||
values = ["ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-*"]
|
||||
}
|
||||
|
||||
filter {
|
||||
name = "virtualization-type"
|
||||
values = ["hvm"]
|
||||
}
|
||||
|
||||
owners = ["099720109477"]
|
||||
}
|
||||
```
|
||||
|
||||
- Centos 7, replace 'data "aws_ami" "distro"' in variables.tf with
|
||||
|
||||
```ini
|
||||
data "aws_ami" "distro" {
|
||||
most_recent = true
|
||||
|
||||
filter {
|
||||
name = "name"
|
||||
values = ["dcos-centos7-*"]
|
||||
}
|
||||
|
||||
filter {
|
||||
name = "virtualization-type"
|
||||
values = ["hvm"]
|
||||
}
|
||||
|
||||
owners = ["688023202711"]
|
||||
}
|
||||
```
|
||||
|
||||
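Whichever distribution block is chosen, the overrides only take effect once Terraform re-reads the `aws_ami` data source, so re-run the usual plan/apply cycle after editing `terraform.tfvars`; a minimal sketch:

```bash
# terraform.tfvars is loaded automatically; if the resolved AMI changes,
# the plan will show instance replacements before anything is applied.
terraform plan
terraform apply
```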
## Connecting to Kubernetes
|
||||
|
||||
|
||||
@@ -20,38 +20,20 @@ variable "aws_cluster_name" {
|
||||
description = "Name of AWS Cluster"
|
||||
}
|
||||
|
||||
variable "ami_name_pattern" {
|
||||
description = "The name pattern to use for AMI lookup"
|
||||
type = string
|
||||
default = "debian-10-amd64-*"
|
||||
}
|
||||
|
||||
variable "ami_virtualization_type" {
|
||||
description = "The virtualization type to use for AMI lookup"
|
||||
type = string
|
||||
default = "hvm"
|
||||
}
|
||||
|
||||
variable "ami_owners" {
|
||||
description = "The owners to use for AMI lookup"
|
||||
type = list(string)
|
||||
default = ["136693071363"]
|
||||
}
|
||||
|
||||
data "aws_ami" "distro" {
|
||||
most_recent = true
|
||||
|
||||
filter {
|
||||
name = "name"
|
||||
values = [var.ami_name_pattern]
|
||||
values = ["debian-10-amd64-*"]
|
||||
}
|
||||
|
||||
filter {
|
||||
name = "virtualization-type"
|
||||
values = [var.ami_virtualization_type]
|
||||
values = ["hvm"]
|
||||
}
|
||||
|
||||
owners = var.ami_owners
|
||||
owners = ["136693071363"] # Debian-10
|
||||
}
|
||||
|
||||
//AWS VPC Variables
|
||||
|
||||
@@ -7,7 +7,7 @@ terraform {
|
||||
required_providers {
|
||||
equinix = {
|
||||
source = "equinix/equinix"
|
||||
version = "1.24.0"
|
||||
version = "~> 1.14"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
contrib/terraform/nifcloud/.gitignore
@@ -1,5 +0,0 @@
|
||||
*.tfstate*
|
||||
.terraform.lock.hcl
|
||||
.terraform
|
||||
|
||||
sample-inventory/inventory.ini
|
||||
@@ -1,137 +0,0 @@
|
||||
# Kubernetes on NIFCLOUD with Terraform
|
||||
|
||||
Provision a Kubernetes cluster on [NIFCLOUD](https://pfs.nifcloud.com/) using Terraform and Kubespray
|
||||
|
||||
## Overview
|
||||
|
||||
The setup looks like following
|
||||
|
||||
```text
|
||||
Kubernetes cluster
|
||||
+----------------------------+
|
||||
+---------------+ | +--------------------+ |
|
||||
| | | | +--------------------+ |
|
||||
| API server LB +---------> | | | |
|
||||
| | | | | Control Plane/etcd | |
|
||||
+---------------+ | | | node(s) | |
|
||||
| +-+ | |
|
||||
| +--------------------+ |
|
||||
| ^ |
|
||||
| | |
|
||||
| v |
|
||||
| +--------------------+ |
|
||||
| | +--------------------+ |
|
||||
| | | | |
|
||||
| | | Worker | |
|
||||
| | | node(s) | |
|
||||
| +-+ | |
|
||||
| +--------------------+ |
|
||||
+----------------------------+
|
||||
```
|
||||
|
||||
## Requirements
|
||||
|
||||
* Terraform 1.3.7
|
||||
|
||||
## Quickstart
|
||||
|
||||
### Export Variables
|
||||
|
||||
* Your NIFCLOUD credentials:
|
||||
|
||||
```bash
|
||||
export NIFCLOUD_ACCESS_KEY_ID=<YOUR ACCESS KEY>
|
||||
export NIFCLOUD_SECRET_ACCESS_KEY=<YOUR SECRET ACCESS KEY>
|
||||
```
|
||||
|
||||
* The SSH KEY used to connect to the instance:
|
||||
* FYI: [Cloud Help(SSH Key)](https://pfs.nifcloud.com/help/ssh.htm)
|
||||
|
||||
```bash
|
||||
export TF_VAR_SSHKEY_NAME=<YOUR SSHKEY NAME>
|
||||
```
|
||||
|
||||
* The IP address to connect to bastion server:
|
||||
|
||||
```bash
|
||||
export TF_VAR_working_instance_ip=$(curl ifconfig.me)
|
||||
```
|
||||
|
||||
### Create The Infrastructure
|
||||
|
||||
* Run terraform:
|
||||
|
||||
```bash
|
||||
terraform init
|
||||
terraform apply -var-file ./sample-inventory/cluster.tfvars
|
||||
```
|
||||
|
||||
### Setup The Kubernetes
|
||||
|
||||
* Generate cluster configuration file:
|
||||
|
||||
```bash
|
||||
./generate-inventory.sh > sample-inventory/inventory.ini
```
|
||||
|
||||
* Export Variables:
|
||||
|
||||
```bash
|
||||
BASTION_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.bastion_info | to_entries[].value.public_ip')
|
||||
API_LB_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.control_plane_lb')
|
||||
CP01_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.control_plane_info | to_entries[0].value.private_ip')
|
||||
export ANSIBLE_SSH_ARGS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ProxyCommand=\"ssh root@${BASTION_IP} -W %h:%p\""
|
||||
```
|
||||
|
||||
* Set ssh-agent:
|
||||
|
||||
```bash
|
||||
eval `ssh-agent`
|
||||
ssh-add <THE PATH TO YOUR SSH KEY>
|
||||
```
|
||||
|
||||
* Run cluster.yml playbook:
|
||||
|
||||
```bash
|
||||
cd ./../../../
|
||||
ansible-playbook -i contrib/terraform/nifcloud/inventory/inventory.ini cluster.yml
|
||||
```
|
||||
|
||||
### Connecting to Kubernetes
|
||||
|
||||
* [Install kubectl](https://kubernetes.io/docs/tasks/tools/) on the localhost
|
||||
* Fetching kubeconfig file:
|
||||
|
||||
```bash
|
||||
mkdir -p ~/.kube
|
||||
scp -o ProxyCommand="ssh root@${BASTION_IP} -W %h:%p" root@${CP01_IP}:/etc/kubernetes/admin.conf ~/.kube/config
|
||||
```
|
||||
|
||||
* Rewrite /etc/hosts
|
||||
|
||||
```bash
|
||||
sudo echo "${API_LB_IP} lb-apiserver.kubernetes.local" >> /etc/hosts
|
||||
```
|
||||
|
||||
* Run kubectl
|
||||
|
||||
```bash
|
||||
kubectl get node
|
||||
```
|
||||
|
||||
## Variables
|
||||
|
||||
* `region`: Region where to run the cluster
|
||||
* `az`: Availability zone where to run the cluster
|
||||
* `private_ip_bn`: Private ip address of bastion server
|
||||
* `private_network_cidr`: Subnet of private network
|
||||
* `instances_cp`: Machine to provision as Control Plane. The key of this object will be used as part of the machine's name
|
||||
* `private_ip`: private ip address of machine
|
||||
* `instances_wk`: Machine to provision as Worker Node. The key of this object will be used as part of the machine's name
|
||||
* `private_ip`: private ip address of machine
|
||||
* `instance_key_name`: The key name of the Key Pair to use for the instance
|
||||
* `instance_type_bn`: The instance type of bastion server
|
||||
* `instance_type_wk`: The instance type of worker node
|
||||
* `instance_type_cp`: The instance type of control plane
|
||||
* `image_name`: OS image used for the instance
|
||||
* `working_instance_ip`: The IP address to connect to bastion server
|
||||
* `accounting_type`: Accounting type. (1: monthly, 2: pay per use)
|
||||
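Any of the variables above can be supplied in the tfvars file passed to `terraform apply`, or exported as `TF_VAR_`-prefixed environment variables the way the Quickstart already does for `SSHKEY_NAME` and `working_instance_ip`; a small sketch with placeholder values (the image name is an assumption, region/az follow the sample tfvars shown later):

```bash
export TF_VAR_region="jp-west-1"
export TF_VAR_az="west-11"
export TF_VAR_image_name="Ubuntu Server 22.04 LTS"   # assumption: any image name available on NIFCLOUD
terraform apply -var-file ./sample-inventory/cluster.tfvars
```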
@@ -1,64 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
#
|
||||
# Generates an inventory file based on the terraform output.
|
||||
# After provisioning a cluster, simply run this command and supply the terraform state file
|
||||
# Default state file is terraform.tfstate
|
||||
#
|
||||
|
||||
set -e
|
||||
|
||||
TF_OUT=$(terraform output -json)
|
||||
|
||||
CONTROL_PLANES=$(jq -r '.kubernetes_cluster.value.control_plane_info | to_entries[]' <(echo "${TF_OUT}"))
|
||||
WORKERS=$(jq -r '.kubernetes_cluster.value.worker_info | to_entries[]' <(echo "${TF_OUT}"))
|
||||
mapfile -t CONTROL_PLANE_NAMES < <(jq -r '.key' <(echo "${CONTROL_PLANES}"))
|
||||
mapfile -t WORKER_NAMES < <(jq -r '.key' <(echo "${WORKERS}"))
|
||||
|
||||
API_LB=$(jq -r '.kubernetes_cluster.value.control_plane_lb' <(echo "${TF_OUT}"))
|
||||
|
||||
echo "[all]"
|
||||
# Generate control plane hosts
|
||||
i=1
|
||||
for name in "${CONTROL_PLANE_NAMES[@]}"; do
|
||||
private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${CONTROL_PLANES}"))
|
||||
echo "${name} ansible_user=root ansible_host=${private_ip} access_ip=${private_ip} ip=${private_ip} etcd_member_name=etcd${i}"
|
||||
i=$(( i + 1 ))
|
||||
done
|
||||
|
||||
# Generate worker hosts
|
||||
for name in "${WORKER_NAMES[@]}"; do
|
||||
private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${WORKERS}"))
|
||||
echo "${name} ansible_user=root ansible_host=${private_ip} access_ip=${private_ip} ip=${private_ip}"
|
||||
done
|
||||
|
||||
API_LB=$(jq -r '.kubernetes_cluster.value.control_plane_lb' <(echo "${TF_OUT}"))
|
||||
|
||||
echo ""
|
||||
echo "[all:vars]"
|
||||
echo "upstream_dns_servers=['8.8.8.8','8.8.4.4']"
|
||||
echo "loadbalancer_apiserver={'address':'${API_LB}','port':'6443'}"
|
||||
|
||||
|
||||
echo ""
|
||||
echo "[kube_control_plane]"
|
||||
for name in "${CONTROL_PLANE_NAMES[@]}"; do
|
||||
echo "${name}"
|
||||
done
|
||||
|
||||
echo ""
|
||||
echo "[etcd]"
|
||||
for name in "${CONTROL_PLANE_NAMES[@]}"; do
|
||||
echo "${name}"
|
||||
done
|
||||
|
||||
echo ""
|
||||
echo "[kube_node]"
|
||||
for name in "${WORKER_NAMES[@]}"; do
|
||||
echo "${name}"
|
||||
done
|
||||
|
||||
echo ""
|
||||
echo "[k8s_cluster:children]"
|
||||
echo "kube_control_plane"
|
||||
echo "kube_node"
|
||||
@@ -1,36 +0,0 @@
|
||||
provider "nifcloud" {
|
||||
region = var.region
|
||||
}
|
||||
|
||||
module "kubernetes_cluster" {
|
||||
source = "./modules/kubernetes-cluster"
|
||||
|
||||
availability_zone = var.az
|
||||
prefix = "dev"
|
||||
|
||||
private_network_cidr = var.private_network_cidr
|
||||
|
||||
instance_key_name = var.instance_key_name
|
||||
instances_cp = var.instances_cp
|
||||
instances_wk = var.instances_wk
|
||||
image_name = var.image_name
|
||||
|
||||
instance_type_bn = var.instance_type_bn
|
||||
instance_type_cp = var.instance_type_cp
|
||||
instance_type_wk = var.instance_type_wk
|
||||
|
||||
private_ip_bn = var.private_ip_bn
|
||||
|
||||
additional_lb_filter = [var.working_instance_ip]
|
||||
}
|
||||
|
||||
resource "nifcloud_security_group_rule" "ssh_from_bastion" {
|
||||
security_group_names = [
|
||||
module.kubernetes_cluster.security_group_name.bastion
|
||||
]
|
||||
type = "IN"
|
||||
from_port = 22
|
||||
to_port = 22
|
||||
protocol = "TCP"
|
||||
cidr_ip = var.working_instance_ip
|
||||
}
|
||||
@@ -1,301 +0,0 @@
|
||||
#################################################
|
||||
##
|
||||
## Local variables
|
||||
##
|
||||
locals {
|
||||
# e.g. east-11 is 11
|
||||
az_num = reverse(split("-", var.availability_zone))[0]
|
||||
# e.g. east-11 is e11
|
||||
az_short_name = "${substr(reverse(split("-", var.availability_zone))[1], 0, 1)}${local.az_num}"
|
||||
|
||||
# Port used by the protocol
|
||||
port_ssh = 22
|
||||
port_kubectl = 6443
|
||||
port_kubelet = 10250
|
||||
|
||||
# calico: https://docs.tigera.io/calico/latest/getting-started/kubernetes/requirements#network-requirements
|
||||
port_bgp = 179
|
||||
port_vxlan = 4789
|
||||
port_etcd = 2379
|
||||
}
|
||||
|
||||
#################################################
|
||||
##
|
||||
## General
|
||||
##
|
||||
|
||||
# data
|
||||
data "nifcloud_image" "this" {
|
||||
image_name = var.image_name
|
||||
}
|
||||
|
||||
# private lan
|
||||
resource "nifcloud_private_lan" "this" {
|
||||
private_lan_name = "${var.prefix}lan"
|
||||
availability_zone = var.availability_zone
|
||||
cidr_block = var.private_network_cidr
|
||||
accounting_type = var.accounting_type
|
||||
}
|
||||
|
||||
#################################################
|
||||
##
|
||||
## Bastion
|
||||
##
|
||||
resource "nifcloud_security_group" "bn" {
|
||||
group_name = "${var.prefix}bn"
|
||||
description = "${var.prefix} bastion"
|
||||
availability_zone = var.availability_zone
|
||||
}
|
||||
|
||||
resource "nifcloud_instance" "bn" {
|
||||
|
||||
instance_id = "${local.az_short_name}${var.prefix}bn01"
|
||||
security_group = nifcloud_security_group.bn.group_name
|
||||
instance_type = var.instance_type_bn
|
||||
|
||||
user_data = templatefile("${path.module}/templates/userdata.tftpl", {
|
||||
private_ip_address = var.private_ip_bn
|
||||
ssh_port = local.port_ssh
|
||||
hostname = "${local.az_short_name}${var.prefix}bn01"
|
||||
})
|
||||
|
||||
availability_zone = var.availability_zone
|
||||
accounting_type = var.accounting_type
|
||||
image_id = data.nifcloud_image.this.image_id
|
||||
key_name = var.instance_key_name
|
||||
|
||||
network_interface {
|
||||
network_id = "net-COMMON_GLOBAL"
|
||||
}
|
||||
network_interface {
|
||||
network_id = nifcloud_private_lan.this.network_id
|
||||
ip_address = "static"
|
||||
}
|
||||
|
||||
# The image_id changes when the OS image type is demoted from standard to public.
|
||||
lifecycle {
|
||||
ignore_changes = [
|
||||
image_id,
|
||||
user_data,
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
#################################################
|
||||
##
|
||||
## Control Plane
|
||||
##
|
||||
resource "nifcloud_security_group" "cp" {
|
||||
group_name = "${var.prefix}cp"
|
||||
description = "${var.prefix} control plane"
|
||||
availability_zone = var.availability_zone
|
||||
}
|
||||
|
||||
resource "nifcloud_instance" "cp" {
|
||||
for_each = var.instances_cp
|
||||
|
||||
instance_id = "${local.az_short_name}${var.prefix}${each.key}"
|
||||
security_group = nifcloud_security_group.cp.group_name
|
||||
instance_type = var.instance_type_cp
|
||||
user_data = templatefile("${path.module}/templates/userdata.tftpl", {
|
||||
private_ip_address = each.value.private_ip
|
||||
ssh_port = local.port_ssh
|
||||
hostname = "${local.az_short_name}${var.prefix}${each.key}"
|
||||
})
|
||||
|
||||
availability_zone = var.availability_zone
|
||||
accounting_type = var.accounting_type
|
||||
image_id = data.nifcloud_image.this.image_id
|
||||
key_name = var.instance_key_name
|
||||
|
||||
network_interface {
|
||||
network_id = "net-COMMON_GLOBAL"
|
||||
}
|
||||
network_interface {
|
||||
network_id = nifcloud_private_lan.this.network_id
|
||||
ip_address = "static"
|
||||
}
|
||||
|
||||
# The image_id changes when the OS image type is demoted from standard to public.
|
||||
lifecycle {
|
||||
ignore_changes = [
|
||||
image_id,
|
||||
user_data,
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
resource "nifcloud_load_balancer" "this" {
|
||||
load_balancer_name = "${local.az_short_name}${var.prefix}cp"
|
||||
accounting_type = var.accounting_type
|
||||
balancing_type = 1 // Round-Robin
|
||||
load_balancer_port = local.port_kubectl
|
||||
instance_port = local.port_kubectl
|
||||
instances = [for v in nifcloud_instance.cp : v.instance_id]
|
||||
filter = concat(
|
||||
[for k, v in nifcloud_instance.cp : v.public_ip],
|
||||
[for k, v in nifcloud_instance.wk : v.public_ip],
|
||||
var.additional_lb_filter,
|
||||
)
|
||||
filter_type = 1 // Allow
|
||||
}
|
||||
|
||||
#################################################
|
||||
##
|
||||
## Worker
|
||||
##
|
||||
resource "nifcloud_security_group" "wk" {
|
||||
group_name = "${var.prefix}wk"
|
||||
description = "${var.prefix} worker"
|
||||
availability_zone = var.availability_zone
|
||||
}
|
||||
|
||||
resource "nifcloud_instance" "wk" {
|
||||
for_each = var.instances_wk
|
||||
|
||||
instance_id = "${local.az_short_name}${var.prefix}${each.key}"
|
||||
security_group = nifcloud_security_group.wk.group_name
|
||||
instance_type = var.instance_type_wk
|
||||
user_data = templatefile("${path.module}/templates/userdata.tftpl", {
|
||||
private_ip_address = each.value.private_ip
|
||||
ssh_port = local.port_ssh
|
||||
hostname = "${local.az_short_name}${var.prefix}${each.key}"
|
||||
})
|
||||
|
||||
availability_zone = var.availability_zone
|
||||
accounting_type = var.accounting_type
|
||||
image_id = data.nifcloud_image.this.image_id
|
||||
key_name = var.instance_key_name
|
||||
|
||||
network_interface {
|
||||
network_id = "net-COMMON_GLOBAL"
|
||||
}
|
||||
network_interface {
|
||||
network_id = nifcloud_private_lan.this.network_id
|
||||
ip_address = "static"
|
||||
}
|
||||
|
||||
# The image_id changes when the OS image type is demoted from standard to public.
|
||||
lifecycle {
|
||||
ignore_changes = [
|
||||
image_id,
|
||||
user_data,
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
#################################################
|
||||
##
|
||||
## Security Group Rule: Kubernetes
|
||||
##
|
||||
|
||||
# ssh
|
||||
resource "nifcloud_security_group_rule" "ssh_from_bastion" {
|
||||
security_group_names = [
|
||||
nifcloud_security_group.wk.group_name,
|
||||
nifcloud_security_group.cp.group_name,
|
||||
]
|
||||
type = "IN"
|
||||
from_port = local.port_ssh
|
||||
to_port = local.port_ssh
|
||||
protocol = "TCP"
|
||||
source_security_group_name = nifcloud_security_group.bn.group_name
|
||||
}
|
||||
|
||||
# kubectl
|
||||
resource "nifcloud_security_group_rule" "kubectl_from_worker" {
|
||||
security_group_names = [
|
||||
nifcloud_security_group.cp.group_name,
|
||||
]
|
||||
type = "IN"
|
||||
from_port = local.port_kubectl
|
||||
to_port = local.port_kubectl
|
||||
protocol = "TCP"
|
||||
source_security_group_name = nifcloud_security_group.wk.group_name
|
||||
}
|
||||
|
||||
# kubelet
|
||||
resource "nifcloud_security_group_rule" "kubelet_from_worker" {
|
||||
security_group_names = [
|
||||
nifcloud_security_group.cp.group_name,
|
||||
]
|
||||
type = "IN"
|
||||
from_port = local.port_kubelet
|
||||
to_port = local.port_kubelet
|
||||
protocol = "TCP"
|
||||
source_security_group_name = nifcloud_security_group.wk.group_name
|
||||
}
|
||||
|
||||
resource "nifcloud_security_group_rule" "kubelet_from_control_plane" {
|
||||
security_group_names = [
|
||||
nifcloud_security_group.wk.group_name,
|
||||
]
|
||||
type = "IN"
|
||||
from_port = local.port_kubelet
|
||||
to_port = local.port_kubelet
|
||||
protocol = "TCP"
|
||||
source_security_group_name = nifcloud_security_group.cp.group_name
|
||||
}
|
||||
|
||||
#################################################
|
||||
##
|
||||
## Security Group Rule: calico
|
||||
##
|
||||
|
||||
# vslan
|
||||
resource "nifcloud_security_group_rule" "vxlan_from_control_plane" {
|
||||
security_group_names = [
|
||||
nifcloud_security_group.wk.group_name,
|
||||
]
|
||||
type = "IN"
|
||||
from_port = local.port_vxlan
|
||||
to_port = local.port_vxlan
|
||||
protocol = "UDP"
|
||||
source_security_group_name = nifcloud_security_group.cp.group_name
|
||||
}
|
||||
|
||||
resource "nifcloud_security_group_rule" "vxlan_from_worker" {
|
||||
security_group_names = [
|
||||
nifcloud_security_group.cp.group_name,
|
||||
]
|
||||
type = "IN"
|
||||
from_port = local.port_vxlan
|
||||
to_port = local.port_vxlan
|
||||
protocol = "UDP"
|
||||
source_security_group_name = nifcloud_security_group.wk.group_name
|
||||
}
|
||||
|
||||
# bgp
|
||||
resource "nifcloud_security_group_rule" "bgp_from_control_plane" {
|
||||
security_group_names = [
|
||||
nifcloud_security_group.wk.group_name,
|
||||
]
|
||||
type = "IN"
|
||||
from_port = local.port_bgp
|
||||
to_port = local.port_bgp
|
||||
protocol = "TCP"
|
||||
source_security_group_name = nifcloud_security_group.cp.group_name
|
||||
}
|
||||
|
||||
resource "nifcloud_security_group_rule" "bgp_from_worker" {
|
||||
security_group_names = [
|
||||
nifcloud_security_group.cp.group_name,
|
||||
]
|
||||
type = "IN"
|
||||
from_port = local.port_bgp
|
||||
to_port = local.port_bgp
|
||||
protocol = "TCP"
|
||||
source_security_group_name = nifcloud_security_group.wk.group_name
|
||||
}
|
||||
|
||||
# etcd
|
||||
resource "nifcloud_security_group_rule" "etcd_from_worker" {
|
||||
security_group_names = [
|
||||
nifcloud_security_group.cp.group_name,
|
||||
]
|
||||
type = "IN"
|
||||
from_port = local.port_etcd
|
||||
to_port = local.port_etcd
|
||||
protocol = "TCP"
|
||||
source_security_group_name = nifcloud_security_group.wk.group_name
|
||||
}
|
||||
@@ -1,48 +0,0 @@
|
||||
output "control_plane_lb" {
|
||||
description = "The DNS name of LB for control plane"
|
||||
value = nifcloud_load_balancer.this.dns_name
|
||||
}
|
||||
|
||||
output "security_group_name" {
|
||||
description = "The security group used in the cluster"
|
||||
value = {
|
||||
bastion = nifcloud_security_group.bn.group_name,
|
||||
control_plane = nifcloud_security_group.cp.group_name,
|
||||
worker = nifcloud_security_group.wk.group_name,
|
||||
}
|
||||
}
|
||||
|
||||
output "private_network_id" {
|
||||
description = "The private network used in the cluster"
|
||||
value = nifcloud_private_lan.this.id
|
||||
}
|
||||
|
||||
output "bastion_info" {
|
||||
description = "The bastion information in cluster"
|
||||
value = { (nifcloud_instance.bn.instance_id) : {
|
||||
instance_id = nifcloud_instance.bn.instance_id,
|
||||
unique_id = nifcloud_instance.bn.unique_id,
|
||||
private_ip = nifcloud_instance.bn.private_ip,
|
||||
public_ip = nifcloud_instance.bn.public_ip,
|
||||
} }
|
||||
}
|
||||
|
||||
output "worker_info" {
|
||||
description = "The worker information in cluster"
|
||||
value = { for v in nifcloud_instance.wk : v.instance_id => {
|
||||
instance_id = v.instance_id,
|
||||
unique_id = v.unique_id,
|
||||
private_ip = v.private_ip,
|
||||
public_ip = v.public_ip,
|
||||
} }
|
||||
}
|
||||
|
||||
output "control_plane_info" {
|
||||
description = "The control plane information in cluster"
|
||||
value = { for v in nifcloud_instance.cp : v.instance_id => {
|
||||
instance_id = v.instance_id,
|
||||
unique_id = v.unique_id,
|
||||
private_ip = v.private_ip,
|
||||
public_ip = v.public_ip,
|
||||
} }
|
||||
}
|
||||
@@ -1,45 +0,0 @@
#!/bin/bash

#################################################
##
## IP Address
##
configure_private_ip_address () {
  cat << EOS > /etc/netplan/01-netcfg.yaml
network:
  version: 2
  renderer: networkd
  ethernets:
    ens192:
      dhcp4: yes
      dhcp6: yes
      dhcp-identifier: mac
    ens224:
      dhcp4: no
      dhcp6: no
      addresses: [${private_ip_address}]
EOS
  netplan apply
}
configure_private_ip_address

#################################################
##
## SSH
##
configure_ssh_port () {
  sed -i 's/^#*Port [0-9]*/Port ${ssh_port}/' /etc/ssh/sshd_config
}
configure_ssh_port

#################################################
##
## Hostname
##
hostnamectl set-hostname ${hostname}

#################################################
##
## Disable swap files generated by systemd-gpt-auto-generator
##
systemctl mask "dev-sda3.swap"
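The `${private_ip_address}`, `${ssh_port}` and `${hostname}` placeholders above are Terraform template variables, so this script is evidently rendered with `templatefile()` before being passed as userdata. A minimal sketch of such a call, assuming a hypothetical template file name and illustrative values:

```hcl
# Sketch only: the real module renders this script elsewhere; the file name and values are assumptions.
locals {
  bastion_userdata = templatefile("${path.module}/templates/userdata.sh.tpl", {
    private_ip_address = "192.168.30.10/24" # same shape as the sample tfvars shown later in this diff
    ssh_port           = 22                 # assumed value
    hostname           = "bastion"          # assumed value
  })
}
```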
@@ -1,9 +0,0 @@
terraform {
  required_version = ">=1.3.7"
  required_providers {
    nifcloud = {
      source  = "nifcloud/nifcloud"
      version = ">= 1.8.0, < 2.0.0"
    }
  }
}
@@ -1,81 +0,0 @@
variable "availability_zone" {
  description = "The availability zone"
  type        = string
}

variable "prefix" {
  description = "The prefix for the entire cluster"
  type        = string
  validation {
    condition     = length(var.prefix) <= 5
    error_message = "Must be at most 5 characters long."
  }
}

variable "private_network_cidr" {
  description = "The subnet of private network"
  type        = string
  validation {
    condition     = can(cidrnetmask(var.private_network_cidr))
    error_message = "Must be a valid IPv4 CIDR block address."
  }
}

variable "private_ip_bn" {
  description = "Private IP of bastion server"
  type        = string
}

variable "instances_cp" {
  type = map(object({
    private_ip = string
  }))
}

variable "instances_wk" {
  type = map(object({
    private_ip = string
  }))
}

variable "instance_key_name" {
  description = "The key name of the Key Pair to use for the instance"
  type        = string
}

variable "instance_type_bn" {
  description = "The instance type of bastion server"
  type        = string
}

variable "instance_type_wk" {
  description = "The instance type of worker"
  type        = string
}

variable "instance_type_cp" {
  description = "The instance type of control plane"
  type        = string
}

variable "image_name" {
  description = "The name of image"
  type        = string
}

variable "additional_lb_filter" {
  description = "Additional LB filter"
  type        = list(string)
}

variable "accounting_type" {
  type    = string
  default = "1"
  validation {
    condition = anytrue([
      var.accounting_type == "1", // Monthly
      var.accounting_type == "2", // Pay per use
    ])
    error_message = "Must be 1 or 2."
  }
}
@@ -1,3 +0,0 @@
output "kubernetes_cluster" {
  value = module.kubernetes_cluster
}
@@ -1,22 +0,0 @@
region = "jp-west-1"
az     = "west-11"

instance_key_name = "deployerkey"

instance_type_bn = "e-medium"
instance_type_cp = "e-medium"
instance_type_wk = "e-medium"

private_network_cidr = "192.168.30.0/24"
instances_cp = {
  "cp01" : { private_ip : "192.168.30.11/24" }
  "cp02" : { private_ip : "192.168.30.12/24" }
  "cp03" : { private_ip : "192.168.30.13/24" }
}
instances_wk = {
  "wk01" : { private_ip : "192.168.30.21/24" }
  "wk02" : { private_ip : "192.168.30.22/24" }
}
private_ip_bn = "192.168.30.10/24"

image_name = "Ubuntu Server 22.04 LTS"
@@ -1 +0,0 @@
../../../../inventory/sample/group_vars
@@ -1,9 +0,0 @@
terraform {
  required_version = ">=1.3.7"
  required_providers {
    nifcloud = {
      source  = "nifcloud/nifcloud"
      version = "1.8.0"
    }
  }
}
@@ -1,77 +0,0 @@
variable "region" {
  description = "The region"
  type        = string
}

variable "az" {
  description = "The availability zone"
  type        = string
}

variable "private_ip_bn" {
  description = "Private IP of bastion server"
  type        = string
}

variable "private_network_cidr" {
  description = "The subnet of private network"
  type        = string
  validation {
    condition     = can(cidrnetmask(var.private_network_cidr))
    error_message = "Must be a valid IPv4 CIDR block address."
  }
}

variable "instances_cp" {
  type = map(object({
    private_ip = string
  }))
}

variable "instances_wk" {
  type = map(object({
    private_ip = string
  }))
}

variable "instance_key_name" {
  description = "The key name of the Key Pair to use for the instance"
  type        = string
}

variable "instance_type_bn" {
  description = "The instance type of bastion server"
  type        = string
}

variable "instance_type_wk" {
  description = "The instance type of worker"
  type        = string
}

variable "instance_type_cp" {
  description = "The instance type of control plane"
  type        = string
}

variable "image_name" {
  description = "The name of image"
  type        = string
}

variable "working_instance_ip" {
  description = "The IP address to connect to bastion server."
  type        = string
}

variable "accounting_type" {
  type    = string
  default = "2"
  validation {
    condition = anytrue([
      var.accounting_type == "1", // Monthly
      var.accounting_type == "2", // Pay per use
    ])
    error_message = "Must be 1 or 2."
  }
}
contrib/terraform/openstack/.gitignore (vendored)
@@ -1,5 +1,5 @@
.terraform
*.tfvars
!sample-inventory/cluster.tfvars
!sample-inventory\/cluster.tfvars
*.tfstate
*.tfstate.backup

@@ -318,7 +318,6 @@ k8s_nodes:
    mount_path: string # Path to where the partition should be mounted
    partition_start: string # Where the partition should start (e.g. 10GB). Note, if you set the partition_start to 0 there will be no space left for the root partition
    partition_end: string # Where the partition should end (e.g. 10GB or -1 for end of volume)
    netplan_critical_dhcp_interface: string # Name of interface to set the dhcp flag critical = true, to circumvent [this issue](https://bugs.launchpad.net/ubuntu/+source/systemd/+bug/1776013).
```

For example:
@@ -19,8 +19,8 @@ data "cloudinit_config" "cloudinit" {
  part {
    content_type = "text/cloud-config"
    content = templatefile("${path.module}/templates/cloudinit.yaml.tmpl", {
      extra_partitions = [],
      netplan_critical_dhcp_interface = ""
      # template_file doesn't support lists
      extra_partitions = ""
    })
  }
}
@@ -821,8 +821,7 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
  flavor_id = each.value.flavor
  key_pair  = openstack_compute_keypair_v2.k8s.name
  user_data = each.value.cloudinit != null ? templatefile("${path.module}/templates/cloudinit.yaml.tmpl", {
    extra_partitions = each.value.cloudinit.extra_partitions,
    netplan_critical_dhcp_interface = each.value.cloudinit.netplan_critical_dhcp_interface,
    extra_partitions = each.value.cloudinit.extra_partitions
  }) : data.cloudinit_config.cloudinit.rendered

  dynamic "block_device" {
@@ -1,4 +1,4 @@
%{~ if length(extra_partitions) > 0 || netplan_critical_dhcp_interface != "" }
%{~ if length(extra_partitions) > 0 }
#cloud-config
bootcmd:
%{~ for idx, partition in extra_partitions }
@@ -8,26 +8,11 @@ bootcmd:
%{~ endfor }

runcmd:
%{~ if netplan_critical_dhcp_interface != "" }
  - netplan apply
%{~ endif }
%{~ for idx, partition in extra_partitions }
  - mkdir -p ${partition.mount_path}
  - chown nobody:nogroup ${partition.mount_path}
  - mount ${partition.partition_path} ${partition.mount_path}
%{~ endfor ~}

%{~ if netplan_critical_dhcp_interface != "" }
write_files:
  - path: /etc/netplan/90-critical-dhcp.yaml
    content: |
      network:
        version: 2
        ethernets:
          ${ netplan_critical_dhcp_interface }:
            dhcp4: true
            critical: true
%{~ endif }
%{~ endfor }

mounts:
%{~ for idx, partition in extra_partitions }
@@ -142,14 +142,13 @@ variable "k8s_nodes" {
    additional_server_groups = optional(list(string))
    server_group             = optional(string)
    cloudinit = optional(object({
      extra_partitions = optional(list(object({
      extra_partitions = list(object({
        volume_path     = string
        partition_path  = string
        partition_start = string
        partition_end   = string
        mount_path      = string
      })), [])
      netplan_critical_dhcp_interface = optional(string, "")
      }))
    }))
  }))
}
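For orientation, a hedged sketch of how a `k8s_nodes` entry could populate the `cloudinit.extra_partitions` fields typed above; the node name, availability zone, flavor and device paths are illustrative assumptions rather than values taken from this diff, while `partition_start`/`partition_end` reuse the example values from the documentation hunk earlier.

```hcl
# Illustrative only: field names come from the variable type above, values are invented.
k8s_nodes = {
  node-1 = {
    az     = "nova"            # assumed
    flavor = "<flavor-uuid>"   # assumed
    cloudinit = {
      extra_partitions = [
        {
          volume_path     = "/dev/vdb"   # assumed device
          partition_path  = "/dev/vdb1"  # assumed device
          partition_start = "10GB"       # per the docs' example value
          partition_end   = "-1"         # -1 = end of volume, per the docs
          mount_path      = "/var/lib/containerd" # assumed mount point
        }
      ]
    }
  }
}
```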
@@ -140,4 +140,4 @@ terraform destroy --var-file cluster-settings.tfvars \
* `backend_servers`: List of servers that traffic to the port should be forwarded to.
* `server_groups`: Group servers together
* `servers`: The servers that should be included in the group.
* `anti_affinity_policy`: Defines if a server group is an anti-affinity group. Setting this to "strict" or "yes" will result in all servers in the group being placed on separate compute hosts. The value can be "strict", "yes" or "no". "strict" is a strict policy that does not allow servers in the same server group to be on the same host; "yes" is a best-effort policy that tries to put servers on different hosts, but this is not guaranteed.
* `anti_affinity`: If anti-affinity should be enabled, try to spread the VMs out on separate nodes.
@@ -18,7 +18,7 @@ ssh_public_keys = [

# check list of available plan https://developers.upcloud.com/1.3/7-plans/
machines = {
  "control-plane-0" : {
  "master-0" : {
    "node_type" : "master",
    # plan to use instead of custom cpu/mem
    "plan" : null,
@@ -133,9 +133,9 @@ loadbalancers = {
server_groups = {
  # "control-plane" = {
  #   servers = [
  #     "control-plane-0"
  #     "master-0"
  #   ]
  #   anti_affinity_policy = "strict"
  #   anti_affinity = true
  # },
  # "workers" = {
  #   servers = [
@@ -143,6 +143,6 @@ server_groups = {
  #     "worker-1",
  #     "worker-2"
  #   ]
  #   anti_affinity_policy = "yes"
  #   anti_affinity = true
  # }
}
@@ -22,7 +22,7 @@ locals {

  # If prefix is set, all resources will be prefixed with "${var.prefix}-"
  # Else don't prefix with anything
  resource-prefix = "%{if var.prefix != ""}${var.prefix}-%{endif}"
  resource-prefix = "%{ if var.prefix != ""}${var.prefix}-%{ endif }"
}

resource "upcloud_network" "private" {
@@ -38,7 +38,7 @@ resource "upcloud_network" "private" {

resource "upcloud_storage" "additional_disks" {
  for_each = {
    for disk in local.disks : "${disk.node_name}_${disk.disk_name}" => disk.disk
    for disk in local.disks: "${disk.node_name}_${disk.disk_name}" => disk.disk
  }

  size = each.value.size
@@ -165,7 +165,7 @@ resource "upcloud_firewall_rules" "master" {
  for_each  = upcloud_server.master
  server_id = each.value.id

  dynamic "firewall_rule" {
  dynamic firewall_rule {
    for_each = var.master_allowed_remote_ips

    content {
@@ -181,7 +181,7 @@ resource "upcloud_firewall_rules" "master" {
    }
  }

  dynamic "firewall_rule" {
  dynamic firewall_rule {
    for_each = length(var.master_allowed_remote_ips) > 0 ? [1] : []

    content {
@@ -197,7 +197,7 @@ resource "upcloud_firewall_rules" "master" {
    }
  }

  dynamic "firewall_rule" {
  dynamic firewall_rule {
    for_each = var.k8s_allowed_remote_ips

    content {
@@ -213,7 +213,7 @@ resource "upcloud_firewall_rules" "master" {
    }
  }

  dynamic "firewall_rule" {
  dynamic firewall_rule {
    for_each = length(var.k8s_allowed_remote_ips) > 0 ? [1] : []

    content {
@@ -229,7 +229,7 @@ resource "upcloud_firewall_rules" "master" {
    }
  }

  dynamic "firewall_rule" {
  dynamic firewall_rule {
    for_each = var.master_allowed_ports

    content {
@@ -245,7 +245,7 @@ resource "upcloud_firewall_rules" "master" {
    }
  }

  dynamic "firewall_rule" {
  dynamic firewall_rule {
    for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []

    content {
@@ -261,7 +261,7 @@ resource "upcloud_firewall_rules" "master" {
    }
  }

  dynamic "firewall_rule" {
  dynamic firewall_rule {
    for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []

    content {
@@ -277,7 +277,7 @@ resource "upcloud_firewall_rules" "master" {
    }
  }

  dynamic "firewall_rule" {
  dynamic firewall_rule {
    for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []

    content {
@@ -293,7 +293,7 @@ resource "upcloud_firewall_rules" "master" {
    }
  }

  dynamic "firewall_rule" {
  dynamic firewall_rule {
    for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []

    content {
@@ -309,7 +309,7 @@ resource "upcloud_firewall_rules" "master" {
    }
  }

  dynamic "firewall_rule" {
  dynamic firewall_rule {
    for_each = var.firewall_default_deny_in ? ["udp"] : []

    content {
@@ -325,7 +325,7 @@ resource "upcloud_firewall_rules" "master" {
    }
  }

  dynamic "firewall_rule" {
  dynamic firewall_rule {
    for_each = var.firewall_default_deny_in ? ["udp"] : []

    content {
@@ -354,7 +354,7 @@ resource "upcloud_firewall_rules" "k8s" {
  for_each  = upcloud_server.worker
  server_id = each.value.id

  dynamic "firewall_rule" {
  dynamic firewall_rule {
    for_each = var.k8s_allowed_remote_ips

    content {
@@ -370,7 +370,7 @@ resource "upcloud_firewall_rules" "k8s" {
    }
  }

  dynamic "firewall_rule" {
  dynamic firewall_rule {
    for_each = length(var.k8s_allowed_remote_ips) > 0 ? [1] : []

    content {
@@ -386,7 +386,7 @@ resource "upcloud_firewall_rules" "k8s" {
    }
  }

  dynamic "firewall_rule" {
  dynamic firewall_rule {
    for_each = var.worker_allowed_ports

    content {
@@ -402,7 +402,7 @@ resource "upcloud_firewall_rules" "k8s" {
    }
  }

  dynamic "firewall_rule" {
  dynamic firewall_rule {
    for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []

    content {
@@ -418,7 +418,7 @@ resource "upcloud_firewall_rules" "k8s" {
    }
  }

  dynamic "firewall_rule" {
  dynamic firewall_rule {
    for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []

    content {
@@ -434,7 +434,7 @@ resource "upcloud_firewall_rules" "k8s" {
    }
  }

  dynamic "firewall_rule" {
  dynamic firewall_rule {
    for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []

    content {
@@ -450,7 +450,7 @@ resource "upcloud_firewall_rules" "k8s" {
    }
  }

  dynamic "firewall_rule" {
  dynamic firewall_rule {
    for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []

    content {
@@ -466,7 +466,7 @@ resource "upcloud_firewall_rules" "k8s" {
    }
  }

  dynamic "firewall_rule" {
  dynamic firewall_rule {
    for_each = var.firewall_default_deny_in ? ["udp"] : []

    content {
@@ -482,7 +482,7 @@ resource "upcloud_firewall_rules" "k8s" {
    }
  }

  dynamic "firewall_rule" {
  dynamic firewall_rule {
    for_each = var.firewall_default_deny_in ? ["udp"] : []

    content {
@@ -535,7 +535,7 @@ resource "upcloud_loadbalancer_frontend" "lb_frontend" {

resource "upcloud_loadbalancer_static_backend_member" "lb_backend_member" {
  for_each = {
    for be_server in local.lb_backend_servers :
    for be_server in local.lb_backend_servers:
    "${be_server.server_name}-lb-backend-${be_server.lb_name}" => be_server
    if var.loadbalancer_enabled
  }
@@ -552,7 +552,7 @@ resource "upcloud_loadbalancer_static_backend_member" "lb_backend_member" {
resource "upcloud_server_group" "server_groups" {
  for_each             = var.server_groups
  title                = each.key
  anti_affinity_policy = each.value.anti_affinity_policy
  anti_affinity        = each.value.anti_affinity
  labels               = {}
  members              = [for server in each.value.servers : merge(upcloud_server.master, upcloud_server.worker)[server].id]
}
@@ -3,8 +3,8 @@ output "master_ip" {
  value = {
    for instance in upcloud_server.master :
    instance.hostname => {
      "public_ip" : instance.network_interface[0].ip_address
      "private_ip" : instance.network_interface[1].ip_address
      "public_ip": instance.network_interface[0].ip_address
      "private_ip": instance.network_interface[1].ip_address
    }
  }
}
@@ -13,8 +13,8 @@ output "worker_ip" {
  value = {
    for instance in upcloud_server.worker :
    instance.hostname => {
      "public_ip" : instance.network_interface[0].ip_address
      "private_ip" : instance.network_interface[1].ip_address
      "public_ip": instance.network_interface[0].ip_address
      "private_ip": instance.network_interface[1].ip_address
    }
  }
}

@@ -99,7 +99,7 @@ variable "server_groups" {
  description = "Server groups"

  type = map(object({
    anti_affinity_policy = string
    anti_affinity        = bool
    servers              = list(string)
  }))
}
Some files were not shown because too many files have changed in this diff.