Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2025-12-13 21:34:40 +03:00)

Compare commits: v2.27.1...test/flatc (196 commits)
Commits included in this comparison (SHA1):

36e74a0e7b 2cb3bcc3b6 d993c58880 79fbfdf271 cfaf397d4a 2f404de77c d304966d75
4ce5510c1a 8032b8281d 45ecceb3e1 5a6ef1dafa 0ae9ab36ce cf48915657 6f74ef17f7
fe2ab898b8 c8b8567781 bf86c14d35 e47eb4bc7f 5222f48978 7b6b7318b2 f02d313fee
7c9870d15b c8ea1468d1 ad26fe77f5 aae53a9df5 1513254622 4a5b524b98 aa0c0851f8
0fc56ed344 5c4e597987 ef133fd93d f6ca3bf477 b9e251ac7a 43fceebdd3 862aec4dc6
4f3b214ef5 a4843eaf5e 0f9f9fb569 e7c70d6169 cd9c21b7d6 36cd894d58 b38312d007
f3682d85d3 3ace8963b1 b551fe083d ae583e2a28 247b45bca6 c20388dbbb d5a5e6a93c
bab6a9bf64 f70ace5300 c00fc9f221 5abaf8cdf4 02438442b9 03b40e71a3 e0920b33d7
56d37da105 fffc1b4ac0 c79b3ce46b 0c59cc84dd 16b090c5ff 9e861cf816 04a8adb17a
986f461ef1 96aadc3614 0efb415ec6 71ae3c78e2 315313dd10 f70c33d71a ac4c41e4e6
611f645907 e4905f1d1d acc843a5fa e62bbe0c76 d0f91adde4 a8d494fb95 119fa5b0c0
c8d75effcb aa6aa1522b e2d86c3413 7de9350c07 5d7236ea5f 7c611890c3 6d4714b66e
299178e587 fec663a27a f9a263090a 800c84dcc9 f6ae46c9d8 8467724aab 12270243f5
977e41ac5e 359467b525 8f4b7f9f5d 95f059d2c1 358bacf7ea 0632f23a63 a665b43854
7590d95976 87f7363e46 1b2e66cd30 768fbeff0b 7d21a54dc7 e0a141ab12 e332375293
a60ec1dbde 31c470137f 011e839f52 d7962fb46e dbb9900085 e24216bedc a51e7dd07d
c557adf911 d10a2cd4c6 cfad1bd420 08b77b5350 fe0a1f4e42 624937d137 4373c1be1d
59e1638ae1 6af849089e 46e1fbcdd9 1567e8ee6c 76c0a3aa75 e107022b4b ebcf9c3fff
d23c1464c9 cbd0b7bbc3 67a73764e4 fba31beb07 775361206c 12a2c5eaa8 ed789c9b97
85d9e3e2ae 98cdb5348c f53552e56b 277ab7339a 191f71afea bfe858ba06 f8c4d5a899
9008c40d0e 5a7e1be070 2a7b50a016 d2e51e777c 89476b48e5 3f01d4725d a142f40e25
0e91000a04 e73c2d081c 5862bff044 b548ccbe7f a5142e7dfd 3930919283 b104bb7a57
bc36e9d440 d8629b8e7e c84336b48c 403a73ac11 5ca23e3bfe 4d3f06e69e d17bd286ea
55cff4f3d3 76e07daa12 a551922c84 ba3258d7f0 9b56840d51 4351b47ebe b08c5e8b14
3527cb1916 81790cab91 9fbc566d98 ff768cc9fe ff3d9a0443 6608efb2c4 479fda6355
3a44411aa1 9334bc1fee c94daa4ff5 5be8155394 08913c4aa0 38dd224ffe 24c59cee59
2be54b2bd7 ae68766015 9f58ba60f3 a6219c84c9 7941be127d c938dfa634 5a353cb04f
1f186ed451 8443f370d4 1801debaea 369be00960 ae1805587b 55d1e4a4b5 ac9b76eb2e
@@ -37,5 +37,9 @@ exclude_paths:
- tests/files/custom_cni/cilium.yaml
- venv
- .github
- .ansible
- .cache
- .gitlab-ci.yml
- .gitlab-ci
mock_modules:
- gluster.gluster.gluster_volume
.github/ISSUE_TEMPLATE/bug-report.yaml (vendored): 28 lines changed
@@ -36,11 +36,35 @@ body:
attributes:
value: '### Environment'

- type: textarea
- type: dropdown
id: os
attributes:
label: OS
placeholder: 'printf "$(uname -srm)\n$(cat /etc/os-release)\n"'
options:
- 'RHEL 9'
- 'RHEL 8'
- 'Fedora 40'
- 'Ubuntu 24'
- 'Ubuntu 22'
- 'Ubuntu 20'
- 'Debian 12'
- 'Debian 11'
- 'Flatcar Container Linux'
- 'openSUSE Leap'
- 'openSUSE Tumbleweed'
- 'Oracle Linux 9'
- 'Oracle Linux 8'
- 'AlmaLinux 9'
- 'AlmaLinux 8'
- 'Rocky Linux 9'
- 'Rocky Linux 8'
- 'Amazon Linux 2'
- 'Kylin Linux Advanced Server V10'
- 'UOS Linux 20'
- 'openEuler 24'
- 'openEuler 22'
- 'openEuler 20'
- 'Other|Unsupported'
validations:
required: true
.github/dependabot.yml (vendored): 12 lines changed
@@ -7,3 +7,15 @@ updates:
labels:
- dependencies
- release-note-none
groups:
molecule:
patterns:
- molecule
- molecule-plugins*
- package-ecosystem: "github-actions"
directory: "/"
labels:
- release-note-none
- ci-short
schedule:
interval: "weekly"
.github/workflows/auto-label-os.yml (vendored, new file): 32 lines
@@ -0,0 +1,32 @@
name: Issue labeler
on:
issues:
types: [opened]

permissions:
contents: read

jobs:
label-component:
runs-on: ubuntu-latest
permissions:
issues: write

steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683

- name: Parse issue form
uses: stefanbuck/github-issue-parser@2ea9b35a8c584529ed00891a8f7e41dc46d0441e
id: issue-parser
with:
template-path: .github/ISSUE_TEMPLATE/bug-report.yaml

- name: Set labels based on OS field
uses: redhat-plumbers-in-action/advanced-issue-labeler@39087a4b30cb98d57f25f34d617a6af8163c17d9
with:
issue-form: ${{ steps.issue-parser.outputs.jsonString }}
section: os
block-list: |
None
Other
token: ${{ secrets.GITHUB_TOKEN }}
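For orientation, a minimal sketch (not taken from the diff) of what the labeler presumably receives: the issue parser exposes the form answers as a JSON object keyed by each field's `id`, so for the `os` dropdown added above the payload looks roughly like this (shown as YAML; the value and any other keys are illustrative), and the block-list keeps the generic answers from producing labels.

```yaml
# Hypothetical shape of steps.issue-parser.outputs.jsonString, rendered as YAML
os: "Ubuntu 22"   # answer to the dropdown defined with id: os in bug-report.yaml
# other fields from the bug-report template would appear as additional keys
```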
.github/workflows/upgrade-patch-versions-schedule.yml (vendored, new file): 54 lines
@@ -0,0 +1,54 @@
name: Upgrade Kubespray components with new patches versions - all branches

on:
schedule:
- cron: '22 2 * * *' # every day, 02:22 UTC
workflow_dispatch:

permissions: {}
jobs:
get-releases-branches:
runs-on: ubuntu-latest
outputs:
branches: ${{ steps.get-branches.outputs.data }}
steps:
- uses: octokit/graphql-action@8ad880e4d437783ea2ab17010324de1075228110
id: get-branches
with:
query: |
query get_release_branches($owner:String!, $name:String!) {
repository(owner:$owner, name:$name) {
refs(refPrefix: "refs/heads/",
first: 0, # TODO increment once we have release branch with the new checksums format
query: "release-",
orderBy: {
field: ALPHABETICAL,
direction: DESC
}) {
nodes {
name
}
}
}
}
variables: |
owner: ${{ github.repository_owner }}
name: ${{ github.event.repository.name }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

update-versions:
needs: get-releases-branches
strategy:
fail-fast: false
matrix:
branch:
- name: ${{ github.event.repository.default_branch }}
- ${{ fromJSON(needs.get-releases-branches.outputs.branches).repository.refs.nodes }}
uses: ./.github/workflows/upgrade-patch-versions.yml
permissions:
contents: write
pull-requests: write
name: Update patch updates on ${{ matrix.branch.name }}
with:
branch: ${{ matrix.branch.name }}
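To make the matrix construction concrete, here is a hedged sketch of what it expands to (branch names are assumptions for illustration; with `first: 0` the GraphQL query currently returns no release branches, per the TODO above): the default branch is added as a `name:` mapping and each `{name: ...}` node from `fromJSON(...)` is spread in alongside it.

```yaml
# Hypothetical expansion of the 'branch' matrix (names are examples only)
matrix:
  branch:
    - name: master          # github.event.repository.default_branch
    - name: release-2.27    # nodes from the GraphQL query, once 'first' is raised above 0
    - name: release-2.26
```

Each resulting entry calls the reusable upgrade-patch-versions.yml workflow once, producing one "Update patch updates on <branch>" job per branch.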
.github/workflows/upgrade-patch-versions.yml (vendored, new file): 44 lines
@@ -0,0 +1,44 @@
on:
workflow_call:
inputs:
branch:
description: Which branch to update with new patch versions
default: master
required: true
type: string

jobs:
update-patch-versions:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
ref: ${{ inputs.branch }}
- uses: actions/setup-python@v5
with:
python-version: '3.13'
cache: 'pip'
- run: pip install scripts/component_hash_update pre-commit
- run: update-hashes
env:
API_KEY: ${{ secrets.GITHUB_TOKEN }}
- uses: actions/cache@v4
with:
key: pre-commit-hook-propagate
path: |
~/.cache/pre-commit
- run: pre-commit run --all-files propagate-ansible-variables
continue-on-error: true
- uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e
with:
commit-message: Patch versions updates
title: Patch versions updates - ${{ inputs.branch }}
labels: bot
branch: component_hash_update/${{ inputs.branch }}
sign-commits: true
body: |
/kind feature

```release-note
NONE
```
@@ -6,19 +6,24 @@ stages:
- deploy-extended

variables:
KUBESPRAY_VERSION: v2.26.0
FAILFASTCI_NAMESPACE: 'kargo-ci'
GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
GIT_CONFIG_COUNT: 2
GIT_CONFIG_KEY_0: user.email
GIT_CONFIG_VALUE_0: "ci@kubespray.io"
GIT_CONFIG_KEY_1: user.name
GIT_CONFIG_VALUE_1: "Kubespray CI"
ANSIBLE_FORCE_COLOR: "true"
ANSIBLE_STDOUT_CALLBACK: "debug"
MAGIC: "ci check this"
GS_ACCESS_KEY_ID: $GS_KEY
GS_SECRET_ACCESS_KEY: $GS_SECRET
CONTAINER_ENGINE: docker
SSH_USER: root
GCE_PREEMPTIBLE: "false"
ANSIBLE_KEEP_REMOTE_FILES: "1"
ANSIBLE_CONFIG: ./tests/ansible.cfg
ANSIBLE_REMOTE_USER: kubespray
ANSIBLE_PRIVATE_KEY_FILE: /tmp/id_rsa
ANSIBLE_INVENTORY: /tmp/inventory
RESET_CHECK: "false"
REMOVE_NODE_CHECK: "false"
UPGRADE_TEST: "false"
@@ -26,12 +31,12 @@ variables:
ANSIBLE_VERBOSITY: 2
RECOVER_CONTROL_PLANE_TEST: "false"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"
TERRAFORM_VERSION: 1.3.7
TF_VERSION: 1.3.7
PIPELINE_IMAGE: "$CI_REGISTRY_IMAGE/pipeline:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}"

before_script:
- ./tests/scripts/rebase.sh
- mkdir -p /.ssh
- mkdir -p cluster-dump $ANSIBLE_INVENTORY

.job: &job
tags:
@@ -43,29 +48,17 @@ before_script:
- cluster-dump/
needs:
- pipeline-image
variables:
ANSIBLE_STDOUT_CALLBACK: "debug"

.job-moderated:
extends: .job
needs:
- pipeline-image
- ci-not-authorized
- check-galaxy-version # lint
- pre-commit # lint
- vagrant-validate # lint

.testcases: &testcases
extends: .job-moderated
retry: 1
interruptible: true
before_script:
- update-alternatives --install /usr/bin/python python /usr/bin/python3 1
- ./tests/scripts/rebase.sh
- ./tests/scripts/testcases_prepare.sh
script:
- ./tests/scripts/testcases_run.sh
after_script:
- ./tests/scripts/testcases_cleanup.sh

# For failfast, at least 1 job must be defined in .gitlab-ci.yml
# Premoderated with manual actions
ci-not-authorized:
@@ -97,6 +90,6 @@ include:
- .gitlab-ci/build.yml
- .gitlab-ci/lint.yml
- .gitlab-ci/terraform.yml
- .gitlab-ci/packet.yml
- .gitlab-ci/kubevirt.yml
- .gitlab-ci/vagrant.yml
- .gitlab-ci/molecule.yml
@@ -1,5 +1,5 @@
---
.build-container:
pipeline-image:
cache:
key: $CI_COMMIT_REF_SLUG
paths:
@@ -11,22 +11,19 @@
name: gcr.io/kaniko-project/executor:debug
entrypoint: ['']
variables:
TAG: $CI_COMMIT_SHORT_SHA
PROJECT_DIR: $CI_PROJECT_DIR
DOCKERFILE: Dockerfile
GODEBUG: "http2client=0"
before_script:
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"auth\":\"$(echo -n ${CI_REGISTRY_USER}:${CI_REGISTRY_PASSWORD} | base64)\"}}}" > /kaniko/.docker/config.json
# TODO: remove the override
# currently rebase.sh depends on bash (not available in the kaniko image)
# once we have a simpler rebase (which should be easy if the target branch ref is available as variable
# we'll be able to rebase here as well hopefully
before_script: []
script:
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"auth\":\"$(echo -n ${CI_REGISTRY_USER}:${CI_REGISTRY_PASSWORD} | base64)\"}}}" > /kaniko/.docker/config.json
- /kaniko/executor --cache=true
--cache-dir=image-cache
--context $PROJECT_DIR
--dockerfile $PROJECT_DIR/$DOCKERFILE
--context $CI_PROJECT_DIR
--dockerfile $CI_PROJECT_DIR/pipeline.Dockerfile
--label 'git-branch'=$CI_COMMIT_REF_SLUG
--label 'git-tag=$CI_COMMIT_TAG'
--destination $PIPELINE_IMAGE

pipeline-image:
extends: .build-container
variables:
DOCKERFILE: pipeline.Dockerfile
--log-timestamp=true
.gitlab-ci/kubevirt.yml (new file): 148 lines
@@ -0,0 +1,148 @@
---
.kubevirt:
extends: .job-moderated
interruptible: true
script:
- ansible-playbook tests/cloud_playbooks/create-kubevirt.yml
-c local -e @"tests/files/${TESTCASE}.yml"
- ./tests/scripts/testcases_run.sh
variables:
ANSIBLE_TIMEOUT: "120"
tags:
- ffci
needs:
- pipeline-image
- ci-not-authorized

# TODO: generate testcases matrixes from the files in tests/files/
# this is needed to avoid the need for PR rebasing when a job was added or removed in the target branch
# (currently, a removed job in the target branch breaks the tests, because the
# pipeline definition is parsed by gitlab before the rebase.sh script)
# CI template for PRs
pr:
stage: deploy-part1
rules:
- if: $PR_LABELS =~ /.*ci-short.*/
when: manual
allow_failure: true
- if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
when: on_success
- when: manual
allow_failure: true
extends: .kubevirt
parallel:
matrix:
- TESTCASE:
- almalinux8-calico
- almalinux9-crio
- almalinux9-kube-ovn
- debian11-calico-collection
- debian11-macvlan
- debian12-cilium
- fedora39-kube-router
# FIXME: this test is broken (perma-failing)
- openeuler24-calico
- opensuse15-6-calico
- rockylinux8-calico
- rockylinux9-cilium
- ubuntu20-calico-all-in-one-hardening
- ubuntu20-cilium-sep
- ubuntu20-flannel-collection
- ubuntu20-kube-router-sep
- ubuntu20-kube-router-svc-proxy
- ubuntu22-calico-all-in-one
- ubuntu22-calico-all-in-one-upgrade
- ubuntu24-calico-etcd-datastore
- flatcar4081-calico

# The ubuntu20-calico-all-in-one jobs are meant as early stages to prevent running the full CI if something is horribly broken
ubuntu20-calico-all-in-one:
stage: deploy-part1
extends: .kubevirt
variables:
TESTCASE: ubuntu20-calico-all-in-one
rules:
- if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
when: on_success
- when: manual
allow_failure: true

pr_full:
extends: .kubevirt
stage: deploy-extended
rules:
- if: $PR_LABELS =~ /.*ci-full.*/
when: on_success
# Else run as manual
- when: manual
allow_failure: true
parallel:
matrix:
- TESTCASE:
- almalinux9-calico-ha-ebpf
- almalinux9-calico-nodelocaldns-secondary
- debian11-custom-cni
- debian11-kubelet-csr-approver
- debian12-custom-cni-helm
- fedora39-calico-swap-selinux
- fedora39-crio
- ubuntu20-all-in-one-docker
- ubuntu20-calico-ha-wireguard
- ubuntu20-flannel-ha
- ubuntu20-flannel-ha-once

# Need an update of the container image to use schema v2
# update: quay.io/kubespray/vm-amazon-linux-2:latest
manual:
extends: pr_full
parallel:
matrix:
- TESTCASE:
- amazon-linux-2-all-in-one
rules:
- when: manual
allow_failure: true

pr_extended:
extends: .kubevirt
stage: deploy-extended
rules:
- if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
when: on_success
- when: manual
allow_failure: true
parallel:
matrix:
- TESTCASE:
- almalinux9-calico
- almalinux9-calico-remove-node
- almalinux9-docker
- debian11-docker
- debian12-calico
- debian12-docker
- opensuse15-6-docker-cilium
- rockylinux9-calico
- ubuntu20-calico-etcd-kubeadm
- ubuntu20-flannel
- ubuntu22-all-in-one-docker
- ubuntu24-all-in-one-docker
- ubuntu24-calico-all-in-one

# Enabled when PERIODIC_CI_ENABLED var is set
periodic:
only:
variables:
- $PERIODIC_CI_ENABLED
allow_failure: true
extends: .kubevirt
parallel:
matrix:
- TESTCASE:
- debian11-calico-upgrade
- debian11-calico-upgrade-once
- debian12-cilium-svc-proxy
- fedora39-calico-selinux
- fedora40-docker-calico
- ubuntu20-calico-etcd-kubeadm-upgrade-ha
- ubuntu20-calico-ha-recover
- ubuntu20-calico-ha-recover-noquorum
@@ -3,15 +3,16 @@ pre-commit:
stage: test
tags:
- ffci
image: 'ghcr.io/pre-commit-ci/runner-image@sha256:aaf2c7b38b22286f2d381c11673bec571c28f61dd086d11b43a1c9444a813cef'
image: 'ghcr.io/pre-commit-ci/runner-image@sha256:fe01a6ec51b298412990b88627c3973b1146c7304f930f469bafa29ba60bcde9'
variables:
PRE_COMMIT_HOME: /pre-commit-cache
PRE_COMMIT_HOME: ${CI_PROJECT_DIR}/.cache/pre-commit
script:
- pre-commit run --all-files
- pre-commit run --all-files --show-diff-on-failure
cache:
key: pre-commit-all
key: pre-commit-2
paths:
- /pre-commit-cache
- ${PRE_COMMIT_HOME}
when: 'always'
needs: []

vagrant-validate:
@@ -23,13 +24,3 @@ vagrant-validate:
script:
- ./tests/scripts/vagrant-validate.sh
except: ['triggers', 'master']


# TODO: convert to pre-commit hook
check-galaxy-version:
needs: []
stage: test
tags: [ffci]
image: python:3
script:
- tests/scripts/check_galaxy_version.sh
@@ -1,29 +1,13 @@
---
.molecule:
tags: [ffci-vm-med]
tags: [ffci]
only: [/^pr-.*$/]
except: ['triggers']
image: quay.io/kubespray/vm-kubespray-ci:v13
services: []
stage: deploy-part1
needs: []
image: $PIPELINE_IMAGE
needs:
- pipeline-image
# - ci-not-authorized
variables:
VAGRANT_DEFAULT_PROVIDER: "libvirt"
VAGRANT_HOME: "$CI_PROJECT_DIR/.vagrant.d"
PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip"
before_script:
- mkdir -p $VAGRANT_HOME
- groups
- python3 -m venv citest
- source citest/bin/activate
- vagrant plugin expunge --reinstall --force --no-tty
- vagrant plugin install vagrant-libvirt
- pip install --no-compile --no-cache-dir pip -U
- pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/requirements.txt
- pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/tests/requirements.txt
- ./tests/scripts/rebase.sh
- ./tests/scripts/vagrant_clean.sh
script:
- ./tests/scripts/molecule_run.sh
after_script:
@@ -32,72 +16,39 @@
when: always
paths:
- molecule_logs/
cache:
key: $CI_JOB_NAME_SLUG
paths:
- .vagrant.d/boxes
- .cache/pip
policy: pull-push # TODO: change to "pull" when not on main

molecule:
extends: .molecule
script:
- ./tests/scripts/molecule_run.sh -i $ROLE
parallel:
matrix:
- ROLE:
- container-engine/cri-dockerd
- container-engine/containerd
- container-engine/cri-o
- adduser
- bastion-ssh-config
- bootstrap-os

# CI template for periodic CI jobs
# Enabled when PERIODIC_CI_ENABLED var is set
.molecule_periodic:
molecule_full:
only:
variables:
- $PERIODIC_CI_ENABLED
allow_failure: true
extends: .molecule

molecule_full:
extends: .molecule_periodic

molecule_no_container_engines:
extends: .molecule
script:
- ./tests/scripts/molecule_run.sh -e container-engine
when: on_success

molecule_docker:
extends: .molecule
script:
- ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
when: on_success

molecule_containerd:
extends: .molecule
script:
- ./tests/scripts/molecule_run.sh -i container-engine/containerd
when: on_success

molecule_cri-o:
extends: .molecule
stage: deploy-part1
script:
- ./tests/scripts/molecule_run.sh -i container-engine/cri-o
allow_failure: true
when: on_success

# # Stage 3 container engines don't get as much attention so allow them to fail
# molecule_kata:
# extends: .molecule
# stage: deploy-extended
# script:
# - ./tests/scripts/molecule_run.sh -i container-engine/kata-containers
# when: manual
# # FIXME: this test is broken (perma-failing)

molecule_gvisor:
extends: .molecule
stage: deploy-extended
script:
- ./tests/scripts/molecule_run.sh -i container-engine/gvisor
when: manual
# FIXME: this test is broken (perma-failing)

molecule_youki:
extends: .molecule
stage: deploy-extended
script:
- ./tests/scripts/molecule_run.sh -i container-engine/youki
when: manual
# FIXME: this test is broken (perma-failing)
extends: molecule
parallel:
matrix:
- ROLE:
- container-engine/cri-dockerd
- container-engine/containerd
- container-engine/cri-o
- adduser
- bastion-ssh-config
- bootstrap-os
# FIXME : tests below are perma-failing
- container-engine/kata-containers
- container-engine/gvisor
- container-engine/youki
@@ -1,246 +0,0 @@
---
.packet:
extends: .testcases
variables:
ANSIBLE_TIMEOUT: "120"
CI_PLATFORM: packet
SSH_USER: kubespray
tags:
- ffci
needs:
- pipeline-image
- ci-not-authorized

# CI template for PRs
.packet_pr:
stage: deploy-part1
rules:
- if: $PR_LABELS =~ /.*ci-short.*/
when: manual
allow_failure: true
- if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
when: on_success
- when: manual
allow_failure: true
extends: .packet

## Uncomment this to have multiple stages
# needs:
# - packet_ubuntu20-calico-all-in-one

.packet_pr_short:
stage: deploy-part1
extends: .packet
rules:
- if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
when: on_success
- when: manual
allow_failure: true

.packet_pr_manual:
extends: .packet_pr
stage: deploy-extended
rules:
- if: $PR_LABELS =~ /.*ci-full.*/
when: on_success
# Else run as manual
- when: manual
allow_failure: true

.packet_pr_extended:
extends: .packet_pr
stage: deploy-extended
rules:
- if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
when: on_success
- when: manual
allow_failure: true

# CI template for periodic CI jobs
# Enabled when PERIODIC_CI_ENABLED var is set
.packet_periodic:
only:
variables:
- $PERIODIC_CI_ENABLED
allow_failure: true
extends: .packet

# The ubuntu20-calico-all-in-one jobs are meant as early stages to prevent running the full CI if something is horribly broken
packet_ubuntu20-calico-all-in-one:
stage: deploy-part1
extends: .packet_pr_short
variables:
RESET_CHECK: "true"

# ### PR JOBS PART2

packet_ubuntu20-crio:
extends: .packet_pr_manual

packet_ubuntu22-calico-all-in-one:
extends: .packet_pr

packet_ubuntu22-calico-all-in-one-upgrade:
extends: .packet_pr
variables:
UPGRADE_TEST: graceful

packet_ubuntu24-calico-etcd-datastore:
extends: .packet_pr

packet_almalinux8-crio:
extends: .packet_pr

packet_almalinux8-kube-ovn:
extends: .packet_pr

packet_debian11-calico-collection:
extends: .packet_pr

packet_debian11-macvlan:
extends: .packet_pr

packet_debian12-cilium:
extends: .packet_pr

packet_rockylinux8-calico:
extends: .packet_pr

packet_rockylinux9-cilium:
extends: .packet_pr
variables:
RESET_CHECK: "true"

packet_amazon-linux-2-all-in-one:
extends: .packet_pr

packet_opensuse-docker-cilium:
extends: .packet_pr

packet_ubuntu20-cilium-sep:
extends: .packet_pr

packet_openeuler24-calico:
extends: .packet_pr

packet_ubuntu20-calico-all-in-one-hardening:
extends: .packet_pr

## Extended
packet_debian11-docker:
extends: .packet_pr_extended

packet_debian12-docker:
extends: .packet_pr_extended

packet_debian12-calico:
extends: .packet_pr_extended

packet_almalinux8-calico-remove-node:
extends: .packet_pr_extended
variables:
REMOVE_NODE_CHECK: "true"
REMOVE_NODE_NAME: "instance-3"

packet_rockylinux9-calico:
extends: .packet_pr_extended

packet_almalinux8-calico:
extends: .packet_pr_extended

packet_almalinux8-docker:
extends: .packet_pr_extended

packet_ubuntu24-calico-all-in-one:
extends: .packet_pr_extended

packet_ubuntu20-calico-etcd-kubeadm:
extends: .packet_pr_extended

packet_ubuntu24-all-in-one-docker:
extends: .packet_pr_extended

packet_ubuntu22-all-in-one-docker:
extends: .packet_pr_extended

# ### MANUAL JOBS
packet_fedora39-crio:
extends: .packet_pr_manual

packet_ubuntu20-flannel-ha:
extends: .packet_pr_manual

packet_ubuntu20-all-in-one-docker:
extends: .packet_pr_manual

packet_ubuntu20-flannel-ha-once:
extends: .packet_pr_manual

packet_fedora39-calico-swap-selinux:
extends: .packet_pr_manual

packet_almalinux8-calico-ha-ebpf:
extends: .packet_pr_manual

packet_almalinux8-calico-nodelocaldns-secondary:
extends: .packet_pr_manual

packet_debian11-custom-cni:
extends: .packet_pr_manual

packet_debian11-kubelet-csr-approver:
extends: .packet_pr_manual

packet_debian12-custom-cni-helm:
extends: .packet_pr_manual

packet_ubuntu20-calico-ha-wireguard:
extends: .packet_pr_manual

# PERIODIC
packet_fedora40-docker-calico:
stage: deploy-extended
extends: .packet_periodic
variables:
RESET_CHECK: "true"

packet_fedora39-calico-selinux:
stage: deploy-extended
extends: .packet_periodic

packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha:
stage: deploy-extended
extends: .packet_periodic
variables:
UPGRADE_TEST: basic


packet_debian11-calico-upgrade-once:
stage: deploy-extended
extends: .packet_periodic
variables:
UPGRADE_TEST: graceful

packet_ubuntu20-calico-ha-recover:
stage: deploy-extended
extends: .packet_periodic
variables:
RECOVER_CONTROL_PLANE_TEST: "true"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"

packet_ubuntu20-calico-ha-recover-noquorum:
stage: deploy-extended
extends: .packet_periodic
variables:
RECOVER_CONTROL_PLANE_TEST: "true"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:]:kube_control_plane[1:]"

packet_debian11-calico-upgrade:
stage: deploy-extended
extends: .packet_periodic
variables:
UPGRADE_TEST: graceful

packet_debian12-cilium-svc-proxy:
stage: deploy-extended
extends: .packet_periodic
@@ -1,17 +0,0 @@
---
# stub pipeline for dynamic generation
pre-commit:
tags:
- light
image: 'ghcr.io/pre-commit-ci/runner-image@sha256:aaf2c7b38b22286f2d381c11673bec571c28f61dd086d11b43a1c9444a813cef'
variables:
PRE_COMMIT_HOME: /pre-commit-cache
script:
- pre-commit run --all-files
cache:
key: pre-commit-$HOOK_ID
paths:
- /pre-commit-cache
parallel:
matrix:
- HOOK_ID:
@@ -5,28 +5,21 @@
needs:
- ci-not-authorized
- pipeline-image
variables:
TF_VAR_public_key_path: "${ANSIBLE_PRIVATE_KEY_FILE}.pub"
TF_VAR_ssh_private_key_path: $ANSIBLE_PRIVATE_KEY_FILE
CLUSTER: $CI_COMMIT_REF_NAME
TERRAFORM_STATE_ROOT: $CI_PROJECT_DIR
stage: deploy-part1
before_script:
- update-alternatives --install /usr/bin/python python /usr/bin/python3 1
- ./tests/scripts/rebase.sh
- ./tests/scripts/testcases_prepare.sh
- mkdir -p cluster-dump $ANSIBLE_INVENTORY
- ./tests/scripts/terraform_install.sh
# Set Ansible config
- cp ansible.cfg ~/.ansible.cfg
# Prepare inventory
- cp contrib/terraform/$PROVIDER/sample-inventory/cluster.tfvars .
- ln -s contrib/terraform/$PROVIDER/hosts
- ln -rs -t $ANSIBLE_INVENTORY contrib/terraform/$PROVIDER/hosts
- terraform -chdir="contrib/terraform/$PROVIDER" init
# Copy SSH keypair
- mkdir -p ~/.ssh
- echo "$PACKET_PRIVATE_KEY" | base64 -d > ~/.ssh/id_rsa
- chmod 400 ~/.ssh/id_rsa
- echo "$PACKET_PUBLIC_KEY" | base64 -d > ~/.ssh/id_rsa.pub
- mkdir -p contrib/terraform/$PROVIDER/group_vars
# Random subnet to avoid routing conflicts
- export TF_VAR_subnet_cidr="10.$(( $RANDOM % 256 )).$(( $RANDOM % 256 )).0/24"

.terraform_validate:
terraform_validate:
extends: .terraform_install
tags: [ffci]
only: ['master', /^pr-.*$/]
@@ -36,6 +29,17 @@
stage: test
needs:
- pipeline-image
parallel:
matrix:
- PROVIDER:
- openstack
- equinix
- aws
- exoscale
- hetzner
- vsphere
- upcloud
- nifcloud

.terraform_apply:
extends: .terraform_install
@@ -43,99 +47,24 @@
stage: deploy-extended
when: manual
only: [/^pr-.*$/]
artifacts:
when: always
paths:
- cluster-dump/
variables:
ANSIBLE_INVENTORY_UNPARSED_FAILED: "true"
ANSIBLE_INVENTORY: hosts
CI_PLATFORM: tf
TF_VAR_ssh_user: $SSH_USER
ANSIBLE_REMOTE_USER: ubuntu # the openstack terraform module does not handle custom user correctly
ANSIBLE_SSH_RETRIES: 15
TF_VAR_ssh_user: $ANSIBLE_REMOTE_USER
TF_VAR_cluster_name: $CI_JOB_ID
script:
# Set Ansible config
- cp ansible.cfg ~/.ansible.cfg
- ssh-keygen -N '' -f $ANSIBLE_PRIVATE_KEY_FILE -t rsa
- mkdir -p contrib/terraform/$PROVIDER/group_vars
# Random subnet to avoid routing conflicts
- export TF_VAR_subnet_cidr="10.$(( $RANDOM % 256 )).$(( $RANDOM % 256 )).0/24"
- terraform -chdir="contrib/terraform/$PROVIDER" apply -auto-approve -parallelism=1
- tests/scripts/testcases_run.sh
after_script:
# Cleanup regardless of exit code
- ./tests/scripts/testcases_cleanup.sh

tf-validate-openstack:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-equinix:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: equinix
CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-aws:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: aws
CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-exoscale:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: exoscale

tf-validate-hetzner:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: hetzner

tf-validate-vsphere:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: vsphere
CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-upcloud:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: upcloud
CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-nifcloud:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: nifcloud

# tf-packet-ubuntu20-default:
# extends: .terraform_apply
# variables:
# TF_VERSION: $TERRAFORM_VERSION
# PROVIDER: packet
# CLUSTER: $CI_COMMIT_REF_NAME
# TF_VAR_number_of_k8s_masters: "1"
# TF_VAR_number_of_k8s_nodes: "1"
# TF_VAR_plan_k8s_masters: t1.small.x86
# TF_VAR_plan_k8s_nodes: t1.small.x86
# TF_VAR_metro: am
# TF_VAR_public_key_path: ""
# TF_VAR_operating_system: ubuntu_20_04

.ovh_variables: &ovh_variables
OS_AUTH_URL: https://auth.cloud.ovh.net/v3
OS_PROJECT_ID: 8d3cd5d737d74227ace462dee0b903fe
OS_PROJECT_NAME: "9361447987648822"
OS_USER_DOMAIN_NAME: Default
OS_PROJECT_DOMAIN_ID: default
OS_USERNAME: 8XuhBMfkKVrk
OS_REGION_NAME: UK1
OS_INTERFACE: public
OS_IDENTITY_API_VERSION: "3"
- terraform -chdir="contrib/terraform/$PROVIDER" destroy -auto-approve

# Elastx is generously donating resources for Kubespray on Openstack CI
# Contacts: @gix @bl0m1
@@ -169,11 +98,8 @@ tf-elastx_ubuntu20-calico:
allow_failure: true
variables:
<<: *elastx_variables
TF_VERSION: $TERRAFORM_VERSION
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME
ANSIBLE_TIMEOUT: "60"
SSH_USER: ubuntu
TF_VAR_number_of_k8s_masters: "1"
TF_VAR_number_of_k8s_masters_no_floating_ip: "0"
TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
@@ -194,46 +120,3 @@ tf-elastx_ubuntu20-calico:
TF_VAR_flavor_k8s_node: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
TF_VAR_image: ubuntu-20.04-server-latest
TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

# OVH voucher expired, commenting job until things are sorted out

# tf-ovh_cleanup:
# stage: unit-tests
# tags: [light]
# image: python
# environment: ovh
# variables:
# <<: *ovh_variables
# before_script:
# - pip install -r scripts/openstack-cleanup/requirements.txt
# script:
# - ./scripts/openstack-cleanup/main.py

# tf-ovh_ubuntu20-calico:
# extends: .terraform_apply
# when: on_success
# environment: ovh
# variables:
# <<: *ovh_variables
# TF_VERSION: $TERRAFORM_VERSION
# PROVIDER: openstack
# CLUSTER: $CI_COMMIT_REF_NAME
# ANSIBLE_TIMEOUT: "60"
# SSH_USER: ubuntu
# TF_VAR_number_of_k8s_masters: "0"
# TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
# TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
# TF_VAR_number_of_etcd: "0"
# TF_VAR_number_of_k8s_nodes: "0"
# TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
# TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
# TF_VAR_number_of_bastions: "0"
# TF_VAR_number_of_k8s_masters_no_etcd: "0"
# TF_VAR_use_neutron: "0"
# TF_VAR_floatingip_pool: "Ext-Net"
# TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
# TF_VAR_network_name: "Ext-Net"
# TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
# TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
# TF_VAR_image: "Ubuntu 20.04"
# TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
@@ -1,13 +1,13 @@
---
.vagrant:
extends: .testcases
vagrant:
extends: .job-moderated
needs:
- ci-not-authorized
variables:
CI_PLATFORM: "vagrant"
SSH_USER: "vagrant"
VAGRANT_DEFAULT_PROVIDER: "libvirt"
KUBESPRAY_VAGRANT_CONFIG: tests/files/${CI_JOB_NAME}.rb
KUBESPRAY_VAGRANT_CONFIG: tests/files/${TESTCASE}.rb
DOCKER_NAME: vagrant
VAGRANT_ANSIBLE_TAGS: facts
VAGRANT_HOME: "$CI_PROJECT_DIR/.vagrant.d"
@@ -28,44 +28,22 @@
- pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/tests/requirements.txt
- ./tests/scripts/vagrant_clean.sh
script:
- vagrant up
- ./tests/scripts/testcases_run.sh
after_script:
- vagrant destroy -f
cache:
key: $CI_JOB_NAME_SLUG
paths:
- .vagrant.d/boxes
- .cache/pip
policy: pull-push # TODO: change to "pull" when not on main

vagrant_ubuntu20-calico-dual-stack:
stage: deploy-extended
extends: .vagrant
when: manual
# FIXME: this test is broken (perma-failing)

vagrant_ubuntu20-flannel:
stage: deploy-part1
extends: .vagrant
when: on_success
allow_failure: false

vagrant_ubuntu20-flannel-collection:
stage: deploy-extended
extends: .vagrant
when: manual

vagrant_ubuntu20-kube-router-sep:
stage: deploy-extended
extends: .vagrant
when: manual

# Service proxy test fails connectivity testing
vagrant_ubuntu20-kube-router-svc-proxy:
stage: deploy-extended
extends: .vagrant
when: manual

vagrant_fedora39-kube-router:
stage: deploy-extended
extends: .vagrant
when: manual
# FIXME: this test is broken (perma-failing)
rules:
- if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
when: on_success
parallel:
matrix:
- TESTCASE:
- ubuntu24-calico-dual-stack
- ubuntu24-calico-ipv6only-stack
@@ -20,12 +20,6 @@ repos:
- id: yamllint
args: [--strict]

- repo: https://github.com/markdownlint/markdownlint
rev: v0.12.0
hooks:
- id: markdownlint
exclude: "^.github|(^docs/_sidebar\\.md$)"

- repo: https://github.com/shellcheck-py/shellcheck-py
rev: v0.10.0.1
hooks:
@@ -35,7 +29,7 @@ repos:
files: "\\.sh$"

- repo: https://github.com/ansible/ansible-lint
rev: v24.12.2
rev: v25.1.1
hooks:
- id: ansible-lint
additional_dependencies:
@@ -51,12 +45,6 @@ repos:

- repo: local
hooks:
- id: check-readme-versions
name: check-readme-versions
entry: tests/scripts/check_readme_versions.sh
language: script
pass_filenames: false

- id: collection-build-install
name: Build and install kubernetes-sigs.kubespray Ansible collection
language: python
@@ -82,6 +70,14 @@ repos:
- pathlib
- pyaml

- id: check-galaxy-version
name: Verify correct version for galaxy.yml
entry: scripts/galaxy_version.py
language: python
pass_filenames: false
additional_dependencies:
- ruamel.yaml

- id: jinja-syntax-check
name: jinja-syntax-check
entry: tests/scripts/check-templates.py
@@ -90,3 +86,25 @@ repos:
- jinja
additional_dependencies:
- jinja2

- id: propagate-ansible-variables
name: Update static files referencing default kubespray values
language: python
additional_dependencies:
- ansible-core>=2.16.4
entry: scripts/propagate_ansible_variables.yml
pass_filenames: false

- id: check-checksums-sorted
name: Check that our checksums are correctly sorted by version
entry: scripts/assert-sorted-checksums.yml
language: python
pass_filenames: false
additional_dependencies:
- ansible

- repo: https://github.com/markdownlint/markdownlint
rev: v0.12.0
hooks:
- id: markdownlint
exclude: "^.github|(^docs/_sidebar\\.md$)"
@@ -34,11 +34,9 @@ RUN --mount=type=bind,source=requirements.txt,target=requirements.txt \

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

RUN --mount=type=bind,source=roles/kubespray-defaults/defaults/main/main.yml,target=roles/kubespray-defaults/defaults/main/main.yml \
KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main/main.yml) \
OS_ARCHITECTURE=$(dpkg --print-architecture) \
&& curl -L "https://dl.k8s.io/release/${KUBE_VERSION}/bin/linux/${OS_ARCHITECTURE}/kubectl" -o /usr/local/bin/kubectl \
&& echo "$(curl -L "https://dl.k8s.io/release/${KUBE_VERSION}/bin/linux/${OS_ARCHITECTURE}/kubectl.sha256")" /usr/local/bin/kubectl | sha256sum --check \
RUN OS_ARCHITECTURE=$(dpkg --print-architecture) \
&& curl -L "https://dl.k8s.io/release/v1.32.3/bin/linux/${OS_ARCHITECTURE}/kubectl" -o /usr/local/bin/kubectl \
&& echo "$(curl -L "https://dl.k8s.io/release/v1.32.3/bin/linux/${OS_ARCHITECTURE}/kubectl.sha256")" /usr/local/bin/kubectl | sha256sum --check \
&& chmod a+x /usr/local/bin/kubectl

COPY *.yml ./
README.md: 98 lines changed
@@ -15,6 +15,18 @@ You can get your invite [here](http://slack.k8s.io/)

Below are several ways to use Kubespray to deploy a Kubernetes cluster.

### Docker

Ensure you have installed Docker then

```ShellSession
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
--mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
quay.io/kubespray/kubespray:v2.27.0 bash
# Inside the container you may now run the kubespray playbooks:
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
```

### Ansible

#### Usage
@@ -77,57 +89,61 @@ vagrant up
- **Flatcar Container Linux by Kinvolk**
- **Debian** Bookworm, Bullseye
- **Ubuntu** 20.04, 22.04, 24.04
- **CentOS/RHEL** [8, 9](docs/operating_systems/centos.md#centos-8)
- **CentOS/RHEL** [8, 9](docs/operating_systems/rhel.md#rhel-8)
- **Fedora** 39, 40
- **Fedora CoreOS** (see [fcos Note](docs/operating_systems/fcos.md))
- **openSUSE** Leap 15.x/Tumbleweed
- **Oracle Linux** [8, 9](docs/operating_systems/centos.md#centos-8)
- **Alma Linux** [8, 9](docs/operating_systems/centos.md#centos-8)
- **Rocky Linux** [8, 9](docs/operating_systems/centos.md#centos-8)
- **Oracle Linux** [8, 9](docs/operating_systems/rhel.md#rhel-8)
- **Alma Linux** [8, 9](docs/operating_systems/rhel.md#rhel-8)
- **Rocky Linux** [8, 9](docs/operating_systems/rhel.md#rhel-8)
- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/operating_systems/kylinlinux.md))
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/operating_systems/amazonlinux.md))
- **UOS Linux** (experimental: see [uos linux notes](docs/operating_systems/uoslinux.md))
- **openEuler** (experimental: see [openEuler notes](docs/operating_systems/openeuler.md))

Note: Upstart/SysV init based OS types are not supported.
Note:

- Upstart/SysV init based OS types are not supported.
- [Kernel requirements](docs/operations/kernel-requirements.md) (please read if the OS kernel version is < 4.19).

## Supported Components

<!-- BEGIN ANSIBLE MANAGED BLOCK -->

- Core
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.31.4
- [etcd](https://github.com/etcd-io/etcd) v3.5.16
- [docker](https://www.docker.com/) v26.1
- [containerd](https://containerd.io/) v1.7.24
- [cri-o](http://cri-o.io/) v1.31.0 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). Only on fedora, ubuntu and centos based OS)
- [kubernetes](https://github.com/kubernetes/kubernetes) 1.32.3
- [etcd](https://github.com/etcd-io/etcd) 3.5.16
- [docker](https://www.docker.com/) 28.0
- [containerd](https://containerd.io/) 2.0.3
- [cri-o](http://cri-o.io/) 1.32.0 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). Only on fedora, ubuntu and centos based OS)
- Network Plugin
- [cni-plugins](https://github.com/containernetworking/plugins) v1.2.0
- [calico](https://github.com/projectcalico/calico) v3.29.1
- [cilium](https://github.com/cilium/cilium) v1.15.9
- [flannel](https://github.com/flannel-io/flannel) v0.22.0
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.12.21
- [kube-router](https://github.com/cloudnativelabs/kube-router) v2.0.0
- [multus](https://github.com/k8snetworkplumbingwg/multus-cni) v3.8
- [weave](https://github.com/rajch/weave) v2.8.7
- [kube-vip](https://github.com/kube-vip/kube-vip) v0.8.0
- [cni-plugins](https://github.com/containernetworking/plugins) 1.4.1
- [calico](https://github.com/projectcalico/calico) 3.29.2
- [cilium](https://github.com/cilium/cilium) 1.15.9
- [flannel](https://github.com/flannel-io/flannel) 0.22.0
- [kube-ovn](https://github.com/alauda/kube-ovn) 1.12.21
- [kube-router](https://github.com/cloudnativelabs/kube-router) 2.1.1
- [multus](https://github.com/k8snetworkplumbingwg/multus-cni) 4.1.0
- [weave](https://github.com/rajch/weave) 2.8.7
- [kube-vip](https://github.com/kube-vip/kube-vip) 0.8.0
- Application
- [cert-manager](https://github.com/jetstack/cert-manager) v1.15.3
- [coredns](https://github.com/coredns/coredns) v1.11.3
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.12.0
- [krew](https://github.com/kubernetes-sigs/krew) v0.4.4
- [argocd](https://argoproj.github.io/) v2.11.0
- [helm](https://helm.sh/) v3.16.4
- [metallb](https://metallb.universe.tf/) v0.13.9
- [registry](https://github.com/distribution/distribution) v2.8.1
- [cert-manager](https://github.com/jetstack/cert-manager) 1.15.3
- [coredns](https://github.com/coredns/coredns) 1.11.3
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) 1.12.1
- [argocd](https://argoproj.github.io/) 2.14.5
- [helm](https://helm.sh/) 3.16.4
- [metallb](https://metallb.universe.tf/) 0.13.9
- [registry](https://github.com/distribution/distribution) 2.8.1
- Storage Plugin
- [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
- [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
- [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) v0.5.0
- [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0
- [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.30.0
- [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.9.2
- [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.24
- [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0
- [node-feature-discovery](https://github.com/kubernetes-sigs/node-feature-discovery) v0.16.4
- [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) 0.5.0
- [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) 1.10.0
- [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) 1.30.0
- [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) 1.9.2
- [local-path-provisioner](https://github.com/rancher/local-path-provisioner) 0.0.24
- [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) 2.5.0
- [node-feature-discovery](https://github.com/kubernetes-sigs/node-feature-discovery) 0.16.4

<!-- END ANSIBLE MANAGED BLOCK -->

## Container Runtime Notes

@@ -135,7 +151,7 @@ Note: Upstart/SysV init based OS types are not supported.

## Requirements

- **Minimum required version of Kubernetes is v1.29**
- **Minimum required version of Kubernetes is v1.30**
- **Ansible v2.14+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/operations/offline-environment.md))
- The target servers are configured to allow **IPv4 forwarding**.
@@ -149,10 +165,10 @@ Note: Upstart/SysV init based OS types are not supported.
Hardware:
These limits are safeguarded by Kubespray. Actual requirements for your workload can differ. For a sizing guide go to the [Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/#size-of-master-and-master-components) guide.

- Master
- Memory: 1500 MB
- Node
- Memory: 1024 MB
- Control Plane
- Memory: 2 GB
- Worker Node
- Memory: 1 GB

## Network Plugins
@@ -12,7 +12,6 @@ The Kubespray Project is released on an as-needed basis. The process is as follo
1. (For major releases) On the `master` branch: bump the version in `galaxy.yml` to the next expected major release (X.y.0 with y = Y + 1), make a Pull Request.
1. (For minor releases) On the `release-X.Y` branch: bump the version in `galaxy.yml` to the next expected minor release (X.Y.z with z = Z + 1), make a Pull Request.
1. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) container images are built and tagged. See the following `Container image creation` section for the details.
1. (Only for major releases) The `KUBESPRAY_VERSION` in `.gitlab-ci.yml` is upgraded to the version we just released # TODO clarify this, this variable is for testing upgrades.
1. The release issue is closed
1. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
1. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
@@ -46,7 +45,7 @@ The Kubespray Project is released on an as-needed basis. The process is as follo

* Minor releases can change components' versions, but not the major `kube_version`.
Greater `kube_version` requires a new major or minor release. For example, if Kubespray v2.0.0
is bound to `kube_version: 1.4.x`, `calico_version: 0.22.0`, `etcd_version: v3.0.6`,
is bound to `kube_version: 1.4.x`, `calico_version: 0.22.0`, `etcd_version: 3.0.6`,
then Kubespray v2.1.0 may be bound to only minor changes to `kube_version`, like v1.5.1
and *any* changes to other components, like etcd v4, or calico 1.2.3.
And Kubespray v3.x.x shall be bound to `kube_version: 2.x.x` respectively.
34
Vagrantfile
vendored
34
Vagrantfile
vendored
@@ -26,13 +26,14 @@ SUPPORTED_OS = {
|
||||
"centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
|
||||
"almalinux8" => {box: "almalinux/8", user: "vagrant"},
|
||||
"almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
|
||||
"almalinux9" => {box: "almalinux/9", user: "vagrant"},
|
||||
"rockylinux8" => {box: "rockylinux/8", user: "vagrant"},
|
||||
"rockylinux9" => {box: "rockylinux/9", user: "vagrant"},
|
||||
"fedora39" => {box: "fedora/39-cloud-base", user: "vagrant"},
|
||||
"fedora40" => {box: "fedora/40-cloud-base", user: "vagrant"},
|
||||
"fedora39-arm64" => {box: "bento/fedora-39-arm64", user: "vagrant"},
|
||||
"fedora40-arm64" => {box: "bento/fedora-40", user: "vagrant"},
|
||||
"opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"},
|
||||
"opensuse" => {box: "opensuse/Leap-15.6.x86_64", user: "vagrant"},
|
||||
"opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
|
||||
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
|
||||
"oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
|
||||
@@ -57,18 +58,27 @@ $subnet ||= "172.18.8"
$subnet_ipv6 ||= "fd3c:b398:0698:0756"
$os ||= "ubuntu2004"
$network_plugin ||= "flannel"
$inventory ||= "inventory/sample"
$inventories ||= [$inventory]
$inventories ||= []
# Setting multi_networking to true will install Multus: https://github.com/k8snetworkplumbingwg/multus-cni
$multi_networking ||= "False"
$download_run_once ||= "True"
$download_force_cache ||= "False"
# Modify those to have separate groups (for instance, to test separate etcd:)
# first_control_plane = 1
# first_etcd = 4
# control_plane_instances = 3
# etcd_instances = 3
$first_node ||= 1
$first_control_plane ||= 1
$first_etcd ||= 1

# The first three nodes are etcd servers
$etcd_instances ||= [$num_instances, 3].min
# The first two nodes are kube masters
$kube_master_instances ||= [$num_instances, 2].min
$control_plane_instances ||= [$num_instances, 2].min
# All nodes are kube nodes
$kube_node_instances ||= $num_instances
$kube_node_instances ||= $num_instances - $first_node + 1

# The following only works when using the libvirt provider
$kube_node_instances_with_disks ||= false
$kube_node_instances_with_disks_size ||= "20G"
@@ -210,14 +220,20 @@ Vagrant.configure("2") do |config|
      end

      ip = "#{$subnet}.#{i+100}"
      ip6 = "#{$subnet_ipv6}::#{i+100}"
      node.vm.network :private_network,
        :ip => ip,
        :libvirt__guest_ipv6 => 'yes',
        :libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
        :libvirt__ipv6_address => ip6,
        :libvirt__ipv6_prefix => "64",
        :libvirt__forward_mode => "none",
        :libvirt__dhcp_enabled => false

      # libvirt__ipv6_address does not work as intended: the address is obtained with the desired prefix,
      # but auto-generated (like fd3c:b398:698:756:5054:ff:fe48:c61e/64).
      # Add a default route so that ansible_default_ipv6 can be detected.
      # TODO: fix libvirt__ipv6 or use $subnet in shell
      config.vm.provision "shell", inline: "ip -6 r a fd3c:b398:698:756::/64 dev eth1;ip -6 r add default via fd3c:b398:0698:0756::1 dev eth1 || true"

      # Disable swap for each vm
      node.vm.provision "shell", inline: "swapoff -a"
@@ -291,9 +307,9 @@ Vagrant.configure("2") do |config|
        ansible.tags = [$ansible_tags]
      end
      ansible.groups = {
        "etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
        "kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
        "kube_node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
        "etcd" => ["#{$instance_name_prefix}-[#{$first_etcd}:#{$etcd_instances + $first_etcd - 1}]"],
        "kube_control_plane" => ["#{$instance_name_prefix}-[#{$first_control_plane}:#{$control_plane_instances + $first_control_plane - 1}]"],
        "kube_node" => ["#{$instance_name_prefix}-[#{$first_node}:#{$kube_node_instances + $first_node - 1}]"],
        "k8s_cluster:children" => ["kube_control_plane", "kube_node"],
      }
    end
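The new group expressions build Ansible host ranges from the `$first_*` offsets and the instance counts. As a rough sketch, assuming the defaults shown earlier ($first_node = $first_control_plane = $first_etcd = 1), $num_instances set to 3, and an instance name prefix of "k8s" (all assumptions, not values fixed by this diff), the generated groups would expand to roughly:

```yaml
# Illustrative expansion only; the real inventory is generated by Vagrant/Ansible.
etcd:
  hosts:
    k8s-1: {}
    k8s-2: {}
    k8s-3: {}
kube_control_plane:
  hosts:
    k8s-1: {}
    k8s-2: {}
kube_node:
  hosts:
    k8s-1: {}
    k8s-2: {}
    k8s-3: {}
k8s_cluster:
  children:
    kube_control_plane: {}
    kube_node: {}
```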
@@ -1,11 +0,0 @@
# Hypervisor preparation for Kubespray on KVM virtual machines

A simple playbook to ensure your system has the right settings to enable Kubespray deployment on VMs.

This playbook does not create virtual machines, nor does it run Kubespray itself.

## User creation

If you want to create a user for running the Kubespray deployment, you should specify both `k8s_deployment_user` and `k8s_deployment_user_pkey_path`.
@@ -1,2 +0,0 @@
#k8s_deployment_user: kubespray
#k8s_deployment_user_pkey_path: /tmp/ssh_rsa
@@ -1,9 +0,0 @@
---
- name: Prepare Hypervisor to later install kubespray VMs
  hosts: localhost
  gather_facts: false
  become: true
  vars:
    bootstrap_os: none
  roles:
    - { role: kvm-setup }
@@ -1,30 +0,0 @@
---

- name: Install required packages
  package:
    name: "{{ item }}"
    state: present
  with_items:
    - bind-utils
    - ntp
  when: ansible_os_family == "RedHat"

- name: Install required packages
  apt:
    upgrade: true
    update_cache: true
    cache_valid_time: 3600
    name: "{{ item }}"
    state: present
    install_recommends: false
  with_items:
    - dnsutils
    - ntp
  when: ansible_os_family == "Debian"

- name: Create deployment user if required
  include_tasks: user.yml
  when: k8s_deployment_user is defined

- name: Set proper sysctl values
  import_tasks: sysctl.yml
@@ -1,46 +0,0 @@
|
||||
---
|
||||
- name: Load br_netfilter module
|
||||
community.general.modprobe:
|
||||
name: br_netfilter
|
||||
state: present
|
||||
register: br_netfilter
|
||||
|
||||
- name: Add br_netfilter into /etc/modules
|
||||
lineinfile:
|
||||
dest: /etc/modules
|
||||
state: present
|
||||
line: 'br_netfilter'
|
||||
when: br_netfilter is defined and ansible_os_family == 'Debian'
|
||||
|
||||
- name: Add br_netfilter into /etc/modules-load.d/kubespray.conf
|
||||
copy:
|
||||
dest: /etc/modules-load.d/kubespray.conf
|
||||
content: |-
|
||||
### This file is managed by Ansible
|
||||
br-netfilter
|
||||
owner: root
|
||||
group: root
|
||||
mode: "0644"
|
||||
when: br_netfilter is defined
|
||||
|
||||
|
||||
- name: Enable net.ipv4.ip_forward in sysctl
|
||||
ansible.posix.sysctl:
|
||||
name: net.ipv4.ip_forward
|
||||
value: 1
|
||||
sysctl_file: "{{ sysctl_file_path }}"
|
||||
state: present
|
||||
reload: true
|
||||
|
||||
- name: Set bridge-nf-call-{arptables,iptables} to 0
|
||||
ansible.posix.sysctl:
|
||||
name: "{{ item }}"
|
||||
state: present
|
||||
value: 0
|
||||
sysctl_file: "{{ sysctl_file_path }}"
|
||||
reload: true
|
||||
with_items:
|
||||
- net.bridge.bridge-nf-call-arptables
|
||||
- net.bridge.bridge-nf-call-ip6tables
|
||||
- net.bridge.bridge-nf-call-iptables
|
||||
when: br_netfilter is defined
|
||||
@@ -1,47 +0,0 @@
|
||||
---
|
||||
- name: Create user {{ k8s_deployment_user }}
|
||||
user:
|
||||
name: "{{ k8s_deployment_user }}"
|
||||
groups: adm
|
||||
shell: /bin/bash
|
||||
|
||||
- name: Ensure that .ssh exists
|
||||
file:
|
||||
path: "/home/{{ k8s_deployment_user }}/.ssh"
|
||||
state: directory
|
||||
owner: "{{ k8s_deployment_user }}"
|
||||
group: "{{ k8s_deployment_user }}"
|
||||
mode: "0700"
|
||||
|
||||
- name: Configure sudo for deployment user
|
||||
copy:
|
||||
content: |
|
||||
%{{ k8s_deployment_user }} ALL=(ALL) NOPASSWD: ALL
|
||||
dest: "/etc/sudoers.d/55-k8s-deployment"
|
||||
owner: root
|
||||
group: root
|
||||
mode: "0644"
|
||||
|
||||
- name: Write private SSH key
|
||||
copy:
|
||||
src: "{{ k8s_deployment_user_pkey_path }}"
|
||||
dest: "/home/{{ k8s_deployment_user }}/.ssh/id_rsa"
|
||||
mode: "0400"
|
||||
owner: "{{ k8s_deployment_user }}"
|
||||
group: "{{ k8s_deployment_user }}"
|
||||
when: k8s_deployment_user_pkey_path is defined
|
||||
|
||||
- name: Write public SSH key
|
||||
shell: "ssh-keygen -y -f /home/{{ k8s_deployment_user }}/.ssh/id_rsa \
|
||||
> /home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
|
||||
args:
|
||||
creates: "/home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
|
||||
when: k8s_deployment_user_pkey_path is defined
|
||||
|
||||
- name: Fix ssh-pub-key permissions
|
||||
file:
|
||||
path: "/home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
|
||||
mode: "0600"
|
||||
owner: "{{ k8s_deployment_user }}"
|
||||
group: "{{ k8s_deployment_user }}"
|
||||
when: k8s_deployment_user_pkey_path is defined
|
||||
@@ -1,15 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kube-system
@@ -1,51 +0,0 @@
|
||||
---
|
||||
- name: Check ansible version
|
||||
import_playbook: kubernetes_sigs.kubespray.ansible_version
|
||||
|
||||
- name: Install mitogen
|
||||
hosts: localhost
|
||||
strategy: linear
|
||||
vars:
|
||||
mitogen_version: 0.3.2
|
||||
mitogen_url: https://github.com/mitogen-hq/mitogen/archive/refs/tags/v{{ mitogen_version }}.tar.gz
|
||||
ansible_connection: local
|
||||
tasks:
|
||||
- name: Create mitogen plugin dir
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: directory
|
||||
mode: "0755"
|
||||
become: false
|
||||
loop:
|
||||
- "{{ playbook_dir }}/plugins/mitogen"
|
||||
- "{{ playbook_dir }}/dist"
|
||||
|
||||
- name: Download mitogen release
|
||||
get_url:
|
||||
url: "{{ mitogen_url }}"
|
||||
dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
|
||||
validate_certs: true
|
||||
mode: "0644"
|
||||
|
||||
- name: Extract archive
|
||||
unarchive:
|
||||
src: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
|
||||
dest: "{{ playbook_dir }}/dist/"
|
||||
|
||||
- name: Copy plugin
|
||||
ansible.posix.synchronize:
|
||||
src: "{{ playbook_dir }}/dist/mitogen-{{ mitogen_version }}/"
|
||||
dest: "{{ playbook_dir }}/plugins/mitogen"
|
||||
|
||||
- name: Add strategy to ansible.cfg
|
||||
community.general.ini_file:
|
||||
path: ansible.cfg
|
||||
mode: "0644"
|
||||
section: "{{ item.section | d('defaults') }}"
|
||||
option: "{{ item.option }}"
|
||||
value: "{{ item.value }}"
|
||||
with_items:
|
||||
- option: strategy
|
||||
value: mitogen_linear
|
||||
- option: strategy_plugins
|
||||
value: plugins/mitogen/ansible_mitogen/plugins/strategy
|
||||
@@ -1,92 +0,0 @@
# Deploying a Kubespray Kubernetes Cluster with GlusterFS

You can either deploy using Ansible on its own, supplying your own inventory file, or use Terraform to create the VMs and then provide a dynamic inventory to Ansible. The following two sections are self-contained; you don't need to go through one to use the other. So, if you want to provision with Terraform, you can skip the **Using an Ansible inventory** section, and if you want to provision with a pre-built Ansible inventory, you can skip the **Using Terraform and Ansible** section.

## Using an Ansible inventory

In the same directory as this README you should find a file named `inventory.example` which contains an example setup. Please note that, in addition to the Kubernetes nodes/masters, we define a set of machines for GlusterFS and add them to the group `[gfs-cluster]`, which in turn is added to the larger `[network-storage]` group as a child group (a sketch of this grouping follows).
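A minimal sketch of that grouping in YAML inventory form (hostnames here are placeholders; the full INI-style example ships as `inventory.example`):

```yaml
# Illustrative only - mirrors the [gfs-cluster] / [network-storage] layout described above
all:
  children:
    gfs-cluster:
      hosts:
        gfs_node1: {}
        gfs_node2: {}
        gfs_node3: {}
    network-storage:
      children:
        gfs-cluster: {}
```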
Change that file to reflect your local setup (adding or removing machines and setting the appropriate IP addresses), and save it as `inventory/sample/k8s_gfs_inventory`. Make sure that the settings in `inventory/sample/group_vars/all.yml` are appropriate for your deployment. Then change to the kubespray root folder and execute (assuming the machines all run Ubuntu):

```shell
ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./cluster.yml
```

This will provision your Kubernetes cluster. Then, to provision and configure the GlusterFS cluster, execute from the same directory:

```shell
ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml
```

If your machines are not running Ubuntu, change `--user=ubuntu` to the correct user. Alternatively, if your Kubernetes machines use one OS and your GlusterFS machines a different one, you can instead set the `ansible_ssh_user=<correct-user>` variable in the inventory file you just created, for each machine/VM:

```ini
k8s-master-1 ansible_ssh_host=192.168.0.147 ip=192.168.0.147 ansible_ssh_user=core
k8s-master-node-1 ansible_ssh_host=192.168.0.148 ip=192.168.0.148 ansible_ssh_user=core
k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_user=core
```

## Using Terraform and Ansible

The first step is to fill in a `my-kubespray-gluster-cluster.tfvars` file with the specification desired for your cluster. An example with all required variables looks like:

```ini
cluster_name = "cluster1"
number_of_k8s_masters = "1"
number_of_k8s_masters_no_floating_ip = "2"
number_of_k8s_nodes_no_floating_ip = "0"
number_of_k8s_nodes = "0"
public_key_path = "~/.ssh/my-desired-key.pub"
image = "Ubuntu 16.04"
ssh_user = "ubuntu"
flavor_k8s_node = "node-flavor-id-in-your-openstack"
flavor_k8s_master = "master-flavor-id-in-your-openstack"
network_name = "k8s-network"
floatingip_pool = "net_external"

# GlusterFS variables
flavor_gfs_node = "gluster-flavor-id-in-your-openstack"
image_gfs = "Ubuntu 16.04"
number_of_gfs_nodes_no_floating_ip = "3"
gfs_volume_size_in_gb = "50"
ssh_user_gfs = "ubuntu"
```

As explained in the general Terraform/OpenStack guide, you need to source your OpenStack credentials file, add your SSH key to the ssh-agent, and set up environment variables for Terraform:

```shell
$ source ~/.stackrc
$ eval $(ssh-agent -s)
$ ssh-add ~/.ssh/my-desired-key
$ echo Setting up Terraform creds && \
  export TF_VAR_username=${OS_USERNAME} && \
  export TF_VAR_password=${OS_PASSWORD} && \
  export TF_VAR_tenant=${OS_TENANT_NAME} && \
  export TF_VAR_auth_url=${OS_AUTH_URL}
```

Then, from the kubespray directory (the root of the Git checkout), issue the following Terraform command to create the VMs for the cluster:

```shell
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
```

This will create both your Kubernetes and Gluster VMs. Make sure that the Ansible file `contrib/terraform/openstack/group_vars/all.yml` includes any Ansible variables that you want to set (for instance, the type of machine for bootstrapping).

Then, provision your Kubernetes (kubespray) cluster with the following Ansible call:

```shell
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml
```

Finally, provision the GlusterFS nodes and add the PersistentVolume setup for GlusterFS in Kubernetes through the following Ansible call:

```shell
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
```

If you need to destroy the cluster, you can run:

```shell
terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
```
@@ -1,29 +0,0 @@
|
||||
---
|
||||
- name: Bootstrap hosts
|
||||
hosts: gfs-cluster
|
||||
gather_facts: false
|
||||
vars:
|
||||
ansible_ssh_pipelining: false
|
||||
roles:
|
||||
- { role: bootstrap-os, tags: bootstrap-os}
|
||||
|
||||
- name: Gather facts
|
||||
hosts: all
|
||||
gather_facts: true
|
||||
|
||||
- name: Install glusterfs server
|
||||
hosts: gfs-cluster
|
||||
vars:
|
||||
ansible_ssh_pipelining: true
|
||||
roles:
|
||||
- { role: glusterfs/server }
|
||||
|
||||
- name: Install glusterfs clients
|
||||
hosts: k8s_cluster
|
||||
roles:
|
||||
- { role: glusterfs/client }
|
||||
|
||||
- name: Configure Kubernetes to use glusterfs
|
||||
hosts: kube_control_plane[0]
|
||||
roles:
|
||||
- { role: kubernetes-pv }
|
||||
@@ -1 +0,0 @@
|
||||
../../../inventory/local/group_vars
|
||||
@@ -1,43 +0,0 @@
|
||||
# ## Configure 'ip' variable to bind kubernetes services on a
|
||||
# ## different ip than the default iface
|
||||
# node1 ansible_ssh_host=95.54.0.12 # ip=10.3.0.1
|
||||
# node2 ansible_ssh_host=95.54.0.13 # ip=10.3.0.2
|
||||
# node3 ansible_ssh_host=95.54.0.14 # ip=10.3.0.3
|
||||
# node4 ansible_ssh_host=95.54.0.15 # ip=10.3.0.4
|
||||
# node5 ansible_ssh_host=95.54.0.16 # ip=10.3.0.5
|
||||
# node6 ansible_ssh_host=95.54.0.17 # ip=10.3.0.6
|
||||
#
|
||||
# ## GlusterFS nodes
|
||||
# ## Set disk_volume_device_1 to desired device for gluster brick, if different to /dev/vdb (default).
|
||||
# ## As in the previous case, you can set ip to give direct communication on internal IPs
|
||||
# gfs_node1 ansible_ssh_host=95.54.0.18 # disk_volume_device_1=/dev/vdc ip=10.3.0.7
|
||||
# gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8
|
||||
# gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9
|
||||
|
||||
# [kube_control_plane]
|
||||
# node1
|
||||
# node2
|
||||
|
||||
# [etcd]
|
||||
# node1
|
||||
# node2
|
||||
# node3
|
||||
|
||||
# [kube_node]
|
||||
# node2
|
||||
# node3
|
||||
# node4
|
||||
# node5
|
||||
# node6
|
||||
|
||||
# [k8s_cluster:children]
|
||||
# kube_node
|
||||
# kube_control_plane
|
||||
|
||||
# [gfs-cluster]
|
||||
# gfs_node1
|
||||
# gfs_node2
|
||||
# gfs_node3
|
||||
|
||||
# [network-storage:children]
|
||||
# gfs-cluster
|
||||
@@ -1 +0,0 @@
|
||||
../../../../roles/bootstrap-os
|
||||
@@ -1,50 +0,0 @@
# Ansible Role: GlusterFS

[Build Status](https://travis-ci.org/geerlingguy/ansible-role-glusterfs)

Installs and configures GlusterFS on Linux.

## Requirements

For GlusterFS to connect between servers, TCP ports `24007`, `24008`, and `24009`/`49152`+ (that port plus an additional incremented port for each additional server in the cluster; the latter if GlusterFS is version 3.4+), and TCP/UDP port `111` must be open. You can open these using whatever firewall you wish (this can easily be configured using the `geerlingguy.firewall` role).

This role performs basic installation and setup of Gluster, but it does not configure or mount bricks (volumes), since that step is easier to do in a series of plays in your own playbook. Ansible 1.9+ includes the [`gluster_volume`](https://docs.ansible.com/ansible/latest/collections/gluster/gluster/gluster_volume_module.html) module to ease the management of Gluster volumes; a short sketch follows.
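For instance, a single task using that module (collection-qualified as `gluster.gluster.gluster_volume`, matching how the server role in this repository invokes it; the volume name, brick path, and use of hostnames for the cluster list are placeholders for illustration) might look like:

```yaml
# Sketch only - create and start a replicated Gluster volume across the gfs-cluster group
- name: Create gluster volume
  gluster.gluster.gluster_volume:
    state: present
    name: example-volume                      # placeholder volume name
    brick: /mnt/xfs-drive-gluster/brick       # placeholder brick path
    replicas: "{{ groups['gfs-cluster'] | length }}"
    cluster: "{{ groups['gfs-cluster'] | join(',') }}"   # assumes hostnames are resolvable
    host: "{{ inventory_hostname }}"
    run_once: true
```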
## Role Variables

Available variables are listed below, along with default values (see `defaults/main.yml`):

```yaml
glusterfs_default_release: ""
```

You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy).

```yaml
glusterfs_ppa_use: true
glusterfs_ppa_version: "3.5"
```

For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](https://docs.gluster.org/en/latest/Quick-Start-Guide/Quickstart/) for more info. An example of overriding these variables from a playbook is shown below.
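A minimal sketch of overriding these defaults when applying the role (the host group and PPA version here are illustrative, not values required by the role):

```yaml
- hosts: gfs-cluster
  become: true
  vars:
    glusterfs_default_release: ""     # apt default release, Debian/Ubuntu only
    glusterfs_ppa_use: true
    glusterfs_ppa_version: "4.1"      # pick the PPA series you want to track
  roles:
    - glusterfs/server
```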
## Dependencies

None.

## Example Playbook

```yaml
- hosts: server
  roles:
    - geerlingguy.glusterfs
```

For a real-world use example, read through [Simple GlusterFS Setup with Ansible](http://www.jeffgeerling.com/blog/simple-glusterfs-setup-ansible), a blog post by this role's author, which is included in Chapter 8 of [Ansible for DevOps](https://www.ansiblefordevops.com/).

## License

MIT / BSD

## Author Information

This role was created in 2015 by [Jeff Geerling](http://www.jeffgeerling.com/), author of [Ansible for DevOps](https://www.ansiblefordevops.com/).
@@ -1,11 +0,0 @@
|
||||
---
|
||||
# For Ubuntu.
|
||||
glusterfs_default_release: ""
|
||||
glusterfs_ppa_use: true
|
||||
glusterfs_ppa_version: "4.1"
|
||||
|
||||
# Gluster configuration.
|
||||
gluster_mount_dir: /mnt/gluster
|
||||
gluster_volume_node_mount_dir: /mnt/xfs-drive-gluster
|
||||
gluster_brick_dir: "{{ gluster_volume_node_mount_dir }}/brick"
|
||||
gluster_brick_name: gluster
|
||||
@@ -1,30 +0,0 @@
|
||||
---
|
||||
dependencies: []
|
||||
|
||||
galaxy_info:
|
||||
author: geerlingguy
|
||||
description: GlusterFS installation for Linux.
|
||||
company: "Midwestern Mac, LLC"
|
||||
license: "license (BSD, MIT)"
|
||||
min_ansible_version: "2.0"
|
||||
platforms:
|
||||
- name: EL
|
||||
versions:
|
||||
- "6"
|
||||
- "7"
|
||||
- name: Ubuntu
|
||||
versions:
|
||||
- precise
|
||||
- trusty
|
||||
- xenial
|
||||
- name: Debian
|
||||
versions:
|
||||
- wheezy
|
||||
- jessie
|
||||
galaxy_tags:
|
||||
- system
|
||||
- networking
|
||||
- cloud
|
||||
- clustering
|
||||
- files
|
||||
- sharing
|
||||
@@ -1,21 +0,0 @@
|
||||
---
|
||||
# This is meant for Ubuntu and RedHat installations, where apparently the glusterfs-client is not used from inside
|
||||
# hyperkube and needs to be installed as part of the system.
|
||||
|
||||
# Setup/install tasks.
|
||||
- name: Setup RedHat distros for glusterfs
|
||||
include_tasks: setup-RedHat.yml
|
||||
when: ansible_os_family == 'RedHat' and groups['gfs-cluster'] is defined
|
||||
|
||||
- name: Setup Debian distros for glusterfs
|
||||
include_tasks: setup-Debian.yml
|
||||
when: ansible_os_family == 'Debian' and groups['gfs-cluster'] is defined
|
||||
|
||||
- name: Ensure Gluster mount directories exist.
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: directory
|
||||
mode: "0775"
|
||||
with_items:
|
||||
- "{{ gluster_mount_dir }}"
|
||||
when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined
|
||||
@@ -1,24 +0,0 @@
|
||||
---
|
||||
- name: Add PPA for GlusterFS.
|
||||
apt_repository:
|
||||
repo: 'ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}'
|
||||
state: present
|
||||
update_cache: true
|
||||
register: glusterfs_ppa_added
|
||||
when: glusterfs_ppa_use
|
||||
|
||||
- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa no-handler
|
||||
apt:
|
||||
name: "{{ item }}"
|
||||
state: absent
|
||||
with_items:
|
||||
- glusterfs-client
|
||||
when: glusterfs_ppa_added.changed
|
||||
|
||||
- name: Ensure GlusterFS client is installed.
|
||||
apt:
|
||||
name: "{{ item }}"
|
||||
state: present
|
||||
default_release: "{{ glusterfs_default_release }}"
|
||||
with_items:
|
||||
- glusterfs-client
|
||||
@@ -1,14 +0,0 @@
|
||||
---
|
||||
- name: Install Prerequisites
|
||||
package:
|
||||
name: "{{ item }}"
|
||||
state: present
|
||||
with_items:
|
||||
- "centos-release-gluster{{ glusterfs_default_release }}"
|
||||
|
||||
- name: Install Packages
|
||||
package:
|
||||
name: "{{ item }}"
|
||||
state: present
|
||||
with_items:
|
||||
- glusterfs-client
|
||||
@@ -1,13 +0,0 @@
|
||||
---
|
||||
# For Ubuntu.
|
||||
glusterfs_default_release: ""
|
||||
glusterfs_ppa_use: true
|
||||
glusterfs_ppa_version: "3.12"
|
||||
|
||||
# Gluster configuration.
|
||||
gluster_mount_dir: /mnt/gluster
|
||||
gluster_volume_node_mount_dir: /mnt/xfs-drive-gluster
|
||||
gluster_brick_dir: "{{ gluster_volume_node_mount_dir }}/brick"
|
||||
gluster_brick_name: gluster
|
||||
# Default device to mount for xfs formatting, terraform overrides this by setting the variable in the inventory.
|
||||
disk_volume_device_1: /dev/vdb
|
||||
@@ -1,30 +0,0 @@
|
||||
---
|
||||
dependencies: []
|
||||
|
||||
galaxy_info:
|
||||
author: geerlingguy
|
||||
description: GlusterFS installation for Linux.
|
||||
company: "Midwestern Mac, LLC"
|
||||
license: "license (BSD, MIT)"
|
||||
min_ansible_version: "2.0"
|
||||
platforms:
|
||||
- name: EL
|
||||
versions:
|
||||
- "6"
|
||||
- "7"
|
||||
- name: Ubuntu
|
||||
versions:
|
||||
- precise
|
||||
- trusty
|
||||
- xenial
|
||||
- name: Debian
|
||||
versions:
|
||||
- wheezy
|
||||
- jessie
|
||||
galaxy_tags:
|
||||
- system
|
||||
- networking
|
||||
- cloud
|
||||
- clustering
|
||||
- files
|
||||
- sharing
|
||||
@@ -1,113 +0,0 @@
|
||||
---
|
||||
# Include variables and define needed variables.
|
||||
- name: Include OS-specific variables.
|
||||
include_vars: "{{ ansible_os_family }}.yml"
|
||||
|
||||
# Install xfs package
|
||||
- name: Install xfs Debian
|
||||
apt:
|
||||
name: xfsprogs
|
||||
state: present
|
||||
when: ansible_os_family == "Debian"
|
||||
|
||||
- name: Install xfs RedHat
|
||||
package:
|
||||
name: xfsprogs
|
||||
state: present
|
||||
when: ansible_os_family == "RedHat"
|
||||
|
||||
# Format external volumes in xfs
|
||||
- name: Format volumes in xfs
|
||||
community.general.filesystem:
|
||||
fstype: xfs
|
||||
dev: "{{ disk_volume_device_1 }}"
|
||||
|
||||
# Mount external volumes
|
||||
- name: Mounting new xfs filesystem
|
||||
ansible.posix.mount:
|
||||
name: "{{ gluster_volume_node_mount_dir }}"
|
||||
src: "{{ disk_volume_device_1 }}"
|
||||
fstype: xfs
|
||||
state: mounted
|
||||
|
||||
# Setup/install tasks.
|
||||
- name: Setup RedHat distros for glusterfs
|
||||
include_tasks: setup-RedHat.yml
|
||||
when: ansible_os_family == 'RedHat'
|
||||
|
||||
- name: Setup Debian distros for glusterfs
|
||||
include_tasks: setup-Debian.yml
|
||||
when: ansible_os_family == 'Debian'
|
||||
|
||||
- name: Ensure GlusterFS is started and enabled at boot.
|
||||
service:
|
||||
name: "{{ glusterfs_daemon }}"
|
||||
state: started
|
||||
enabled: true
|
||||
|
||||
- name: Ensure Gluster brick and mount directories exist.
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: directory
|
||||
mode: "0775"
|
||||
with_items:
|
||||
- "{{ gluster_brick_dir }}"
|
||||
- "{{ gluster_mount_dir }}"
|
||||
|
||||
- name: Configure Gluster volume with replicas
|
||||
gluster.gluster.gluster_volume:
|
||||
state: present
|
||||
name: "{{ gluster_brick_name }}"
|
||||
brick: "{{ gluster_brick_dir }}"
|
||||
replicas: "{{ groups['gfs-cluster'] | length }}"
|
||||
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
|
||||
host: "{{ inventory_hostname }}"
|
||||
force: true
|
||||
run_once: true
|
||||
when: groups['gfs-cluster'] | length > 1
|
||||
|
||||
- name: Configure Gluster volume without replicas
|
||||
gluster.gluster.gluster_volume:
|
||||
state: present
|
||||
name: "{{ gluster_brick_name }}"
|
||||
brick: "{{ gluster_brick_dir }}"
|
||||
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
|
||||
host: "{{ inventory_hostname }}"
|
||||
force: true
|
||||
run_once: true
|
||||
when: groups['gfs-cluster'] | length <= 1
|
||||
|
||||
- name: Mount glusterfs to retrieve disk size
|
||||
ansible.posix.mount:
|
||||
name: "{{ gluster_mount_dir }}"
|
||||
src: "{{ ip | default(ansible_default_ipv4['address']) }}:/gluster"
|
||||
fstype: glusterfs
|
||||
opts: "defaults,_netdev"
|
||||
state: mounted
|
||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||
|
||||
- name: Get Gluster disk size
|
||||
setup:
|
||||
filter: ansible_mounts
|
||||
register: mounts_data
|
||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||
|
||||
- name: Set Gluster disk size to variable
|
||||
set_fact:
|
||||
gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024 * 1024 * 1024)) | int }}"
|
||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||
|
||||
- name: Create file on GlusterFS
|
||||
template:
|
||||
dest: "{{ gluster_mount_dir }}/.test-file.txt"
|
||||
src: test-file.txt
|
||||
mode: "0644"
|
||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||
|
||||
- name: Unmount glusterfs
|
||||
ansible.posix.mount:
|
||||
name: "{{ gluster_mount_dir }}"
|
||||
fstype: glusterfs
|
||||
src: "{{ ip | default(ansible_default_ipv4['address']) }}:/gluster"
|
||||
state: unmounted
|
||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||
@@ -1,26 +0,0 @@
|
||||
---
|
||||
- name: Add PPA for GlusterFS.
|
||||
apt_repository:
|
||||
repo: 'ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}'
|
||||
state: present
|
||||
update_cache: true
|
||||
register: glusterfs_ppa_added
|
||||
when: glusterfs_ppa_use
|
||||
|
||||
- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa no-handler
|
||||
apt:
|
||||
name: "{{ item }}"
|
||||
state: absent
|
||||
with_items:
|
||||
- glusterfs-server
|
||||
- glusterfs-client
|
||||
when: glusterfs_ppa_added.changed
|
||||
|
||||
- name: Ensure GlusterFS is installed.
|
||||
apt:
|
||||
name: "{{ item }}"
|
||||
state: present
|
||||
default_release: "{{ glusterfs_default_release }}"
|
||||
with_items:
|
||||
- glusterfs-server
|
||||
- glusterfs-client
|
||||
@@ -1,15 +0,0 @@
|
||||
---
|
||||
- name: Install Prerequisites
|
||||
package:
|
||||
name: "{{ item }}"
|
||||
state: present
|
||||
with_items:
|
||||
- "centos-release-gluster{{ glusterfs_default_release }}"
|
||||
|
||||
- name: Install Packages
|
||||
package:
|
||||
name: "{{ item }}"
|
||||
state: present
|
||||
with_items:
|
||||
- glusterfs-server
|
||||
- glusterfs-client
|
||||
@@ -1 +0,0 @@
|
||||
test file
|
||||
@@ -1,2 +0,0 @@
|
||||
---
|
||||
glusterfs_daemon: glusterd
|
||||
@@ -1,2 +0,0 @@
|
||||
---
|
||||
glusterfs_daemon: glusterd
|
||||
@@ -1,23 +0,0 @@
|
||||
---
|
||||
- name: Kubernetes Apps | Lay Down k8s GlusterFS Endpoint and PV
|
||||
template:
|
||||
src: "{{ item.file }}"
|
||||
dest: "{{ kube_config_dir }}/{{ item.dest }}"
|
||||
mode: "0644"
|
||||
with_items:
|
||||
- { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
|
||||
- { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
|
||||
- { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json}
|
||||
register: gluster_pv
|
||||
when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
|
||||
|
||||
- name: Kubernetes Apps | Set GlusterFS endpoint and PV
|
||||
kube:
|
||||
name: glusterfs
|
||||
namespace: default
|
||||
kubectl: "{{ bin_dir }}/kubectl"
|
||||
resource: "{{ item.item.type }}"
|
||||
filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
|
||||
state: "{{ item.changed | ternary('latest', 'present') }}"
|
||||
with_items: "{{ gluster_pv.results }}"
|
||||
when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined
|
||||
@@ -1,12 +0,0 @@
|
||||
{
|
||||
"kind": "Service",
|
||||
"apiVersion": "v1",
|
||||
"metadata": {
|
||||
"name": "glusterfs"
|
||||
},
|
||||
"spec": {
|
||||
"ports": [
|
||||
{"port": 1}
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -1,23 +0,0 @@
|
||||
{
|
||||
"kind": "Endpoints",
|
||||
"apiVersion": "v1",
|
||||
"metadata": {
|
||||
"name": "glusterfs"
|
||||
},
|
||||
"subsets": [
|
||||
{% for host in groups['gfs-cluster'] %}
|
||||
{
|
||||
"addresses": [
|
||||
{
|
||||
"ip": "{{hostvars[host]['ip']|default(hostvars[host].ansible_default_ipv4['address'])}}"
|
||||
}
|
||||
],
|
||||
"ports": [
|
||||
{
|
||||
"port": 1
|
||||
}
|
||||
]
|
||||
}{%- if not loop.last %}, {% endif -%}
|
||||
{% endfor %}
|
||||
]
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: glusterfs
|
||||
spec:
|
||||
capacity:
|
||||
storage: "{{ hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb }}Gi"
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
glusterfs:
|
||||
endpoints: glusterfs
|
||||
path: gluster
|
||||
readOnly: false
|
||||
persistentVolumeReclaimPolicy: Retain
|
||||
@@ -1,3 +0,0 @@
|
||||
---
|
||||
dependencies:
|
||||
- {role: kubernetes-pv/ansible, tags: apps}
|
||||
@@ -1,27 +0,0 @@
# Deploy Heketi/GlusterFS into Kubespray/Kubernetes

This playbook aims to automate [this](https://github.com/heketi/heketi/blob/master/docs/admin/install-kubernetes.md) tutorial. It deploys Heketi/GlusterFS into Kubernetes and sets up a StorageClass.

## Important notice

> Due to resource limits on the current project maintainers and a general lack of contributions, we are considering placing Heketi into a [near-maintenance mode](https://github.com/heketi/heketi#important-notice).

## Client Setup

Heketi provides a CLI that gives users a means to administer the deployment and configuration of GlusterFS in Kubernetes. [Download and install the heketi-cli](https://github.com/heketi/heketi/releases) on your client machine.

## Install

Copy `inventory.yml.sample` over to `inventory/sample/k8s_heketi_inventory.yml` and change it according to your setup.

```shell
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi.yml
```

## Tear down

```shell
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi-tear-down.yml
```

Add `--extra-vars "heketi_remove_lvm=true"` to the command above to remove LVM packages from the system.
@@ -1,11 +0,0 @@
|
||||
---
|
||||
- name: Tear down heketi
|
||||
hosts: kube_control_plane[0]
|
||||
roles:
|
||||
- { role: tear-down }
|
||||
|
||||
- name: Teardown disks in heketi
|
||||
hosts: heketi-node
|
||||
become: true
|
||||
roles:
|
||||
- { role: tear-down-disks }
|
||||
@@ -1,12 +0,0 @@
|
||||
---
|
||||
- name: Prepare heketi install
|
||||
hosts: heketi-node
|
||||
roles:
|
||||
- { role: prepare }
|
||||
|
||||
- name: Provision heketi
|
||||
hosts: kube_control_plane[0]
|
||||
tags:
|
||||
- "provision"
|
||||
roles:
|
||||
- { role: provision }
|
||||
@@ -1,33 +0,0 @@
|
||||
all:
|
||||
vars:
|
||||
heketi_admin_key: "11elfeinhundertundelf"
|
||||
heketi_user_key: "!!einseinseins"
|
||||
glusterfs_daemonset:
|
||||
readiness_probe:
|
||||
timeout_seconds: 3
|
||||
initial_delay_seconds: 3
|
||||
liveness_probe:
|
||||
timeout_seconds: 3
|
||||
initial_delay_seconds: 10
|
||||
children:
|
||||
k8s_cluster:
|
||||
vars:
|
||||
kubelet_fail_swap_on: false
|
||||
children:
|
||||
kube_control_plane:
|
||||
hosts:
|
||||
node1:
|
||||
etcd:
|
||||
hosts:
|
||||
node2:
|
||||
kube_node:
|
||||
hosts: &kube_nodes
|
||||
node1:
|
||||
node2:
|
||||
node3:
|
||||
node4:
|
||||
heketi-node:
|
||||
vars:
|
||||
disk_volume_device_1: "/dev/vdb"
|
||||
hosts:
|
||||
<<: *kube_nodes
|
||||
@@ -1 +0,0 @@
|
||||
jmespath
|
||||
@@ -1,24 +0,0 @@
|
||||
---
|
||||
- name: "Load lvm kernel modules"
|
||||
become: true
|
||||
with_items:
|
||||
- "dm_snapshot"
|
||||
- "dm_mirror"
|
||||
- "dm_thin_pool"
|
||||
community.general.modprobe:
|
||||
name: "{{ item }}"
|
||||
state: "present"
|
||||
|
||||
- name: "Install glusterfs mount utils (RedHat)"
|
||||
become: true
|
||||
package:
|
||||
name: "glusterfs-fuse"
|
||||
state: "present"
|
||||
when: "ansible_os_family == 'RedHat'"
|
||||
|
||||
- name: "Install glusterfs mount utils (Debian)"
|
||||
become: true
|
||||
apt:
|
||||
name: "glusterfs-client"
|
||||
state: "present"
|
||||
when: "ansible_os_family == 'Debian'"
|
||||
@@ -1 +0,0 @@
|
||||
---
|
||||
@@ -1,3 +0,0 @@
|
||||
---
|
||||
- name: "Stop port forwarding"
|
||||
command: "killall "
|
||||
@@ -1,64 +0,0 @@
|
||||
---
|
||||
# Bootstrap heketi
|
||||
- name: "Get state of heketi service, deployment and pods."
|
||||
register: "initial_heketi_state"
|
||||
changed_when: false
|
||||
command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
|
||||
|
||||
- name: "Bootstrap heketi."
|
||||
when:
|
||||
- "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Service']\")) | length == 0"
|
||||
- "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Deployment']\")) | length == 0"
|
||||
- "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Pod']\")) | length == 0"
|
||||
include_tasks: "bootstrap/deploy.yml"
|
||||
|
||||
# Prepare heketi topology
|
||||
- name: "Get heketi initial pod state."
|
||||
register: "initial_heketi_pod"
|
||||
command: "{{ bin_dir }}/kubectl get pods --selector=deploy-heketi=pod,glusterfs=heketi-pod,name=deploy-heketi --output=json"
|
||||
changed_when: false
|
||||
|
||||
- name: "Ensure heketi bootstrap pod is up."
|
||||
assert:
|
||||
that: "(initial_heketi_pod.stdout | from_json | json_query('items[*]')) | length == 1"
|
||||
|
||||
- name: Store the initial heketi pod name
|
||||
set_fact:
|
||||
initial_heketi_pod_name: "{{ initial_heketi_pod.stdout | from_json | json_query(\"items[*].metadata.name | [0]\") }}"
|
||||
|
||||
- name: "Test heketi topology."
|
||||
changed_when: false
|
||||
register: "heketi_topology"
|
||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
||||
|
||||
- name: "Load heketi topology."
|
||||
when: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*]\") | flatten | length == 0"
|
||||
include_tasks: "bootstrap/topology.yml"
|
||||
|
||||
# Provision heketi database volume
|
||||
- name: "Prepare heketi volumes."
|
||||
include_tasks: "bootstrap/volumes.yml"
|
||||
|
||||
# Remove bootstrap heketi
|
||||
- name: "Tear down bootstrap."
|
||||
include_tasks: "bootstrap/tear-down.yml"
|
||||
|
||||
# Prepare heketi storage
|
||||
- name: "Test heketi storage."
|
||||
command: "{{ bin_dir }}/kubectl get secrets,endpoints,services,jobs --output=json"
|
||||
changed_when: false
|
||||
register: "heketi_storage_state"
|
||||
|
||||
# ensure endpoints actually exist before trying to move database data to it
|
||||
- name: "Create heketi storage."
|
||||
include_tasks: "bootstrap/storage.yml"
|
||||
vars:
|
||||
secret_query: "items[?metadata.name=='heketi-storage-secret' && kind=='Secret']"
|
||||
endpoints_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Endpoints']"
|
||||
service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
|
||||
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
|
||||
when:
|
||||
- "heketi_storage_state.stdout | from_json | json_query(secret_query) | length == 0"
|
||||
- "heketi_storage_state.stdout | from_json | json_query(endpoints_query) | length == 0"
|
||||
- "heketi_storage_state.stdout | from_json | json_query(service_query) | length == 0"
|
||||
- "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 0"
|
||||
@@ -1,27 +0,0 @@
|
||||
---
|
||||
- name: "Kubernetes Apps | Lay Down Heketi Bootstrap"
|
||||
become: true
|
||||
template:
|
||||
src: "heketi-bootstrap.json.j2"
|
||||
dest: "{{ kube_config_dir }}/heketi-bootstrap.json"
|
||||
mode: "0640"
|
||||
register: "rendering"
|
||||
- name: "Kubernetes Apps | Install and configure Heketi Bootstrap"
|
||||
kube:
|
||||
name: "GlusterFS"
|
||||
kubectl: "{{ bin_dir }}/kubectl"
|
||||
filename: "{{ kube_config_dir }}/heketi-bootstrap.json"
|
||||
state: "{{ rendering.changed | ternary('latest', 'present') }}"
|
||||
- name: "Wait for heketi bootstrap to complete."
|
||||
changed_when: false
|
||||
register: "initial_heketi_state"
|
||||
vars:
|
||||
initial_heketi_state: { stdout: "{}" }
|
||||
pods_query: "items[?kind=='Pod'].status.conditions | [0][?type=='Ready'].status | [0]"
|
||||
deployments_query: "items[?kind=='Deployment'].status.conditions | [0][?type=='Available'].status | [0]"
|
||||
command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
|
||||
until:
|
||||
- "initial_heketi_state.stdout | from_json | json_query(pods_query) == 'True'"
|
||||
- "initial_heketi_state.stdout | from_json | json_query(deployments_query) == 'True'"
|
||||
retries: 60
|
||||
delay: 5
|
||||
@@ -1,33 +0,0 @@
|
||||
---
|
||||
- name: "Test heketi storage."
|
||||
command: "{{ bin_dir }}/kubectl get secrets,endpoints,services,jobs --output=json"
|
||||
changed_when: false
|
||||
register: "heketi_storage_state"
|
||||
- name: "Create heketi storage."
|
||||
kube:
|
||||
name: "GlusterFS"
|
||||
kubectl: "{{ bin_dir }}/kubectl"
|
||||
filename: "{{ kube_config_dir }}/heketi-storage-bootstrap.json"
|
||||
state: "present"
|
||||
vars:
|
||||
secret_query: "items[?metadata.name=='heketi-storage-secret' && kind=='Secret']"
|
||||
endpoints_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Endpoints']"
|
||||
service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
|
||||
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
|
||||
when:
|
||||
- "heketi_storage_state.stdout | from_json | json_query(secret_query) | length == 0"
|
||||
- "heketi_storage_state.stdout | from_json | json_query(endpoints_query) | length == 0"
|
||||
- "heketi_storage_state.stdout | from_json | json_query(service_query) | length == 0"
|
||||
- "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 0"
|
||||
register: "heketi_storage_result"
|
||||
- name: "Get state of heketi database copy job."
|
||||
command: "{{ bin_dir }}/kubectl get jobs --output=json"
|
||||
changed_when: false
|
||||
register: "heketi_storage_state"
|
||||
vars:
|
||||
heketi_storage_state: { stdout: "{}" }
|
||||
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job' && status.succeeded==1]"
|
||||
until:
|
||||
- "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 1"
|
||||
retries: 60
|
||||
delay: 5
|
||||
@@ -1,14 +0,0 @@
|
||||
---
|
||||
- name: "Get existing Heketi deploy resources."
|
||||
command: "{{ bin_dir }}/kubectl get all --selector=\"deploy-heketi\" -o=json"
|
||||
register: "heketi_resources"
|
||||
changed_when: false
|
||||
- name: "Delete bootstrap Heketi."
|
||||
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
|
||||
when: "heketi_resources.stdout | from_json | json_query('items[*]') | length > 0"
|
||||
- name: "Ensure there is nothing left over."
|
||||
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
|
||||
register: "heketi_result"
|
||||
until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
|
||||
retries: 60
|
||||
delay: 5
|
||||
@@ -1,27 +0,0 @@
|
||||
---
|
||||
- name: "Get heketi topology."
|
||||
changed_when: false
|
||||
register: "heketi_topology"
|
||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
||||
- name: "Render heketi topology template."
|
||||
become: true
|
||||
vars: { nodes: "{{ groups['heketi-node'] }}" }
|
||||
register: "render"
|
||||
template:
|
||||
src: "topology.json.j2"
|
||||
dest: "{{ kube_config_dir }}/topology.json"
|
||||
mode: "0644"
|
||||
- name: "Copy topology configuration into container."
|
||||
changed_when: false
|
||||
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
|
||||
- name: "Load heketi topology." # noqa no-handler
|
||||
when: "render.changed"
|
||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
|
||||
register: "load_heketi"
|
||||
- name: "Get heketi topology."
|
||||
changed_when: false
|
||||
register: "heketi_topology"
|
||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
||||
until: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\") | flatten | length == groups['heketi-node'] | length"
|
||||
retries: 60
|
||||
delay: 5
|
||||
@@ -1,41 +0,0 @@
|
||||
---
|
||||
- name: "Get heketi volume ids."
|
||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume list --json"
|
||||
changed_when: false
|
||||
register: "heketi_volumes"
|
||||
- name: "Get heketi volumes."
|
||||
changed_when: false
|
||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
|
||||
with_items: "{{ heketi_volumes.stdout | from_json | json_query(\"volumes[*]\") }}"
|
||||
loop_control: { loop_var: "volume_id" }
|
||||
register: "volumes_information"
|
||||
- name: "Test heketi database volume."
|
||||
set_fact: { heketi_database_volume_exists: true }
|
||||
with_items: "{{ volumes_information.results }}"
|
||||
loop_control: { loop_var: "volume_information" }
|
||||
vars: { volume: "{{ volume_information.stdout | from_json }}" }
|
||||
when: "volume.name == 'heketidbstorage'"
|
||||
- name: "Provision database volume."
|
||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
|
||||
when: "heketi_database_volume_exists is undefined"
|
||||
- name: "Copy configuration from pod."
|
||||
become: true
|
||||
command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"
|
||||
- name: "Get heketi volume ids."
|
||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume list --json"
|
||||
changed_when: false
|
||||
register: "heketi_volumes"
|
||||
- name: "Get heketi volumes."
|
||||
changed_when: false
|
||||
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
|
||||
with_items: "{{ heketi_volumes.stdout | from_json | json_query(\"volumes[*]\") }}"
|
||||
loop_control: { loop_var: "volume_id" }
|
||||
register: "volumes_information"
|
||||
- name: "Test heketi database volume."
|
||||
set_fact: { heketi_database_volume_created: true }
|
||||
with_items: "{{ volumes_information.results }}"
|
||||
loop_control: { loop_var: "volume_information" }
|
||||
vars: { volume: "{{ volume_information.stdout | from_json }}" }
|
||||
when: "volume.name == 'heketidbstorage'"
|
||||
- name: "Ensure heketi database volume exists."
|
||||
assert: { that: "heketi_database_volume_created is defined", msg: "Heketi database volume does not exist." }
|
||||
@@ -1,4 +0,0 @@
|
||||
---
|
||||
- name: "Clean up left over jobs."
|
||||
command: "{{ bin_dir }}/kubectl delete jobs,pods --selector=\"deploy-heketi\""
|
||||
changed_when: false
|
||||
@@ -1,44 +0,0 @@
|
||||
---
|
||||
- name: "Kubernetes Apps | Lay Down GlusterFS Daemonset"
|
||||
template:
|
||||
src: "glusterfs-daemonset.json.j2"
|
||||
dest: "{{ kube_config_dir }}/glusterfs-daemonset.json"
|
||||
mode: "0644"
|
||||
become: true
|
||||
register: "rendering"
|
||||
- name: "Kubernetes Apps | Install and configure GlusterFS daemonset"
|
||||
kube:
|
||||
name: "GlusterFS"
|
||||
kubectl: "{{ bin_dir }}/kubectl"
|
||||
filename: "{{ kube_config_dir }}/glusterfs-daemonset.json"
|
||||
state: "{{ rendering.changed | ternary('latest', 'present') }}"
|
||||
- name: "Kubernetes Apps | Label GlusterFS nodes"
|
||||
include_tasks: "glusterfs/label.yml"
|
||||
with_items: "{{ groups['heketi-node'] }}"
|
||||
loop_control:
|
||||
loop_var: "node"
|
||||
- name: "Kubernetes Apps | Wait for daemonset to become available."
|
||||
register: "daemonset_state"
|
||||
command: "{{ bin_dir }}/kubectl get daemonset glusterfs --output=json --ignore-not-found=true"
|
||||
changed_when: false
|
||||
vars:
|
||||
daemonset_state: { stdout: "{}" }
|
||||
ready: "{{ daemonset_state.stdout | from_json | json_query(\"status.numberReady\") }}"
|
||||
desired: "{{ daemonset_state.stdout | from_json | json_query(\"status.desiredNumberScheduled\") }}"
|
||||
until: "ready | int >= 3"
|
||||
retries: 60
|
||||
delay: 5
|
||||
|
||||
- name: "Kubernetes Apps | Lay Down Heketi Service Account"
|
||||
template:
|
||||
src: "heketi-service-account.json.j2"
|
||||
dest: "{{ kube_config_dir }}/heketi-service-account.json"
|
||||
mode: "0644"
|
||||
become: true
|
||||
register: "rendering"
|
||||
- name: "Kubernetes Apps | Install and configure Heketi Service Account"
|
||||
kube:
|
||||
name: "GlusterFS"
|
||||
kubectl: "{{ bin_dir }}/kubectl"
|
||||
filename: "{{ kube_config_dir }}/heketi-service-account.json"
|
||||
state: "{{ rendering.changed | ternary('latest', 'present') }}"
|
||||
@@ -1,19 +0,0 @@
|
||||
---
|
||||
- name: Get storage nodes
|
||||
register: "label_present"
|
||||
command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
|
||||
changed_when: false
|
||||
|
||||
- name: "Assign storage label"
|
||||
when: "label_present.stdout_lines | length == 0"
|
||||
command: "{{ bin_dir }}/kubectl label node {{ node }} storagenode=glusterfs"
|
||||
|
||||
- name: Get storage nodes again
|
||||
register: "label_present"
|
||||
command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
|
||||
changed_when: false
|
||||
|
||||
- name: Ensure the label has been set
|
||||
assert:
|
||||
that: "label_present | length > 0"
|
||||
msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs."
|
||||
@@ -1,34 +0,0 @@
|
||||
---
|
||||
- name: "Kubernetes Apps | Lay Down Heketi"
|
||||
become: true
|
||||
template:
|
||||
src: "heketi-deployment.json.j2"
|
||||
dest: "{{ kube_config_dir }}/heketi-deployment.json"
|
||||
mode: "0644"
|
||||
register: "rendering"
|
||||
|
||||
- name: "Kubernetes Apps | Install and configure Heketi"
|
||||
kube:
|
||||
name: "GlusterFS"
|
||||
kubectl: "{{ bin_dir }}/kubectl"
|
||||
filename: "{{ kube_config_dir }}/heketi-deployment.json"
|
||||
state: "{{ rendering.changed | ternary('latest', 'present') }}"
|
||||
|
||||
- name: "Ensure heketi is up and running."
|
||||
changed_when: false
|
||||
register: "heketi_state"
|
||||
vars:
|
||||
heketi_state:
|
||||
stdout: "{}"
|
||||
pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]"
|
||||
deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
|
||||
command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json"
|
||||
until:
|
||||
- "heketi_state.stdout | from_json | json_query(pods_query) == 'True'"
|
||||
- "heketi_state.stdout | from_json | json_query(deployments_query) == 'True'"
|
||||
retries: 60
|
||||
delay: 5
|
||||
|
||||
- name: Set the Heketi pod name
|
||||
set_fact:
|
||||
heketi_pod_name: "{{ heketi_state.stdout | from_json | json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"
|
||||
@@ -1,30 +0,0 @@
|
||||
---
|
||||
- name: "Kubernetes Apps | GlusterFS"
|
||||
include_tasks: "glusterfs.yml"
|
||||
|
||||
- name: "Kubernetes Apps | Heketi Secrets"
|
||||
include_tasks: "secret.yml"
|
||||
|
||||
- name: "Kubernetes Apps | Test Heketi"
|
||||
register: "heketi_service_state"
|
||||
command: "{{ bin_dir }}/kubectl get service heketi-storage-endpoints -o=name --ignore-not-found=true"
|
||||
changed_when: false
|
||||
|
||||
- name: "Kubernetes Apps | Bootstrap Heketi"
|
||||
when: "heketi_service_state.stdout == \"\""
|
||||
include_tasks: "bootstrap.yml"
|
||||
|
||||
- name: "Kubernetes Apps | Heketi"
|
||||
include_tasks: "heketi.yml"
|
||||
|
||||
- name: "Kubernetes Apps | Heketi Topology"
|
||||
include_tasks: "topology.yml"
|
||||
|
||||
- name: "Kubernetes Apps | Heketi Storage"
|
||||
include_tasks: "storage.yml"
|
||||
|
||||
- name: "Kubernetes Apps | Storage Class"
|
||||
include_tasks: "storageclass.yml"
|
||||
|
||||
- name: "Clean up"
|
||||
include_tasks: "cleanup.yml"
|
||||
@@ -1,45 +0,0 @@
|
||||
---
|
||||
- name: Get clusterrolebindings
|
||||
register: "clusterrolebinding_state"
|
||||
command: "{{ bin_dir }}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
|
||||
changed_when: false
|
||||
|
||||
- name: "Kubernetes Apps | Deploy cluster role binding."
|
||||
when: "clusterrolebinding_state.stdout | length == 0"
|
||||
command: "{{ bin_dir }}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"
|
||||
|
||||
- name: Get clusterrolebindings again
|
||||
register: "clusterrolebinding_state"
|
||||
command: "{{ bin_dir }}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
|
||||
changed_when: false
|
||||
|
||||
- name: Make sure that clusterrolebindings are present now
|
||||
assert:
|
||||
that: "clusterrolebinding_state.stdout | length > 0"
|
||||
msg: "Cluster role binding is not present."
|
||||
|
||||
- name: Get the heketi-config-secret secret
|
||||
register: "secret_state"
|
||||
command: "{{ bin_dir }}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
|
||||
changed_when: false
|
||||
|
||||
- name: "Render Heketi secret configuration."
|
||||
become: true
|
||||
template:
|
||||
src: "heketi.json.j2"
|
||||
dest: "{{ kube_config_dir }}/heketi.json"
|
||||
mode: "0644"
|
||||
|
||||
- name: "Deploy Heketi config secret"
|
||||
when: "secret_state.stdout | length == 0"
|
||||
command: "{{ bin_dir }}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"
|
||||
|
||||
- name: Get the heketi-config-secret secret again
|
||||
register: "secret_state"
|
||||
command: "{{ bin_dir }}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
|
||||
changed_when: false
|
||||
|
||||
- name: Make sure the heketi-config-secret secret exists now
|
||||
assert:
|
||||
that: "secret_state.stdout | length > 0"
|
||||
msg: "Heketi config secret is not present."
|
||||
@@ -1,15 +0,0 @@
|
||||
---
|
||||
- name: "Kubernetes Apps | Lay Down Heketi Storage"
|
||||
become: true
|
||||
vars: { nodes: "{{ groups['heketi-node'] }}" }
|
||||
template:
|
||||
src: "heketi-storage.json.j2"
|
||||
dest: "{{ kube_config_dir }}/heketi-storage.json"
|
||||
mode: "0644"
|
||||
register: "rendering"
|
||||
- name: "Kubernetes Apps | Install and configure Heketi Storage"
|
||||
kube:
|
||||
name: "GlusterFS"
|
||||
kubectl: "{{ bin_dir }}/kubectl"
|
||||
filename: "{{ kube_config_dir }}/heketi-storage.json"
|
||||
state: "{{ rendering.changed | ternary('latest', 'present') }}"
|
||||
@@ -1,26 +0,0 @@
|
||||
---
|
||||
- name: "Test storage class."
|
||||
command: "{{ bin_dir }}/kubectl get storageclass gluster --ignore-not-found=true --output=json"
|
||||
register: "storageclass"
|
||||
changed_when: false
|
||||
- name: "Test heketi service."
|
||||
command: "{{ bin_dir }}/kubectl get service heketi --ignore-not-found=true --output=json"
|
||||
register: "heketi_service"
|
||||
changed_when: false
|
||||
- name: "Ensure heketi service is available."
|
||||
assert: { that: "heketi_service.stdout != \"\"" }
|
||||
- name: "Render storage class configuration."
|
||||
become: true
|
||||
vars:
|
||||
endpoint_address: "{{ (heketi_service.stdout | from_json).spec.clusterIP }}"
|
||||
template:
|
||||
src: "storageclass.yml.j2"
|
||||
dest: "{{ kube_config_dir }}/storageclass.yml"
|
||||
mode: "0644"
|
||||
register: "rendering"
|
||||
- name: "Kubernetes Apps | Install and configure Storace Class"
|
||||
kube:
|
||||
name: "GlusterFS"
|
||||
kubectl: "{{ bin_dir }}/kubectl"
|
||||
filename: "{{ kube_config_dir }}/storageclass.yml"
|
||||
state: "{{ rendering.changed | ternary('latest', 'present') }}"
|
||||
@@ -1,26 +0,0 @@
|
||||
---
|
||||
- name: "Get heketi topology."
|
||||
register: "heketi_topology"
|
||||
changed_when: false
|
||||
command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
||||
- name: "Render heketi topology template."
|
||||
become: true
|
||||
vars: { nodes: "{{ groups['heketi-node'] }}" }
|
||||
register: "rendering"
|
||||
template:
|
||||
src: "topology.json.j2"
|
||||
dest: "{{ kube_config_dir }}/topology.json"
|
||||
mode: "0644"
|
||||
- name: "Copy topology configuration into container." # noqa no-handler
|
||||
when: "rendering.changed"
|
||||
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
|
||||
- name: "Load heketi topology." # noqa no-handler
|
||||
when: "rendering.changed"
|
||||
command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
|
||||
- name: "Get heketi topology."
|
||||
register: "heketi_topology"
|
||||
changed_when: false
|
||||
command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
|
||||
until: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\") | flatten | length == groups['heketi-node'] | length"
|
||||
retries: 60
|
||||
delay: 5
|
||||
@@ -1,149 +0,0 @@
|
||||
{
|
||||
"kind": "DaemonSet",
|
||||
"apiVersion": "apps/v1",
|
||||
"metadata": {
|
||||
"name": "glusterfs",
|
||||
"labels": {
|
||||
"glusterfs": "deployment"
|
||||
},
|
||||
"annotations": {
|
||||
"description": "GlusterFS Daemon Set",
|
||||
"tags": "glusterfs"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"selector": {
|
||||
"matchLabels": {
|
||||
"glusterfs-node": "daemonset"
|
||||
}
|
||||
},
|
||||
"template": {
|
||||
"metadata": {
|
||||
"name": "glusterfs",
|
||||
"labels": {
|
||||
"glusterfs-node": "daemonset"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"nodeSelector": {
|
||||
"storagenode" : "glusterfs"
|
||||
},
|
||||
"hostNetwork": true,
|
||||
"containers": [
|
||||
{
|
||||
"image": "gluster/gluster-centos:gluster4u0_centos7",
|
||||
"imagePullPolicy": "IfNotPresent",
|
||||
"name": "glusterfs",
|
||||
"volumeMounts": [
|
||||
{
|
||||
"name": "glusterfs-heketi",
|
||||
"mountPath": "/var/lib/heketi"
|
||||
},
|
||||
{
|
||||
"name": "glusterfs-run",
|
||||
"mountPath": "/run"
|
||||
},
|
||||
{
|
||||
"name": "glusterfs-lvm",
|
||||
"mountPath": "/run/lvm"
|
||||
},
|
||||
{
|
||||
"name": "glusterfs-etc",
|
||||
"mountPath": "/etc/glusterfs"
|
||||
},
|
||||
{
|
||||
"name": "glusterfs-logs",
|
||||
"mountPath": "/var/log/glusterfs"
|
||||
},
|
||||
{
|
||||
"name": "glusterfs-config",
|
||||
"mountPath": "/var/lib/glusterd"
|
||||
},
|
||||
{
|
||||
"name": "glusterfs-dev",
|
||||
"mountPath": "/dev"
|
||||
},
|
||||
{
|
||||
"name": "glusterfs-cgroup",
|
||||
"mountPath": "/sys/fs/cgroup"
|
||||
}
|
||||
],
|
||||
"securityContext": {
|
||||
"capabilities": {},
|
||||
"privileged": true
|
||||
},
|
||||
"readinessProbe": {
|
||||
"timeoutSeconds": {{ glusterfs_daemonset.readiness_probe.timeout_seconds }},
|
||||
"initialDelaySeconds": {{ glusterfs_daemonset.readiness_probe.initial_delay_seconds }},
|
||||
"exec": {
|
||||
"command": [
|
||||
"/bin/bash",
|
||||
"-c",
|
||||
"systemctl status glusterd.service"
|
||||
]
|
||||
}
|
||||
},
|
||||
"livenessProbe": {
|
||||
"timeoutSeconds": {{ glusterfs_daemonset.liveness_probe.timeout_seconds }},
|
||||
"initialDelaySeconds": {{ glusterfs_daemonset.liveness_probe.initial_delay_seconds }},
|
||||
"exec": {
|
||||
"command": [
|
||||
"/bin/bash",
|
||||
"-c",
|
||||
"systemctl status glusterd.service"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"volumes": [
|
||||
{
|
||||
"name": "glusterfs-heketi",
|
||||
"hostPath": {
|
||||
"path": "/var/lib/heketi"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "glusterfs-run"
|
||||
},
|
||||
{
|
||||
"name": "glusterfs-lvm",
|
||||
"hostPath": {
|
||||
"path": "/run/lvm"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "glusterfs-etc",
|
||||
"hostPath": {
|
||||
"path": "/etc/glusterfs"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "glusterfs-logs",
|
||||
"hostPath": {
|
||||
"path": "/var/log/glusterfs"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "glusterfs-config",
|
||||
"hostPath": {
|
||||
"path": "/var/lib/glusterd"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "glusterfs-dev",
|
||||
"hostPath": {
|
||||
"path": "/dev"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "glusterfs-cgroup",
|
||||
"hostPath": {
|
||||
"path": "/sys/fs/cgroup"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,138 +0,0 @@
|
||||
{
|
||||
"kind": "List",
|
||||
"apiVersion": "v1",
|
||||
"items": [
|
||||
{
|
||||
"kind": "Service",
|
||||
"apiVersion": "v1",
|
||||
"metadata": {
|
||||
"name": "deploy-heketi",
|
||||
"labels": {
|
||||
"glusterfs": "heketi-service",
|
||||
"deploy-heketi": "support"
|
||||
},
|
||||
"annotations": {
|
||||
"description": "Exposes Heketi Service"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"selector": {
|
||||
"name": "deploy-heketi"
|
||||
},
|
||||
"ports": [
|
||||
{
|
||||
"name": "deploy-heketi",
|
||||
"port": 8080,
|
||||
"targetPort": 8080
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"kind": "Deployment",
|
||||
"apiVersion": "apps/v1",
|
||||
"metadata": {
|
||||
"name": "deploy-heketi",
|
||||
"labels": {
|
||||
"glusterfs": "heketi-deployment",
|
||||
"deploy-heketi": "deployment"
|
||||
},
|
||||
"annotations": {
|
||||
"description": "Defines how to deploy Heketi"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"selector": {
|
||||
"matchLabels": {
|
||||
"name": "deploy-heketi"
|
||||
}
|
||||
},
|
||||
"replicas": 1,
|
||||
"template": {
|
||||
"metadata": {
|
||||
"name": "deploy-heketi",
|
||||
"labels": {
|
||||
"name": "deploy-heketi",
|
||||
"glusterfs": "heketi-pod",
|
||||
"deploy-heketi": "pod"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"serviceAccountName": "heketi-service-account",
|
||||
"containers": [
|
||||
{
|
||||
"image": "heketi/heketi:9",
|
||||
"imagePullPolicy": "Always",
|
||||
"name": "deploy-heketi",
|
||||
"env": [
|
||||
{
|
||||
"name": "HEKETI_EXECUTOR",
|
||||
"value": "kubernetes"
|
||||
},
|
||||
{
|
||||
"name": "HEKETI_DB_PATH",
|
||||
"value": "/var/lib/heketi/heketi.db"
|
||||
},
|
||||
{
|
||||
"name": "HEKETI_FSTAB",
|
||||
"value": "/var/lib/heketi/fstab"
|
||||
},
|
||||
{
|
||||
"name": "HEKETI_SNAPSHOT_LIMIT",
|
||||
"value": "14"
|
||||
},
|
||||
{
|
||||
"name": "HEKETI_KUBE_GLUSTER_DAEMONSET",
|
||||
"value": "y"
|
||||
}
|
||||
],
|
||||
"ports": [
|
||||
{
|
||||
"containerPort": 8080
|
||||
}
|
||||
],
|
||||
"volumeMounts": [
|
||||
{
|
||||
"name": "db",
|
||||
"mountPath": "/var/lib/heketi"
|
||||
},
|
||||
{
|
||||
"name": "config",
|
||||
"mountPath": "/etc/heketi"
|
||||
}
|
||||
],
|
||||
"readinessProbe": {
|
||||
"timeoutSeconds": 3,
|
||||
"initialDelaySeconds": 3,
|
||||
"httpGet": {
|
||||
"path": "/hello",
|
||||
"port": 8080
|
||||
}
|
||||
},
|
||||
"livenessProbe": {
|
||||
"timeoutSeconds": 3,
|
||||
"initialDelaySeconds": 10,
|
||||
"httpGet": {
|
||||
"path": "/hello",
|
||||
"port": 8080
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"volumes": [
|
||||
{
|
||||
"name": "db"
|
||||
},
|
||||
{
|
||||
"name": "config",
|
||||
"secret": {
|
||||
"secretName": "heketi-config-secret"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,164 +0,0 @@
|
||||
{
|
||||
"kind": "List",
|
||||
"apiVersion": "v1",
|
||||
"items": [
|
||||
{
|
||||
"kind": "Secret",
|
||||
"apiVersion": "v1",
|
||||
"metadata": {
|
||||
"name": "heketi-db-backup",
|
||||
"labels": {
|
||||
"glusterfs": "heketi-db",
|
||||
"heketi": "db"
|
||||
}
|
||||
},
|
||||
"data": {
|
||||
},
|
||||
"type": "Opaque"
|
||||
},
|
||||
{
|
||||
"kind": "Service",
|
||||
"apiVersion": "v1",
|
||||
"metadata": {
|
||||
"name": "heketi",
|
||||
"labels": {
|
||||
"glusterfs": "heketi-service",
|
||||
"deploy-heketi": "support"
|
||||
},
|
||||
"annotations": {
|
||||
"description": "Exposes Heketi Service"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"selector": {
|
||||
"name": "heketi"
|
||||
},
|
||||
"ports": [
|
||||
{
|
||||
"name": "heketi",
|
||||
"port": 8080,
|
||||
"targetPort": 8080
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"kind": "Deployment",
|
||||
"apiVersion": "apps/v1",
|
||||
"metadata": {
|
||||
"name": "heketi",
|
||||
"labels": {
|
||||
"glusterfs": "heketi-deployment"
|
||||
},
|
||||
"annotations": {
|
||||
"description": "Defines how to deploy Heketi"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"selector": {
|
||||
"matchLabels": {
|
||||
"name": "heketi"
|
||||
}
|
||||
},
|
||||
"replicas": 1,
|
||||
"template": {
|
||||
"metadata": {
|
||||
"name": "heketi",
|
||||
"labels": {
|
||||
"name": "heketi",
|
||||
"glusterfs": "heketi-pod"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"serviceAccountName": "heketi-service-account",
|
||||
"containers": [
|
||||
{
|
||||
"image": "heketi/heketi:9",
|
||||
"imagePullPolicy": "Always",
|
||||
"name": "heketi",
|
||||
"env": [
|
||||
{
|
||||
"name": "HEKETI_EXECUTOR",
|
||||
"value": "kubernetes"
|
||||
},
|
||||
{
|
||||
"name": "HEKETI_DB_PATH",
|
||||
"value": "/var/lib/heketi/heketi.db"
|
||||
},
|
||||
{
|
||||
"name": "HEKETI_FSTAB",
|
||||
"value": "/var/lib/heketi/fstab"
|
||||
},
|
||||
{
|
||||
"name": "HEKETI_SNAPSHOT_LIMIT",
|
||||
"value": "14"
|
||||
},
|
||||
{
|
||||
"name": "HEKETI_KUBE_GLUSTER_DAEMONSET",
|
||||
"value": "y"
|
||||
}
|
||||
],
|
||||
"ports": [
|
||||
{
|
||||
"containerPort": 8080
|
||||
}
|
||||
],
|
||||
"volumeMounts": [
|
||||
{
|
||||
"mountPath": "/backupdb",
|
||||
"name": "heketi-db-secret"
|
||||
},
|
||||
{
|
||||
"name": "db",
|
||||
"mountPath": "/var/lib/heketi"
|
||||
},
|
||||
{
|
||||
"name": "config",
|
||||
"mountPath": "/etc/heketi"
|
||||
}
|
||||
],
|
||||
"readinessProbe": {
|
||||
"timeoutSeconds": 3,
|
||||
"initialDelaySeconds": 3,
|
||||
"httpGet": {
|
||||
"path": "/hello",
|
||||
"port": 8080
|
||||
}
|
||||
},
|
||||
"livenessProbe": {
|
||||
"timeoutSeconds": 3,
|
||||
"initialDelaySeconds": 10,
|
||||
"httpGet": {
|
||||
"path": "/hello",
|
||||
"port": 8080
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"volumes": [
|
||||
{
|
||||
"name": "db",
|
||||
"glusterfs": {
|
||||
"endpoints": "heketi-storage-endpoints",
|
||||
"path": "heketidbstorage"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "heketi-db-secret",
|
||||
"secret": {
|
||||
"secretName": "heketi-db-backup"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "config",
|
||||
"secret": {
|
||||
"secretName": "heketi-config-secret"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,7 +0,0 @@
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "ServiceAccount",
|
||||
"metadata": {
|
||||
"name": "heketi-service-account"
|
||||
}
|
||||
}
|
||||
@@ -1,54 +0,0 @@
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "List",
|
||||
"items": [
|
||||
{
|
||||
"kind": "Endpoints",
|
||||
"apiVersion": "v1",
|
||||
"metadata": {
|
||||
"name": "heketi-storage-endpoints",
|
||||
"creationTimestamp": null
|
||||
},
|
||||
"subsets": [
|
||||
{% set nodeblocks = [] %}
|
||||
{% for node in nodes %}
|
||||
{% set nodeblock %}
|
||||
{
|
||||
"addresses": [
|
||||
{
|
||||
"ip": "{{ hostvars[node].ip }}"
|
||||
}
|
||||
],
|
||||
"ports": [
|
||||
{
|
||||
"port": 1
|
||||
}
|
||||
]
|
||||
}
|
||||
{% endset %}
|
||||
{% if nodeblocks.append(nodeblock) %}{% endif %}
|
||||
{% endfor %}
|
||||
{{ nodeblocks|join(',') }}
|
||||
]
|
||||
},
|
||||
{
|
||||
"kind": "Service",
|
||||
"apiVersion": "v1",
|
||||
"metadata": {
|
||||
"name": "heketi-storage-endpoints",
|
||||
"creationTimestamp": null
|
||||
},
|
||||
"spec": {
|
||||
"ports": [
|
||||
{
|
||||
"port": 1,
|
||||
"targetPort": 0
|
||||
}
|
||||
]
|
||||
},
|
||||
"status": {
|
||||
"loadBalancer": {}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,44 +0,0 @@
|
||||
{
|
||||
"_port_comment": "Heketi Server Port Number",
|
||||
"port": "8080",
|
||||
|
||||
"_use_auth": "Enable JWT authorization. Please enable for deployment",
|
||||
"use_auth": true,
|
||||
|
||||
"_jwt": "Private keys for access",
|
||||
"jwt": {
|
||||
"_admin": "Admin has access to all APIs",
|
||||
"admin": {
|
||||
"key": "{{ heketi_admin_key }}"
|
||||
},
|
||||
"_user": "User only has access to /volumes endpoint",
|
||||
"user": {
|
||||
"key": "{{ heketi_user_key }}"
|
||||
}
|
||||
},
|
||||
|
||||
"_glusterfs_comment": "GlusterFS Configuration",
|
||||
"glusterfs": {
|
||||
"_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
|
||||
"executor": "kubernetes",
|
||||
|
||||
"_db_comment": "Database file name",
|
||||
"db": "/var/lib/heketi/heketi.db",
|
||||
|
||||
"kubeexec": {
|
||||
"rebalance_on_expansion": true
|
||||
},
|
||||
|
||||
"sshexec": {
|
||||
"rebalance_on_expansion": true,
|
||||
"keyfile": "/etc/heketi/private_key",
|
||||
"fstab": "/etc/fstab",
|
||||
"port": "22",
|
||||
"user": "root",
|
||||
"sudo": false
|
||||
}
|
||||
},
|
||||
|
||||
"_backup_db_to_kube_secret": "Backup the heketi database to a Kubernetes secret when running in Kubernetes. Default is off.",
|
||||
"backup_db_to_kube_secret": false
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: gluster
|
||||
annotations:
|
||||
storageclass.beta.kubernetes.io/is-default-class: "true"
|
||||
provisioner: kubernetes.io/glusterfs
|
||||
parameters:
|
||||
resturl: "http://{{ endpoint_address }}:8080"
|
||||
restuser: "admin"
|
||||
restuserkey: "{{ heketi_admin_key }}"
|
||||
@@ -1,34 +0,0 @@
|
||||
{
|
||||
"clusters": [
|
||||
{
|
||||
"nodes": [
|
||||
{% set nodeblocks = [] %}
|
||||
{% for node in nodes %}
|
||||
{% set nodeblock %}
|
||||
{
|
||||
"node": {
|
||||
"hostnames": {
|
||||
"manage": [
|
||||
"{{ node }}"
|
||||
],
|
||||
"storage": [
|
||||
"{{ hostvars[node].ip }}"
|
||||
]
|
||||
},
|
||||
"zone": 1
|
||||
},
|
||||
"devices": [
|
||||
{
|
||||
"name": "{{ hostvars[node]['disk_volume_device_1'] }}",
|
||||
"destroydata": false
|
||||
}
|
||||
]
|
||||
}
|
||||
{% endset %}
|
||||
{% if nodeblocks.append(nodeblock) %}{% endif %}
|
||||
{% endfor %}
|
||||
{{ nodeblocks|join(',') }}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,2 +0,0 @@
|
||||
---
|
||||
heketi_remove_lvm: false
|
||||
@@ -1,52 +0,0 @@
|
||||
---
|
||||
- name: "Install lvm utils (RedHat)"
|
||||
become: true
|
||||
package:
|
||||
name: "lvm2"
|
||||
state: "present"
|
||||
when: "ansible_os_family == 'RedHat'"
|
||||
|
||||
- name: "Install lvm utils (Debian)"
|
||||
become: true
|
||||
apt:
|
||||
name: "lvm2"
|
||||
state: "present"
|
||||
when: "ansible_os_family == 'Debian'"
|
||||
|
||||
- name: "Get volume group information."
|
||||
environment:
|
||||
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
|
||||
become: true
|
||||
shell: "pvs {{ disk_volume_device_1 }} --option vg_name | tail -n+2"
|
||||
register: "volume_groups"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
changed_when: false
|
||||
|
||||
- name: "Remove volume groups."
|
||||
environment:
|
||||
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
|
||||
become: true
|
||||
command: "vgremove {{ volume_group }} --yes"
|
||||
with_items: "{{ volume_groups.stdout_lines }}"
|
||||
loop_control: { loop_var: "volume_group" }
|
||||
|
||||
- name: "Remove physical volume from cluster disks."
|
||||
environment:
|
||||
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
|
||||
become: true
|
||||
command: "pvremove {{ disk_volume_device_1 }} --yes"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
|
||||
- name: "Remove lvm utils (RedHat)"
|
||||
become: true
|
||||
package:
|
||||
name: "lvm2"
|
||||
state: "absent"
|
||||
when: "ansible_os_family == 'RedHat' and heketi_remove_lvm"
|
||||
|
||||
- name: "Remove lvm utils (Debian)"
|
||||
become: true
|
||||
apt:
|
||||
name: "lvm2"
|
||||
state: "absent"
|
||||
when: "ansible_os_family == 'Debian' and heketi_remove_lvm"
|
||||
@@ -1,51 +0,0 @@
|
||||
---
|
||||
- name: Remove storage class.
|
||||
command: "{{ bin_dir }}/kubectl delete storageclass gluster"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Tear down heketi.
|
||||
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Tear down heketi.
|
||||
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Tear down bootstrap.
|
||||
include_tasks: "../../provision/tasks/bootstrap/tear-down.yml"
|
||||
- name: Ensure there is nothing left over.
|
||||
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
|
||||
register: "heketi_result"
|
||||
until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
|
||||
retries: 60
|
||||
delay: 5
|
||||
- name: Ensure there is nothing left over.
|
||||
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
|
||||
register: "heketi_result"
|
||||
until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
|
||||
retries: 60
|
||||
delay: 5
|
||||
- name: Tear down glusterfs.
|
||||
command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Remove heketi storage service.
|
||||
command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Remove heketi gluster role binding
|
||||
command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Remove heketi config secret
|
||||
command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Remove heketi db backup
|
||||
command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Remove heketi service account
|
||||
command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
- name: Get secrets
|
||||
command: "{{ bin_dir }}/kubectl get secrets --output=\"json\""
|
||||
register: "secrets"
|
||||
changed_when: false
|
||||
- name: Remove heketi storage secret
|
||||
vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" }
|
||||
command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout | from_json | json_query(storage_query) }}"
|
||||
when: "storage_query is defined"
|
||||
ignore_errors: true # noqa ignore-errors
|
||||
@@ -67,3 +67,23 @@ Step(2) download files and run nginx container
```

When the nginx container is running, it can be accessed through <http://127.0.0.1:8080/>.

## upload2artifactory.py

After the steps above, this script can recursively upload each file under a directory to a generic repository in Artifactory.

Environment Variables:

- USERNAME -- Requires at least the 'Deploy/Cache' and 'Delete/Overwrite' permissions.
- TOKEN -- Generate this with 'Set Me Up' in your user profile.
- BASE_URL -- The URL including the repository name.

Step(3) (optional) upload files to Artifactory

```shell
cd kubespray/contrib/offline/offline-files
export USERNAME=admin
export TOKEN=...
export BASE_URL=https://artifactory.example.com/artifactory/a-generic-repo/
./upload2artifactory.py
```
@@ -24,7 +24,7 @@ sed -n '/^downloads:/,/download_defaults:/p' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
|
||||
# list separately.
|
||||
KUBE_IMAGES="kube-apiserver kube-controller-manager kube-scheduler kube-proxy"
|
||||
for i in $KUBE_IMAGES; do
|
||||
echo "{{ kube_image_repo }}/$i:{{ kube_version }}" >> ${TEMP_DIR}/images.list.template
|
||||
echo "{{ kube_image_repo }}/$i:v{{ kube_version }}" >> ${TEMP_DIR}/images.list.template
|
||||
done
|
||||
|
||||
# run ansible to expand templates
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
|
||||
OPTION=$1
|
||||
CURRENT_DIR=$(cd $(dirname $0); pwd)
|
||||
@@ -118,6 +118,8 @@ function register_container_images() {
|
||||
cp ${CURRENT_DIR}/registries.conf ${TEMP_DIR}/registries.conf
|
||||
sed -i s@"HOSTNAME"@"$(hostname)"@ ${TEMP_DIR}/registries.conf
|
||||
sudo cp ${TEMP_DIR}/registries.conf /etc/containers/registries.conf
|
||||
elif [ "$(uname)" == "Darwin" ]; then
|
||||
echo "This is a Mac, no configuration changes are required"
|
||||
else
|
||||
echo "runtime package(docker-ce, podman, nerctl, etc.) should be installed"
|
||||
exit 1
|
||||
|
||||
contrib/offline/upload2artifactory.py (new executable file, 65 lines)
@@ -0,0 +1,65 @@
|
||||
#!/usr/bin/env python3
|
||||
"""This is a helper script to manage-offline-files.sh.
|
||||
|
||||
After running manage-offline-files.sh, you can run upload2artifactory.py
|
||||
to recursively upload each file to a generic repository in Artifactory.
|
||||
|
||||
This script recurses the current working directory and is intended to
|
||||
be started from 'kubespray/contrib/offline/offline-files'
|
||||
|
||||
Environment Variables:
|
||||
USERNAME -- Requires at least the 'Deploy/Cache' and 'Delete/Overwrite' permissions.
|
||||
TOKEN -- Generate this with 'Set Me Up' in your user profile.
|
||||
BASE_URL -- The URL including the repository name.
|
||||
|
||||
"""
|
||||
import os
|
||||
import urllib.request
|
||||
import base64
|
||||
|
||||
|
||||
def upload_file(file_path, destination_url, username, token):
|
||||
"""Helper function to upload a single file"""
|
||||
try:
|
||||
with open(file_path, 'rb') as f:
|
||||
file_data = f.read()
|
||||
|
||||
request = urllib.request.Request(destination_url, data=file_data, method='PUT') # NOQA
|
||||
auth_header = base64.b64encode(f"{username}:{token}".encode()).decode()
|
||||
request.add_header("Authorization", f"Basic {auth_header}")
|
||||
|
||||
with urllib.request.urlopen(request) as response:
|
||||
if response.status in [200, 201]:
|
||||
print(f"Success: Uploaded {file_path}")
|
||||
else:
|
||||
print(f"Failed: {response.status} {response.read().decode('utf-8')}") # NOQA
|
||||
except urllib.error.HTTPError as e:
|
||||
print(f"HTTPError: {e.code} {e.reason} for {file_path}")
|
||||
except urllib.error.URLError as e:
|
||||
print(f"URLError: {e.reason} for {file_path}")
|
||||
except OSError as e:
|
||||
print(f"OSError: {e.strerror} for {file_path}")
|
||||
|
||||
|
||||
def upload_files(base_url, username, token):
|
||||
""" Recurse current dir and upload each file using urllib.request """
|
||||
for root, _, files in os.walk(os.getcwd()):
|
||||
for file in files:
|
||||
file_path = os.path.join(root, file)
|
||||
relative_path = os.path.relpath(file_path, os.getcwd())
|
||||
destination_url = f"{base_url}/{relative_path}"
|
||||
|
||||
print(f"Uploading {file_path} to {destination_url}")
|
||||
upload_file(file_path, destination_url, username, token)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
a_user = os.getenv("USERNAME")
|
||||
a_token = os.getenv("TOKEN")
|
||||
a_url = os.getenv("BASE_URL")
|
||||
if not a_user or not a_token or not a_url:
|
||||
print(
|
||||
"Error: Environment variables USERNAME, TOKEN, and BASE_URL must be set." # NOQA
|
||||
)
|
||||
exit(1)
|
||||
upload_files(a_url, a_user, a_token)
|
||||
@@ -1,3 +0,0 @@
|
||||
# See the OWNERS docs at https://go.k8s.io/owners
|
||||
approvers:
|
||||
- miouge1
|
||||
@@ -102,7 +102,8 @@ Please read the instructions in both repos on how to install it.
|
||||
You can teardown your infrastructure using the following Terraform command:
|
||||
|
||||
```bash
|
||||
terraform destroy --var-file default.tfvars ../../contrib/terraform/hetzner
|
||||
cd ./kubespray
|
||||
terraform -chdir=./contrib/terraform/hetzner/ destroy --var-file=../../../inventory/$CLUSTER/default.tfvars
|
||||
```
|
||||
|
||||
## Variables
|
||||
|
||||
@@ -15,7 +15,7 @@ resource "openstack_networking_floatingip_v2" "k8s_master" {
|
||||
}
|
||||
|
||||
resource "openstack_networking_floatingip_v2" "k8s_masters" {
|
||||
for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 ? { for key, value in var.k8s_masters : key => value if value.floating_ip && (lookup(value, "reserved_floating_ip", "") == "") } : {}
|
||||
for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 ? { for key, value in var.k8s_masters : key => value if value.floating_ip && (lookup(value, "reserved_floating_ip", "") == "") } : tomap({})
|
||||
pool = var.floatingip_pool
|
||||
depends_on = [null_resource.dummy_dependency]
|
||||
}
|
||||
@@ -40,7 +40,7 @@ resource "openstack_networking_floatingip_v2" "bastion" {
|
||||
}
|
||||
|
||||
resource "openstack_networking_floatingip_v2" "k8s_nodes" {
|
||||
for_each = var.number_of_k8s_nodes == 0 ? { for key, value in var.k8s_nodes : key => value if value.floating_ip && (lookup(value, "reserved_floating_ip", "") == "") } : {}
|
||||
for_each = var.number_of_k8s_nodes == 0 ? { for key, value in var.k8s_nodes : key => value if value.floating_ip && (lookup(value, "reserved_floating_ip", "") == "") } : tomap({})
|
||||
pool = var.floatingip_pool
|
||||
depends_on = [null_resource.dummy_dependency]
|
||||
}
|
||||
|
||||
@@ -273,6 +273,7 @@ def openstack_host(resource, module_name):
|
||||
'access_ip_v4': raw_attrs['access_ip_v4'],
|
||||
'access_ip_v6': raw_attrs['access_ip_v6'],
|
||||
'access_ip': raw_attrs['access_ip_v4'],
|
||||
'access_ip6': raw_attrs['access_ip_v6'],
|
||||
'ip': raw_attrs['network.0.fixed_ip_v4'],
|
||||
'flavor': parse_dict(raw_attrs, 'flavor',
|
||||
sep='_'),
|
||||
|
||||
@@ -2,35 +2,6 @@
|
||||
|
||||
Provision a Kubernetes cluster on [UpCloud](https://upcloud.com/) using Terraform and Kubespray
|
||||
|
||||
## Overview
|
||||
|
||||
The setup looks like following
|
||||
|
||||
```text
|
||||
Kubernetes cluster
|
||||
+--------------------------+
|
||||
| +--------------+ |
|
||||
| | +--------------+ |
|
||||
| --> | | | |
|
||||
| | | Master/etcd | |
|
||||
| | | node(s) | |
|
||||
| +-+ | |
|
||||
| +--------------+ |
|
||||
| ^ |
|
||||
| | |
|
||||
| v |
|
||||
| +--------------+ |
|
||||
| | +--------------+ |
|
||||
| --> | | | |
|
||||
| | | Worker | |
|
||||
| | | node(s) | |
|
||||
| +-+ | |
|
||||
| +--------------+ |
|
||||
+--------------------------+
|
||||
```
|
||||
|
||||
The nodes use a private network for node-to-node communication and a public interface for all external communication.
|
||||
|
||||
## Requirements
|
||||
|
||||
* Terraform 0.13.0 or newer
|
||||
@@ -100,6 +71,8 @@ terraform destroy --var-file cluster-settings.tfvars \
|
||||
* `template_name`: The name or UUID of a base image
|
||||
* `username`: a user to access the nodes, defaults to "ubuntu"
|
||||
* `private_network_cidr`: CIDR to use for the private network, defaults to "172.16.0.0/24"
|
||||
* `dns_servers`: DNS servers that will be used by the nodes. Until [this is solved](https://github.com/UpCloudLtd/terraform-provider-upcloud/issues/562) this is done using user_data to reconfigure systemd-resolved. Defaults to `[]`
|
||||
* `use_public_ips`: If a NIC connected to the Public network should be attached to all nodes by default. Can be overridden by `force_public_ip` if this is set to `false`. Defaults to `true`
|
||||
* `ssh_public_keys`: List of public SSH keys to install on all machines
|
||||
* `zone`: The zone where to run the cluster
|
||||
* `machines`: Machines to provision. Key of this object will be used as the name of the machine
|
||||
@@ -108,6 +81,8 @@ terraform destroy --var-file cluster-settings.tfvars \
|
||||
* `cpu`: number of cpu cores
|
||||
* `mem`: memory size in MB
|
||||
* `disk_size`: The size of the storage in GB
|
||||
* `force_public_ip`: If `use_public_ips` is set to `false`, this forces a public NIC onto the machine anyway when set to `true`. Useful if you're migrating from public nodes to only private. Defaults to `false`
|
||||
* `dns_servers`: This works the same way as the global `dns_servers` but only applies to a single node. If set to `[]` while the global `dns_servers` is set to something else, then it will not add the user_data and thus will not be recreated. Useful if you're migrating from public nodes to only private. Defaults to `null`
|
||||
* `additional_disks`: Additional disks to attach to the node.
|
||||
* `size`: The size of the additional disk in GB
|
||||
* `tier`: The tier of disk to use (`maxiops` is the only one you can choose at the moment)
|
||||
@@ -134,10 +109,65 @@ terraform destroy --var-file cluster-settings.tfvars \
|
||||
* `end_address`: End of address range to allow
|
||||
* `loadbalancer_enabled`: Enable managed load balancer
|
||||
* `loadbalancer_plan`: Plan to use for load balancer *(development|production-small)*
|
||||
* `loadbalancer_legacy_network`: If the loadbalancer should use the deprecated network field instead of networks blocks. You probably want to have this set to false (default value)
|
||||
* `loadbalancers`: Ports to load balance and which machines to forward to. Key of this object will be used as the name of the load balancer frontends/backends
|
||||
* `port`: Port to load balance.
|
||||
* `target_port`: Port to the backend servers.
|
||||
* `backend_servers`: List of servers that traffic to the port should be forwarded to.
|
||||
* `proxy_protocol`: If the loadbalancer should set up the backend using proxy protocol.
|
||||
* `router_enable`: If a router should be connected to the private network or not
|
||||
* `gateways`: Gateways that should be connected to the router, requires router_enable is set to true
|
||||
* `features`: List of features for the gateway
|
||||
* `plan`: Plan to use for the gateway
|
||||
* `connections`: The connections and tunnel to create for the gateway
|
||||
* `type`: What type of connection
|
||||
* `local_routes`: Map of local routes for the connection
|
||||
* `type`: Type of route
|
||||
* `static_network`: Destination prefix of the route; needs to be a valid IPv4 prefix
|
||||
* `remote_routes`: Map of remote routes for the connection
|
||||
* `type`: Type of route
|
||||
* `static_network`: Destination prefix of the route; needs to be a valid IPv4 prefix
|
||||
* `tunnels`: The tunnels to create for this connection
|
||||
* `remote_address`: The remote address for the tunnel
|
||||
* `ipsec_properties`: Set properties of IPSec, if not set, defaults will be used
|
||||
* `child_rekey_time`: IKE child SA rekey time in seconds
|
||||
* `dpd_delay`: Delay before sending Dead Peer Detection packets if no traffic is detected, in seconds
|
||||
* `dpd_timeout`: Timeout period for DPD reply before considering the peer to be dead, in seconds
|
||||
* `ike_lifetime`: Maximum IKE SA lifetime in seconds
|
||||
* `rekey_time`: IKE SA rekey time in seconds
|
||||
* `phase1_algorithms`: List of Phase 1: Proposal algorithms
|
||||
* `phase1_dh_group_numbers`: List of Phase 1 Diffie-Hellman group numbers
|
||||
* `phase1_integrity_algorithms`: List of Phase 1 integrity algorithms
|
||||
* `phase2_algorithms`: List of Phase 2: Security Association algorithms
|
||||
* `phase2_dh_group_numbers`: List of Phase 2 Diffie-Hellman group numbers
|
||||
* `phase2_integrity_algorithms`: List of Phase 2 integrity algorithms
|
||||
* `gateway_vpn_psks`: Separate variable for providing PSKs for connection tunnels. The environment variable can be exported in the following format `export TF_VAR_gateway_vpn_psks='{"${gateway-name}-${connection-name}-tunnel":{psk:"..."}}'`
|
||||
* `static_routes`: Static routes to apply to the router, requires `router_enable` is set to true
|
||||
* `network_peerings`: Other UpCloud private networks to peer with, requires `router_enable` is set to true
|
||||
* `server_groups`: Group servers together
|
||||
* `servers`: The servers that should be included in the group.
|
||||
* `anti_affinity_policy`: Defines if a server group is an anti-affinity group. Setting this to "strict" or "yes" will result in all servers in the group being placed on separate compute hosts. The value can be "strict", "yes" or "no". "strict" is a strict policy that does not allow servers in the same server group to be on the same host. "yes" is a best-effort policy that tries to put servers on different hosts, but this is not guaranteed.
|
||||
|
||||
## Migration
|
||||
|
||||
When `null_resource.inventories` and `data.template_file.inventory` were changed to `local_file.inventory`, the old state file needs to be cleaned of the old state.
|
||||
The error messages you'll see if you encounter this are:
|
||||
|
||||
```text
|
||||
Error: failed to read schema for null_resource.inventories in registry.terraform.io/hashicorp/null: failed to instantiate provider "registry.terraform.io/hashicorp/null" to obtain schema: unavailable provider "registry.terraform.io/hashicorp/null"
|
||||
Error: failed to read schema for data.template_file.inventory in registry.terraform.io/hashicorp/template: failed to instantiate provider "registry.terraform.io/hashicorp/template" to obtain schema: unavailable provider "registry.terraform.io/hashicorp/template"
|
||||
```
|
||||
|
||||
This can be fixed with the following commands:
|
||||
|
||||
```bash
|
||||
terraform state rm -state=terraform.tfstate null_resource.inventories
|
||||
terraform state rm -state=terraform.tfstate data.template_file.inventory
|
||||
```
|
||||
|
||||
### Public to Private only migration
|
||||
|
||||
Since there's no way to remove the public NIC on a machine without recreating its private NIC, it's not possible to change a cluster in place to only use private IPs.
|
||||
The way to migrate is to first set `use_public_ips` to `false` and `dns_servers` to some DNS servers, and then update all existing servers to have `force_public_ip` set to `true` and `dns_servers` set to `[]`.
|
||||
After that you can add new nodes without `force_public_ip` and `dns_servers` set, and create them.
|
||||
Add the new nodes into the cluster and when all of them are added, remove the old nodes.
|
||||
|
||||
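A minimal, hypothetical sketch of that first migration step (machine names, plans, DNS addresses and disk sizes below are illustrative and not taken from this repository) could look like this in the cluster tfvars:

```hcl
# Illustrative only: adapt names, plans and sizes to your own cluster.
use_public_ips = false
dns_servers    = ["1.1.1.1"]

machines = {
  # Existing node: keeps its public NIC for now and skips the user_data change.
  "worker-old" : {
    node_type        = "worker"
    plan             = "2xCPU-4GB"
    disk_size        = 250
    server_group     = null
    force_public_ip  = true
    dns_servers      = []
    additional_disks = {}
  },
  # New node: created with only a private NIC and the global dns_servers.
  "worker-new" : {
    node_type        = "worker"
    plan             = "2xCPU-4GB"
    disk_size        = 250
    server_group     = null
    additional_disks = {}
  },
}
```

Once the new nodes have joined the cluster and the old ones are removed, the `force_public_ip` and per-node `dns_servers` overrides can be dropped.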
@@ -122,11 +122,11 @@ k8s_allowed_remote_ips = [
|
||||
master_allowed_ports = []
|
||||
worker_allowed_ports = []
|
||||
|
||||
loadbalancer_enabled = false
|
||||
loadbalancer_plan = "development"
|
||||
loadbalancer_proxy_protocol = false
|
||||
loadbalancer_enabled = false
|
||||
loadbalancer_plan = "development"
|
||||
loadbalancers = {
|
||||
# "http" : {
|
||||
# "proxy_protocol" : false
|
||||
# "port" : 80,
|
||||
# "target_port" : 80,
|
||||
# "backend_servers" : [
|
||||
@@ -153,3 +153,46 @@ server_groups = {
|
||||
# anti_affinity_policy = "yes"
|
||||
# }
|
||||
}
|
||||
|
||||
router_enable = false
|
||||
gateways = {
|
||||
# "gateway" : {
|
||||
# features: [ "vpn" ]
|
||||
# plan = "production"
|
||||
# connections = {
|
||||
# "connection" = {
|
||||
# name = "connection"
|
||||
# type = "ipsec"
|
||||
# remote_routes = {
|
||||
# "them" = {
|
||||
# type = "static"
|
||||
# static_network = "1.2.3.4/24"
|
||||
# }
|
||||
# }
|
||||
# local_routes = {
|
||||
# "me" = {
|
||||
# type = "static"
|
||||
# static_network = "4.3.2.1/24"
|
||||
# }
|
||||
# }
|
||||
# tunnels = {
|
||||
# "tunnel1" = {
|
||||
# remote_address = "1.2.3.4"
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
}
|
||||
# gateway_vpn_psks = {} # Should be loaded as an environment variable
|
||||
static_routes = {
|
||||
# "route": {
|
||||
# route: "1.2.3.4/24"
|
||||
# nexthop: "4.3.2.1"
|
||||
# }
|
||||
}
|
||||
network_peerings = {
|
||||
# "peering": {
|
||||
# remote_network: "uuid"
|
||||
# }
|
||||
}
|
||||
|
||||
@@ -20,23 +20,32 @@ module "kubernetes" {
|
||||
username = var.username
|
||||
|
||||
private_network_cidr = var.private_network_cidr
|
||||
dns_servers = var.dns_servers
|
||||
use_public_ips = var.use_public_ips
|
||||
|
||||
machines = var.machines
|
||||
|
||||
ssh_public_keys = var.ssh_public_keys
|
||||
|
||||
firewall_enabled = var.firewall_enabled
|
||||
firewall_default_deny_in = var.firewall_default_deny_in
|
||||
firewall_default_deny_out = var.firewall_default_deny_out
|
||||
master_allowed_remote_ips = var.master_allowed_remote_ips
|
||||
k8s_allowed_remote_ips = var.k8s_allowed_remote_ips
|
||||
master_allowed_ports = var.master_allowed_ports
|
||||
worker_allowed_ports = var.worker_allowed_ports
|
||||
firewall_enabled = var.firewall_enabled
|
||||
firewall_default_deny_in = var.firewall_default_deny_in
|
||||
firewall_default_deny_out = var.firewall_default_deny_out
|
||||
master_allowed_remote_ips = var.master_allowed_remote_ips
|
||||
k8s_allowed_remote_ips = var.k8s_allowed_remote_ips
|
||||
bastion_allowed_remote_ips = var.bastion_allowed_remote_ips
|
||||
master_allowed_ports = var.master_allowed_ports
|
||||
worker_allowed_ports = var.worker_allowed_ports
|
||||
|
||||
loadbalancer_enabled = var.loadbalancer_enabled
|
||||
loadbalancer_plan = var.loadbalancer_plan
|
||||
loadbalancer_outbound_proxy_protocol = var.loadbalancer_proxy_protocol ? "v2" : ""
|
||||
loadbalancers = var.loadbalancers
|
||||
loadbalancer_enabled = var.loadbalancer_enabled
|
||||
loadbalancer_plan = var.loadbalancer_plan
|
||||
loadbalancer_legacy_network = var.loadbalancer_legacy_network
|
||||
loadbalancers = var.loadbalancers
|
||||
|
||||
router_enable = var.router_enable
|
||||
gateways = var.gateways
|
||||
gateway_vpn_psks = var.gateway_vpn_psks
|
||||
static_routes = var.static_routes
|
||||
network_peerings = var.network_peerings
|
||||
|
||||
server_groups = var.server_groups
|
||||
}
|
||||
@@ -45,32 +54,12 @@ module "kubernetes" {
|
||||
# Generate ansible inventory
|
||||
#
|
||||
|
||||
data "template_file" "inventory" {
|
||||
template = file("${path.module}/templates/inventory.tpl")
|
||||
|
||||
vars = {
|
||||
connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s etcd_member_name=etcd%d",
|
||||
keys(module.kubernetes.master_ip),
|
||||
values(module.kubernetes.master_ip).*.public_ip,
|
||||
values(module.kubernetes.master_ip).*.private_ip,
|
||||
range(1, length(module.kubernetes.master_ip) + 1)))
|
||||
connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s",
|
||||
keys(module.kubernetes.worker_ip),
|
||||
values(module.kubernetes.worker_ip).*.public_ip,
|
||||
values(module.kubernetes.worker_ip).*.private_ip))
|
||||
list_master = join("\n", formatlist("%s",
|
||||
keys(module.kubernetes.master_ip)))
|
||||
list_worker = join("\n", formatlist("%s",
|
||||
keys(module.kubernetes.worker_ip)))
|
||||
}
|
||||
}
|
||||
|
||||
resource "null_resource" "inventories" {
|
||||
provisioner "local-exec" {
|
||||
command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}"
|
||||
}
|
||||
|
||||
triggers = {
|
||||
template = data.template_file.inventory.rendered
|
||||
}
|
||||
resource "local_file" "inventory" {
|
||||
content = templatefile("${path.module}/templates/inventory.tpl", {
|
||||
master_ip = module.kubernetes.master_ip
|
||||
worker_ip = module.kubernetes.worker_ip
|
||||
bastion_ip = module.kubernetes.bastion_ip
|
||||
username = var.username
|
||||
})
|
||||
filename = var.inventory_file
|
||||
}
|
||||
|
||||
@@ -20,9 +20,77 @@ locals {
|
||||
]
|
||||
])
|
||||
|
||||
gateway_connections = flatten([
|
||||
for gateway_name, gateway in var.gateways : [
|
||||
for connection_name, connection in gateway.connections : {
|
||||
"gateway_id" = upcloud_gateway.gateway[gateway_name].id
|
||||
"gateway_name" = gateway_name
|
||||
"connection_name" = connection_name
|
||||
"type" = connection.type
|
||||
"local_routes" = connection.local_routes
|
||||
"remote_routes" = connection.remote_routes
|
||||
}
|
||||
]
|
||||
])
|
||||
|
||||
gateway_connection_tunnels = flatten([
|
||||
for gateway_name, gateway in var.gateways : [
|
||||
for connection_name, connection in gateway.connections : [
|
||||
for tunnel_name, tunnel in connection.tunnels : {
|
||||
"gateway_id" = upcloud_gateway.gateway[gateway_name].id
|
||||
"gateway_name" = gateway_name
|
||||
"connection_id" = upcloud_gateway_connection.gateway_connection["${gateway_name}-${connection_name}"].id
|
||||
"connection_name" = connection_name
|
||||
"tunnel_name" = tunnel_name
|
||||
"local_address_name" = tolist(upcloud_gateway.gateway[gateway_name].address).0.name
|
||||
"remote_address" = tunnel.remote_address
|
||||
"ipsec_properties" = tunnel.ipsec_properties
|
||||
}
|
||||
]
|
||||
]
|
||||
])
|
||||
|
||||
# If prefix is set, all resources will be prefixed with "${var.prefix}-"
|
||||
# Else don't prefix with anything
|
||||
resource-prefix = "%{if var.prefix != ""}${var.prefix}-%{endif}"
|
||||
|
||||
master_ip = {
|
||||
for instance in upcloud_server.master :
|
||||
instance.hostname => {
|
||||
for nic in instance.network_interface :
|
||||
nic.type => nic.ip_address
|
||||
if nic.ip_address != null
|
||||
}
|
||||
}
|
||||
worker_ip = {
|
||||
for instance in upcloud_server.worker :
|
||||
instance.hostname => {
|
||||
for nic in instance.network_interface :
|
||||
nic.type => nic.ip_address
|
||||
if nic.ip_address != null
|
||||
}
|
||||
}
|
||||
|
||||
bastion_ip = {
|
||||
for instance in upcloud_server.bastion :
|
||||
instance.hostname => {
|
||||
for nic in instance.network_interface :
|
||||
nic.type => nic.ip_address
|
||||
if nic.ip_address != null
|
||||
}
|
||||
}
|
||||
|
||||
node_user_data = {
|
||||
for name, machine in var.machines :
|
||||
name => <<EOF
|
||||
%{ if ( length(machine.dns_servers != null ? machine.dns_servers : [] ) > 0 ) || ( length(var.dns_servers) > 0 && machine.dns_servers == null ) ~}
|
||||
#!/bin/bash
|
||||
echo -e "[Resolve]\nDNS=${ join(" ", length(machine.dns_servers != null ? machine.dns_servers : []) > 0 ? machine.dns_servers : var.dns_servers) }" > /etc/systemd/resolved.conf
|
||||
|
||||
systemctl restart systemd-resolved
|
||||
%{ endif ~}
|
||||
EOF
|
||||
}
|
||||
}
|
||||
|
||||
resource "upcloud_network" "private" {
|
||||
@@ -30,10 +98,16 @@ resource "upcloud_network" "private" {
|
||||
zone = var.zone
|
||||
|
||||
ip_network {
|
||||
address = var.private_network_cidr
|
||||
dhcp = true
|
||||
family = "IPv4"
|
||||
address = var.private_network_cidr
|
||||
dhcp_default_route = var.router_enable
|
||||
# TODO: When support for dhcp_dns for private networks are in, remove the user_data and enable it here.
|
||||
# See more here https://github.com/UpCloudLtd/terraform-provider-upcloud/issues/562
|
||||
# dhcp_dns = length(var.private_network_dns) > 0 ? var.private_network_dns : null
|
||||
dhcp = true
|
||||
family = "IPv4"
|
||||
}
|
||||
|
||||
router = var.router_enable ? upcloud_router.router[0].id : null
|
||||
}
|
||||
|
||||
resource "upcloud_storage" "additional_disks" {
|
||||
@@ -56,8 +130,8 @@ resource "upcloud_server" "master" {
|
||||
|
||||
hostname = "${local.resource-prefix}${each.key}"
|
||||
plan = each.value.plan
|
||||
cpu = each.value.plan == null ? null : each.value.cpu
|
||||
mem = each.value.plan == null ? null : each.value.mem
|
||||
cpu = each.value.cpu
|
||||
mem = each.value.mem
|
||||
zone = var.zone
|
||||
server_group = each.value.server_group == null ? null : upcloud_server_group.server_groups[each.value.server_group].id
|
||||
|
||||
@@ -66,9 +140,12 @@ resource "upcloud_server" "master" {
|
||||
size = each.value.disk_size
|
||||
}
|
||||
|
||||
# Public network interface
|
||||
network_interface {
|
||||
type = "public"
|
||||
dynamic "network_interface" {
|
||||
for_each = each.value.force_public_ip || var.use_public_ips ? [1] : []
|
||||
|
||||
content {
|
||||
type = "public"
|
||||
}
|
||||
}
|
||||
|
||||
# Private network interface
|
||||
@@ -103,6 +180,9 @@ resource "upcloud_server" "master" {
|
||||
keys = var.ssh_public_keys
|
||||
create_password = false
|
||||
}
|
||||
|
||||
metadata = local.node_user_data[each.key] != "" ? true : null
|
||||
user_data = local.node_user_data[each.key] != "" ? local.node_user_data[each.key] : null
|
||||
}
|
||||
|
||||
resource "upcloud_server" "worker" {
|
||||
@@ -114,8 +194,8 @@ resource "upcloud_server" "worker" {
|
||||
|
||||
hostname = "${local.resource-prefix}${each.key}"
|
||||
plan = each.value.plan
|
||||
cpu = each.value.plan == null ? null : each.value.cpu
|
||||
mem = each.value.plan == null ? null : each.value.mem
|
||||
cpu = each.value.cpu
|
||||
mem = each.value.mem
|
||||
zone = var.zone
|
||||
server_group = each.value.server_group == null ? null : upcloud_server_group.server_groups[each.value.server_group].id
|
||||
|
||||
@@ -125,9 +205,12 @@ resource "upcloud_server" "worker" {
|
||||
size = each.value.disk_size
|
||||
}
|
||||
|
||||
# Public network interface
|
||||
network_interface {
|
||||
type = "public"
|
||||
dynamic "network_interface" {
|
||||
for_each = each.value.force_public_ip || var.use_public_ips ? [1] : []
|
||||
|
||||
content {
|
||||
type = "public"
|
||||
}
|
||||
}
|
||||
|
||||
# Private network interface
|
||||
@@ -162,6 +245,63 @@ resource "upcloud_server" "worker" {
|
||||
keys = var.ssh_public_keys
|
||||
create_password = false
|
||||
}
|
||||
|
||||
metadata = local.node_user_data[each.key] != "" ? true : null
|
||||
user_data = local.node_user_data[each.key] != "" ? local.node_user_data[each.key] : null
|
||||
}
|
||||
|
||||
resource "upcloud_server" "bastion" {
|
||||
for_each = {
|
||||
for name, machine in var.machines :
|
||||
name => machine
|
||||
if machine.node_type == "bastion"
|
||||
}
|
||||
|
||||
hostname = "${local.resource-prefix}${each.key}"
|
||||
plan = each.value.plan
|
||||
cpu = each.value.cpu
|
||||
mem = each.value.mem
|
||||
zone = var.zone
|
||||
server_group = each.value.server_group == null ? null : upcloud_server_group.server_groups[each.value.server_group].id
|
||||
|
||||
|
||||
template {
|
||||
storage = var.template_name
|
||||
size = each.value.disk_size
|
||||
}
|
||||
|
||||
# Private network interface
|
||||
network_interface {
|
||||
type = "private"
|
||||
network = upcloud_network.private.id
|
||||
}
|
||||
|
||||
# Private network interface
|
||||
network_interface {
|
||||
type = "public"
|
||||
}
|
||||
|
||||
firewall = var.firewall_enabled
|
||||
|
||||
dynamic "storage_devices" {
|
||||
for_each = {
|
||||
for disk_key_name, disk in upcloud_storage.additional_disks :
|
||||
disk_key_name => disk
|
||||
# Only add the disk if it matches the node name in the start of its name
|
||||
if length(regexall("^${each.key}_.+", disk_key_name)) > 0
|
||||
}
|
||||
|
||||
content {
|
||||
storage = storage_devices.value.id
|
||||
}
|
||||
}
|
||||
|
||||
# Include at least one public SSH key
|
||||
login {
|
||||
user = var.username
|
||||
keys = var.ssh_public_keys
|
||||
create_password = false
|
||||
}
|
||||
}
|
||||
|
||||
resource "upcloud_firewall_rules" "master" {
|
||||
@@ -510,22 +650,84 @@ resource "upcloud_firewall_rules" "k8s" {
|
||||
}
|
||||
}
|
||||
|
||||
resource "upcloud_firewall_rules" "bastion" {
|
||||
for_each = upcloud_server.bastion
|
||||
server_id = each.value.id
|
||||
|
||||
dynamic "firewall_rule" {
|
||||
for_each = var.bastion_allowed_remote_ips
|
||||
|
||||
content {
|
||||
action = "accept"
|
||||
comment = "Allow bastion SSH access from this network"
|
||||
destination_port_end = "22"
|
||||
destination_port_start = "22"
|
||||
direction = "in"
|
||||
family = "IPv4"
|
||||
protocol = "tcp"
|
||||
source_address_end = firewall_rule.value.end_address
|
||||
source_address_start = firewall_rule.value.start_address
|
||||
}
|
||||
}
|
||||
|
||||
dynamic "firewall_rule" {
|
||||
for_each = length(var.bastion_allowed_remote_ips) > 0 ? [1] : []
|
||||
|
||||
content {
|
||||
action = "drop"
|
||||
comment = "Drop bastion SSH access from other networks"
|
||||
destination_port_end = "22"
|
||||
destination_port_start = "22"
|
||||
direction = "in"
|
||||
family = "IPv4"
|
||||
protocol = "tcp"
|
||||
source_address_end = "255.255.255.255"
|
||||
source_address_start = "0.0.0.0"
|
||||
}
|
||||
}
|
||||
|
||||
firewall_rule {
|
||||
action = var.firewall_default_deny_in ? "drop" : "accept"
|
||||
direction = "in"
|
||||
}
|
||||
|
||||
firewall_rule {
|
||||
action = var.firewall_default_deny_out ? "drop" : "accept"
|
||||
direction = "out"
|
||||
}
|
||||
}
|
||||
|
||||
resource "upcloud_loadbalancer" "lb" {
|
||||
count = var.loadbalancer_enabled ? 1 : 0
|
||||
configured_status = "started"
|
||||
name = "${local.resource-prefix}lb"
|
||||
plan = var.loadbalancer_plan
|
||||
zone = var.private_cloud ? var.public_zone : var.zone
|
||||
networks {
|
||||
name = "Private-Net"
|
||||
type = "private"
|
||||
family = "IPv4"
|
||||
network = upcloud_network.private.id
|
||||
network = var.loadbalancer_legacy_network ? upcloud_network.private.id : null
|
||||
|
||||
dynamic "networks" {
|
||||
for_each = var.loadbalancer_legacy_network ? [] : [1]
|
||||
|
||||
content {
|
||||
name = "Private-Net"
|
||||
type = "private"
|
||||
family = "IPv4"
|
||||
network = upcloud_network.private.id
|
||||
}
|
||||
}
|
||||
networks {
|
||||
name = "Public-Net"
|
||||
type = "public"
|
||||
family = "IPv4"
|
||||
|
||||
dynamic "networks" {
|
||||
for_each = var.loadbalancer_legacy_network ? [] : [1]
|
||||
|
||||
content {
|
||||
name = "Public-Net"
|
||||
type = "public"
|
||||
family = "IPv4"
|
||||
}
|
||||
}
|
||||
|
||||
lifecycle {
|
||||
ignore_changes = [ maintenance_dow, maintenance_time ]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -535,7 +737,7 @@ resource "upcloud_loadbalancer_backend" "lb_backend" {
|
||||
loadbalancer = upcloud_loadbalancer.lb[0].id
|
||||
name = "lb-backend-${each.key}"
|
||||
properties {
|
||||
outbound_proxy_protocol = var.loadbalancer_outbound_proxy_protocol
|
||||
outbound_proxy_protocol = each.value.proxy_protocol ? "v2" : ""
|
||||
}
|
||||
}
|
||||
|
||||
@@ -547,8 +749,21 @@ resource "upcloud_loadbalancer_frontend" "lb_frontend" {
|
||||
mode = "tcp"
|
||||
port = each.value.port
|
||||
default_backend_name = upcloud_loadbalancer_backend.lb_backend[each.key].name
|
||||
networks {
|
||||
name = "Public-Net"
|
||||
|
||||
dynamic "networks" {
|
||||
for_each = var.loadbalancer_legacy_network ? [] : [1]
|
||||
|
||||
content {
|
||||
name = "Public-Net"
|
||||
}
|
||||
}
|
||||
|
||||
dynamic "networks" {
|
||||
for_each = each.value.allow_internal_frontend ? [1] : []
|
||||
|
||||
content {
|
||||
name = "Private-Net"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -561,7 +776,7 @@ resource "upcloud_loadbalancer_static_backend_member" "lb_backend_member" {
|
||||
|
||||
backend = upcloud_loadbalancer_backend.lb_backend[each.value.lb_name].id
|
||||
name = "${local.resource-prefix}${each.key}"
|
||||
ip = merge(upcloud_server.master, upcloud_server.worker)[each.value.server_name].network_interface[1].ip_address
|
||||
ip = merge(local.master_ip, local.worker_ip)["${local.resource-prefix}${each.value.server_name}"].private
|
||||
port = each.value.port
|
||||
weight = 100
|
||||
max_sessions = var.loadbalancer_plan == "production-small" ? 50000 : 1000
|
||||
@@ -579,3 +794,111 @@ resource "upcloud_server_group" "server_groups" {
|
||||
ignore_changes = [members]
|
||||
}
|
||||
}
|
||||
|
||||
resource "upcloud_router" "router" {
|
||||
count = var.router_enable ? 1 : 0
|
||||
|
||||
name = "${local.resource-prefix}router"
|
||||
|
||||
dynamic "static_route" {
|
||||
for_each = var.static_routes
|
||||
|
||||
content {
|
||||
name = static_route.key
|
||||
|
||||
nexthop = static_route.value["nexthop"]
|
||||
route = static_route.value["route"]
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
resource "upcloud_gateway" "gateway" {
|
||||
for_each = var.router_enable ? var.gateways : {}
|
||||
name = "${local.resource-prefix}${each.key}-gateway"
|
||||
zone = var.private_cloud ? var.public_zone : var.zone
|
||||
|
||||
features = each.value.features
|
||||
plan = each.value.plan
|
||||
|
||||
router {
|
||||
id = upcloud_router.router[0].id
|
||||
}
|
||||
}
|
||||
|
||||
resource "upcloud_gateway_connection" "gateway_connection" {
|
||||
for_each = {
|
||||
for gc in local.gateway_connections : "${gc.gateway_name}-${gc.connection_name}" => gc
|
||||
}
|
||||
|
||||
gateway = each.value.gateway_id
|
||||
name = "${local.resource-prefix}${each.key}-gateway-connection"
|
||||
type = each.value.type
|
||||
|
||||
dynamic "local_route" {
|
||||
for_each = each.value.local_routes
|
||||
|
||||
content {
|
||||
name = local_route.key
|
||||
type = local_route.value["type"]
|
||||
static_network = local_route.value["static_network"]
|
||||
}
|
||||
}
|
||||
|
||||
dynamic "remote_route" {
|
||||
for_each = each.value.remote_routes
|
||||
|
||||
content {
|
||||
name = remote_route.key
|
||||
type = remote_route.value["type"]
|
||||
static_network = remote_route.value["static_network"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resource "upcloud_gateway_connection_tunnel" "gateway_connection_tunnel" {
|
||||
for_each = {
|
||||
for gct in local.gateway_connection_tunnels : "${gct.gateway_name}-${gct.connection_name}-${gct.tunnel_name}-tunnel" => gct
|
||||
}
|
||||
|
||||
connection_id = each.value.connection_id
|
||||
name = each.key
|
||||
local_address_name = each.value.local_address_name
|
||||
remote_address = each.value.remote_address
|
||||
|
||||
ipsec_auth_psk {
|
||||
psk = var.gateway_vpn_psks[each.key].psk
|
||||
}
|
||||
|
||||
dynamic "ipsec_properties" {
|
||||
for_each = each.value.ipsec_properties != null ? { "ip": each.value.ipsec_properties } : {}
|
||||
|
||||
content {
|
||||
child_rekey_time = ipsec_properties.value["child_rekey_time"]
|
||||
dpd_delay = ipsec_properties.value["dpd_delay"]
|
||||
dpd_timeout = ipsec_properties.value["dpd_timeout"]
|
||||
ike_lifetime = ipsec_properties.value["ike_lifetime"]
|
||||
rekey_time = ipsec_properties.value["rekey_time"]
|
||||
phase1_algorithms = ipsec_properties.value["phase1_algorithms"]
|
||||
phase1_dh_group_numbers = ipsec_properties.value["phase1_dh_group_numbers"]
|
||||
phase1_integrity_algorithms = ipsec_properties.value["phase1_integrity_algorithms"]
|
||||
phase2_algorithms = ipsec_properties.value["phase2_algorithms"]
|
||||
phase2_dh_group_numbers = ipsec_properties.value["phase2_dh_group_numbers"]
|
||||
phase2_integrity_algorithms = ipsec_properties.value["phase2_integrity_algorithms"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resource "upcloud_network_peering" "peering" {
|
||||
for_each = var.network_peerings
|
||||
|
||||
name = "${local.resource-prefix}${each.key}"
|
||||
|
||||
network {
|
||||
uuid = upcloud_network.private.id
|
||||
}
|
||||
|
||||
peer_network {
|
||||
uuid = each.value.remote_network
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,22 +1,13 @@
|
||||
|
||||
output "master_ip" {
|
||||
value = {
|
||||
for instance in upcloud_server.master :
|
||||
instance.hostname => {
|
||||
"public_ip" : instance.network_interface[0].ip_address
|
||||
"private_ip" : instance.network_interface[1].ip_address
|
||||
}
|
||||
}
|
||||
value = local.master_ip
|
||||
}
|
||||
|
||||
output "worker_ip" {
|
||||
value = {
|
||||
for instance in upcloud_server.worker :
|
||||
instance.hostname => {
|
||||
"public_ip" : instance.network_interface[0].ip_address
|
||||
"private_ip" : instance.network_interface[1].ip_address
|
||||
}
|
||||
}
|
||||
value = local.worker_ip
|
||||
}
|
||||
|
||||
output "bastion_ip" {
|
||||
value = local.bastion_ip
|
||||
}
|
||||
|
||||
output "loadbalancer_domain" {
|
||||
|
||||
@@ -20,15 +20,21 @@ variable "username" {}
|
||||
|
||||
variable "private_network_cidr" {}
|
||||
|
||||
variable "dns_servers" {}
|
||||
|
||||
variable "use_public_ips" {}
|
||||
|
||||
variable "machines" {
|
||||
description = "Cluster machines"
|
||||
type = map(object({
|
||||
node_type = string
|
||||
plan = string
|
||||
cpu = string
|
||||
mem = string
|
||||
cpu = optional(number)
|
||||
mem = optional(number)
|
||||
disk_size = number
|
||||
server_group : string
|
||||
force_public_ip : optional(bool, false)
|
||||
dns_servers : optional(set(string))
|
||||
additional_disks = map(object({
|
||||
size = number
|
||||
tier = string
|
||||
@@ -58,6 +64,13 @@ variable "k8s_allowed_remote_ips" {
|
||||
}))
|
||||
}
|
||||
|
||||
variable "bastion_allowed_remote_ips" {
|
||||
type = list(object({
|
||||
start_address = string
|
||||
end_address = string
|
||||
}))
|
||||
}
|
||||
|
||||
variable "master_allowed_ports" {
|
||||
type = list(object({
|
||||
protocol = string
|
||||
@@ -94,17 +107,20 @@ variable "loadbalancer_plan" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "loadbalancer_outbound_proxy_protocol" {
|
||||
type = string
|
||||
variable "loadbalancer_legacy_network" {
|
||||
type = bool
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "loadbalancers" {
|
||||
description = "Load balancers"
|
||||
|
||||
type = map(object({
|
||||
port = number
|
||||
target_port = number
|
||||
backend_servers = list(string)
|
||||
proxy_protocol = bool
|
||||
port = number
|
||||
target_port = number
|
||||
allow_internal_frontend = optional(bool)
|
||||
backend_servers = list(string)
|
||||
}))
|
||||
}
|
||||
|
||||
@@ -115,3 +131,72 @@ variable "server_groups" {
|
||||
anti_affinity_policy = string
|
||||
}))
|
||||
}
|
||||
|
||||
variable "router_enable" {
|
||||
description = "If a router should be enabled and connected to the private network or not"
|
||||
|
||||
type = bool
|
||||
}
|
||||
|
||||
variable "gateways" {
|
||||
description = "Gateways that should be connected to the router, requires router_enable is set to true"
|
||||
|
||||
type = map(object({
|
||||
features = list(string)
|
||||
plan = optional(string)
|
||||
connections = optional(map(object({
|
||||
type = string
|
||||
local_routes = optional(map(object({
|
||||
type = string
|
||||
static_network = string
|
||||
})))
|
||||
remote_routes = optional(map(object({
|
||||
type = string
|
||||
static_network = string
|
||||
})))
|
||||
tunnels = optional(map(object({
|
||||
remote_address = string
|
||||
ipsec_properties = optional(object({
|
||||
child_rekey_time = number
|
||||
dpd_delay = number
|
||||
dpd_timeout = number
|
||||
ike_lifetime = number
|
||||
rekey_time = number
|
||||
phase1_algorithms = set(string)
|
||||
phase1_dh_group_numbers = set(string)
|
||||
phase1_integrity_algorithms = set(string)
|
||||
phase2_algorithms = set(string)
|
||||
phase2_dh_group_numbers = set(string)
|
||||
phase2_integrity_algorithms = set(string)
|
||||
}))
|
||||
})))
|
||||
})))
|
||||
}))
|
||||
}
|
||||
|
||||
variable "gateway_vpn_psks" {
|
||||
description = "Separate variable for providing psks for connection tunnels"
|
||||
|
||||
type = map(object({
|
||||
psk = string
|
||||
}))
|
||||
default = {}
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
variable "static_routes" {
|
||||
description = "Static routes to apply to the router, requires router_enable is set to true"
|
||||
|
||||
type = map(object({
|
||||
nexthop = string
|
||||
route = string
|
||||
}))
|
||||
}
|
||||
|
||||
variable "network_peerings" {
|
||||
description = "Other UpCloud private networks to peer with, requires router_enable is set to true"
|
||||
|
||||
type = map(object({
|
||||
remote_network = string
|
||||
}))
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user