Mirror of https://github.com/kubernetes-sigs/kubespray.git (synced 2025-12-14 13:54:37 +03:00)

Compare commits: symlinketc...test-preco (2 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 55f9167af4 |  |
|  | 56f87fb091 |  |
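To reproduce this comparison locally, one can diff the two commits directly (a sketch; it assumes both commits are still fetchable from the repository hosting this mirror):

```ShellSession
git clone https://github.com/kubernetes-sigs/kubespray.git
cd kubespray
# Diff the two commits listed in the table above:
git diff 55f9167af4..56f87fb091
```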
@@ -37,7 +37,3 @@ exclude_paths:
- tests/files/custom_cni/cilium.yaml
- venv
- .github
- .ansible
- .cache
mock_modules:
- gluster.gluster.gluster_volume
28 .github/ISSUE_TEMPLATE/bug-report.yaml vendored
@@ -36,35 +36,11 @@ body:
attributes:
value: '### Environment'

- type: dropdown
- type: textarea
id: os
attributes:
label: OS
options:
- 'RHEL 9'
- 'RHEL 8'
- 'Fedora 40'
- 'Ubuntu 24'
- 'Ubuntu 22'
- 'Ubuntu 20'
- 'Debian 12'
- 'Debian 11'
- 'Flatcar Container Linux'
- 'openSUSE Leap'
- 'openSUSE Tumbleweed'
- 'Oracle Linux 9'
- 'Oracle Linux 8'
- 'AlmaLinux 9'
- 'AlmaLinux 8'
- 'Rocky Linux 9'
- 'Rocky Linux 8'
- 'Amazon Linux 2'
- 'Kylin Linux Advanced Server V10'
- 'UOS Linux 20'
- 'openEuler 24'
- 'openEuler 22'
- 'openEuler 20'
- 'Other|Unsupported'
placeholder: 'printf "$(uname -srm)\n$(cat /etc/os-release)\n"'
validations:
required: true
1 .github/ISSUE_TEMPLATE/config.yml vendored
@@ -1,5 +1,4 @@
---
blank_issues_enabled: false
contact_links:
- name: Support Request
url: https://kubernetes.slack.com/channels/kubespray
9 .github/dependabot.yml vendored
@@ -4,11 +4,4 @@ updates:
directory: "/"
schedule:
interval: "weekly"
labels:
- dependencies
- release-note-none
groups:
molecule:
patterns:
- molecule
- molecule-plugins*
labels: [ "dependencies" ]
32 .github/workflows/auto-label-os.yml vendored
@@ -1,32 +0,0 @@
name: Issue labeler
on:
issues:
types: [opened]

permissions:
contents: read

jobs:
label-component:
runs-on: ubuntu-latest
permissions:
issues: write

steps:
- uses: actions/checkout@v3

- name: Parse issue form
uses: stefanbuck/github-issue-parser@v3
id: issue-parser
with:
template-path: .github/ISSUE_TEMPLATE/bug-report.yaml

- name: Set labels based on OS field
uses: redhat-plumbers-in-action/advanced-issue-labeler@v2
with:
issue-form: ${{ steps.issue-parser.outputs.jsonString }}
section: os
block-list: |
None
Other
token: ${{ secrets.GITHUB_TOKEN }}
@@ -1,34 +1,37 @@
---
stages:
- build
- test
- unit-tests
- deploy-part1
- deploy-extended
- moderator
- deploy-part2
- deploy-part3
- deploy-special

variables:
KUBESPRAY_VERSION: v2.25.0
FAILFASTCI_NAMESPACE: 'kargo-ci'
GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
GIT_CONFIG_COUNT: 2
GIT_CONFIG_KEY_0: user.email
GIT_CONFIG_VALUE_0: "ci@kubespray.io"
GIT_CONFIG_KEY_1: user.name
GIT_CONFIG_VALUE_1: "Kubespray CI"
ANSIBLE_FORCE_COLOR: "true"
MAGIC: "ci check this"
TEST_ID: "$CI_PIPELINE_ID-$CI_JOB_ID"
CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
CI_TEST_REGISTRY_MIRROR: "./tests/common/_docker_hub_registry_mirror.yml"
CI_TEST_SETTING: "./tests/common/_kubespray_test_settings.yml"
GS_ACCESS_KEY_ID: $GS_KEY
GS_SECRET_ACCESS_KEY: $GS_SECRET
CONTAINER_ENGINE: docker
SSH_USER: root
GCE_PREEMPTIBLE: "false"
ANSIBLE_KEEP_REMOTE_FILES: "1"
ANSIBLE_CONFIG: ./tests/ansible.cfg
ANSIBLE_REMOTE_USER: kubespray
ANSIBLE_PRIVATE_KEY_FILE: /tmp/id_rsa
ANSIBLE_INVENTORY: /tmp/inventory
ANSIBLE_INVENTORY: ./inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini
IDEMPOT_CHECK: "false"
RESET_CHECK: "false"
REMOVE_NODE_CHECK: "false"
UPGRADE_TEST: "false"
MITOGEN_ENABLE: "false"
ANSIBLE_VERBOSITY: 2
ANSIBLE_LOG_LEVEL: "-vv"
RECOVER_CONTROL_PLANE_TEST: "false"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"
TERRAFORM_VERSION: 1.3.7
@@ -40,27 +43,16 @@ before_script:

.job: &job
tags:
- ffci
- packet
image: $PIPELINE_IMAGE
artifacts:
when: always
paths:
- cluster-dump/
needs:
- pipeline-image
variables:
ANSIBLE_STDOUT_CALLBACK: "debug"

.job-moderated:
extends: .job
needs:
- pipeline-image
- ci-not-authorized
- pre-commit # lint
- vagrant-validate # lint

.testcases: &testcases
extends: .job-moderated
<<: *job
retry: 1
interruptible: true
before_script:
- update-alternatives --install /usr/bin/python python /usr/bin/python3 1
@@ -69,38 +61,23 @@ before_script:
script:
- ./tests/scripts/testcases_run.sh
after_script:
- ./tests/scripts/testcases_cleanup.sh
- chronic ./tests/scripts/testcases_cleanup.sh

# For failfast, at least 1 job must be defined in .gitlab-ci.yml
# Premoderated with manual actions
ci-not-authorized:
stage: build
before_script: []
after_script: []
rules:
# LGTM or ok-to-test labels
- if: $PR_LABELS =~ /.*,(lgtm|approved|ok-to-test).*|^(lgtm|approved|ok-to-test).*/i
variables:
CI_OK_TO_TEST: '0'
when: always
- if: $CI_PIPELINE_SOURCE == "schedule" || $CI_PIPELINE_SOURCE == "trigger"
variables:
CI_OK_TO_TEST: '0'
- if: $CI_COMMIT_BRANCH == "master"
variables:
CI_OK_TO_TEST: '0'
- when: always
variables:
CI_OK_TO_TEST: '1'
ci-authorized:
extends: .job
stage: moderator
script:
- exit $CI_OK_TO_TEST
tags:
- ffci
needs: []
- /bin/sh scripts/premoderator.sh
except: ['triggers', 'master']
# Disable ci moderator
only: []

include:
- .gitlab-ci/build.yml
- .gitlab-ci/lint.yml
- .gitlab-ci/shellcheck.yml
- .gitlab-ci/terraform.yml
- .gitlab-ci/packet.yml
- .gitlab-ci/vagrant.yml
@@ -1,33 +1,40 @@
---
.build-container:
cache:
key: $CI_COMMIT_REF_SLUG
paths:
- image-cache
tags:
- ffci
.build:
stage: build
image:
name: gcr.io/kaniko-project/executor:debug
entrypoint: ['']
name: moby/buildkit:rootless
entrypoint: [""]
variables:
TAG: $CI_COMMIT_SHORT_SHA
PROJECT_DIR: $CI_PROJECT_DIR
DOCKERFILE: Dockerfile
GODEBUG: "http2client=0"
BUILDKITD_FLAGS: --oci-worker-no-process-sandbox
before_script:
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"auth\":\"$(echo -n ${CI_REGISTRY_USER}:${CI_REGISTRY_PASSWORD} | base64)\"}}}" > /kaniko/.docker/config.json
script:
- /kaniko/executor --cache=true
--cache-dir=image-cache
--context $PROJECT_DIR
--dockerfile $PROJECT_DIR/$DOCKERFILE
--label 'git-branch'=$CI_COMMIT_REF_SLUG
--label 'git-tag=$CI_COMMIT_TAG'
--destination $PIPELINE_IMAGE
--log-timestamp=true
- mkdir ~/.docker
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > ~/.docker/config.json

pipeline-image:
extends: .build-container
variables:
DOCKERFILE: pipeline.Dockerfile
pipeline image:
extends: .build
script:
- |
buildctl-daemonless.sh build \
--frontend=dockerfile.v0 \
--local context=. \
--local dockerfile=. \
--opt filename=./pipeline.Dockerfile \
--output type=image,name=$PIPELINE_IMAGE,push=true \
--import-cache type=registry,ref=$CI_REGISTRY_IMAGE/pipeline:cache
rules:
- if: '$CI_COMMIT_REF_NAME != $CI_DEFAULT_BRANCH'

pipeline image and build cache:
extends: .build
script:
- |
buildctl-daemonless.sh build \
--frontend=dockerfile.v0 \
--local context=. \
--local dockerfile=. \
--opt filename=./pipeline.Dockerfile \
--output type=image,name=$PIPELINE_IMAGE,push=true \
--import-cache type=registry,ref=$CI_REGISTRY_IMAGE/pipeline:cache \
--export-cache type=registry,ref=$CI_REGISTRY_IMAGE/pipeline:cache,mode=max
rules:
- if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH'
@@ -1,26 +1,126 @@
---
pre-commit:
stage: test
tags:
- ffci
image: 'ghcr.io/pre-commit-ci/runner-image@sha256:fe01a6ec51b298412990b88627c3973b1146c7304f930f469bafa29ba60bcde9'
yamllint:
extends: .job
stage: unit-tests
tags: [light]
variables:
PRE_COMMIT_HOME: ${CI_PROJECT_DIR}/.cache/pre-commit
LANG: C.UTF-8
script:
- pre-commit run --all-files --show-diff-on-failure
cache:
key: pre-commit-2
paths:
- ${PRE_COMMIT_HOME}
when: 'always'
needs: []
- yamllint --strict .
except: ['triggers', 'master']

vagrant-validate:
extends: .job
stage: test
tags: [ffci]
stage: unit-tests
tags: [light]
variables:
VAGRANT_VERSION: 2.3.7
script:
- ./tests/scripts/vagrant-validate.sh
- ./tests/scripts/vagrant-validate.sh
except: ['triggers', 'master']

ansible-lint:
extends: .job
stage: unit-tests
tags: [light]
script:
- ansible-lint -v
except: ['triggers', 'master']

jinja-syntax-check:
extends: .job
stage: unit-tests
tags: [light]
script:
- "find -name '*.j2' -exec tests/scripts/check-templates.py {} +"
except: ['triggers', 'master']

syntax-check:
extends: .job
stage: unit-tests
tags: [light]
variables:
ANSIBLE_INVENTORY: inventory/local-tests.cfg
ANSIBLE_REMOTE_USER: root
ANSIBLE_BECOME: "true"
ANSIBLE_BECOME_USER: root
ANSIBLE_VERBOSITY: "3"
script:
- ansible-playbook --syntax-check cluster.yml
- ansible-playbook --syntax-check playbooks/cluster.yml
- ansible-playbook --syntax-check upgrade-cluster.yml
- ansible-playbook --syntax-check playbooks/upgrade_cluster.yml
- ansible-playbook --syntax-check reset.yml
- ansible-playbook --syntax-check playbooks/reset.yml
- ansible-playbook --syntax-check extra_playbooks/upgrade-only-k8s.yml
except: ['triggers', 'master']

collection-build-install-sanity-check:
extends: .job
stage: unit-tests
tags: [light]
variables:
ANSIBLE_COLLECTIONS_PATH: "./ansible_collections"
script:
- ansible-galaxy collection build
- ansible-galaxy collection install kubernetes_sigs-kubespray-$(grep "^version:" galaxy.yml | awk '{print $2}').tar.gz
- ansible-galaxy collection list $(egrep -i '(name:\s+|namespace:\s+)' galaxy.yml | awk '{print $2}' | tr '\n' '.' | sed 's|\.$||g') | grep "^kubernetes_sigs.kubespray"
- test -f ansible_collections/kubernetes_sigs/kubespray/playbooks/cluster.yml
- test -f ansible_collections/kubernetes_sigs/kubespray/playbooks/reset.yml
except: ['triggers', 'master']

tox-inventory-builder:
stage: unit-tests
tags: [light]
extends: .job
before_script:
- ./tests/scripts/rebase.sh
script:
- pip3 install tox
- cd contrib/inventory_builder && tox
except: ['triggers', 'master']

markdownlint:
stage: unit-tests
tags: [light]
image: node
before_script:
- npm install -g markdownlint-cli@0.22.0
script:
- markdownlint $(find . -name '*.md' | grep -vF './.git') --ignore docs/_sidebar.md --ignore contrib/dind/README.md

generate-sidebar:
extends: .job
stage: unit-tests
tags: [light]
script:
- scripts/gen_docs_sidebar.sh
- git diff --exit-code

check-readme-versions:
stage: unit-tests
tags: [light]
image: python:3
script:
- tests/scripts/check_readme_versions.sh

check-galaxy-version:
stage: unit-tests
tags: [light]
image: python:3
script:
- tests/scripts/check_galaxy_version.sh

check-typo:
stage: unit-tests
tags: [light]
image: python:3
script:
- tests/scripts/check_typo.sh

ci-matrix:
stage: unit-tests
tags: [light]
image: python:3
script:
- tests/scripts/md-table/test.sh
@@ -1,56 +1,83 @@
---

.molecule:
tags: [ffci]
tags: [c3.small.x86]
only: [/^pr-.*$/]
except: ['triggers']
stage: deploy-part1
image: $PIPELINE_IMAGE
needs:
- pipeline-image
# - ci-not-authorized
services: []
stage: deploy-part1
before_script:
- ./tests/scripts/rebase.sh
- tests/scripts/rebase.sh
- ./tests/scripts/vagrant_clean.sh
script:
- ./tests/scripts/molecule_run.sh
- ./tests/scripts/molecule_run.sh
after_script:
- ./tests/scripts/molecule_logs.sh
- chronic ./tests/scripts/molecule_logs.sh
artifacts:
when: always
paths:
- molecule_logs/

molecule:
extends: .molecule
script:
- ./tests/scripts/molecule_run.sh -i $ROLE
parallel:
matrix:
- ROLE:
- container-engine/cri-dockerd
- container-engine/containerd
- container-engine/cri-o
- adduser
- bastion-ssh-config
- bootstrap-os
- molecule_logs/

# CI template for periodic CI jobs
# Enabled when PERIODIC_CI_ENABLED var is set
molecule_full:
.molecule_periodic:
only:
variables:
- $PERIODIC_CI_ENABLED
- $PERIODIC_CI_ENABLED
allow_failure: true
extends: molecule
parallel:
matrix:
- ROLE:
- container-engine/cri-dockerd
- container-engine/containerd
- container-engine/cri-o
- adduser
- bastion-ssh-config
- bootstrap-os
# FIXME : tests below are perma-failing
- container-engine/kata-containers
- container-engine/gvisor
- container-engine/youki
extends: .molecule

molecule_full:
extends: .molecule_periodic

molecule_no_container_engines:
extends: .molecule
script:
- ./tests/scripts/molecule_run.sh -e container-engine
when: on_success

molecule_docker:
extends: .molecule
script:
- ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
when: on_success

molecule_containerd:
extends: .molecule
script:
- ./tests/scripts/molecule_run.sh -i container-engine/containerd
when: on_success

molecule_cri-o:
extends: .molecule
stage: deploy-part2
script:
- ./tests/scripts/molecule_run.sh -i container-engine/cri-o
allow_failure: true
when: on_success

# Stage 3 container engines don't get as much attention so allow them to fail
molecule_kata:
extends: .molecule
stage: deploy-part3
script:
- ./tests/scripts/molecule_run.sh -i container-engine/kata-containers
when: manual
# FIXME: this test is broken (perma-failing)

molecule_gvisor:
extends: .molecule
stage: deploy-part3
script:
- ./tests/scripts/molecule_run.sh -i container-engine/gvisor
when: manual
# FIXME: this test is broken (perma-failing)

molecule_youki:
extends: .molecule
stage: deploy-part3
script:
- ./tests/scripts/molecule_run.sh -i container-engine/youki
when: manual
# FIXME: this test is broken (perma-failing)
@@ -6,56 +6,14 @@
CI_PLATFORM: packet
SSH_USER: kubespray
tags:
- ffci
needs:
- pipeline-image
- ci-not-authorized
- packet
except: [triggers]

# CI template for PRs
.packet_pr:
stage: deploy-part1
rules:
- if: $PR_LABELS =~ /.*ci-short.*/
when: manual
allow_failure: true
- if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
when: on_success
- when: manual
allow_failure: true
only: [/^pr-.*$/]
extends: .packet

## Uncomment this to have multiple stages
# needs:
# - packet_ubuntu20-calico-all-in-one

.packet_pr_short:
stage: deploy-part1
extends: .packet
rules:
- if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
when: on_success
- when: manual
allow_failure: true

.packet_pr_manual:
extends: .packet_pr
stage: deploy-extended
rules:
- if: $PR_LABELS =~ /.*ci-full.*/
when: on_success
# Else run as manual
- when: manual
allow_failure: true

.packet_pr_extended:
extends: .packet_pr
stage: deploy-extended
rules:
- if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
when: on_success
- when: manual
allow_failure: true

# CI template for periodic CI jobs
# Enabled when PERIODIC_CI_ENABLED var is set
.packet_periodic:
@@ -65,193 +23,325 @@
allow_failure: true
extends: .packet

packet_cleanup_old:
stage: deploy-part1
extends: .packet_periodic
script:
- cd tests
- make cleanup-packet
after_script: []

# The ubuntu20-calico-all-in-one jobs are meant as early stages to prevent running the full CI if something is horribly broken
packet_ubuntu20-calico-all-in-one:
stage: deploy-part1
extends: .packet_pr_short
extends: .packet_pr
when: on_success
variables:
RESET_CHECK: "true"

# ### PR JOBS PART2

packet_ubuntu20-crio:
extends: .packet_pr_manual
packet_ubuntu20-all-in-one-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_ubuntu20-calico-all-in-one-hardening:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_ubuntu22-all-in-one-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_ubuntu22-calico-all-in-one:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_ubuntu22-calico-all-in-one-upgrade:
packet_ubuntu24-all-in-one-docker:
stage: deploy-part2
extends: .packet_pr
variables:
UPGRADE_TEST: graceful
when: on_success

packet_ubuntu24-calico-all-in-one:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_ubuntu24-calico-etcd-datastore:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_almalinux9-crio:
packet_centos7-flannel-addons-ha:
extends: .packet_pr
stage: deploy-part2
when: on_success

packet_almalinux9-kube-ovn:
packet_almalinux8-crio:
extends: .packet_pr
stage: deploy-part2
when: on_success
allow_failure: true

packet_debian11-calico-collection:
packet_ubuntu20-crio:
extends: .packet_pr
stage: deploy-part2
when: manual

packet_debian11-macvlan:
packet_fedora37-crio:
extends: .packet_pr
stage: deploy-part2
when: manual

packet_ubuntu20-flannel-ha:
stage: deploy-part2
extends: .packet_pr
when: manual

packet_debian10-cilium-svc-proxy:
stage: deploy-part2
extends: .packet_periodic
when: on_success

packet_debian10-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_debian10-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_debian11-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_debian11-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_debian12-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_debian12-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_debian12-cilium:
stage: deploy-part2
extends: .packet_periodic
when: on_success

packet_centos7-calico-ha-once-localhost:
stage: deploy-part2
extends: .packet_pr
when: on_success
variables:
# This will instruct Docker not to start over TLS.
DOCKER_TLS_CERTDIR: ""
services:
- docker:19.03.9-dind

packet_almalinux8-kube-ovn:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_almalinux8-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_rockylinux8-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_rockylinux9-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_rockylinux9-cilium:
stage: deploy-part2
extends: .packet_pr
when: on_success
variables:
RESET_CHECK: "true"

# Need an update of the container image to use schema v2
# update: quay.io/kubespray/vm-amazon-linux-2:latest
packet_amazon-linux-2-all-in-one:
extends: .packet_pr_manual
rules:
- when: manual
allow_failure: true

packet_opensuse15-6-calico:
packet_almalinux8-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_amazon-linux-2-all-in-one:
stage: deploy-part2
extends: .packet_pr
when: on_success

packet_fedora38-docker-weave:
stage: deploy-part2
extends: .packet_pr
when: on_success
allow_failure: true

packet_opensuse-docker-cilium:
stage: deploy-part2
extends: .packet_pr
when: on_success

# ### MANUAL JOBS

packet_ubuntu20-docker-weave-sep:
stage: deploy-part2
extends: .packet_pr
when: manual

packet_ubuntu20-cilium-sep:
stage: deploy-special
extends: .packet_pr
when: manual

packet_openeuler24-calico:
packet_ubuntu20-flannel-ha-once:
stage: deploy-part2
extends: .packet_pr
when: manual

packet_ubuntu20-calico-all-in-one-hardening:
# Calico HA eBPF
packet_almalinux8-calico-ha-ebpf:
stage: deploy-part2
extends: .packet_pr
when: manual

## Extended
packet_debian11-docker:
extends: .packet_pr_extended
packet_debian10-macvlan:
stage: deploy-part2
extends: .packet_pr
when: manual

packet_debian12-docker:
extends: .packet_pr_extended
packet_centos7-calico-ha:
stage: deploy-part2
extends: .packet_pr
when: manual

packet_debian12-calico:
extends: .packet_pr_extended
packet_centos7-multus-calico:
stage: deploy-part2
extends: .packet_pr
when: manual

packet_almalinux9-calico-remove-node:
extends: .packet_pr_extended
packet_fedora38-docker-calico:
stage: deploy-part2
extends: .packet_periodic
when: on_success
variables:
RESET_CHECK: "true"

packet_fedora37-calico-selinux:
stage: deploy-part2
extends: .packet_periodic
when: on_success

packet_fedora37-calico-swap-selinux:
stage: deploy-part2
extends: .packet_pr
when: manual

packet_almalinux8-calico-nodelocaldns-secondary:
stage: deploy-part2
extends: .packet_pr
when: manual

packet_fedora38-kube-ovn:
stage: deploy-part2
extends: .packet_periodic
when: on_success

packet_debian11-custom-cni:
stage: deploy-part2
extends: .packet_pr
when: manual

packet_debian11-kubelet-csr-approver:
stage: deploy-part2
extends: .packet_pr
when: manual

packet_debian12-custom-cni-helm:
stage: deploy-part2
extends: .packet_pr
when: manual

# ### PR JOBS PART3
# Long jobs (45min+)

packet_centos7-weave-upgrade-ha:
stage: deploy-part3
extends: .packet_periodic
when: on_success
variables:
UPGRADE_TEST: basic

packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha:
stage: deploy-part3
extends: .packet_periodic
when: on_success
variables:
UPGRADE_TEST: basic

# Calico HA Wireguard
packet_ubuntu20-calico-ha-wireguard:
stage: deploy-part2
extends: .packet_pr
when: manual

packet_debian11-calico-upgrade:
stage: deploy-part3
extends: .packet_pr
when: on_success
variables:
UPGRADE_TEST: graceful

packet_almalinux8-calico-remove-node:
stage: deploy-part3
extends: .packet_pr
when: on_success
variables:
REMOVE_NODE_CHECK: "true"
REMOVE_NODE_NAME: "instance-3"

packet_rockylinux9-calico:
extends: .packet_pr_extended

packet_almalinux9-calico:
extends: .packet_pr_extended

packet_almalinux9-docker:
extends: .packet_pr_extended

packet_opensuse15-6-docker-cilium:
extends: .packet_pr_extended

packet_ubuntu24-calico-all-in-one:
extends: .packet_pr_extended

packet_ubuntu20-calico-etcd-kubeadm:
extends: .packet_pr_extended

packet_ubuntu24-all-in-one-docker:
extends: .packet_pr_extended

packet_ubuntu22-all-in-one-docker:
extends: .packet_pr_extended

# ### MANUAL JOBS
packet_fedora39-crio:
extends: .packet_pr_manual

packet_ubuntu20-flannel-ha:
extends: .packet_pr_manual

packet_ubuntu20-all-in-one-docker:
extends: .packet_pr_manual

packet_ubuntu20-flannel-ha-once:
extends: .packet_pr_manual

packet_fedora39-calico-swap-selinux:
extends: .packet_pr_manual

packet_almalinux9-calico-ha-ebpf:
extends: .packet_pr_manual

packet_almalinux9-calico-nodelocaldns-secondary:
extends: .packet_pr_manual

packet_debian11-custom-cni:
extends: .packet_pr_manual

packet_debian11-kubelet-csr-approver:
extends: .packet_pr_manual

packet_debian12-custom-cni-helm:
extends: .packet_pr_manual

packet_ubuntu20-calico-ha-wireguard:
extends: .packet_pr_manual

# PERIODIC
packet_fedora40-docker-calico:
stage: deploy-extended
extends: .packet_periodic
variables:
RESET_CHECK: "true"

packet_fedora39-calico-selinux:
stage: deploy-extended
extends: .packet_periodic

packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha:
stage: deploy-extended
extends: .packet_periodic
variables:
UPGRADE_TEST: basic

stage: deploy-part3
extends: .packet_pr
when: on_success

packet_debian11-calico-upgrade-once:
stage: deploy-extended
stage: deploy-part3
extends: .packet_periodic
when: on_success
variables:
UPGRADE_TEST: graceful

packet_ubuntu20-calico-ha-recover:
stage: deploy-extended
stage: deploy-part3
extends: .packet_periodic
when: on_success
variables:
RECOVER_CONTROL_PLANE_TEST: "true"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"

packet_ubuntu20-calico-ha-recover-noquorum:
stage: deploy-extended
stage: deploy-part3
extends: .packet_periodic
when: on_success
variables:
RECOVER_CONTROL_PLANE_TEST: "true"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:]:kube_control_plane[1:]"

packet_debian11-calico-upgrade:
stage: deploy-extended
extends: .packet_periodic
variables:
UPGRADE_TEST: graceful

packet_debian12-cilium-svc-proxy:
stage: deploy-extended
extends: .packet_periodic
16 .gitlab-ci/shellcheck.yml Normal file
@@ -0,0 +1,16 @@
---
shellcheck:
extends: .job
stage: unit-tests
tags: [light]
variables:
SHELLCHECK_VERSION: v0.7.1
before_script:
- ./tests/scripts/rebase.sh
- curl --silent --location "https://github.com/koalaman/shellcheck/releases/download/"${SHELLCHECK_VERSION}"/shellcheck-"${SHELLCHECK_VERSION}".linux.x86_64.tar.xz" | tar -xJv
- cp shellcheck-"${SHELLCHECK_VERSION}"/shellcheck /usr/bin/
- shellcheck --version
script:
# Run shellcheck for all *.sh
- find . -name '*.sh' -not -path './.git/*' | xargs shellcheck --severity error
except: ['triggers', 'master']
@@ -2,10 +2,6 @@
# Tests for contrib/terraform/
.terraform_install:
extends: .job
needs:
- ci-not-authorized
- pipeline-image
stage: deploy-part1
before_script:
- update-alternatives --install /usr/bin/python python /usr/bin/python3 1
- ./tests/scripts/rebase.sh
@@ -28,19 +24,17 @@

.terraform_validate:
extends: .terraform_install
tags: [ffci]
stage: unit-tests
tags: [light]
only: ['master', /^pr-.*$/]
script:
- terraform -chdir="contrib/terraform/$PROVIDER" validate
- terraform -chdir="contrib/terraform/$PROVIDER" fmt -check -diff
stage: test
needs:
- pipeline-image

.terraform_apply:
extends: .terraform_install
tags: [ffci]
stage: deploy-extended
tags: [light]
stage: deploy-part3
when: manual
only: [/^pr-.*$/]
artifacts:
@@ -57,7 +51,7 @@
- tests/scripts/testcases_run.sh
after_script:
# Cleanup regardless of exit code
- ./tests/scripts/testcases_cleanup.sh
- chronic ./tests/scripts/testcases_cleanup.sh

tf-validate-openstack:
extends: .terraform_validate
@@ -152,7 +146,8 @@ tf-validate-nifcloud:
TF_VAR_router_id: "ab95917c-41fb-4881-b507-3a6dfe9403df"

tf-elastx_cleanup:
tags: [ffci]
stage: unit-tests
tags: [light]
image: python
variables:
<<: *elastx_variables
@@ -160,11 +155,10 @@ tf-elastx_cleanup:
- pip install -r scripts/openstack-cleanup/requirements.txt
script:
- ./scripts/openstack-cleanup/main.py
allow_failure: true

tf-elastx_ubuntu20-calico:
extends: .terraform_apply
stage: deploy-part1
stage: deploy-part3
when: on_success
allow_failure: true
variables:
@@ -1,81 +1,64 @@
---

.vagrant:
extends: .testcases
needs:
- ci-not-authorized
variables:
CI_PLATFORM: "vagrant"
SSH_USER: "vagrant"
VAGRANT_DEFAULT_PROVIDER: "libvirt"
KUBESPRAY_VAGRANT_CONFIG: tests/files/${CI_JOB_NAME}.rb
DOCKER_NAME: vagrant
VAGRANT_ANSIBLE_TAGS: facts
VAGRANT_HOME: "$CI_PROJECT_DIR/.vagrant.d"
PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip"
tags: [ffci-vm-large]
# only: [/^pr-.*$/]
# except: ['triggers']
image: quay.io/kubespray/vm-kubespray-ci:v13
tags: [c3.small.x86]
only: [/^pr-.*$/]
except: ['triggers']
image: $PIPELINE_IMAGE
services: []
before_script:
- echo $USER
- python3 -m venv citest
- source citest/bin/activate
- vagrant plugin expunge --reinstall --force --no-tty
- vagrant plugin install vagrant-libvirt
- pip install --no-compile --no-cache-dir pip -U
- pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/requirements.txt
- pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/tests/requirements.txt
- ./tests/scripts/vagrant_clean.sh
script:
- ./tests/scripts/testcases_run.sh
cache:
key: $CI_JOB_NAME_SLUG
paths:
- .vagrant.d/boxes
- .cache/pip
policy: pull-push # TODO: change to "pull" when not on main
after_script:
- chronic ./tests/scripts/testcases_cleanup.sh

vagrant_ubuntu24-calico-dual-stack:
stage: deploy-extended
vagrant_ubuntu20-calico-dual-stack:
stage: deploy-part2
extends: .vagrant
rules:
- if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
when: on_success
allow_failure: false
when: manual
# FIXME: this test is broken (perma-failing)

vagrant_ubuntu24-calico-ipv6only-stack:
stage: deploy-extended
vagrant_ubuntu20-weave-medium:
stage: deploy-part2
extends: .vagrant
rules:
- if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
when: on_success
allow_failure: false
when: manual

vagrant_ubuntu20-flannel:
stage: deploy-part1
stage: deploy-part2
extends: .vagrant
when: on_success
allow_failure: false

vagrant_ubuntu20-flannel-collection:
stage: deploy-extended
stage: deploy-part2
extends: .vagrant
when: manual
when: on_success

vagrant_ubuntu20-kube-router-sep:
stage: deploy-extended
stage: deploy-part2
extends: .vagrant
when: manual

# Service proxy test fails connectivity testing
vagrant_ubuntu20-kube-router-svc-proxy:
stage: deploy-extended
stage: deploy-part2
extends: .vagrant
when: manual

vagrant_fedora39-kube-router:
stage: deploy-extended
vagrant_fedora37-kube-router:
stage: deploy-part2
extends: .vagrant
when: manual
# FIXME: this test is broken (perma-failing)

vagrant_centos7-kube-router:
stage: deploy-part2
extends: .vagrant
when: manual
3 .markdownlint.yaml Normal file
@@ -0,0 +1,3 @@
---
MD013: false
MD029: false
@@ -1,4 +0,0 @@
all
exclude_rule 'MD013'
exclude_rule 'MD029'
rule 'MD007', :indent => 2
@@ -1,7 +1,7 @@
---
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
rev: v3.4.0
hooks:
- id: check-added-large-files
- id: check-case-conflict
@@ -15,43 +15,52 @@ repos:
- id: trailing-whitespace

- repo: https://github.com/adrienverge/yamllint.git
rev: v1.35.1
rev: v1.27.1
hooks:
- id: yamllint
args: [--strict]

- repo: https://github.com/shellcheck-py/shellcheck-py
rev: v0.10.0.1
- repo: https://github.com/markdownlint/markdownlint
rev: v0.11.0
hooks:
- id: markdownlint
args: [-r, "~MD013,~MD029"]
exclude: "^.git"

- repo: https://github.com/jumanjihouse/pre-commit-hooks
rev: 3.0.0
hooks:
- id: shellcheck
args: ["--severity=error"]
args: [--severity, "error"]
exclude: "^.git"
files: "\\.sh$"

- repo: https://github.com/ansible/ansible-lint
rev: v25.1.1
hooks:
- id: ansible-lint
additional_dependencies:
- jmespath==1.0.1
- netaddr==1.3.0
- distlib

- repo: https://github.com/golangci/misspell
rev: v0.6.0
hooks:
- id: misspell
exclude: "OWNERS_ALIASES$"

- repo: local
hooks:
- id: collection-build-install
name: Build and install kubernetes-sigs.kubespray Ansible collection
- id: ansible-lint
name: ansible-lint
entry: ansible-lint -v
language: python
pass_filenames: false
additional_dependencies:
- ansible-core>=2.16.4
- distlib
entry: tests/scripts/collection-build-install.sh
- .[community]

- id: ansible-syntax-check
name: ansible-syntax-check
entry: env ANSIBLE_INVENTORY=inventory/local-tests.cfg ANSIBLE_REMOTE_USER=root ANSIBLE_BECOME="true" ANSIBLE_BECOME_USER=root ANSIBLE_VERBOSITY="3" ansible-playbook --syntax-check
language: python
files: "^cluster.yml|^upgrade-cluster.yml|^reset.yml|^extra_playbooks/upgrade-only-k8s.yml"

- id: tox-inventory-builder
name: tox-inventory-builder
entry: bash -c "cd contrib/inventory_builder && tox"
language: python
pass_filenames: false

- id: check-readme-versions
name: check-readme-versions
entry: tests/scripts/check_readme_versions.sh
language: script
pass_filenames: false

- id: generate-docs-sidebar
@@ -62,21 +71,9 @@ repos:

- id: ci-matrix
name: ci-matrix
entry: tests/scripts/md-table/main.py
language: python
entry: tests/scripts/md-table/test.sh
language: script
pass_filenames: false
additional_dependencies:
- jinja2
- pathlib
- pyaml

- id: check-galaxy-version
name: Verify correct version for galaxy.yml
entry: scripts/galaxy_version.py
language: python
pass_filenames: false
additional_dependencies:
- ruamel.yaml

- id: jinja-syntax-check
name: jinja-syntax-check
@@ -85,26 +82,4 @@ repos:
types:
- jinja
additional_dependencies:
- jinja2

- id: propagate-ansible-variables
name: Update static files referencing default kubespray values
language: python
additional_dependencies:
- ansible-core>=2.16.4
entry: scripts/propagate_ansible_variables.yml
pass_filenames: false

- id: check-checksums-sorted
name: Check that our checksums are correctly sorted by version
entry: scripts/assert-sorted-checksums.yml
language: python
pass_filenames: false
additional_dependencies:
- ansible

- repo: https://github.com/markdownlint/markdownlint
rev: v0.12.0
hooks:
- id: markdownlint
exclude: "^.github|(^docs/_sidebar\\.md$)"
- Jinja2
10 .yamllint
@@ -6,7 +6,7 @@ ignore: |
.github/
# Generated file
tests/files/custom_cni/cilium.yaml
# https://ansible.readthedocs.io/projects/lint/rules/yaml/

rules:
braces:
min-spaces-inside: 0
@@ -14,15 +14,9 @@ rules:
brackets:
min-spaces-inside: 0
max-spaces-inside: 1
comments:
min-spaces-from-content: 1
# https://github.com/adrienverge/yamllint/issues/384
comments-indentation: false
indentation:
spaces: 2
indent-sequences: consistent
line-length: disable
new-line-at-end-of-file: disable
octal-values:
forbid-implicit-octal: true # yamllint defaults to false
forbid-explicit-octal: true # yamllint defaults to false
truthy: disable
@@ -34,9 +34,11 @@ RUN --mount=type=bind,source=requirements.txt,target=requirements.txt \

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

RUN OS_ARCHITECTURE=$(dpkg --print-architecture) \
&& curl -L "https://dl.k8s.io/release/v1.32.3/bin/linux/${OS_ARCHITECTURE}/kubectl" -o /usr/local/bin/kubectl \
&& echo "$(curl -L "https://dl.k8s.io/release/v1.32.3/bin/linux/${OS_ARCHITECTURE}/kubectl.sha256")" /usr/local/bin/kubectl | sha256sum --check \
RUN --mount=type=bind,source=roles/kubespray-defaults/defaults/main/main.yml,target=roles/kubespray-defaults/defaults/main/main.yml \
KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main/main.yml) \
OS_ARCHITECTURE=$(dpkg --print-architecture) \
&& curl -L "https://dl.k8s.io/release/${KUBE_VERSION}/bin/linux/${OS_ARCHITECTURE}/kubectl" -o /usr/local/bin/kubectl \
&& echo "$(curl -L "https://dl.k8s.io/release/${KUBE_VERSION}/bin/linux/${OS_ARCHITECTURE}/kubectl.sha256")" /usr/local/bin/kubectl | sha256sum --check \
&& chmod a+x /usr/local/bin/kubectl

COPY *.yml ./
7 Makefile Normal file
@@ -0,0 +1,7 @@
mitogen:
@echo Mitogen support is deprecated.
@echo Please run the following command manually:
@echo ansible-playbook -c local mitogen.yml -vv
clean:
rm -rf dist/
rm *.retry
@@ -6,17 +6,15 @@ aliases:
- mzaian
- oomichi
- yankay
- ant31
- vannten
kubespray-reviewers:
- cyclinder
- erikjiang
- mrfreezeex
- mzaian
- tico88612
- vannten
- yankay
kubespray-emeritus_approvers:
- ant31
- atoms
- chadswen
- luckysb
183 README.md
@@ -1,3 +1,5 @@
TEST CI

# Deploy a Production Ready Kubernetes Cluster


@@ -5,7 +7,7 @@
If you have questions, check the documentation at [kubespray.io](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
You can get your invite [here](http://slack.k8s.io/)

- Can be deployed on **[AWS](docs/cloud_providers/aws.md), GCE, [Azure](docs/cloud_providers/azure.md), [OpenStack](docs/cloud_controllers/openstack.md), [vSphere](docs/cloud_controllers/vsphere.md), [Equinix Metal](docs/cloud_providers/equinix-metal.md) (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal**
- Can be deployed on **[AWS](docs/cloud_providers/aws.md), GCE, [Azure](docs/cloud_providers/azure.md), [OpenStack](docs/cloud_providers/openstack.md), [vSphere](docs/cloud_providers/vsphere.md), [Equinix Metal](docs/cloud_providers/equinix-metal.md) (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal**
- **Highly available** cluster
- **Composable** (Choice of the network plugin for instance)
- Supports most popular **Linux distributions**
@@ -15,23 +17,74 @@ You can get your invite [here](http://slack.k8s.io/)

Below are several ways to use Kubespray to deploy a Kubernetes cluster.

### Docker

Ensure you have installed Docker then

```ShellSession
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
--mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
quay.io/kubespray/kubespray:v2.27.0 bash
# Inside the container you may now run the kubespray playbooks:
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
```

### Ansible

#### Usage

See [Getting started](/docs/getting_started/getting-started.md)
Install Ansible according to [Ansible installation guide](/docs/ansible/ansible.md#installing-ansible)
then run the following steps:

```ShellSession
# Copy ``inventory/sample`` as ``inventory/mycluster``
cp -rfp inventory/sample inventory/mycluster

# Update Ansible inventory file with inventory builder
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py ${IPS[@]}

# Review and change parameters under ``inventory/mycluster/group_vars``
cat inventory/mycluster/group_vars/all/all.yml
cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml

# Clean up old Kubernetes cluster with Ansible Playbook - run the playbook as root
# The option `--become` is required, as for example cleaning up SSL keys in /etc/,
# uninstalling old packages and interacting with various systemd daemons.
# Without --become the playbook will fail to run!
# And bear in mind that it will remove the current kubernetes cluster (if it's running)!
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root reset.yml

# Deploy Kubespray with Ansible Playbook - run the playbook as root
# The option `--become` is required, as for example writing SSL keys in /etc/,
# installing packages and interacting with various systemd daemons.
# Without --become the playbook will fail to run!
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
```

Note: When Ansible is already installed via system packages on the control node,
Python packages installed via `sudo pip install -r requirements.txt` will go to
a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on
Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on
Ubuntu). As a consequence, the `ansible-playbook` command will fail with:

```raw
ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path.
```

This likely indicates that a task depends on a module present in ``requirements.txt``.

One way of addressing this is to uninstall the system Ansible package then
reinstall Ansible via ``pip``, but this is not always possible and one must
take care regarding package versions.
A workaround consists of setting the `ANSIBLE_LIBRARY`
and `ANSIBLE_MODULE_UTILS` environment variables respectively to
the `ansible/modules` and `ansible/module_utils` subdirectories of the ``pip``
installation location, which is the ``Location`` shown by running
`pip show [package]` before executing `ansible-playbook`.
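For instance, a minimal sketch of that workaround (the path shown is illustrative; substitute whatever ``Location`` `pip show` reports on your system):

```ShellSession
pip show ansible | grep ^Location
# Suppose it prints: Location: /usr/local/lib/python2.7/dist-packages
export ANSIBLE_LIBRARY=/usr/local/lib/python2.7/dist-packages/ansible/modules
export ANSIBLE_MODULE_UTILS=/usr/local/lib/python2.7/dist-packages/ansible/module_utils
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
```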

A simple way to ensure you get all the correct version of Ansible is to use
the [pre-built docker image from Quay](https://quay.io/repository/kubespray/kubespray?tab=tags).
You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/)
to access the inventory and SSH key in the container, like this:

```ShellSession
git checkout v2.25.0
docker pull quay.io/kubespray/kubespray:v2.25.0
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
--mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
quay.io/kubespray/kubespray:v2.25.0 bash
# Inside the container you may now run the kubespray playbooks:
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
```

#### Collection

@@ -72,9 +125,12 @@ vagrant up
- [Fedora CoreOS bootstrap](docs/operating_systems/fcos.md)
- [openSUSE setup](docs/operating_systems/opensuse.md)
- [Downloaded artifacts](docs/advanced/downloads.md)
- [Cloud providers](docs/cloud_providers/cloud.md)
- [OpenStack](docs/cloud_providers/openstack.md)
- [AWS](docs/cloud_providers/aws.md)
- [Azure](docs/cloud_providers/azure.md)
- [vSphere](docs/cloud_providers/vsphere.md)
- [Equinix Metal](docs/cloud_providers/equinix-metal.md)
- [OpenStack](docs/cloud_controllers/openstack.md)
- [vSphere](docs/cloud_controllers/vsphere.md)
- [Large deployments](docs/operations/large-deployments.md)
- [Adding/replacing a node](docs/operations/nodes.md)
- [Upgrades basics](docs/operations/upgrades.md)
@@ -87,73 +143,68 @@ vagrant up
## Supported Linux Distributions

- **Flatcar Container Linux by Kinvolk**
- **Debian** Bookworm, Bullseye
- **Ubuntu** 20.04, 22.04, 24.04
- **CentOS/RHEL** [8, 9](docs/operating_systems/rhel.md#rhel-8)
- **Fedora** 39, 40
- **Debian** Bookworm, Bullseye, Buster
- **Ubuntu** 20.04, 22.04
- **CentOS/RHEL** 7, [8, 9](docs/operating_systems/centos.md#centos-8)
- **Fedora** 37, 38
- **Fedora CoreOS** (see [fcos Note](docs/operating_systems/fcos.md))
- **openSUSE** Leap 15.x/Tumbleweed
- **Oracle Linux** [8, 9](docs/operating_systems/rhel.md#rhel-8)
- **Alma Linux** [8, 9](docs/operating_systems/rhel.md#rhel-8)
- **Rocky Linux** [8, 9](docs/operating_systems/rhel.md#rhel-8)
- **Oracle Linux** 7, [8, 9](docs/operating_systems/centos.md#centos-8)
- **Alma Linux** [8, 9](docs/operating_systems/centos.md#centos-8)
- **Rocky Linux** [8, 9](docs/operating_systems/centos.md#centos-8)
- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/operating_systems/kylinlinux.md))
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/operating_systems/amazonlinux.md))
- **UOS Linux** (experimental: see [uos linux notes](docs/operating_systems/uoslinux.md))
- **openEuler** (experimental: see [openEuler notes](docs/operating_systems/openeuler.md))

Note:

- Upstart/SysV init based OS types are not supported.
- [Kernel requirements](docs/operations/kernel-requirements.md) (please read if the OS kernel version is < 4.19).
Note: Upstart/SysV init based OS types are not supported.

## Supported Components

<!-- BEGIN ANSIBLE MANAGED BLOCK -->

- Core
- [kubernetes](https://github.com/kubernetes/kubernetes) 1.32.3
- [etcd](https://github.com/etcd-io/etcd) 3.5.16
- [docker](https://www.docker.com/) 28.0
- [containerd](https://containerd.io/) 2.0.3
- [cri-o](http://cri-o.io/) 1.32.0 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). Only on fedora, ubuntu and centos based OS)
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.29.5
- [etcd](https://github.com/etcd-io/etcd) v3.5.12
- [docker](https://www.docker.com/) v24.0 (see [Note](#container-runtime-notes))
- [containerd](https://containerd.io/) v1.7.16
- [cri-o](http://cri-o.io/) v1.29.1 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). Only on fedora, ubuntu and centos based OS)
- Network Plugin
- [cni-plugins](https://github.com/containernetworking/plugins) 1.4.1
- [calico](https://github.com/projectcalico/calico) 3.29.2
- [cilium](https://github.com/cilium/cilium) 1.15.9
- [flannel](https://github.com/flannel-io/flannel) 0.22.0
- [kube-ovn](https://github.com/alauda/kube-ovn) 1.12.21
- [kube-router](https://github.com/cloudnativelabs/kube-router) 2.0.0
- [multus](https://github.com/k8snetworkplumbingwg/multus-cni) 4.1.0
- [weave](https://github.com/rajch/weave) 2.8.7
- [kube-vip](https://github.com/kube-vip/kube-vip) 0.8.0
- [cni-plugins](https://github.com/containernetworking/plugins) v1.2.0
- [calico](https://github.com/projectcalico/calico) v3.27.3
- [cilium](https://github.com/cilium/cilium) v1.15.4
- [flannel](https://github.com/flannel-io/flannel) v0.22.0
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.11.5
- [kube-router](https://github.com/cloudnativelabs/kube-router) v2.0.0
- [multus](https://github.com/k8snetworkplumbingwg/multus-cni) v3.8
- [weave](https://github.com/weaveworks/weave) v2.8.1
- [kube-vip](https://github.com/kube-vip/kube-vip) v0.8.0
- Application
- [cert-manager](https://github.com/jetstack/cert-manager) 1.15.3
- [coredns](https://github.com/coredns/coredns) 1.11.3
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) 1.12.1
- [argocd](https://argoproj.github.io/) 2.14.5
- [helm](https://helm.sh/) 3.16.4
- [metallb](https://metallb.universe.tf/) 0.13.9
- [registry](https://github.com/distribution/distribution) 2.8.1
- [cert-manager](https://github.com/jetstack/cert-manager) v1.13.2
- [coredns](https://github.com/coredns/coredns) v1.11.1
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.10.1
- [krew](https://github.com/kubernetes-sigs/krew) v0.4.4
- [argocd](https://argoproj.github.io/) v2.11.0
- [helm](https://helm.sh/) v3.14.2
- [metallb](https://metallb.universe.tf/) v0.13.9
- [registry](https://github.com/distribution/distribution) v2.8.1
- Storage Plugin
- [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) 2.1.0-k8s1.11
- [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) 2.1.1-k8s1.11
- [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) 0.5.0
- [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) 1.10.0
- [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) 1.30.0
- [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) 1.9.2
- [local-path-provisioner](https://github.com/rancher/local-path-provisioner) 0.0.24
- [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) 2.5.0
- [node-feature-discovery](https://github.com/kubernetes-sigs/node-feature-discovery) 0.16.4

<!-- END ANSIBLE MANAGED BLOCK -->
- [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
- [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
- [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) v0.5.0
- [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0
- [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.29.0
- [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.9.2
- [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.24
- [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0
- [node-feature-discovery](https://github.com/kubernetes-sigs/node-feature-discovery) v0.14.2

## Container Runtime Notes

- Supported Docker versions are 18.09, 19.03, 20.10, 23.0 and 24.0. The *recommended* Docker version is 24.0. `Kubelet` might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster, look into e.g. the YUM ``versionlock`` plugin or ``apt pin`` (see the sketch after this list).
- The cri-o version should be aligned with the respective kubernetes version (i.e. kube_version=1.20.x, crio_version=1.20)
|
||||
|
||||
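A minimal sketch of such pinning, assuming the runtime was installed as `docker-ce` (the exact package name depends on your distribution and install method):

```shell
# Debian/Ubuntu: hold the package so unattended upgrades cannot move it
sudo apt-mark hold docker-ce docker-ce-cli

# RHEL family: lock the installed version with the versionlock plugin
sudo yum install -y yum-plugin-versionlock
sudo yum versionlock add docker-ce
```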
## Requirements

- **Minimum required version of Kubernetes is v1.30**
- **Minimum required version of Kubernetes is v1.28**
- **Ansible v2.14+, Jinja 2.11+ and python-netaddr are installed on the machine that will run Ansible commands**
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/operations/offline-environment.md))
- The target servers are configured to allow **IPv4 forwarding** (a minimal sketch follows this list).
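For illustration only, enabling IPv4 forwarding by hand looks like the following (Kubespray's own roles normally manage this sysctl, so treat this as a sketch):

```shell
# Enable immediately
sudo sysctl -w net.ipv4.ip_forward=1

# Persist across reboots
echo 'net.ipv4.ip_forward = 1' | sudo tee /etc/sysctl.d/99-ip-forward.conf
sudo sysctl --system
```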
@@ -167,10 +218,10 @@ Note:

Hardware:
These limits are safeguarded by Kubespray. Actual requirements for your workload can differ. For a sizing guide go to the [Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/#size-of-master-and-master-components) guide.

- Control Plane
  - Memory: 2 GB
- Worker Node
  - Memory: 1 GB
- Master
  - Memory: 1500 MB
- Node
  - Memory: 1024 MB

## Network Plugins
@@ -12,10 +12,10 @@ The Kubespray Project is released on an as-needed basis. The process is as follows:

1. (For major releases) On the `master` branch: bump the version in `galaxy.yml` to the next expected major release (X.y.0 with y = Y + 1), make a Pull Request (a sketch of the bump follows this list).
1. (For minor releases) On the `release-X.Y` branch: bump the version in `galaxy.yml` to the next expected minor release (X.Y.z with z = Z + 1), make a Pull Request.
1. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) container images are built and tagged. See the following `Container image creation` section for the details.
1. (Only for major releases) The `KUBESPRAY_VERSION` in `.gitlab-ci.yml` is upgraded to the version we just released # TODO clarify this, this variable is for testing upgrades.
1. The release issue is closed
1. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
1. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
1. Create/Update Issue for upgrading kubernetes and [k8s-conformance](https://github.com/cncf/k8s-conformance)
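A sketch of the `galaxy.yml` bump from the first two steps, assuming the collection metadata keeps its usual `version` field (the numbers below are placeholders, not a real release):

```yaml
# galaxy.yml (illustrative excerpt; only the version field changes)
namespace: kubernetes_sigs
name: kubespray
version: 2.26.0  # was 2.25.0; X.y.0 for a major release, X.Y.z for a minor one
```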
## Major/minor releases and milestones
@@ -9,7 +9,7 @@
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://kubernetes.io/security/
mattymo
floryut
ant31
VannTen
yankay
oomichi
cristicalin
77 Vagrantfile vendored
@@ -1,7 +1,7 @@
# -*- mode: ruby -*-
# # vi: set ft=ruby :

# For help on using kubespray with vagrant, check out docs/developers/vagrant.md
# For help on using kubespray with vagrant, check out docs/vagrant.md

require 'fileutils'

@@ -22,21 +22,21 @@ SUPPORTED_OS = {
  "ubuntu2004" => {box: "generic/ubuntu2004", user: "vagrant"},
  "ubuntu2204" => {box: "generic/ubuntu2204", user: "vagrant"},
  "ubuntu2404" => {box: "bento/ubuntu-24.04", user: "vagrant"},
  "centos" => {box: "centos/7", user: "vagrant"},
  "centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
  "centos8" => {box: "centos/8", user: "vagrant"},
  "centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
  "almalinux8" => {box: "almalinux/8", user: "vagrant"},
  "almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
  "almalinux9" => {box: "almalinux/9", user: "vagrant"},
  "rockylinux8" => {box: "rockylinux/8", user: "vagrant"},
  "rockylinux9" => {box: "rockylinux/9", user: "vagrant"},
  "fedora39" => {box: "fedora/39-cloud-base", user: "vagrant"},
  "fedora40" => {box: "fedora/40-cloud-base", user: "vagrant"},
  "fedora39-arm64" => {box: "bento/fedora-39-arm64", user: "vagrant"},
  "fedora40-arm64" => {box: "bento/fedora-40", user: "vagrant"},
  "opensuse" => {box: "opensuse/Leap-15.6.x86_64", user: "vagrant"},
  "fedora37" => {box: "fedora/37-cloud-base", user: "vagrant"},
  "fedora38" => {box: "fedora/38-cloud-base", user: "vagrant"},
  "opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"},
  "opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
  "oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
  "oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
  "rhel7" => {box: "generic/rhel7", user: "vagrant"},
  "rhel8" => {box: "generic/rhel8", user: "vagrant"},
  "debian11" => {box: "debian/bullseye64", user: "vagrant"},
  "debian12" => {box: "debian/bookworm64", user: "vagrant"},
@@ -58,27 +58,16 @@ $subnet ||= "172.18.8"
$subnet_ipv6 ||= "fd3c:b398:0698:0756"
$os ||= "ubuntu2004"
$network_plugin ||= "flannel"
$inventories ||= []
# Setting multi_networking to true will install Multus: https://github.com/k8snetworkplumbingwg/multus-cni
$multi_networking ||= "False"
$download_run_once ||= "True"
$download_force_cache ||= "False"
# Modify those to have separate groups (for instance, to test separate etcd:)
# first_control_plane = 1
# first_etcd = 4
# control_plane_instances = 3
# etcd_instances = 3
$first_node ||= 1
$first_control_plane ||= 1
$first_etcd ||= 1

# The first three nodes are etcd servers
$etcd_instances ||= [$num_instances, 3].min
# The first two nodes are kube masters
$control_plane_instances ||= [$num_instances, 2].min
$kube_master_instances ||= [$num_instances, 2].min
# All nodes are kube nodes
$kube_node_instances ||= $num_instances - $first_node + 1

$kube_node_instances ||= $num_instances
# The following only works when using the libvirt provider
$kube_node_instances_with_disks ||= false
$kube_node_instances_with_disks_size ||= "20G"
@@ -107,6 +96,19 @@ if ! SUPPORTED_OS.key?($os)
end

$box = SUPPORTED_OS[$os][:box]
# if $inventory is not set, try to use example
$inventory = "inventory/sample" if ! $inventory
$inventory = File.absolute_path($inventory, File.dirname(__FILE__))

# if $inventory has a hosts.ini file use it, otherwise copy over
# vars etc to where vagrant expects dynamic inventory to be
if ! File.exist?(File.join(File.dirname($inventory), "hosts.ini"))
  $vagrant_ansible = File.join(File.absolute_path($vagrant_dir), "provisioners", "ansible")
  FileUtils.mkdir_p($vagrant_ansible) if ! File.exist?($vagrant_ansible)
  $vagrant_inventory = File.join($vagrant_ansible, "inventory")
  FileUtils.rm_f($vagrant_inventory)
  FileUtils.ln_s($inventory, $vagrant_inventory)
end

if Vagrant.has_plugin?("vagrant-proxyconf")
  $no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost"
@@ -205,7 +207,7 @@ Vagrant.configure("2") do |config|
      node.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true
    end

    if ["rhel8"].include? $os
    if ["rhel7","rhel8"].include? $os
      # Vagrant synced_folder rsync options cannot be used for RHEL boxes as Rsync package cannot
      # be installed until the host is registered with a valid Red Hat support subscription
      node.vm.synced_folder ".", "/vagrant", disabled: false
@@ -220,20 +222,14 @@ Vagrant.configure("2") do |config|
    end

    ip = "#{$subnet}.#{i+100}"
    ip6 = "#{$subnet_ipv6}::#{i+100}"
    node.vm.network :private_network,
      :ip => ip,
      :libvirt__guest_ipv6 => 'yes',
      :libvirt__ipv6_address => ip6,
      :libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
      :libvirt__ipv6_prefix => "64",
      :libvirt__forward_mode => "none",
      :libvirt__dhcp_enabled => false

    # libvirt__ipv6_address does not work as intended, the address is obtained with the desired prefix, but auto-generated(like fd3c:b398:698:756:5054:ff:fe48:c61e/64)
    # add default route for detect ansible_default_ipv6
    # TODO: fix libvirt__ipv6 or use $subnet in shell
    config.vm.provision "shell", inline: "ip -6 r a fd3c:b398:698:756::/64 dev eth1;ip -6 r add default via fd3c:b398:0698:0756::1 dev eth1 || true"

    # Disable swap for each vm
    node.vm.provision "shell", inline: "swapoff -a"

@@ -242,16 +238,15 @@ Vagrant.configure("2") do |config|
      node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf"
      node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf"
    end
    # Hack for fedora39/40 to get the IP address of the second interface
    if ["fedora39", "fedora40", "fedora39-arm64", "fedora40-arm64"].include? $os
    # Hack for fedora37/38 to get the IP address of the second interface
    if ["fedora37", "fedora38"].include? $os
      config.vm.provision "shell", inline: <<-SHELL
        nmcli conn modify 'Wired connection 2' ipv4.addresses $(cat /etc/sysconfig/network-scripts/ifcfg-eth1 | grep IPADDR | cut -d "=" -f2)/24
        nmcli conn modify 'Wired connection 2' ipv4.addresses $(cat /etc/sysconfig/network-scripts/ifcfg-eth1 | grep IPADDR | cut -d "=" -f2)
        nmcli conn modify 'Wired connection 2' ipv4.method manual
        service NetworkManager restart
      SHELL
    end

    # Rockylinux boxes needs UEFI
    if ["rockylinux8", "rockylinux9"].include? $os
      config.vm.provider "libvirt" do |domain|
@@ -260,7 +255,7 @@ Vagrant.configure("2") do |config|
    end

    # Disable firewalld on oraclelinux/redhat vms
    if ["oraclelinux","oraclelinux8", "rhel8","rockylinux8"].include? $os
    if ["oraclelinux","oraclelinux8","rhel7","rhel8","rockylinux8"].include? $os
      node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld"
    end

@@ -283,7 +278,6 @@ Vagrant.configure("2") do |config|
      "local_path_provisioner_enabled": "#{$local_path_provisioner_enabled}",
      "local_path_provisioner_claim_root": "#{$local_path_provisioner_claim_root}",
      "ansible_ssh_user": SUPPORTED_OS[$os][:user],
      "ansible_ssh_private_key_file": File.join(Dir.home, ".vagrant.d", "insecure_private_key"),
      "unsafe_show_logs": "True"
    }

@@ -294,22 +288,23 @@ Vagrant.configure("2") do |config|
      ansible.playbook = $playbook
      ansible.compatibility_mode = "2.0"
      ansible.verbose = $ansible_verbosity
      $ansible_inventory_path = File.join( $inventory, "hosts.ini")
      if File.exist?($ansible_inventory_path)
        ansible.inventory_path = $ansible_inventory_path
      end
      ansible.become = true
      ansible.limit = "all,localhost"
      ansible.host_key_checking = false
      ansible.raw_arguments = ["--forks=#{$num_instances}",
                               "--flush-cache",
                               "-e ansible_become_pass=vagrant"] +
                              $inventories.map {|inv| ["-i", inv]}.flatten
      ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache", "-e ansible_become_pass=vagrant"]
      ansible.host_vars = host_vars
      ansible.extra_vars = $extra_vars
      if $ansible_tags != ""
        ansible.tags = [$ansible_tags]
      end
      ansible.groups = {
        "etcd" => ["#{$instance_name_prefix}-[#{$first_etcd}:#{$etcd_instances + $first_etcd - 1}]"],
        "kube_control_plane" => ["#{$instance_name_prefix}-[#{$first_control_plane}:#{$control_plane_instances + $first_control_plane - 1}]"],
        "kube_node" => ["#{$instance_name_prefix}-[#{$first_node}:#{$kube_node_instances + $first_node - 1}]"],
        "etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
        "kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
        "kube_node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
        "k8s_cluster:children" => ["kube_control_plane", "kube_node"],
      }
    end
@@ -11,7 +11,6 @@ gathering = smart
fact_caching = jsonfile
fact_caching_connection = /tmp
fact_caching_timeout = 86400
timeout = 300
stdout_callback = default
display_skipped_hosts = no
library = ./library
@@ -1,6 +1,6 @@
---
- name: Generate Azure inventory
  hosts: localhost
  gather_facts: false
  gather_facts: False
  roles:
    - generate-inventory

@@ -1,6 +1,6 @@
---
- name: Generate Azure inventory
  hosts: localhost
  gather_facts: false
  gather_facts: False
  roles:
    - generate-inventory_2

@@ -1,6 +1,6 @@
---
- name: Generate Azure templates
  hosts: localhost
  gather_facts: false
  gather_facts: False
  roles:
    - generate-templates

@@ -12,4 +12,4 @@
  template:
    src: inventory.j2
    dest: "{{ playbook_dir }}/inventory"
    mode: "0644"
    mode: 0644

@@ -22,10 +22,10 @@
  template:
    src: inventory.j2
    dest: "{{ playbook_dir }}/inventory"
    mode: "0644"
    mode: 0644

- name: Generate Load Balancer variables
  template:
    src: loadbalancer_vars.j2
    dest: "{{ playbook_dir }}/loadbalancer_vars.yml"
    mode: "0644"
    mode: 0644

@@ -8,13 +8,13 @@
    path: "{{ base_dir }}"
    state: directory
    recurse: true
    mode: "0755"
    mode: 0755

- name: Store json files in base_dir
  template:
    src: "{{ item }}"
    dest: "{{ base_dir }}/{{ item }}"
    mode: "0644"
    mode: 0644
  with_items:
    - network.json
    - storage.json
177 contrib/dind/README.md Normal file
@@ -0,0 +1,177 @@
# Kubespray DIND experimental setup

This ansible playbook creates local docker containers
to serve as Kubernetes "nodes", which in turn will run
"normal" Kubernetes docker containers, a mode usually
called DIND (Docker-IN-Docker).

The playbook has two roles:

- dind-host: creates the "nodes" as containers on localhost, with
  appropriate settings for DIND (privileged, volume mapping for dind
  storage, etc).
- dind-cluster: customizes each node container to have the required
  system packages installed, and some utils (swapoff, lsattr)
  symlinked to /bin/true to ease mimicking a real node.

This playbook has been tested with Ubuntu 16.04 as the host and ubuntu:16.04
as docker images (note that dind-cluster has specific customization
for these images).

The playbook also creates a `/tmp/kubespray.dind.inventory_builder.sh`
helper (wraps up running `contrib/inventory_builder/inventory.py` with
node containers IPs and prefix).
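For illustration, the generated helper simply pins `HOST_PREFIX` and forwards the container IPs to the inventory builder; with hypothetical addresses it renders to something like:

```shell
# /tmp/kubespray.dind.inventory_builder.sh (illustrative; the IPs depend on your docker network)
HOST_PREFIX=kube-node python3 contrib/inventory_builder/inventory.py 172.17.0.2 172.17.0.3 172.17.0.4
```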
## Deploying

See below for a complete successful run:

1. Create the node containers

```shell
# From the kubespray root dir
cd contrib/dind
pip install -r requirements.txt

ansible-playbook -i hosts dind-cluster.yaml

# Back to kubespray root
cd ../..
```

NOTE: if the playbook run fails with an error message like the one below,
you may need to explicitly set `ansible_python_interpreter`;
see the `./hosts` file for an example expanded localhost entry.

```shell
failed: [localhost] (item=kube-node1) => {"changed": false, "item": "kube-node1", "msg": "Failed to import docker or docker-py - No module named requests.exceptions. Try `pip install docker` or `pip install docker-py` (Python 2.6)"}
```

2. Customize kubespray-dind.yaml

Note that there's coupling between the node containers created above
and the `kubespray-dind.yaml` settings, in particular regarding the selected `node_distro`
(as set in `group_vars/all/all.yaml`) and the docker settings (the relevant keys are sketched at the end of this section).

```shell
$EDITOR contrib/dind/kubespray-dind.yaml
```

3. Prepare the inventory and run the playbook

```shell
INVENTORY_DIR=inventory/local-dind
mkdir -p ${INVENTORY_DIR}
rm -f ${INVENTORY_DIR}/hosts.ini
CONFIG_FILE=${INVENTORY_DIR}/hosts.ini /tmp/kubespray.dind.inventory_builder.sh

ansible-playbook --become -e ansible_ssh_user=debian -i ${INVENTORY_DIR}/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml
```

NOTE: You could also test other distros without editing files by
passing `--extra-vars` as in the command line below,
replacing `DISTRO` by either `debian`, `ubuntu`, `centos`, `fedora`:

```shell
cd contrib/dind
ansible-playbook -i hosts dind-cluster.yaml --extra-vars node_distro=DISTRO

cd ../..
CONFIG_FILE=inventory/local-dind/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
ansible-playbook --become -e ansible_ssh_user=DISTRO -i inventory/local-dind/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml --extra-vars bootstrap_os=DISTRO
```
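For reference, the coupling mentioned in step 2 comes down to a few keys that must agree with the node containers; a minimal sketch, taken from the defaults shipped in this contrib (values assume the Debian setup):

```yaml
# group_vars/all/all.yaml — selects which container image/user the nodes use
node_distro: debian

# kubespray-dind.yaml — must match the distro the node containers were built with
bootstrap_os: debian
kubelet_fail_swap_on: false
docker_storage_options: -s overlay2 --storage-opt overlay2.override_kernel_check=true -g /dind/docker
```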
## Resulting deployment

See below to get an idea of what a completed deployment looks like,
from the host where you ran the kubespray playbooks.

### node_distro: debian

Running from an Ubuntu Xenial host:

```shell
$ uname -a
Linux ip-xx-xx-xx-xx 4.4.0-1069-aws #79-Ubuntu SMP Mon Sep 24
15:01:41 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux

$ docker ps
CONTAINER ID   IMAGE        COMMAND                 CREATED          STATUS          PORTS   NAMES
1835dd183b75   debian:9.5   "sh -c 'apt-get -qy …"  43 minutes ago   Up 43 minutes           kube-node5
30b0af8d2924   debian:9.5   "sh -c 'apt-get -qy …"  43 minutes ago   Up 43 minutes           kube-node4
3e0d1510c62f   debian:9.5   "sh -c 'apt-get -qy …"  43 minutes ago   Up 43 minutes           kube-node3
738993566f94   debian:9.5   "sh -c 'apt-get -qy …"  44 minutes ago   Up 44 minutes           kube-node2
c581ef662ed2   debian:9.5   "sh -c 'apt-get -qy …"  44 minutes ago   Up 44 minutes           kube-node1

$ docker exec kube-node1 kubectl get node
NAME         STATUS   ROLES         AGE   VERSION
kube-node1   Ready    master,node   18m   v1.12.1
kube-node2   Ready    master,node   17m   v1.12.1
kube-node3   Ready    node          17m   v1.12.1
kube-node4   Ready    node          17m   v1.12.1
kube-node5   Ready    node          17m   v1.12.1

$ docker exec kube-node1 kubectl get pod --all-namespaces
NAMESPACE     NAME                                    READY   STATUS    RESTARTS   AGE
default       netchecker-agent-67489                  1/1     Running   0          2m51s
default       netchecker-agent-6qq6s                  1/1     Running   0          2m51s
default       netchecker-agent-fsw92                  1/1     Running   0          2m51s
default       netchecker-agent-fw6tl                  1/1     Running   0          2m51s
default       netchecker-agent-hostnet-8f2zb          1/1     Running   0          3m
default       netchecker-agent-hostnet-gq7ml          1/1     Running   0          3m
default       netchecker-agent-hostnet-jfkgv          1/1     Running   0          3m
default       netchecker-agent-hostnet-kwfwx          1/1     Running   0          3m
default       netchecker-agent-hostnet-r46nm          1/1     Running   0          3m
default       netchecker-agent-lxdrn                  1/1     Running   0          2m51s
default       netchecker-server-864bd4c897-9vstl      1/1     Running   0          2m40s
default       sh-68fcc6db45-qf55h                     1/1     Running   1          12m
kube-system   coredns-7598f59475-6vknq                1/1     Running   0          14m
kube-system   coredns-7598f59475-l5q5x                1/1     Running   0          14m
kube-system   kube-apiserver-kube-node1               1/1     Running   0          17m
kube-system   kube-apiserver-kube-node2               1/1     Running   0          18m
kube-system   kube-controller-manager-kube-node1      1/1     Running   0          18m
kube-system   kube-controller-manager-kube-node2      1/1     Running   0          18m
kube-system   kube-proxy-5xx9d                        1/1     Running   0          17m
kube-system   kube-proxy-cdqq4                        1/1     Running   0          17m
kube-system   kube-proxy-n64ls                        1/1     Running   0          17m
kube-system   kube-proxy-pswmj                        1/1     Running   0          18m
kube-system   kube-proxy-x89qw                        1/1     Running   0          18m
kube-system   kube-scheduler-kube-node1               1/1     Running   4          17m
kube-system   kube-scheduler-kube-node2               1/1     Running   4          18m
kube-system   kubernetes-dashboard-5db4d9f45f-548rl   1/1     Running   0          14m
kube-system   nginx-proxy-kube-node3                  1/1     Running   4          17m
kube-system   nginx-proxy-kube-node4                  1/1     Running   4          17m
kube-system   nginx-proxy-kube-node5                  1/1     Running   4          17m
kube-system   weave-net-42bfr                         2/2     Running   0          16m
kube-system   weave-net-6gt8m                         2/2     Running   0          16m
kube-system   weave-net-88nnc                         2/2     Running   0          16m
kube-system   weave-net-shckr                         2/2     Running   0          16m
kube-system   weave-net-xr46t                         2/2     Running   0          16m

$ docker exec kube-node1 curl -s http://localhost:31081/api/v1/connectivity_check
{"Message":"All 10 pods successfully reported back to the server","Absent":null,"Outdated":null}
```

## Using ./run-test-distros.sh

You can use `./run-test-distros.sh` to run a set of tests via DIND;
an excerpt from this script, to get an idea:

```shell
# The SPEC file(s) must have two arrays as e.g.
# DISTROS=(debian centos)
# EXTRAS=(
#   'kube_network_plugin=calico'
#   'kube_network_plugin=flannel'
#   'kube_network_plugin=weave'
# )
# that will be tested in a "combinatory" way (e.g. from above there'll
# be 6 test runs), creating a sequenced <spec_filename>-nn.out with each output.
#
# Each $EXTRAS element will be whitespace split, and passed as --extra-vars
# to the main kubespray ansible-playbook run.
```

See e.g. `test-some_distros-most_CNIs.env` and
`test-some_distros-kube_router_combo.env` in particular for a richer
set of CNI-specific `--extra-vars` combos.
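A hedged usage example, assuming you run the script from `contrib/dind` with one of the bundled spec files:

```shell
cd contrib/dind
./run-test-distros.sh test-some_distros-most_CNIs.env
# each distro/extra combination is logged to ./out/<spec_filename>-NN.out
```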
11 contrib/dind/dind-cluster.yaml Normal file
@@ -0,0 +1,11 @@
---
- name: Create nodes as docker containers
  hosts: localhost
  gather_facts: False
  roles:
    - { role: dind-host }

- name: Customize each node containers
  hosts: containers
  roles:
    - { role: dind-cluster }
3 contrib/dind/group_vars/all/all.yaml Normal file
@@ -0,0 +1,3 @@
---
# See distro.yaml for supported node_distro images
node_distro: debian
41 contrib/dind/group_vars/all/distro.yaml Normal file
@@ -0,0 +1,41 @@
---
distro_settings:
  debian: &DEBIAN
    image: "debian:9.5"
    user: "debian"
    pid1_exe: /lib/systemd/systemd
    init: |
      sh -c "apt-get -qy update && apt-get -qy install systemd-sysv dbus && exec /sbin/init"
    raw_setup: apt-get -qy update && apt-get -qy install dbus python sudo iproute2
    raw_setup_done: test -x /usr/bin/sudo
    agetty_svc: getty@*
    ssh_service: ssh
    extra_packages: []
  ubuntu:
    <<: *DEBIAN
    image: "ubuntu:16.04"
    user: "ubuntu"
    init: |
      /sbin/init
  centos: &CENTOS
    image: "centos:7"
    user: "centos"
    pid1_exe: /usr/lib/systemd/systemd
    init: |
      /sbin/init
    raw_setup: yum -qy install policycoreutils dbus python sudo iproute iptables
    raw_setup_done: test -x /usr/bin/sudo
    agetty_svc: getty@* serial-getty@*
    ssh_service: sshd
    extra_packages: []
  fedora:
    <<: *CENTOS
    image: "fedora:latest"
    user: "fedora"
    raw_setup: yum -qy install policycoreutils dbus python sudo iproute iptables; mkdir -p /etc/modules-load.d
    extra_packages:
      - hostname
      - procps
      - findutils
      - kmod
      - iputils
15 contrib/dind/hosts Normal file
@@ -0,0 +1,15 @@
[local]
# If you created a virtualenv for ansible, you may need to specify running the
# python binary from there instead:
#localhost ansible_connection=local ansible_python_interpreter=/home/user/kubespray/.venv/bin/python
localhost ansible_connection=local

[containers]
kube-node1
kube-node2
kube-node3
kube-node4
kube-node5

[containers:vars]
ansible_connection=docker
22 contrib/dind/kubespray-dind.yaml Normal file
@@ -0,0 +1,22 @@
---
# kubespray-dind.yaml: minimal kubespray ansible playbook usable for DIND
# See contrib/dind/README.md
kube_api_anonymous_auth: true

kubelet_fail_swap_on: false

# Docker nodes need to have been created with the same "node_distro: debian"
# at contrib/dind/group_vars/all/all.yaml
bootstrap_os: debian

docker_version: latest

docker_storage_options: -s overlay2 --storage-opt overlay2.override_kernel_check=true -g /dind/docker

dns_mode: coredns

deploy_netchecker: True
netcheck_agent_image_repo: quay.io/l23network/k8s-netchecker-agent
netcheck_server_image_repo: quay.io/l23network/k8s-netchecker-server
netcheck_agent_image_tag: v1.0
netcheck_server_image_tag: v1.0
1 contrib/dind/requirements.txt Normal file
@@ -0,0 +1 @@
docker
73 contrib/dind/roles/dind-cluster/tasks/main.yaml Normal file
@@ -0,0 +1,73 @@
---
- name: Set_fact distro_setup
  set_fact:
    distro_setup: "{{ distro_settings[node_distro] }}"

- name: Set_fact other distro settings
  set_fact:
    distro_user: "{{ distro_setup['user'] }}"
    distro_ssh_service: "{{ distro_setup['ssh_service'] }}"
    distro_extra_packages: "{{ distro_setup['extra_packages'] }}"

- name: Null-ify some linux tools to ease DIND
  file:
    src: "/bin/true"
    dest: "{{ item }}"
    state: link
    force: yes
  with_items:
    # DIND box may have swap enabled, don't bother
    - /sbin/swapoff
    # /etc/hosts handling would fail on trying to copy file attributes on edit,
    # avoid it by successfully returning nil output
    - /usr/bin/lsattr
    # disable selinux-isms, esp. needed if running on a non-SELinux host
    - /usr/sbin/semodule

- name: Avoid installing dpkg docs and man pages on Debian based distros
  copy:
    content: |
      # Delete locales
      path-exclude=/usr/share/locale/*
      # Delete man pages
      path-exclude=/usr/share/man/*
      # Delete docs
      path-exclude=/usr/share/doc/*
      path-include=/usr/share/doc/*/copyright
    dest: /etc/dpkg/dpkg.cfg.d/01_nodoc
    mode: 0644
  when:
    - ansible_os_family == 'Debian'

- name: Install system packages to better match a full-fledged node
  package:
    name: "{{ item }}"
    state: present
  with_items: "{{ distro_extra_packages + ['rsyslog', 'openssh-server'] }}"

- name: Start needed services
  service:
    name: "{{ item }}"
    state: started
  with_items:
    - rsyslog
    - "{{ distro_ssh_service }}"

- name: Create distro user "{{ distro_user }}"
  user:
    name: "{{ distro_user }}"
    uid: 1000
    # groups: sudo
    append: yes

- name: Allow password-less sudo to "{{ distro_user }}"
  copy:
    content: "{{ distro_user }} ALL=(ALL) NOPASSWD:ALL"
    dest: "/etc/sudoers.d/{{ distro_user }}"
    mode: 0640

- name: "Add my pubkey to {{ distro_user }} user authorized keys"
  ansible.posix.authorized_key:
    user: "{{ distro_user }}"
    state: present
    key: "{{ lookup('file', lookup('env', 'HOME') + '/.ssh/id_rsa.pub') }}"
87 contrib/dind/roles/dind-host/tasks/main.yaml Normal file
@@ -0,0 +1,87 @@
---
- name: Set_fact distro_setup
  set_fact:
    distro_setup: "{{ distro_settings[node_distro] }}"

- name: Set_fact other distro settings
  set_fact:
    distro_image: "{{ distro_setup['image'] }}"
    distro_init: "{{ distro_setup['init'] }}"
    distro_pid1_exe: "{{ distro_setup['pid1_exe'] }}"
    distro_raw_setup: "{{ distro_setup['raw_setup'] }}"
    distro_raw_setup_done: "{{ distro_setup['raw_setup_done'] }}"
    distro_agetty_svc: "{{ distro_setup['agetty_svc'] }}"

- name: Create dind node containers from "containers" inventory section
  community.docker.docker_container:
    image: "{{ distro_image }}"
    name: "{{ item }}"
    state: started
    hostname: "{{ item }}"
    command: "{{ distro_init }}"
    # recreate: yes
    privileged: true
    tmpfs:
      - /sys/module/nf_conntrack/parameters
    volumes:
      - /boot:/boot
      - /lib/modules:/lib/modules
      - "{{ item }}:/dind/docker"
  register: containers
  with_items: "{{ groups.containers }}"
  tags:
    - addresses

- name: Gather list of containers IPs
  set_fact:
    addresses: "{{ containers.results | map(attribute='ansible_facts') | map(attribute='docker_container') | map(attribute='NetworkSettings') | map(attribute='IPAddress') | list }}"
  tags:
    - addresses

- name: Create inventory_builder helper already set with the list of node containers' IPs
  template:
    src: inventory_builder.sh.j2
    dest: /tmp/kubespray.dind.inventory_builder.sh
    mode: 0755
  tags:
    - addresses

- name: Install needed packages into node containers via raw, need to wait for possible systemd packages to finish installing
  raw: |
    # agetty processes churn a lot of cpu time failing on nonexistent ttys; STOP them early, to reap them in the task below
    pkill -STOP agetty || true
    {{ distro_raw_setup_done }} && echo SKIPPED && exit 0
    until [ "$(readlink /proc/1/exe)" = "{{ distro_pid1_exe }}" ] ; do sleep 1; done
    {{ distro_raw_setup }}
  delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
  with_items: "{{ containers.results }}"
  register: result
  changed_when: result.stdout.find("SKIPPED") < 0

- name: Remove gettys from node containers
  raw: |
    until test -S /var/run/dbus/system_bus_socket; do sleep 1; done
    systemctl disable {{ distro_agetty_svc }}
    systemctl stop {{ distro_agetty_svc }}
  delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
  with_items: "{{ containers.results }}"
  changed_when: false

# Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
# handle manually
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)
  raw: |
    echo {{ item | hash('sha1') }} > /etc/machine-id.new
    mv -b /etc/machine-id.new /etc/machine-id
    cmp /etc/machine-id /etc/machine-id~ || true
    systemctl daemon-reload
  delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
  with_items: "{{ containers.results }}"

- name: Early hack image install to adapt for DIND
  raw: |
    rm -fv /usr/bin/udevadm /usr/sbin/udevadm
  delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
  with_items: "{{ containers.results }}"
  register: result
  changed_when: result.stdout.find("removed") >= 0
@@ -0,0 +1,3 @@
#!/bin/bash
# NOTE: if you change HOST_PREFIX, you also need to edit ./hosts [containers] section
HOST_PREFIX=kube-node python3 contrib/inventory_builder/inventory.py {% for ip in addresses %} {{ ip }} {% endfor %}
93 contrib/dind/run-test-distros.sh Executable file
@@ -0,0 +1,93 @@
#!/bin/bash
# Q&D test'em all: creates full DIND kubespray deploys
# for each distro, verifying it via netchecker.

info() {
  local msg="$*"
  local date="$(date -Isec)"
  echo "INFO: [$date] $msg"
}
pass_or_fail() {
  local rc="$?"
  local msg="$*"
  local date="$(date -Isec)"
  [ $rc -eq 0 ] && echo "PASS: [$date] $msg" || echo "FAIL: [$date] $msg"
  return $rc
}
test_distro() {
  local distro=${1:?};shift
  local extra="${*:-}"
  local prefix="${distro[${extra}]}"
  ansible-playbook -i hosts dind-cluster.yaml -e node_distro=$distro
  pass_or_fail "$prefix: dind-nodes" || return 1
  (cd ../..
    INVENTORY_DIR=inventory/local-dind
    mkdir -p ${INVENTORY_DIR}
    rm -f ${INVENTORY_DIR}/hosts.ini
    CONFIG_FILE=${INVENTORY_DIR}/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
    # expand $extra with -e in front of each word
    extra_args=""; for extra_arg in $extra; do extra_args="$extra_args -e $extra_arg"; done
    ansible-playbook --become -e ansible_ssh_user=$distro -i \
      ${INVENTORY_DIR}/hosts.ini cluster.yml \
      -e @contrib/dind/kubespray-dind.yaml -e bootstrap_os=$distro ${extra_args}
    pass_or_fail "$prefix: kubespray"
  ) || return 1
  local node0=${NODES[0]}
  docker exec ${node0} kubectl get pod --all-namespaces
  pass_or_fail "$prefix: kube-api" || return 1
  let retries=60
  while ((retries--)); do
    # Some CNI may set NodePort on "main" node interface address (thus no localhost NodePort)
    # e.g. kube-router: https://github.com/cloudnativelabs/kube-router/pull/217
    docker exec ${node0} curl -m2 -s http://${NETCHECKER_HOST:?}:31081/api/v1/connectivity_check | grep successfully && break
    sleep 2
  done
  [ $retries -ge 0 ]
  pass_or_fail "$prefix: netcheck" || return 1
}

NODES=($(egrep ^kube-node hosts))
NETCHECKER_HOST=localhost

: ${OUTPUT_DIR:=./out}
mkdir -p ${OUTPUT_DIR}

# The SPEC file(s) must have two arrays as e.g.
# DISTROS=(debian centos)
# EXTRAS=(
#   'kube_network_plugin=calico'
#   'kube_network_plugin=flannel'
#   'kube_network_plugin=weave'
# )
# that will be tested in a "combinatory" way (e.g. from above there'll
# be 6 test runs), creating a sequenced <spec_filename>-nn.out with each output.
#
# Each $EXTRAS element will be whitespace split, and passed as --extra-vars
# to the main kubespray ansible-playbook run.

SPECS=${*:?Missing SPEC files, e.g. test-most_distros-some_CNIs.env}
for spec in ${SPECS}; do
  unset DISTROS EXTRAS
  echo "Loading file=${spec} ..."
  . ${spec} || continue
  : ${DISTROS:?} || continue
  echo "DISTROS:" "${DISTROS[@]}"
  echo "EXTRAS->"
  printf "  %s\n" "${EXTRAS[@]}"
  let n=1
  for distro in "${DISTROS[@]}"; do
    for extra in "${EXTRAS[@]:-NULL}"; do
      # Magic value to let this for loop run once:
      [[ ${extra} == NULL ]] && unset extra
      docker rm -f "${NODES[@]}"
      printf -v file_out "%s/%s-%02d.out" ${OUTPUT_DIR} ${spec} $((n++))
      {
        info "${distro}[${extra}] START: file_out=${file_out}"
        time test_distro ${distro} ${extra}
      } |& tee ${file_out}
      # sleeping for the sake of the human to verify if they want
      sleep 2m
    done
  done
done
egrep -H '^(....:|real)' $(ls -tr ${OUTPUT_DIR}/*.out)
11 contrib/dind/test-most_distros-some_CNIs.env Normal file
@@ -0,0 +1,11 @@
# Test spec file: used from ./run-test-distros.sh, will run
# each distro in $DISTROS overloading main kubespray ansible-playbook run
# Get all DISTROS from distro.yaml (shame no yaml parsing, but nuff anyway)
# DISTROS="${*:-$(egrep -o '^  \w+' group_vars/all/distro.yaml|paste -s)}"
DISTROS=(debian ubuntu centos fedora)

# Each line below will be added as --extra-vars to main playbook run
EXTRAS=(
  'kube_network_plugin=calico'
  'kube_network_plugin=weave'
)
6 contrib/dind/test-some_distros-kube_router_combo.env Normal file
@@ -0,0 +1,6 @@
DISTROS=(debian centos)
NETCHECKER_HOST=${NODES[0]}
EXTRAS=(
  'kube_network_plugin=kube-router {"kube_router_run_service_proxy":false}'
  'kube_network_plugin=kube-router {"kube_router_run_service_proxy":true}'
)
8 contrib/dind/test-some_distros-most_CNIs.env Normal file
@@ -0,0 +1,8 @@
DISTROS=(debian centos)
EXTRAS=(
  'kube_network_plugin=calico {}'
  'kube_network_plugin=canal {}'
  'kube_network_plugin=cilium {}'
  'kube_network_plugin=flannel {}'
  'kube_network_plugin=weave {}'
)
480 contrib/inventory_builder/inventory.py Normal file
@@ -0,0 +1,480 @@
#!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Usage: inventory.py ip1 [ip2 ...]
# Examples: inventory.py 10.10.1.3 10.10.1.4 10.10.1.5
#
# Advanced usage:
# Add another host after initial creation: inventory.py 10.10.1.5
# Add range of hosts: inventory.py 10.10.1.3-10.10.1.5
# Add hosts with different ip and access ip:
# inventory.py 10.0.0.1,192.168.10.1 10.0.0.2,192.168.10.2 10.0.0.3,192.168.1.3
# Add hosts with a specific hostname, ip, and optional access ip:
# inventory.py first,10.0.0.1,192.168.10.1 second,10.0.0.2 last,10.0.0.3
# Delete a host: inventory.py -10.10.1.3
# Delete a host by id: inventory.py -node1
#
# Load a YAML or JSON file with inventory data: inventory.py load hosts.yaml
# YAML file should be in the following format:
# group1:
#   host1:
#     ip: X.X.X.X
#     var: val
# group2:
#   host2:
#     ip: X.X.X.X

from collections import OrderedDict
from ipaddress import ip_address
from ruamel.yaml import YAML

import os
import re
import subprocess
import sys

ROLES = ['all', 'kube_control_plane', 'kube_node', 'etcd', 'k8s_cluster',
         'calico_rr']
PROTECTED_NAMES = ROLES
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames',
                      'load', 'add']
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
                   '0': False, 'no': False, 'false': False, 'off': False}
yaml = YAML()
yaml.Representer.add_representer(OrderedDict, yaml.Representer.represent_dict)


def get_var_as_bool(name, default):
    value = os.environ.get(name, '')
    return _boolean_states.get(value.lower(), default)

# Configurable as shell vars start


CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory/sample/hosts.yaml")
# Remove the reference of KUBE_MASTERS after some deprecation cycles.
KUBE_CONTROL_HOSTS = int(os.environ.get("KUBE_CONTROL_HOSTS",
                                        os.environ.get("KUBE_MASTERS", 2)))
# Reconfigures cluster distribution at scale
SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
MASSIVE_SCALE_THRESHOLD = int(os.environ.get("MASSIVE_SCALE_THRESHOLD", 200))

DEBUG = get_var_as_bool("DEBUG", True)
HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")
USE_REAL_HOSTNAME = get_var_as_bool("USE_REAL_HOSTNAME", False)

# Configurable as shell vars end


class KubesprayInventory(object):

    def __init__(self, changed_hosts=None, config_file=None):
        self.config_file = config_file
        self.yaml_config = {}
        loadPreviousConfig = False
        printHostnames = False
        # See whether there are any commands to process
        if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS:
            if changed_hosts[0] == "add":
                loadPreviousConfig = True
                changed_hosts = changed_hosts[1:]
            elif changed_hosts[0] == "print_hostnames":
                loadPreviousConfig = True
                printHostnames = True
            else:
                self.parse_command(changed_hosts[0], changed_hosts[1:])
                sys.exit(0)

        # If the user wants to remove a node, we need to load the config anyway
        if changed_hosts and changed_hosts[0][0] == "-":
            loadPreviousConfig = True

        if self.config_file and loadPreviousConfig:  # Load previous YAML file
            try:
                self.hosts_file = open(config_file, 'r')
                self.yaml_config = yaml.load(self.hosts_file)
            except OSError as e:
                # I am assuming we are catching "cannot open file" exceptions
                print(e)
                sys.exit(1)

        if printHostnames:
            self.print_hostnames()
            sys.exit(0)

        self.ensure_required_groups(ROLES)

        if changed_hosts:
            changed_hosts = self.range2ips(changed_hosts)
            self.hosts = self.build_hostnames(changed_hosts,
                                              loadPreviousConfig)
            self.purge_invalid_hosts(self.hosts.keys(), PROTECTED_NAMES)
            self.set_all(self.hosts)
            self.set_k8s_cluster()
            etcd_hosts_count = 3 if len(self.hosts.keys()) >= 3 else 1
            self.set_etcd(list(self.hosts.keys())[:etcd_hosts_count])
            if len(self.hosts) >= SCALE_THRESHOLD:
                self.set_kube_control_plane(list(self.hosts.keys())[
                    etcd_hosts_count:(etcd_hosts_count + KUBE_CONTROL_HOSTS)])
            else:
                self.set_kube_control_plane(
                    list(self.hosts.keys())[:KUBE_CONTROL_HOSTS])
            self.set_kube_node(self.hosts.keys())
            if len(self.hosts) >= SCALE_THRESHOLD:
                self.set_calico_rr(list(self.hosts.keys())[:etcd_hosts_count])
        else:  # Show help if no options
            self.show_help()
            sys.exit(0)

        self.write_config(self.config_file)

    def write_config(self, config_file):
        if config_file:
            with open(self.config_file, 'w') as f:
                yaml.dump(self.yaml_config, f)
        else:
            print("WARNING: Unable to save config. Make sure you set "
                  "CONFIG_FILE env var.")

    def debug(self, msg):
        if DEBUG:
            print("DEBUG: {0}".format(msg))

    def get_ip_from_opts(self, optstring):
        if 'ip' in optstring:
            return optstring['ip']
        else:
            raise ValueError("IP parameter not found in options")

    def ensure_required_groups(self, groups):
        for group in groups:
            if group == 'all':
                self.debug("Adding group {0}".format(group))
                if group not in self.yaml_config:
                    all_dict = OrderedDict([('hosts', OrderedDict({})),
                                            ('children', OrderedDict({}))])
                    self.yaml_config = {'all': all_dict}
            else:
                self.debug("Adding group {0}".format(group))
                if group not in self.yaml_config['all']['children']:
                    self.yaml_config['all']['children'][group] = {'hosts': {}}

    def get_host_id(self, host):
        '''Returns integer host ID (without padding) from a given hostname.'''
        try:
            short_hostname = host.split('.')[0]
            return int(re.findall("\\d+$", short_hostname)[-1])
        except IndexError:
            raise ValueError("Host name must end in an integer")

    # Keeps already specified hosts,
    # and adds or removes the hosts provided as an argument
    def build_hostnames(self, changed_hosts, loadPreviousConfig=False):
        existing_hosts = OrderedDict()
        highest_host_id = 0
        # Load already existing hosts from the YAML
        if loadPreviousConfig:
            try:
                for host in self.yaml_config['all']['hosts']:
                    # Read configuration of an existing host
                    hostConfig = self.yaml_config['all']['hosts'][host]
                    existing_hosts[host] = hostConfig
                    # If the existing host seems
                    # to have been created automatically, detect its ID
                    if host.startswith(HOST_PREFIX):
                        host_id = self.get_host_id(host)
                        if host_id > highest_host_id:
                            highest_host_id = host_id
            except Exception as e:
                # I am assuming we are catching automatically
                # created hosts without IDs
                print(e)
                sys.exit(1)

        # FIXME(mattymo): Fix condition where delete then add reuses highest id
        next_host_id = highest_host_id + 1
        next_host = ""

        all_hosts = existing_hosts.copy()
        for host in changed_hosts:
            # Delete the host from config if the hostname/IP has a "-" prefix
            if host[0] == "-":
                realhost = host[1:]
                if self.exists_hostname(all_hosts, realhost):
                    self.debug("Marked {0} for deletion.".format(realhost))
                    all_hosts.pop(realhost)
                elif self.exists_ip(all_hosts, realhost):
                    self.debug("Marked {0} for deletion.".format(realhost))
                    self.delete_host_by_ip(all_hosts, realhost)
            # If the host/argument starts with a digit,
            # then we assume it is an IP address
            elif host[0].isdigit():
                if ',' in host:
                    ip, access_ip = host.split(',')
                else:
                    ip = host
                    access_ip = host
                if self.exists_hostname(all_hosts, host):
                    self.debug("Skipping existing host {0}.".format(host))
                    continue
                elif self.exists_ip(all_hosts, ip):
                    self.debug("Skipping existing host {0}.".format(ip))
                    continue

                if USE_REAL_HOSTNAME:
                    cmd = ("ssh -oStrictHostKeyChecking=no "
                           + access_ip + " 'hostname -s'")
                    next_host = subprocess.check_output(cmd, shell=True)
                    next_host = next_host.strip().decode('ascii')
                else:
                    # Generates a hostname because we have only an IP address
                    next_host = "{0}{1}".format(HOST_PREFIX, next_host_id)
                    next_host_id += 1
                # Uses the automatically generated node name
                # in case we don't provide one.
                all_hosts[next_host] = {'ansible_host': access_ip,
                                        'ip': ip,
                                        'access_ip': access_ip}
            # If the host/argument starts with a letter, we assume it is a hostname
            elif host[0].isalpha():
                if ',' in host:
                    try:
                        hostname, ip, access_ip = host.split(',')
                    except Exception:
                        hostname, ip = host.split(',')
                        access_ip = ip
                if self.exists_hostname(all_hosts, host):
                    self.debug("Skipping existing host {0}.".format(host))
                    continue
                elif self.exists_ip(all_hosts, ip):
                    self.debug("Skipping existing host {0}.".format(ip))
                    continue
                all_hosts[hostname] = {'ansible_host': access_ip,
                                       'ip': ip,
                                       'access_ip': access_ip}
        return all_hosts

    # Expand IP ranges into individual addresses
    def range2ips(self, hosts):
        reworked_hosts = []

        def ips(start_address, end_address):
            try:
                # Python 3.x
                start = int(ip_address(start_address))
                end = int(ip_address(end_address))
            except Exception:
                # Python 2.7
                start = int(ip_address(str(start_address)))
                end = int(ip_address(str(end_address)))
            return [ip_address(ip).exploded for ip in range(start, end + 1)]

        for host in hosts:
            if '-' in host and not (host.startswith('-') or host[0].isalpha()):
                start, end = host.strip().split('-')
                try:
                    reworked_hosts.extend(ips(start, end))
                except ValueError:
                    raise Exception("Range of ip_addresses isn't valid")
            else:
                reworked_hosts.append(host)
        return reworked_hosts

    def exists_hostname(self, existing_hosts, hostname):
        return hostname in existing_hosts.keys()

    def exists_ip(self, existing_hosts, ip):
        for host_opts in existing_hosts.values():
            if ip == self.get_ip_from_opts(host_opts):
                return True
        return False

    def delete_host_by_ip(self, existing_hosts, ip):
        for hostname, host_opts in existing_hosts.items():
            if ip == self.get_ip_from_opts(host_opts):
                del existing_hosts[hostname]
                return
        raise ValueError("Unable to find host by IP: {0}".format(ip))

    def purge_invalid_hosts(self, hostnames, protected_names=[]):
        for role in self.yaml_config['all']['children']:
            if role != 'k8s_cluster' and self.yaml_config['all']['children'][role]['hosts']:  # noqa
                all_hosts = self.yaml_config['all']['children'][role]['hosts'].copy()  # noqa
                for host in all_hosts.keys():
                    if host not in hostnames and host not in protected_names:
                        self.debug(
                            "Host {0} removed from role {1}".format(host, role))  # noqa
                        del self.yaml_config['all']['children'][role]['hosts'][host]  # noqa
        # purge from all
        if self.yaml_config['all']['hosts']:
            all_hosts = self.yaml_config['all']['hosts'].copy()
            for host in all_hosts.keys():
                if host not in hostnames and host not in protected_names:
                    self.debug("Host {0} removed from role all".format(host))
                    del self.yaml_config['all']['hosts'][host]

    def add_host_to_group(self, group, host, opts=""):
        self.debug("adding host {0} to group {1}".format(host, group))
        if group == 'all':
            if self.yaml_config['all']['hosts'] is None:
                self.yaml_config['all']['hosts'] = {host: None}
            self.yaml_config['all']['hosts'][host] = opts
        elif group != 'k8s_cluster:children':
            if self.yaml_config['all']['children'][group]['hosts'] is None:
                self.yaml_config['all']['children'][group]['hosts'] = {
                    host: None}
            else:
                self.yaml_config['all']['children'][group]['hosts'][host] = None  # noqa

    def set_kube_control_plane(self, hosts):
        for host in hosts:
            self.add_host_to_group('kube_control_plane', host)

    def set_all(self, hosts):
        for host, opts in hosts.items():
            self.add_host_to_group('all', host, opts)

    def set_k8s_cluster(self):
        k8s_cluster = {'children': {'kube_control_plane': None,
                                    'kube_node': None}}
        self.yaml_config['all']['children']['k8s_cluster'] = k8s_cluster

    def set_calico_rr(self, hosts):
        for host in hosts:
            if host in self.yaml_config['all']['children']['kube_control_plane']:  # noqa
                self.debug("Not adding {0} to calico_rr group because it "
                           "conflicts with kube_control_plane "
                           "group".format(host))
                continue
            if host in self.yaml_config['all']['children']['kube_node']:
                self.debug("Not adding {0} to calico_rr group because it "
                           "conflicts with kube_node group".format(host))
                continue
            self.add_host_to_group('calico_rr', host)

    def set_kube_node(self, hosts):
        for host in hosts:
            if len(self.yaml_config['all']['hosts']) >= SCALE_THRESHOLD:
                if host in self.yaml_config['all']['children']['etcd']['hosts']:  # noqa
                    self.debug("Not adding {0} to kube_node group because of "
                               "scale deployment and host is in etcd "
                               "group.".format(host))
                    continue
            if len(self.yaml_config['all']['hosts']) >= MASSIVE_SCALE_THRESHOLD:  # noqa
                if host in self.yaml_config['all']['children']['kube_control_plane']['hosts']:  # noqa
                    self.debug("Not adding {0} to kube_node group because of "
                               "scale deployment and host is in "
                               "kube_control_plane group.".format(host))
                    continue
            self.add_host_to_group('kube_node', host)

    def set_etcd(self, hosts):
        for host in hosts:
            self.add_host_to_group('etcd', host)

    def load_file(self, files=None):
        '''Directly loads JSON to inventory.'''

        if not files:
            raise Exception("No input file specified.")

        import json

        for filename in list(files):
            # Try JSON
            try:
                with open(filename, 'r') as f:
                    data = json.load(f)
            except ValueError:
                raise Exception("Cannot read %s as JSON, or CSV", filename)

            self.ensure_required_groups(ROLES)
            self.set_k8s_cluster()
            for group, hosts in data.items():
                self.ensure_required_groups([group])
                for host, opts in hosts.items():
                    optstring = {'ansible_host': opts['ip'],
                                 'ip': opts['ip'],
                                 'access_ip': opts['ip']}
                    self.add_host_to_group('all', host, optstring)
                    self.add_host_to_group(group, host)
            self.write_config(self.config_file)

    def parse_command(self, command, args=None):
        if command == 'help':
            self.show_help()
        elif command == 'print_cfg':
            self.print_config()
        elif command == 'print_ips':
            self.print_ips()
        elif command == 'print_hostnames':
            self.print_hostnames()
        elif command == 'load':
            self.load_file(args)
        else:
            raise Exception("Invalid command specified.")

    def show_help(self):
        help_text = '''Usage: inventory.py ip1 [ip2 ...]
Examples: inventory.py 10.10.1.3 10.10.1.4 10.10.1.5

Available commands:
help - Display this message
print_cfg - Write inventory file to stdout
print_ips - Write a space-delimited list of IPs from "all" group
print_hostnames - Write a space-delimited list of Hostnames from "all" group
add - Adds specified hosts into an already existing inventory

Advanced usage:
Create new or overwrite old inventory file: inventory.py 10.10.1.5
Add another host after initial creation: inventory.py add 10.10.1.6
Add range of hosts: inventory.py 10.10.1.3-10.10.1.5
Add hosts with different ip and access ip: inventory.py 10.0.0.1,192.168.10.1 10.0.0.2,192.168.10.2 10.0.0.3,192.168.10.3
Add hosts with a specific hostname, ip, and optional access ip: first,10.0.0.1,192.168.10.1 second,10.0.0.2 last,10.0.0.3
Delete a host: inventory.py -10.10.1.3
Delete a host by id: inventory.py -node1

Configurable env vars:
DEBUG                   Enable debug printing. Default: True
CONFIG_FILE             File to write config to. Default: ./inventory/sample/hosts.yaml
HOST_PREFIX             Host prefix for generated hosts. Default: node
KUBE_CONTROL_HOSTS      Set the number of kube-control-planes. Default: 2
SCALE_THRESHOLD         Separate ETCD role if # of nodes >= 50
MASSIVE_SCALE_THRESHOLD Separate K8s control-plane and ETCD if # of nodes >= 200
'''  # noqa
        print(help_text)

    def print_config(self):
        yaml.dump(self.yaml_config, sys.stdout)

    def print_hostnames(self):
        print(' '.join(self.yaml_config['all']['hosts'].keys()))

    def print_ips(self):
        ips = []
        for host, opts in self.yaml_config['all']['hosts'].items():
            ips.append(self.get_ip_from_opts(opts))
        print(' '.join(ips))


def main(argv=None):
    if not argv:
        argv = sys.argv[1:]
    KubesprayInventory(argv, CONFIG_FILE)
    return 0


if __name__ == "__main__":
    sys.exit(main())
3 contrib/inventory_builder/requirements.txt Normal file
@@ -0,0 +1,3 @@
configparser>=3.3.0
ipaddress
ruamel.yaml>=0.15.88
3
contrib/inventory_builder/setup.cfg
Normal file
@@ -0,0 +1,3 @@
[metadata]
name = kubespray-inventory-builder
version = 0.1
29
contrib/inventory_builder/setup.py
Normal file
@@ -0,0 +1,29 @@
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools

# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
    import multiprocessing  # noqa
except ImportError:
    pass

setuptools.setup(
    setup_requires=[],
    pbr=False)
3
contrib/inventory_builder/test-requirements.txt
Normal file
@@ -0,0 +1,3 @@
hacking>=0.10.2
mock>=1.3.0
pytest>=2.8.0
595
contrib/inventory_builder/tests/test_inventory.py
Normal file
@@ -0,0 +1,595 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import inventory
from io import StringIO
import unittest
from unittest import mock

from collections import OrderedDict
import sys

path = "./contrib/inventory_builder/"
if path not in sys.path:
    sys.path.append(path)

import inventory  # noqa


class TestInventoryPrintHostnames(unittest.TestCase):

    @mock.patch('ruamel.yaml.YAML.load')
    def test_print_hostnames(self, load_mock):
        mock_io = mock.mock_open(read_data='')
        load_mock.return_value = OrderedDict({'all': {'hosts': {
            'node1': {'ansible_host': '10.90.0.2',
                      'ip': '10.90.0.2',
                      'access_ip': '10.90.0.2'},
            'node2': {'ansible_host': '10.90.0.3',
                      'ip': '10.90.0.3',
                      'access_ip': '10.90.0.3'}}}})
        with mock.patch('builtins.open', mock_io):
            with self.assertRaises(SystemExit) as cm:
                with mock.patch('sys.stdout', new_callable=StringIO) as stdout:
                    inventory.KubesprayInventory(
                        changed_hosts=["print_hostnames"],
                        config_file="file")
                    self.assertEqual("node1 node2\n", stdout.getvalue())
            self.assertEqual(cm.exception.code, 0)


class TestInventory(unittest.TestCase):
    @mock.patch('inventory.sys')
    def setUp(self, sys_mock):
        sys_mock.exit = mock.Mock()
        super(TestInventory, self).setUp()
        self.data = ['10.90.3.2', '10.90.3.3', '10.90.3.4']
        self.inv = inventory.KubesprayInventory()

    def test_get_ip_from_opts(self):
        optstring = {'ansible_host': '10.90.3.2',
                     'ip': '10.90.3.2',
                     'access_ip': '10.90.3.2'}
        expected = "10.90.3.2"
        result = self.inv.get_ip_from_opts(optstring)
        self.assertEqual(expected, result)

    def test_get_ip_from_opts_invalid(self):
        optstring = "notanaddr=value something random!chars:D"
        self.assertRaisesRegex(ValueError, "IP parameter not found",
                               self.inv.get_ip_from_opts, optstring)

    def test_ensure_required_groups(self):
        groups = ['group1', 'group2']
        self.inv.ensure_required_groups(groups)
        for group in groups:
            self.assertIn(group, self.inv.yaml_config['all']['children'])

    def test_get_host_id(self):
        hostnames = ['node99', 'no99de01', '01node01', 'node1.domain',
                     'node3.xyz123.aaa']
        expected = [99, 1, 1, 1, 3]
        for hostname, expected in zip(hostnames, expected):
            result = self.inv.get_host_id(hostname)
            self.assertEqual(expected, result)

    def test_get_host_id_invalid(self):
        bad_hostnames = ['node', 'no99de', '01node', 'node.111111']
        for hostname in bad_hostnames:
            self.assertRaisesRegex(ValueError, "Host name must end in an",
                                   self.inv.get_host_id, hostname)

    def test_build_hostnames_add_duplicate(self):
        changed_hosts = ['10.90.0.2']
        expected = OrderedDict([('node3',
                                 {'ansible_host': '10.90.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '10.90.0.2'})])
        self.inv.yaml_config['all']['hosts'] = expected
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_two(self):
        changed_hosts = ['10.90.0.2', '10.90.0.3']
        expected = OrderedDict([
            ('node1', {'ansible_host': '10.90.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '10.90.0.2'}),
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        self.inv.yaml_config['all']['hosts'] = OrderedDict()
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_three(self):
        changed_hosts = ['10.90.0.2', '10.90.0.3', '10.90.0.4']
        expected = OrderedDict([
            ('node1', {'ansible_host': '10.90.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '10.90.0.2'}),
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'}),
            ('node3', {'ansible_host': '10.90.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '10.90.0.4'})])
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_one(self):
        changed_hosts = ['10.90.0.2']
        expected = OrderedDict([('node1',
                                 {'ansible_host': '10.90.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '10.90.0.2'})])
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_delete_first(self):
        changed_hosts = ['-10.90.0.2']
        existing_hosts = OrderedDict([
            ('node1', {'ansible_host': '10.90.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '10.90.0.2'}),
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        self.inv.yaml_config['all']['hosts'] = existing_hosts
        expected = OrderedDict([
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    def test_build_hostnames_delete_by_hostname(self):
        changed_hosts = ['-node1']
        existing_hosts = OrderedDict([
            ('node1', {'ansible_host': '10.90.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '10.90.0.2'}),
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        self.inv.yaml_config['all']['hosts'] = existing_hosts
        expected = OrderedDict([
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    def test_exists_hostname_positive(self):
        hostname = 'node1'
        expected = True
        existing_hosts = OrderedDict([
            ('node1', {'ansible_host': '10.90.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '10.90.0.2'}),
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        result = self.inv.exists_hostname(existing_hosts, hostname)
        self.assertEqual(expected, result)

    def test_exists_hostname_negative(self):
        hostname = 'node99'
        expected = False
        existing_hosts = OrderedDict([
            ('node1', {'ansible_host': '10.90.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '10.90.0.2'}),
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        result = self.inv.exists_hostname(existing_hosts, hostname)
        self.assertEqual(expected, result)

    def test_exists_ip_positive(self):
        ip = '10.90.0.2'
        expected = True
        existing_hosts = OrderedDict([
            ('node1', {'ansible_host': '10.90.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '10.90.0.2'}),
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        result = self.inv.exists_ip(existing_hosts, ip)
        self.assertEqual(expected, result)

    def test_exists_ip_negative(self):
        ip = '10.90.0.200'
        expected = False
        existing_hosts = OrderedDict([
            ('node1', {'ansible_host': '10.90.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '10.90.0.2'}),
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        result = self.inv.exists_ip(existing_hosts, ip)
        self.assertEqual(expected, result)

    def test_delete_host_by_ip_positive(self):
        ip = '10.90.0.2'
        expected = OrderedDict([
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        existing_hosts = OrderedDict([
            ('node1', {'ansible_host': '10.90.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '10.90.0.2'}),
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        self.inv.delete_host_by_ip(existing_hosts, ip)
        self.assertEqual(expected, existing_hosts)

    def test_delete_host_by_ip_negative(self):
        ip = '10.90.0.200'
        existing_hosts = OrderedDict([
            ('node1', {'ansible_host': '10.90.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '10.90.0.2'}),
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        self.assertRaisesRegex(ValueError, "Unable to find host",
                               self.inv.delete_host_by_ip, existing_hosts, ip)

    def test_purge_invalid_hosts(self):
        proper_hostnames = ['node1', 'node2']
        bad_host = 'doesnotbelong2'
        existing_hosts = OrderedDict([
            ('node1', {'ansible_host': '10.90.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '10.90.0.2'}),
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'}),
            ('doesnotbelong2', {'whateveropts=ilike'})])
        self.inv.yaml_config['all']['hosts'] = existing_hosts
        self.inv.purge_invalid_hosts(proper_hostnames)
        self.assertNotIn(
            bad_host, self.inv.yaml_config['all']['hosts'].keys())

    def test_add_host_to_group(self):
        group = 'etcd'
        host = 'node1'
        opts = {'ip': '10.90.0.2'}

        self.inv.add_host_to_group(group, host, opts)
        self.assertEqual(
            self.inv.yaml_config['all']['children'][group]['hosts'].get(host),
            None)

    def test_set_kube_control_plane(self):
        group = 'kube_control_plane'
        host = 'node1'

        self.inv.set_kube_control_plane([host])
        self.assertIn(
            host, self.inv.yaml_config['all']['children'][group]['hosts'])

    def test_set_all(self):
        hosts = OrderedDict([
            ('node1', 'opt1'),
            ('node2', 'opt2')])

        self.inv.set_all(hosts)
        for host, opt in hosts.items():
            self.assertEqual(
                self.inv.yaml_config['all']['hosts'].get(host), opt)

    def test_set_k8s_cluster(self):
        group = 'k8s_cluster'
        expected_hosts = ['kube_node', 'kube_control_plane']

        self.inv.set_k8s_cluster()
        for host in expected_hosts:
            self.assertIn(
                host,
                self.inv.yaml_config['all']['children'][group]['children'])

    def test_set_kube_node(self):
        group = 'kube_node'
        host = 'node1'

        self.inv.set_kube_node([host])
        self.assertIn(
            host, self.inv.yaml_config['all']['children'][group]['hosts'])

    def test_set_etcd(self):
        group = 'etcd'
        host = 'node1'

        self.inv.set_etcd([host])
        self.assertIn(
            host, self.inv.yaml_config['all']['children'][group]['hosts'])

    def test_scale_scenario_one(self):
        num_nodes = 50
        hosts = OrderedDict()

        for hostid in range(1, num_nodes+1):
            hosts["node" + str(hostid)] = ""

        self.inv.set_all(hosts)
        self.inv.set_etcd(list(hosts.keys())[0:3])
        self.inv.set_kube_control_plane(list(hosts.keys())[0:2])
        self.inv.set_kube_node(hosts.keys())
        for h in range(3):
            self.assertFalse(
                list(hosts.keys())[h] in
                self.inv.yaml_config['all']['children']['kube_node']['hosts'])

    def test_scale_scenario_two(self):
        num_nodes = 500
        hosts = OrderedDict()

        for hostid in range(1, num_nodes+1):
            hosts["node" + str(hostid)] = ""

        self.inv.set_all(hosts)
        self.inv.set_etcd(list(hosts.keys())[0:3])
        self.inv.set_kube_control_plane(list(hosts.keys())[3:5])
        self.inv.set_kube_node(hosts.keys())
        for h in range(5):
            self.assertFalse(
                list(hosts.keys())[h] in
                self.inv.yaml_config['all']['children']['kube_node']['hosts'])

    def test_range2ips_range(self):
        changed_hosts = ['10.90.0.2', '10.90.0.4-10.90.0.6', '10.90.0.8']
        expected = ['10.90.0.2',
                    '10.90.0.4',
                    '10.90.0.5',
                    '10.90.0.6',
                    '10.90.0.8']
        result = self.inv.range2ips(changed_hosts)
        self.assertEqual(expected, result)

    def test_range2ips_incorrect_range(self):
        host_range = ['10.90.0.4-a.9b.c.e']
        self.assertRaisesRegex(Exception, "Range of ip_addresses isn't valid",
                               self.inv.range2ips, host_range)

    def test_build_hostnames_create_with_one_different_ips(self):
        changed_hosts = ['10.90.0.2,192.168.0.2']
        expected = OrderedDict([('node1',
                                 {'ansible_host': '192.168.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '192.168.0.2'})])
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_create_with_two_different_ips(self):
        changed_hosts = ['10.90.0.2,192.168.0.2', '10.90.0.3,192.168.0.3']
        expected = OrderedDict([
            ('node1', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node2', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'})])
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_create_with_three_different_ips(self):
        changed_hosts = ['10.90.0.2,192.168.0.2',
                         '10.90.0.3,192.168.0.3',
                         '10.90.0.4,192.168.0.4']
        expected = OrderedDict([
            ('node1', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node2', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node3', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'})])
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_overwrite_one_with_different_ips(self):
        changed_hosts = ['10.90.0.2,192.168.0.2']
        expected = OrderedDict([('node1',
                                 {'ansible_host': '192.168.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '192.168.0.2'})])
        existing = OrderedDict([('node5',
                                 {'ansible_host': '192.168.0.5',
                                  'ip': '10.90.0.5',
                                  'access_ip': '192.168.0.5'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_overwrite_three_with_different_ips(self):
        changed_hosts = ['10.90.0.2,192.168.0.2']
        expected = OrderedDict([('node1',
                                 {'ansible_host': '192.168.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '192.168.0.2'})])
        existing = OrderedDict([
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'}),
            ('node5', {'ansible_host': '192.168.0.5',
                       'ip': '10.90.0.5',
                       'access_ip': '192.168.0.5'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_different_ips_add_duplicate(self):
        changed_hosts = ['10.90.0.2,192.168.0.2']
        expected = OrderedDict([('node3',
                                 {'ansible_host': '192.168.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '192.168.0.2'})])
        existing = expected
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_two_different_ips_into_one_existing(self):
        changed_hosts = ['10.90.0.3,192.168.0.3', '10.90.0.4,192.168.0.4']
        expected = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'})])

        existing = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_two_different_ips_into_two_existing(self):
        changed_hosts = ['10.90.0.4,192.168.0.4', '10.90.0.5,192.168.0.5']
        expected = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'}),
            ('node5', {'ansible_host': '192.168.0.5',
                       'ip': '10.90.0.5',
                       'access_ip': '192.168.0.5'})])

        existing = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_two_different_ips_into_three_existing(self):
        changed_hosts = ['10.90.0.5,192.168.0.5', '10.90.0.6,192.168.0.6']
        expected = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'}),
            ('node5', {'ansible_host': '192.168.0.5',
                       'ip': '10.90.0.5',
                       'access_ip': '192.168.0.5'}),
            ('node6', {'ansible_host': '192.168.0.6',
                       'ip': '10.90.0.6',
                       'access_ip': '192.168.0.6'})])

        existing = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    # Add two IP addresses into a config that has
    # three already defined IP addresses. One of the IP addresses
    # is a duplicate.
    def test_build_hostnames_add_two_duplicate_one_overlap(self):
        changed_hosts = ['10.90.0.4,192.168.0.4', '10.90.0.5,192.168.0.5']
        expected = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'}),
            ('node5', {'ansible_host': '192.168.0.5',
                       'ip': '10.90.0.5',
                       'access_ip': '192.168.0.5'})])

        existing = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    # Add two duplicate IP addresses into a config that has
    # three already defined IP addresses
    def test_build_hostnames_add_two_duplicate_two_overlap(self):
        changed_hosts = ['10.90.0.3,192.168.0.3', '10.90.0.4,192.168.0.4']
        expected = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'})])

        existing = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)
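The `test_range2ips_*` cases above pin down the inclusive range expansion the builder performs for inputs like `10.90.0.4-10.90.0.6`. A standalone sketch of that behavior using only the standard library — `expand` is a hypothetical helper, not the builder's actual `range2ips`:

```python
import ipaddress

def expand(ip_range):
    # '10.90.0.4-10.90.0.6' -> ['10.90.0.4', '10.90.0.5', '10.90.0.6']
    low, high = (ipaddress.IPv4Address(part) for part in ip_range.split('-'))
    return [str(ipaddress.IPv4Address(n))
            for n in range(int(low), int(high) + 1)]

print(expand('10.90.0.4-10.90.0.6'))
```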
34
contrib/inventory_builder/tox.ini
Normal file
@@ -0,0 +1,34 @@
[tox]
minversion = 1.6
skipsdist = True
envlist = pep8

[testenv]
allowlist_externals = py.test
usedevelop = True
deps =
    -r{toxinidir}/requirements.txt
    -r{toxinidir}/test-requirements.txt
setenv = VIRTUAL_ENV={envdir}
passenv =
    http_proxy
    HTTP_PROXY
    https_proxy
    HTTPS_PROXY
    no_proxy
    NO_PROXY
commands = pytest -vv #{posargs:./tests}

[testenv:pep8]
usedevelop = False
allowlist_externals = bash
commands =
    bash -c "find {toxinidir}/* -type f -name '*.py' -print0 | xargs -0 flake8"

[testenv:venv]
commands = {posargs}

[flake8]
show-source = true
builtins = _
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg
@@ -1,8 +1,8 @@
---
- name: Prepare Hypervisor to later install kubespray VMs
  hosts: localhost
  gather_facts: false
  become: true
  gather_facts: False
  become: yes
  vars:
    bootstrap_os: none
  roles:

@@ -11,12 +11,12 @@

- name: Install required packages
  apt:
    upgrade: true
    update_cache: true
    upgrade: yes
    update_cache: yes
    cache_valid_time: 3600
    name: "{{ item }}"
    state: present
    install_recommends: false
    install_recommends: no
  with_items:
    - dnsutils
    - ntp

@@ -20,7 +20,7 @@
      br-netfilter
    owner: root
    group: root
    mode: "0644"
    mode: 0644
  when: br_netfilter is defined


@@ -30,7 +30,7 @@
    value: 1
    sysctl_file: "{{ sysctl_file_path }}"
    state: present
    reload: true
    reload: yes

- name: Set bridge-nf-call-{arptables,iptables} to 0
  ansible.posix.sysctl:
@@ -38,7 +38,7 @@
    state: present
    value: 0
    sysctl_file: "{{ sysctl_file_path }}"
    reload: true
    reload: yes
  with_items:
    - net.bridge.bridge-nf-call-arptables
    - net.bridge.bridge-nf-call-ip6tables

@@ -11,7 +11,7 @@
    state: directory
    owner: "{{ k8s_deployment_user }}"
    group: "{{ k8s_deployment_user }}"
    mode: "0700"
    mode: 0700

- name: Configure sudo for deployment user
  copy:
@@ -20,13 +20,13 @@
    dest: "/etc/sudoers.d/55-k8s-deployment"
    owner: root
    group: root
    mode: "0644"
    mode: 0644

- name: Write private SSH key
  copy:
    src: "{{ k8s_deployment_user_pkey_path }}"
    dest: "/home/{{ k8s_deployment_user }}/.ssh/id_rsa"
    mode: "0400"
    mode: 0400
    owner: "{{ k8s_deployment_user }}"
    group: "{{ k8s_deployment_user }}"
  when: k8s_deployment_user_pkey_path is defined
@@ -41,7 +41,7 @@
- name: Fix ssh-pub-key permissions
  file:
    path: "/home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
    mode: "0600"
    mode: 0600
    owner: "{{ k8s_deployment_user }}"
    group: "{{ k8s_deployment_user }}"
  when: k8s_deployment_user_pkey_path is defined

@@ -14,7 +14,7 @@
  file:
    path: "{{ item }}"
    state: directory
    mode: "0755"
    mode: 0755
  become: false
  loop:
    - "{{ playbook_dir }}/plugins/mitogen"
@@ -25,7 +25,7 @@
    url: "{{ mitogen_url }}"
    dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
    validate_certs: true
    mode: "0644"
    mode: 0644

- name: Extract archive
  unarchive:
@@ -40,7 +40,7 @@
- name: Add strategy to ansible.cfg
  community.general.ini_file:
    path: ansible.cfg
    mode: "0644"
    mode: 0644
    section: "{{ item.section | d('defaults') }}"
    option: "{{ item.option }}"
    value: "{{ item.value }}"
|
||||
You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy).
|
||||
|
||||
```yaml
|
||||
glusterfs_ppa_use: true
|
||||
glusterfs_ppa_use: yes
|
||||
glusterfs_ppa_version: "3.5"
|
||||
```
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
---
# For Ubuntu.
glusterfs_default_release: ""
glusterfs_ppa_use: true
glusterfs_ppa_use: yes
glusterfs_ppa_version: "4.1"

# Gluster configuration.

@@ -15,7 +15,7 @@
  file:
    path: "{{ item }}"
    state: directory
    mode: "0775"
    mode: 0775
  with_items:
    - "{{ gluster_mount_dir }}"
  when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined

@@ -3,7 +3,7 @@
  apt_repository:
    repo: 'ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}'
    state: present
    update_cache: true
    update_cache: yes
  register: glusterfs_ppa_added
  when: glusterfs_ppa_use


@@ -1,7 +1,7 @@
---
# For Ubuntu.
glusterfs_default_release: ""
glusterfs_ppa_use: true
glusterfs_ppa_use: yes
glusterfs_ppa_version: "3.12"

# Gluster configuration.

@@ -43,13 +43,13 @@
  service:
    name: "{{ glusterfs_daemon }}"
    state: started
    enabled: true
    enabled: yes

- name: Ensure Gluster brick and mount directories exist.
  file:
    path: "{{ item }}"
    state: directory
    mode: "0775"
    mode: 0775
  with_items:
    - "{{ gluster_brick_dir }}"
    - "{{ gluster_mount_dir }}"
@@ -62,7 +62,7 @@
    replicas: "{{ groups['gfs-cluster'] | length }}"
    cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
    host: "{{ inventory_hostname }}"
    force: true
    force: yes
  run_once: true
  when: groups['gfs-cluster'] | length > 1

@@ -73,7 +73,7 @@
    brick: "{{ gluster_brick_dir }}"
    cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
    host: "{{ inventory_hostname }}"
    force: true
    force: yes
  run_once: true
  when: groups['gfs-cluster'] | length <= 1

@@ -101,7 +101,7 @@
  template:
    dest: "{{ gluster_mount_dir }}/.test-file.txt"
    src: test-file.txt
    mode: "0644"
    mode: 0644
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

- name: Unmount glusterfs

@@ -3,7 +3,7 @@
  apt_repository:
    repo: 'ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}'
    state: present
    update_cache: true
    update_cache: yes
  register: glusterfs_ppa_added
  when: glusterfs_ppa_use


@@ -3,7 +3,7 @@
  template:
    src: "{{ item.file }}"
    dest: "{{ kube_config_dir }}/{{ item.dest }}"
    mode: "0644"
    mode: 0644
  with_items:
    - { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
    - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}

@@ -6,6 +6,6 @@

- name: Teardown disks in heketi
  hosts: heketi-node
  become: true
  become: yes
  roles:
    - { role: tear-down-disks }

@@ -4,7 +4,7 @@
  template:
    src: "heketi-bootstrap.json.j2"
    dest: "{{ kube_config_dir }}/heketi-bootstrap.json"
    mode: "0640"
    mode: 0640
  register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi Bootstrap"
  kube:

@@ -10,7 +10,7 @@
  template:
    src: "topology.json.j2"
    dest: "{{ kube_config_dir }}/topology.json"
    mode: "0644"
    mode: 0644
- name: "Copy topology configuration into container."
  changed_when: false
  command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"

@@ -3,7 +3,7 @@
  template:
    src: "glusterfs-daemonset.json.j2"
    dest: "{{ kube_config_dir }}/glusterfs-daemonset.json"
    mode: "0644"
    mode: 0644
  become: true
  register: "rendering"
- name: "Kubernetes Apps | Install and configure GlusterFS daemonset"
@@ -33,7 +33,7 @@
  template:
    src: "heketi-service-account.json.j2"
    dest: "{{ kube_config_dir }}/heketi-service-account.json"
    mode: "0644"
    mode: 0644
  become: true
  register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi Service Account"

@@ -4,7 +4,7 @@
  template:
    src: "heketi-deployment.json.j2"
    dest: "{{ kube_config_dir }}/heketi-deployment.json"
    mode: "0644"
    mode: 0644
  register: "rendering"

- name: "Kubernetes Apps | Install and configure Heketi"

@@ -28,7 +28,7 @@
  template:
    src: "heketi.json.j2"
    dest: "{{ kube_config_dir }}/heketi.json"
    mode: "0644"
    mode: 0644

- name: "Deploy Heketi config secret"
  when: "secret_state.stdout | length == 0"

@@ -5,7 +5,7 @@
  template:
    src: "heketi-storage.json.j2"
    dest: "{{ kube_config_dir }}/heketi-storage.json"
    mode: "0644"
    mode: 0644
  register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi Storage"
  kube:
@@ -16,7 +16,7 @@
  template:
    src: "storageclass.yml.j2"
    dest: "{{ kube_config_dir }}/storageclass.yml"
    mode: "0644"
    mode: 0644
  register: "rendering"
- name: "Kubernetes Apps | Install and configure Storage Class"
  kube:

@@ -10,7 +10,7 @@
  template:
    src: "topology.json.j2"
    dest: "{{ kube_config_dir }}/topology.json"
    mode: "0644"
    mode: 0644
- name: "Copy topology configuration into container." # noqa no-handler
  when: "rendering.changed"
  command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"

@@ -67,23 +67,3 @@ Step(2) download files and run nginx container
```

When the nginx container is running, it can be accessed through <http://127.0.0.1:8080/>.

## upload2artifactory.py

After the steps above, this script can recursively upload each file under a directory to a generic repository in Artifactory.

Environment Variables:

- USERNAME -- At least permissions 'Deploy/Cache' and 'Delete/Overwrite'.
- TOKEN -- Generate this with 'Set Me Up' in your user profile.
- BASE_URL -- The URL including the repository name.

Step(3) (optional) upload files to Artifactory

```shell
cd kubespray/contrib/offline/offline-files
export USERNAME=admin
export TOKEN=...
export BASE_URL=https://artifactory.example.com/artifactory/a-generic-repo/
./upload2artifactory.py
```

@@ -1,7 +1,7 @@
---
- name: Collect container images for offline deployment
  hosts: localhost
  become: false
  become: no

  roles:
    # Just load default variables from roles.
@@ -16,7 +16,7 @@
  template:
    src: ./contrib/offline/temp/{{ item }}.list.template
    dest: ./contrib/offline/temp/{{ item }}.list
    mode: "0644"
    mode: 0644
  with_items:
    - files
    - images
@@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash

OPTION=$1
CURRENT_DIR=$(cd $(dirname $0); pwd)
@@ -118,8 +118,6 @@ function register_container_images() {
        cp ${CURRENT_DIR}/registries.conf ${TEMP_DIR}/registries.conf
        sed -i s@"HOSTNAME"@"$(hostname)"@ ${TEMP_DIR}/registries.conf
        sudo cp ${TEMP_DIR}/registries.conf /etc/containers/registries.conf
    elif [ "$(uname)" == "Darwin" ]; then
        echo "This is a Mac, no configuration changes are required"
    else
        echo "runtime package (docker-ce, podman, nerdctl, etc.) should be installed"
        exit 1
@@ -1,65 +0,0 @@
#!/usr/bin/env python3
"""This is a helper script to manage-offline-files.sh.

After running manage-offline-files.sh, you can run upload2artifactory.py
to recursively upload each file to a generic repository in Artifactory.

This script recurses the current working directory and is intended to
be started from 'kubespray/contrib/offline/offline-files'

Environment Variables:
    USERNAME -- At least permissions 'Deploy/Cache' and 'Delete/Overwrite'.
    TOKEN -- Generate this with 'Set Me Up' in your user profile.
    BASE_URL -- The URL including the repository name.

"""
import os
import urllib.request
import base64


def upload_file(file_path, destination_url, username, token):
    """Helper function to upload a single file"""
    try:
        with open(file_path, 'rb') as f:
            file_data = f.read()

        request = urllib.request.Request(destination_url, data=file_data, method='PUT')  # NOQA
        auth_header = base64.b64encode(f"{username}:{token}".encode()).decode()
        request.add_header("Authorization", f"Basic {auth_header}")

        with urllib.request.urlopen(request) as response:
            if response.status in [200, 201]:
                print(f"Success: Uploaded {file_path}")
            else:
                print(f"Failed: {response.status} {response.read().decode('utf-8')}")  # NOQA
    except urllib.error.HTTPError as e:
        print(f"HTTPError: {e.code} {e.reason} for {file_path}")
    except urllib.error.URLError as e:
        print(f"URLError: {e.reason} for {file_path}")
    except OSError as e:
        print(f"OSError: {e.strerror} for {file_path}")


def upload_files(base_url, username, token):
    """ Recurse current dir and upload each file using urllib.request """
    for root, _, files in os.walk(os.getcwd()):
        for file in files:
            file_path = os.path.join(root, file)
            relative_path = os.path.relpath(file_path, os.getcwd())
            destination_url = f"{base_url}/{relative_path}"

            print(f"Uploading {file_path} to {destination_url}")
            upload_file(file_path, destination_url, username, token)


if __name__ == "__main__":
    a_user = os.getenv("USERNAME")
    a_token = os.getenv("TOKEN")
    a_url = os.getenv("BASE_URL")
    if not a_user or not a_token or not a_url:
        print(
            "Error: Environment variables USERNAME, TOKEN, and BASE_URL must be set."  # NOQA
        )
        exit()
    upload_files(a_url, a_user, a_token)
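The script above (removed by this change) boils down to an HTTP PUT with basic auth. A minimal standalone sketch of that pattern — the URL and credentials are placeholders, not values from this repository:

```python
import base64
import urllib.request

# Hypothetical target; any Artifactory generic repo accepts PUT deploys.
url = "https://artifactory.example.com/artifactory/a-generic-repo/hello.txt"
auth = base64.b64encode(b"admin:changeme").decode()

req = urllib.request.Request(url, data=b"hello", method="PUT")
req.add_header("Authorization", f"Basic {auth}")
with urllib.request.urlopen(req) as resp:
    print(resp.status)  # 201 Created on a successful deploy
```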
@@ -7,17 +7,17 @@
  service_facts:

- name: Disable service firewalld
  systemd_service:
  systemd:
    name: firewalld
    state: stopped
    enabled: false
    enabled: no
  when:
    "'firewalld.service' in services and services['firewalld.service'].status != 'not-found'"

- name: Disable service ufw
  systemd_service:
  systemd:
    name: ufw
    state: stopped
    enabled: false
    enabled: no
  when:
    "'ufw.service' in services and services['ufw.service'].status != 'not-found'"

62
contrib/packaging/rpm/kubespray.spec
Normal file
@@ -0,0 +1,62 @@
%global srcname kubespray

%{!?upstream_version: %global upstream_version %{version}%{?milestone}}

Name:           kubespray
Version:        master
Release:        %(git describe | sed -r 's/v(\S+-?)-(\S+)-(\S+)/\1.dev\2+\3/')
Summary:        Ansible modules for installing Kubernetes

Group:          System Environment/Libraries
License:        ASL 2.0
Url:            https://github.com/kubernetes-sigs/kubespray
Source0:        https://github.com/kubernetes-sigs/kubespray/archive/%{upstream_version}.tar.gz#/%{name}-%{release}.tar.gz

BuildArch:      noarch
BuildRequires:  git
BuildRequires:  python2
BuildRequires:  python2-devel
BuildRequires:  python2-setuptools
BuildRequires:  python-d2to1
BuildRequires:  python2-pbr

Requires:       ansible >= 2.5.0
Requires:       python-jinja2 >= 2.10
Requires:       python-netaddr
Requires:       python-pbr

%description

Ansible-kubespray is a set of Ansible modules and playbooks for
installing a Kubernetes cluster. If you have questions, join us
on https://slack.k8s.io, channel '#kubespray'.

%prep
%autosetup -n %{name}-%{upstream_version} -S git


%build
export PBR_VERSION=%{release}
%{__python2} setup.py build bdist_rpm


%install
export PBR_VERSION=%{release}
export SKIP_PIP_INSTALL=1
%{__python2} setup.py install --skip-build --root %{buildroot} bdist_rpm


%files
%doc %{_docdir}/%{name}/README.md
%doc %{_docdir}/%{name}/inventory/sample/hosts.ini
%config %{_sysconfdir}/%{name}/ansible.cfg
%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/all.yml
%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s_cluster.yml
%license %{_docdir}/%{name}/LICENSE
%{python2_sitelib}/%{srcname}-%{release}-py%{python2_version}.egg-info
%{_datarootdir}/%{name}/roles/
%{_datarootdir}/%{name}/playbooks/
%defattr(-,root,root)


%changelog
3
contrib/terraform/OWNERS
Normal file
@@ -0,0 +1,3 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
  - miouge1
@@ -12,8 +12,8 @@ ${list_master}
${list_worker}

[k8s_cluster:children]
kube_control_plane
kube_node
kube-master
kube-node

[k8s_cluster:vars]
network_id=${network_id}

@@ -72,7 +72,6 @@ The setup looks like following

```bash
./generate-inventory.sh > sample-inventory/inventory.ini
```

* Export Variables:

@@ -60,17 +60,17 @@ You can create many different kubernetes topologies by setting the number of
different classes of hosts. For each class there are options for allocating
floating IP addresses or not.

- Control plane nodes with etcd
- Control plane nodes without etcd
- Master nodes with etcd
- Master nodes without etcd
- Standalone etcd hosts
- Kubernetes worker nodes

Note that the Ansible script will report an invalid configuration if you wind up
with an even number of etcd instances since that is not a valid configuration. This
restriction includes standalone etcd nodes that are deployed in a cluster along with
control plane nodes with etcd replicas. As an example, if you have three control plane
nodes with etcd replicas and three standalone etcd nodes, the script will fail since
there are now six total etcd replicas.
master nodes with etcd replicas. As an example, if you have three master nodes with
etcd replicas and three standalone etcd nodes, the script will fail since there are
now six total etcd replicas.
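A quick sanity check of the parity rule described in the paragraph above, as a hypothetical helper (the real check lives in the Ansible preflight, not in this file):

```python
def etcd_member_count_valid(control_plane_with_etcd, standalone_etcd):
    # etcd wants an odd number of members for quorum, so an even total fails.
    return (control_plane_with_etcd + standalone_etcd) % 2 == 1

print(etcd_member_count_valid(3, 3))  # False: six replicas is rejected
print(etcd_member_count_valid(3, 0))  # True: three replicas is fine
```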

### GlusterFS shared file system

@@ -258,8 +258,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|`bastion_fips` | A list of floating IPs that you have already pre-allocated; they will be attached to bastion node instead of creating new random floating IPs. |
|`external_net` | UUID of the external network that will be routed to |
|`flavor_k8s_master`,`flavor_k8s_node`,`flavor_etcd`, `flavor_bastion`,`flavor_gfs_node` | Flavor depends on your openstack installation, you can get available flavor IDs through `openstack flavor list` |
|`image`,`image_gfs`, `image_master` | Name of the image to use in provisioning the compute resources. Should already be loaded into glance. |
|`image_uuid`,`image_gfs_uuid`, `image_master_uuid` | UUID of the image to use in provisioning the compute resources. Should already be loaded into glance. |
|`image`,`image_gfs` | Name of the image to use in provisioning the compute resources. Should already be loaded into glance. |
|`ssh_user`,`ssh_user_gfs` | The username to ssh into the image with. This usually depends on the image you have selected |
|`public_key_path` | Path on your local workstation to the public key file you wish to use in creating the key pairs |
|`number_of_k8s_masters`, `number_of_k8s_masters_no_floating_ip` | Number of nodes that serve as both master and etcd. These can be provisioned with or without floating IP addresses|
@@ -300,10 +299,10 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|`force_null_port_security` | Set `null` instead of `true` or `false` for `port_security`. `false` by default |
|`k8s_nodes` | Map containing worker node definition, see explanation below |
|`k8s_masters` | Map containing master node definition, see explanation for k8s_nodes and `sample-inventory/cluster.tfvars` |
|`k8s_master_loadbalancer_enabled` | Enable and use an Octavia load balancer for the K8s master nodes |
|`k8s_master_loadbalancer_listener_port` | Define via which port the K8s Api should be exposed. `6443` by default |
|`k8s_master_loadbalancer_server_port` | Define via which port the K8S api is available on the master nodes. `6443` by default |
|`k8s_master_loadbalancer_public_ip` | Specify if an existing floating IP should be used for the load balancer. A new floating IP is assigned by default |
| `k8s_master_loadbalancer_enabled`| Enable and use an Octavia load balancer for the K8s master nodes |
| `k8s_master_loadbalancer_listener_port` | Define via which port the K8s Api should be exposed. `6443` by default |
| `k8s_master_loadbalancer_server_port` | Define via which port the K8S api is available on the mas. `6443` by default |
| `k8s_master_loadbalancer_public_ip` | Specify if an existing floating IP should be used for the load balancer. A new floating IP is assigned by default |

##### k8s_nodes

@@ -318,8 +317,7 @@ k8s_nodes:
  node-name:
    az: string  # Name of the AZ
    flavor: string  # Flavor ID to use
    floating_ip: bool  # If floating IPs should be used or not
    reserved_floating_ip: string  # If floating_ip is true use existing floating IP, if reserved_floating_ip is an empty string and floating_ip is true, a new floating IP will be created
    floating_ip: bool  # If floating IPs should be created or not
    extra_groups: string  # (optional) Additional groups to add for kubespray, defaults to no groups
    image_id: string  # (optional) Image ID to use, defaults to var.image_id or var.image
    root_volume_size_in_gb: number  # (optional) Size of the block storage to use as root disk, defaults to var.node_root_volume_size_in_gb or to use volume from flavor otherwise
@@ -621,7 +619,7 @@ Edit `inventory/$CLUSTER/group_vars/k8s_cluster/k8s_cluster.yml`:

- Set variable **kube_network_plugin** to your desired networking plugin.
  - **flannel** works out-of-the-box
  - **calico** requires [configuring OpenStack Neutron ports](/docs/cloud_controllers/openstack.md) to allow service and pod subnets
  - **calico** requires [configuring OpenStack Neutron ports](/docs/cloud_providers/openstack.md) to allow service and pod subnets

```yml
# Choose network plugin (calico, weave or flannel)
@@ -89,15 +89,11 @@ variable "k8s_node_fips" {
}

variable "k8s_masters_fips" {
  type = map(object({
    address = string
  }))
  type = map
}

variable "k8s_nodes_fips" {
  type = map(object({
    address = string
  }))
  type = map
}

variable "bastion_fips" {
@@ -140,9 +136,8 @@ variable "k8s_masters" {
  type = map(object({
    az = string
    flavor = string
    etcd = bool
    floating_ip = bool
    reserved_floating_ip = optional(string)
    etcd = bool
    image_id = optional(string)
    root_volume_size_in_gb = optional(number)
    volume_type = optional(string)
@@ -155,7 +150,6 @@ variable "k8s_nodes" {
    az = string
    flavor = string
    floating_ip = bool
    reserved_floating_ip = optional(string)
    extra_groups = optional(string)
    image_id = optional(string)
    root_volume_size_in_gb = optional(number)

@@ -15,7 +15,7 @@ resource "openstack_networking_floatingip_v2" "k8s_master" {
}

resource "openstack_networking_floatingip_v2" "k8s_masters" {
  for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 ? { for key, value in var.k8s_masters : key => value if value.floating_ip && (lookup(value, "reserved_floating_ip", "") == "") } : tomap({})
  for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 ? { for key, value in var.k8s_masters : key => value if value.floating_ip } : {}
  pool = var.floatingip_pool
  depends_on = [null_resource.dummy_dependency]
}
@@ -40,7 +40,7 @@ resource "openstack_networking_floatingip_v2" "bastion" {
}

resource "openstack_networking_floatingip_v2" "k8s_nodes" {
  for_each = var.number_of_k8s_nodes == 0 ? { for key, value in var.k8s_nodes : key => value if value.floating_ip && (lookup(value, "reserved_floating_ip", "") == "") } : tomap({})
  for_each = var.number_of_k8s_nodes == 0 ? { for key, value in var.k8s_nodes : key => value if value.floating_ip } : {}
  pool = var.floatingip_pool
  depends_on = [null_resource.dummy_dependency]
}

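The new `for_each` filter above keeps only the nodes that want a floating IP and have not pre-reserved one. A rough Python analogue of that selection, purely to illustrate the filter semantics (the sample map is invented):

```python
k8s_nodes = {
    "a": {"floating_ip": True,  "reserved_floating_ip": ""},
    "b": {"floating_ip": True,  "reserved_floating_ip": "198.51.100.7"},
    "c": {"floating_ip": False, "reserved_floating_ip": ""},
}

# Create a new FIP only when one is wanted and none is reserved.
to_create = {k: v for k, v in k8s_nodes.items()
             if v["floating_ip"] and not v.get("reserved_floating_ip")}
print(list(to_create))  # ['a']
```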
@@ -1,33 +1,10 @@
locals {
  k8s_masters_reserved_fips = {
    for key, value in var.k8s_masters : key => {
      address = value.reserved_floating_ip
    } if value.floating_ip && (lookup(value, "reserved_floating_ip", "") != "")
  }
  k8s_masters_create_fips = {
    for key, value in openstack_networking_floatingip_v2.k8s_masters : key => {
      address = value.address
    }
  }
  k8s_nodes_reserved_fips = {
    for key, value in var.k8s_nodes : key => {
      address = value.reserved_floating_ip
    } if value.floating_ip && (lookup(value, "reserved_floating_ip", "") != "")
  }
  k8s_nodes_create_fips = {
    for key, value in openstack_networking_floatingip_v2.k8s_nodes : key => {
      address = value.address
    }
  }
}

# If k8s_master_fips is already defined as input, keep the same value since new FIPs have not been created.
output "k8s_master_fips" {
  value = length(var.k8s_master_fips) > 0 ? var.k8s_master_fips : openstack_networking_floatingip_v2.k8s_master[*].address
}

output "k8s_masters_fips" {
  value = merge(local.k8s_masters_create_fips, local.k8s_masters_reserved_fips)
  value = openstack_networking_floatingip_v2.k8s_masters
}

# If k8s_master_fips is already defined as input, keep the same value since new FIPs have not been created.
@@ -40,7 +17,7 @@ output "k8s_node_fips" {
}

output "k8s_nodes_fips" {
  value = merge(local.k8s_nodes_create_fips, local.k8s_nodes_reserved_fips)
  value = openstack_networking_floatingip_v2.k8s_nodes
}

output "bastion_fips" {

@@ -98,7 +98,7 @@ PARSERS = {}
def _clean_dc(dcname):
    # Consul DCs are strictly alphanumeric with underscores and hyphens -
    # ensure that the consul_dc attribute meets these requirements.
    return re.sub(r'[^\w_\-]', '-', dcname)
    return re.sub('[^\w_\-]', '-', dcname)


def iterhosts(resources):
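The hunk above only switches to a raw string literal; the behavior is unchanged. A quick illustration of the sanitization it performs:

```python
import re

def _clean_dc(dcname):
    # Replace anything outside [A-Za-z0-9_-] with a hyphen.
    return re.sub(r'[^\w_\-]', '-', dcname)

print(_clean_dc("my dc.eu/1"))  # my-dc-eu-1
```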
@@ -273,7 +273,6 @@ def openstack_host(resource, module_name):
        'access_ip_v4': raw_attrs['access_ip_v4'],
        'access_ip_v6': raw_attrs['access_ip_v6'],
        'access_ip': raw_attrs['access_ip_v4'],
        'access_ip6': raw_attrs['access_ip_v6'],
        'ip': raw_attrs['network.0.fixed_ip_v4'],
        'flavor': parse_dict(raw_attrs, 'flavor',
                             sep='_'),
@@ -305,7 +304,7 @@ def openstack_host(resource, module_name):
    try:
        if 'metadata.prefer_ipv6' in raw_attrs and raw_attrs['metadata.prefer_ipv6'] == "1":
            attrs.update({
                'ansible_host': re.sub(r"[\[\]]", "", raw_attrs['access_ip_v6']),
                'ansible_host': re.sub("[\[\]]", "", raw_attrs['access_ip_v6']),
                'publicly_routable': True,
            })
        else:
@@ -369,7 +368,7 @@ def iter_host_ips(hosts, ips):
            'ansible_host': ip,
        })

    if 'use_access_ip' in host[1]['metadata'] and host[1]['metadata']['use_access_ip'] == "0" and 'access_ip' in host[1]:
    if 'use_access_ip' in host[1]['metadata'] and host[1]['metadata']['use_access_ip'] == "0":
        host[1].pop('access_ip')

    yield host

@@ -134,40 +134,10 @@ terraform destroy --var-file cluster-settings.tfvars \

* `end_address`: End of the address range to allow
* `loadbalancer_enabled`: Enable the managed load balancer
* `loadbalancer_plan`: Plan to use for the load balancer *(development|production-small)*
* `loadbalancer_legacy_network`: Whether the load balancer should use the deprecated `network` field instead of `networks` blocks. You probably want this set to `false` (the default value)
* `loadbalancers`: Ports to load balance and which machines to forward to. The key of each entry is used as the name of the load balancer frontends/backends (see the sketch after this list)
  * `port`: Port to load balance.
  * `target_port`: Port on the backend servers.
  * `backend_servers`: List of servers that traffic to the port should be forwarded to.
* `router_enable`: Whether a router should be connected to the private network
* `gateways`: Gateways that should be connected to the router; requires `router_enable` to be set to `true`
  * `features`: List of features for the gateway
  * `plan`: Plan to use for the gateway
  * `connections`: The connections and tunnels to create for the gateway
    * `type`: The type of connection
    * `local_routes`: Map of local routes for the connection
      * `type`: Type of route
      * `static_network`: Destination prefix of the route; needs to be a valid IPv4 prefix
    * `remote_routes`: Map of remote routes for the connection
      * `type`: Type of route
      * `static_network`: Destination prefix of the route; needs to be a valid IPv4 prefix
    * `tunnels`: The tunnels to create for this connection
      * `remote_address`: The remote address for the tunnel
      * `ipsec_properties`: Properties of the IPsec tunnel; if not set, defaults will be used
        * `child_rekey_time`: IKE child SA rekey time in seconds
        * `dpd_delay`: Delay before sending Dead Peer Detection packets if no traffic is detected, in seconds
        * `dpd_timeout`: Timeout period for DPD reply before considering the peer to be dead, in seconds
        * `ike_lifetime`: Maximum IKE SA lifetime in seconds
        * `rekey_time`: IKE SA rekey time in seconds
        * `phase1_algorithms`: List of Phase 1 proposal algorithms
        * `phase1_dh_group_numbers`: List of Phase 1 Diffie-Hellman group numbers
        * `phase1_integrity_algorithms`: List of Phase 1 integrity algorithms
        * `phase2_algorithms`: List of Phase 2 Security Association algorithms
        * `phase2_dh_group_numbers`: List of Phase 2 Diffie-Hellman group numbers
        * `phase2_integrity_algorithms`: List of Phase 2 integrity algorithms
* `gateway_vpn_psks`: Separate variable for providing PSKs for connection tunnels. The environment variable can be exported in the following format: `export TF_VAR_gateway_vpn_psks='{"${gateway-name}-${connection-name}-tunnel":{psk:"..."}}'`
* `static_routes`: Static routes to apply to the router; requires `router_enable` to be set to `true`
* `network_peerings`: Other UpCloud private networks to peer with; requires `router_enable` to be set to `true`
* `server_groups`: Group servers together
  * `servers`: The servers that should be included in the group.
  * `anti_affinity_policy`: Defines if a server group is an anti-affinity group. The value can be `"strict"`, `"yes"` or `"no"`. `"strict"` does not allow servers in the same group to be placed on the same compute host; `"yes"` is a best-effort policy that tries to put servers on different hosts, but this is not guaranteed.
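As a minimal `cluster-settings.tfvars` sketch for the load balancer variables — the machine names `worker0`/`worker1` and the port numbers are illustrative, not defaults:

```hcl
loadbalancer_enabled = true
loadbalancer_plan    = "development"

loadbalancers = {
  # The key "ingress" becomes the name of the frontend/backend pair
  "ingress" = {
    port            = 443   # port exposed on the load balancer
    target_port     = 31443 # port on the backend servers
    backend_servers = ["worker0", "worker1"] # keys from the machines map
  }
}
```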
@@ -1,11 +1,5 @@
# See: https://developers.upcloud.com/1.3/5-zones/
zone          = "fi-hel1"
private_cloud = false

# Only used if private_cloud = true; public zone equivalent.
# For example, use a Finnish public zone for a Finnish private zone.
public_zone = "fi-hel2"

zone     = "fi-hel1"
username = "ubuntu"

# Prefix to use for all resources to separate them from other resources
@@ -153,46 +147,3 @@ server_groups = {
  #   anti_affinity_policy = "yes"
  # }
}

router_enable = false
gateways = {
  # "gateway" : {
  #   features: [ "vpn" ]
  #   plan = "production"
  #   connections = {
  #     "connection" = {
  #       name = "connection"
  #       type = "ipsec"
  #       remote_routes = {
  #         "them" = {
  #           type           = "static"
  #           static_network = "1.2.3.4/24"
  #         }
  #       }
  #       local_routes = {
  #         "me" = {
  #           type           = "static"
  #           static_network = "4.3.2.1/24"
  #         }
  #       }
  #       tunnels = {
  #         "tunnel1" = {
  #           remote_address = "1.2.3.4"
  #         }
  #       }
  #     }
  #   }
  # }
}
# gateway_vpn_psks = {} # Should be loaded as an environment variable
static_routes = {
  # "route": {
  #   route:   "1.2.3.4/24"
  #   nexthop: "4.3.2.1"
  # }
}
network_peerings = {
  # "peering": {
  #   remote_network: "uuid"
  # }
}
@@ -11,10 +11,8 @@ provider "upcloud" {
module "kubernetes" {
  source = "./modules/kubernetes-cluster"

  prefix        = var.prefix
  zone          = var.zone
  private_cloud = var.private_cloud
  public_zone   = var.public_zone
  prefix        = var.prefix
  zone          = var.zone

  template_name = var.template_name
  username      = var.username

@@ -36,15 +34,8 @@ module "kubernetes" {
  loadbalancer_enabled                 = var.loadbalancer_enabled
  loadbalancer_plan                    = var.loadbalancer_plan
  loadbalancer_outbound_proxy_protocol = var.loadbalancer_proxy_protocol ? "v2" : ""
  loadbalancer_legacy_network          = var.loadbalancer_legacy_network
  loadbalancers                        = var.loadbalancers

  router_enable    = var.router_enable
  gateways         = var.gateways
  gateway_vpn_psks = var.gateway_vpn_psks
  static_routes    = var.static_routes
  network_peerings = var.network_peerings

  server_groups = var.server_groups
}
@@ -20,36 +20,6 @@ locals {
    ]
  ])

  gateway_connections = flatten([
    for gateway_name, gateway in var.gateways : [
      for connection_name, connection in gateway.connections : {
        "gateway_id"      = upcloud_gateway.gateway[gateway_name].id
        "gateway_name"    = gateway_name
        "connection_name" = connection_name
        "type"            = connection.type
        "local_routes"    = connection.local_routes
        "remote_routes"   = connection.remote_routes
      }
    ]
  ])

  gateway_connection_tunnels = flatten([
    for gateway_name, gateway in var.gateways : [
      for connection_name, connection in gateway.connections : [
        for tunnel_name, tunnel in connection.tunnels : {
          "gateway_id"         = upcloud_gateway.gateway[gateway_name].id
          "gateway_name"       = gateway_name
          "connection_id"      = upcloud_gateway_connection.gateway_connection["${gateway_name}-${connection_name}"].id
          "connection_name"    = connection_name
          "tunnel_name"        = tunnel_name
          "local_address_name" = tolist(upcloud_gateway.gateway[gateway_name].address).0.name
          "remote_address"     = tunnel.remote_address
          "ipsec_properties"   = tunnel.ipsec_properties
        }
      ]
    ]
  ])

  # If prefix is set, all resources will be prefixed with "${var.prefix}-"
  # Else don't prefix with anything
  resource-prefix = "%{if var.prefix != ""}${var.prefix}-%{endif}"
@@ -60,13 +30,10 @@ resource "upcloud_network" "private" {
  zone = var.zone

  ip_network {
    address            = var.private_network_cidr
    dhcp_default_route = var.router_enable
    dhcp               = true
    family             = "IPv4"
    address = var.private_network_cidr
    dhcp    = true
    family  = "IPv4"
  }

  router = var.router_enable ? upcloud_router.router[0].id : null
}

resource "upcloud_storage" "additional_disks" {
@@ -87,12 +54,11 @@ resource "upcloud_server" "master" {
    if machine.node_type == "master"
  }

  hostname     = "${local.resource-prefix}${each.key}"
  plan         = each.value.plan
  cpu          = each.value.plan == null ? null : each.value.cpu
  mem          = each.value.plan == null ? null : each.value.mem
  zone         = var.zone
  server_group = each.value.server_group == null ? null : upcloud_server_group.server_groups[each.value.server_group].id
  hostname = "${local.resource-prefix}${each.key}"
  plan     = each.value.plan
  cpu      = each.value.plan == null ? each.value.cpu : null
  mem      = each.value.plan == null ? each.value.mem : null
  zone     = var.zone

  template {
    storage = var.template_name
@@ -145,13 +111,11 @@ resource "upcloud_server" "worker" {
    if machine.node_type == "worker"
  }

  hostname     = "${local.resource-prefix}${each.key}"
  plan         = each.value.plan
  cpu          = each.value.plan == null ? null : each.value.cpu
  mem          = each.value.plan == null ? null : each.value.mem
  zone         = var.zone
  server_group = each.value.server_group == null ? null : upcloud_server_group.server_groups[each.value.server_group].id

  hostname = "${local.resource-prefix}${each.key}"
  plan     = each.value.plan
  cpu      = each.value.plan == null ? each.value.cpu : null
  mem      = each.value.plan == null ? each.value.mem : null
  zone     = var.zone

  template {
    storage = var.template_name
@@ -548,33 +512,8 @@ resource "upcloud_loadbalancer" "lb" {
  configured_status = "started"
  name              = "${local.resource-prefix}lb"
  plan              = var.loadbalancer_plan
  zone              = var.private_cloud ? var.public_zone : var.zone
  network           = var.loadbalancer_legacy_network ? upcloud_network.private.id : null

  dynamic "networks" {
    for_each = var.loadbalancer_legacy_network ? [] : [1]

    content {
      name    = "Private-Net"
      type    = "private"
      family  = "IPv4"
      network = upcloud_network.private.id
    }
  }

  dynamic "networks" {
    for_each = var.loadbalancer_legacy_network ? [] : [1]

    content {
      name   = "Public-Net"
      type   = "public"
      family = "IPv4"
    }
  }

  lifecycle {
    ignore_changes = [maintenance_dow, maintenance_time]
  }
  zone    = var.zone
  network = upcloud_network.private.id
}

resource "upcloud_loadbalancer_backend" "lb_backend" {
@@ -595,22 +534,6 @@ resource "upcloud_loadbalancer_frontend" "lb_frontend" {
  mode                 = "tcp"
  port                 = each.value.port
  default_backend_name = upcloud_loadbalancer_backend.lb_backend[each.key].name

  dynamic "networks" {
    for_each = var.loadbalancer_legacy_network ? [] : [1]

    content {
      name = "Public-Net"
    }
  }

  dynamic "networks" {
    for_each = each.value.allow_internal_frontend ? [1] : []

    content {
      name = "Private-Net"
    }
  }
}

resource "upcloud_loadbalancer_static_backend_member" "lb_backend_member" {
@@ -634,117 +557,5 @@ resource "upcloud_server_group" "server_groups" {
  title                = each.key
  anti_affinity_policy = each.value.anti_affinity_policy
  labels               = {}
  # Managed upstream via upcloud_server resource
  members = []
  lifecycle {
    ignore_changes = [members]
  }
}

resource "upcloud_router" "router" {
  count = var.router_enable ? 1 : 0

  name = "${local.resource-prefix}router"

  dynamic "static_route" {
    for_each = var.static_routes

    content {
      name = static_route.key

      nexthop = static_route.value["nexthop"]
      route   = static_route.value["route"]
    }
  }

}

resource "upcloud_gateway" "gateway" {
  for_each = var.router_enable ? var.gateways : {}
  name     = "${local.resource-prefix}${each.key}-gateway"
  zone     = var.zone

  features = each.value.features
  plan     = each.value.plan

  router {
    id = upcloud_router.router[0].id
  }
}

resource "upcloud_gateway_connection" "gateway_connection" {
  for_each = {
    for gc in local.gateway_connections : "${gc.gateway_name}-${gc.connection_name}" => gc
  }

  gateway = each.value.gateway_id
  name    = "${local.resource-prefix}${each.key}-gateway-connection"
  type    = each.value.type

  dynamic "local_route" {
    for_each = each.value.local_routes

    content {
      name           = local_route.key
      type           = local_route.value["type"]
      static_network = local_route.value["static_network"]
    }
  }

  dynamic "remote_route" {
    for_each = each.value.remote_routes

    content {
      name           = remote_route.key
      type           = remote_route.value["type"]
      static_network = remote_route.value["static_network"]
    }
  }
}

resource "upcloud_gateway_connection_tunnel" "gateway_connection_tunnel" {
  for_each = {
    for gct in local.gateway_connection_tunnels : "${gct.gateway_name}-${gct.connection_name}-${gct.tunnel_name}-tunnel" => gct
  }

  connection_id      = each.value.connection_id
  name               = each.key
  local_address_name = each.value.local_address_name
  remote_address     = each.value.remote_address

  ipsec_auth_psk {
    psk = var.gateway_vpn_psks[each.key].psk
  }

  dynamic "ipsec_properties" {
    for_each = each.value.ipsec_properties != null ? { "ip" : each.value.ipsec_properties } : {}

    content {
      child_rekey_time            = ipsec_properties.value["child_rekey_time"]
      dpd_delay                   = ipsec_properties.value["dpd_delay"]
      dpd_timeout                 = ipsec_properties.value["dpd_timeout"]
      ike_lifetime                = ipsec_properties.value["ike_lifetime"]
      rekey_time                  = ipsec_properties.value["rekey_time"]
      phase1_algorithms           = ipsec_properties.value["phase1_algorithms"]
      phase1_dh_group_numbers     = ipsec_properties.value["phase1_dh_group_numbers"]
      phase1_integrity_algorithms = ipsec_properties.value["phase1_integrity_algorithms"]
      phase2_algorithms           = ipsec_properties.value["phase2_algorithms"]
      phase2_dh_group_numbers     = ipsec_properties.value["phase2_dh_group_numbers"]
      phase2_integrity_algorithms = ipsec_properties.value["phase2_integrity_algorithms"]
    }
  }
}

resource "upcloud_network_peering" "peering" {
  for_each = var.network_peerings

  name = "${local.resource-prefix}${each.key}"

  network {
    uuid = upcloud_network.private.id
  }

  peer_network {
    uuid = each.value.remote_network
  }
  members = [for server in each.value.servers : merge(upcloud_server.master, upcloud_server.worker)[server].id]
}
@@ -6,14 +6,6 @@ variable "zone" {
  type = string
}

variable "private_cloud" {
  type = bool
}

variable "public_zone" {
  type = string
}

variable "template_name" {}

variable "username" {}

@@ -28,7 +20,6 @@ variable "machines" {
    cpu       = string
    mem       = string
    disk_size = number
    server_group : string
    additional_disks = map(object({
      size = number
      tier = string
@@ -98,19 +89,13 @@ variable "loadbalancer_outbound_proxy_protocol" {
  type = string
}

variable "loadbalancer_legacy_network" {
  type    = bool
  default = false
}

variable "loadbalancers" {
  description = "Load balancers"

  type = map(object({
    port                    = number
    target_port             = number
    allow_internal_frontend = optional(bool)
    backend_servers         = list(string)
    port            = number
    target_port     = number
    backend_servers = list(string)
  }))
}
@@ -119,74 +104,6 @@ variable "server_groups" {

  type = map(object({
    anti_affinity_policy = string
  }))
}

variable "router_enable" {
  description = "If a router should be enabled and connected to the private network or not"

  type = bool
}

variable "gateways" {
  description = "Gateways that should be connected to the router, requires router_enable is set to true"

  type = map(object({
    features = list(string)
    plan     = optional(string)
    connections = optional(map(object({
      type = string
      local_routes = optional(map(object({
        type           = string
        static_network = string
      })))
      remote_routes = optional(map(object({
        type           = string
        static_network = string
      })))
      tunnels = optional(map(object({
        remote_address = string
        ipsec_properties = optional(object({
          child_rekey_time            = number
          dpd_delay                   = number
          dpd_timeout                 = number
          ike_lifetime                = number
          rekey_time                  = number
          phase1_algorithms           = set(string)
          phase1_dh_group_numbers     = set(string)
          phase1_integrity_algorithms = set(string)
          phase2_algorithms           = set(string)
          phase2_dh_group_numbers     = set(string)
          phase2_integrity_algorithms = set(string)
        }))
      })))
    })))
  }))
}

variable "gateway_vpn_psks" {
  description = "Separate variable for providing psks for connection tunnels"

  type = map(object({
    psk = string
  }))
  default   = {}
  sensitive = true
}

variable "static_routes" {
  description = "Static routes to apply to the router, requires router_enable is set to true"

  type = map(object({
    nexthop = string
    route   = string
  }))
}

variable "network_peerings" {
  description = "Other UpCloud private networks to peer with, requires router_enable is set to true"

  type = map(object({
    remote_network = string
    servers        = list(string)
  }))
}
@@ -3,7 +3,7 @@ terraform {
  required_providers {
    upcloud = {
      source  = "UpCloudLtd/upcloud"
      version = "~>5.9.0"
      version = "~>2.12.0"
    }
  }
  required_version = ">= 0.13"
@@ -9,15 +9,6 @@ variable "zone" {
  description = "The zone where to run the cluster"
}

variable "private_cloud" {
  description = "Whether the environment is in the private cloud region"
  default     = false
}

variable "public_zone" {
  description = "The public zone equivalent if the cluster is running in a private cloud zone"
}

variable "template_name" {
  description = "Block describing the preconfigured operating system"
}

@@ -41,7 +32,6 @@ variable "machines" {
    cpu       = string
    mem       = string
    disk_size = number
    server_group : string
    additional_disks = map(object({
      size = number
      tier = string
@@ -136,21 +126,13 @@ variable "loadbalancer_proxy_protocol" {
  default = false
}

variable "loadbalancer_legacy_network" {
  description = "If the loadbalancer should use the deprecated network field instead of networks blocks. You probably want to have this set to false"

  type    = bool
  default = false
}

variable "loadbalancers" {
  description = "Load balancers"

  type = map(object({
    port                    = number
    target_port             = number
    allow_internal_frontend = optional(bool, false)
    backend_servers         = list(string)
    port            = number
    target_port     = number
    backend_servers = list(string)
  }))
  default = {}
}
@@ -160,80 +142,8 @@ variable "server_groups" {

  type = map(object({
    anti_affinity_policy = string
    servers              = list(string)
  }))

  default = {}
}

variable "router_enable" {
  description = "If a router should be enabled and connected to the private network or not"

  type    = bool
  default = false
}

variable "gateways" {
  description = "Gateways that should be connected to the router, requires router_enable is set to true"

  type = map(object({
    features = list(string)
    plan     = optional(string)
    connections = optional(map(object({
      type = string
      local_routes = optional(map(object({
        type           = string
        static_network = string
      })), {})
      remote_routes = optional(map(object({
        type           = string
        static_network = string
      })), {})
      tunnels = optional(map(object({
        remote_address = string
        ipsec_properties = optional(object({
          child_rekey_time            = number
          dpd_delay                   = number
          dpd_timeout                 = number
          ike_lifetime                = number
          rekey_time                  = number
          phase1_algorithms           = set(string)
          phase1_dh_group_numbers     = set(string)
          phase1_integrity_algorithms = set(string)
          phase2_algorithms           = set(string)
          phase2_dh_group_numbers     = set(string)
          phase2_integrity_algorithms = set(string)
        }))
      })), {})
    })), {})
  }))
  default = {}
}

variable "gateway_vpn_psks" {
  description = "Separate variable for providing psks for connection tunnels"

  type = map(object({
    psk = string
  }))
  default   = {}
  sensitive = true
}

variable "static_routes" {
  description = "Static routes to apply to the router, requires router_enable is set to true"

  type = map(object({
    nexthop = string
    route   = string
  }))
  default = {}
}

variable "network_peerings" {
  description = "Other UpCloud private networks to peer with, requires router_enable is set to true"

  type = map(object({
    remote_network = string
  }))
  default = {}
}
@@ -3,7 +3,7 @@ terraform {
  required_providers {
    upcloud = {
      source  = "UpCloudLtd/upcloud"
      version = "~>5.9.0"
      version = "~>2.12.0"
    }
  }
  required_version = ">= 0.13"
@@ -89,12 +89,9 @@ node1 ansible_ssh_host=95.54.0.12 local_as=xxxxxx

Peers can be defined using the `peers` variable (see docs/calico_peer_example examples).
In order to define global peers, the `peers` variable can be defined in group_vars with the "scope" attribute of each global peer set to "global".
In order to define peers on a per node basis, the `peers` variable must be defined in hostvars or group_vars with the "scope" attribute unset or set to "node".

In order to define peers on a per node basis, the `peers` variable must be defined in hostvars.
NB: Ansible's `hash_behaviour` is by default set to "replace"; thus defining both global and per node peers would end up with only the per node peers. If both global and per node peers are meant to be defined, the global peers would have to be defined in hostvars for each host (as well as the per node peers).

NB²: Peers definition at node scope can be customized with the additional fields `filters`, `sourceAddress` and `numAllowedLocalASNumbers` (see <https://docs.tigera.io/calico/latest/reference/resources/bgppeer> for details)
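A minimal group_vars sketch of a global peer — the address and AS number are illustrative, and the `router_id`/`as` field names are assumptions based on the `peers` format referenced above:

```yml
peers:
  - router_id: "10.99.0.34"  # BGP peer address
    as: "65xxx"              # peer AS number
    scope: "global"          # omit or set to "node" for per node peers
```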
Since Calico 3.4, Calico supports advertising Kubernetes service cluster IPs over BGP, just as it advertises pod IPs.
This can be enabled by setting the following variable as follows in group_vars (k8s_cluster/k8s-net-calico.yml)
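A sketch of that setting, assuming the variable is named `calico_advertise_cluster_ips`:

```yml
calico_advertise_cluster_ips: true
```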
@@ -130,7 +127,8 @@ recommended here:
You need to edit your inventory and add:

* `calico_rr` group with nodes in it. `calico_rr` can be combined with
  `kube_node` and/or `kube_control_plane`.
  `kube_node` and/or `kube_control_plane`. `calico_rr` group also must be a child
  group of `k8s_cluster` group.
* `cluster_id` by route reflector node/group (see details [here](https://hub.docker.com/r/calico/routereflector/))

Here's an example of Kubespray inventory with standalone route reflectors:
@@ -159,6 +157,11 @@ node3
node4
node5

[k8s_cluster:children]
kube_node
kube_control_plane
calico_rr

[calico_rr]
rr0
rr1

@@ -421,7 +424,7 @@ calico_wireguard_enabled: true

The following OSes will require enabling the EPEL repo in order to bring in wireguard tools (see the sketch after this list):

* CentOS 8
* CentOS 7 & 8
* AlmaLinux 8
* Rocky Linux 8
* Amazon Linux 2
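A group_vars sketch for that, assuming Kubespray's `epel_enabled` flag is what controls the EPEL repository on those distributions:

```yml
epel_enabled: true  # enable the EPEL repo so wireguard tools can be installed
```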
@@ -45,144 +45,10 @@ cilium_pool_mask_size Specifies the size allocated to node.ipam.podCIDRs from cl
cilium_pool_mask_size_ipv6: "120"
```

### IP Load Balancer Pools

Cilium's IP Load Balancer Pools can be configured with the `cilium_loadbalancer_ip_pools` variable:

```yml
cilium_loadbalancer_ip_pools:
  - name: "blue-pool"
    cidrs:
      - "10.0.10.0/24"
```

For further information, check [LB IPAM documentation](https://docs.cilium.io/en/stable/network/lb-ipam/)

### BGP Control Plane

Cilium's BGP Control Plane can be enabled by setting `cilium_enable_bgp_control_plane` to `true`:

```yml
cilium_enable_bgp_control_plane: true
```

For further information, check [BGP Peering Policy documentation](https://docs.cilium.io/en/latest/network/bgp-control-plane/bgp-control-plane-v1/)

### BGP Control Plane Resources (New bgpv2 API v1.16+)

Cilium BGP control plane is managed by a set of custom resources which provide a flexible way to configure BGP peers, policies, and advertisements.

Cilium's BGP Instances can be configured with the `cilium_bgp_cluster_configs` variable:

```yml
cilium_bgp_cluster_configs:
  - name: "cilium-bgp"
    spec:
      bgpInstances:
        - name: "instance-64512"
          localASN: 64512
          peers:
            - name: "peer-64512-tor1"
              peerASN: 64512
              peerAddress: '10.47.1.1'
              peerConfigRef:
                name: "cilium-peer"
      nodeSelector:
        matchExpressions:
          - {key: somekey, operator: NotIn, values: ['never-used-value']}
```

Cilium's BGP Peers can be configured with the `cilium_bgp_peer_configs` variable:

```yml
cilium_bgp_peer_configs:
  - name: cilium-peer
    spec:
      # authSecretRef: bgp-auth-secret
      gracefulRestart:
        enabled: true
        restartTimeSeconds: 15
      families:
        - afi: ipv4
          safi: unicast
          advertisements:
            matchLabels:
              advertise: "bgp"
        - afi: ipv6
          safi: unicast
          advertisements:
            matchLabels:
              advertise: "bgp"
```

Cilium's BGP Advertisements can be configured with the `cilium_bgp_advertisements` variable:

```yml
cilium_bgp_advertisements:
  - name: bgp-advertisements
    labels:
      advertise: bgp
    spec:
      advertisements:
        - advertisementType: "PodCIDR"
          attributes:
            communities:
              standard: [ "64512:99" ]
        - advertisementType: "Service"
          service:
            addresses:
              - ClusterIP
              - ExternalIP
              - LoadBalancerIP
          selector:
            matchExpressions:
              - {key: somekey, operator: NotIn, values: ['never-used-value']}
```

Cilium's BGP Node Config Overrides can be configured with the `cilium_bgp_node_config_overrides` variable:

```yml
cilium_bgp_node_config_overrides:
  - name: bgpv2-cplane-dev-multi-homing-worker
    spec:
      bgpInstances:
        - name: "instance-65000"
          routerID: "192.168.10.1"
          localPort: 1790
          peers:
            - name: "peer-65000-tor1"
              localAddress: fd00:10:0:2::2
            - name: "peer-65000-tor2"
              localAddress: fd00:11:0:2::2
```

For further information, check [BGP Control Plane Resources documentation](https://docs.cilium.io/en/latest/network/bgp-control-plane/bgp-control-plane-v2/)

### BGP Peering Policies (Legacy < v1.16)

Cilium's BGP Peering Policies can be configured with the `cilium_bgp_peering_policies` variable:

```yml
cilium_bgp_peering_policies:
  - name: "01-bgp-peering-policy"
    spec:
      virtualRouters:
        - localASN: 64512
          exportPodCIDR: false
          neighbors:
            - peerAddress: '10.47.1.1/24'
              peerASN: 64512
          serviceSelector:
            matchExpressions:
              - {key: somekey, operator: NotIn, values: ['never-used-value']}
```

For further information, check [BGP Peering Policy documentation](https://docs.cilium.io/en/latest/network/bgp-control-plane/bgp-control-plane-v1/#bgp-peering-policy-legacy)

## Kube-proxy replacement with Cilium

Cilium can run without kube-proxy by setting `cilium_kube_proxy_replacement`
to `strict` (< v1.16) or `true` (Cilium v1.16+ no longer accepts `strict`; however, this is converted to `true` by Kubespray when running v1.16+).
to `strict`.

Without kube-proxy, Cilium needs to know the address of the kube-apiserver
and this must be set globally for all Cilium components (agents and operators).
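A minimal group_vars sketch of that toggle, using the value the text above gives for each Cilium version:

```yml
# Cilium v1.16+ (Kubespray converts `strict` to `true` on these versions anyway)
cilium_kube_proxy_replacement: true
# For Cilium < v1.16 use instead:
# cilium_kube_proxy_replacement: strict
```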
@@ -266,7 +132,7 @@ Wireguard option is only available in Cilium 1.10.0 and newer.

### IPsec Encryption

For further information, make sure to check the official [Cilium documentation.](https://docs.cilium.io/en/stable/security/network/encryption-ipsec/)
For further information, make sure to check the official [Cilium documentation.](https://docs.cilium.io/en/stable/gettingstarted/encryption-ipsec/)

To enable IPsec encryption, you just need to set three variables.
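A sketch of those three variables, assuming the `cilium_encryption_enabled` and `cilium_encryption_type` names; `cilium_ipsec_key` itself is generated with the `echo "cilium_ipsec_key: ..."` command shown below:

```yml
cilium_encryption_enabled: true
cilium_encryption_type: "ipsec"
# cilium_ipsec_key: <output of the key-generation command below>
```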
@@ -291,7 +157,7 @@ echo "cilium_ipsec_key: "$(echo -n "3 rfc4106(gcm(aes)) $(echo $(dd if=/dev/uran

### Wireguard Encryption

For further information, make sure to check the official [Cilium documentation.](https://docs.cilium.io/en/stable/security/network/encryption-wireguard/)
For further information, make sure to check the official [Cilium documentation.](https://docs.cilium.io/en/stable/gettingstarted/encryption-wireguard/)

To enable Wireguard encryption, you just need to set two variables.
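A sketch of those two variables, again assuming the `cilium_encryption_enabled` and `cilium_encryption_type` names:

```yml
cilium_encryption_enabled: true
cilium_encryption_type: "wireguard"
```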
@@ -304,14 +170,14 @@ Kubespray currently supports Linux distributions with Wireguard Kernel mode on L

## Bandwidth Manager

Cilium's bandwidth manager supports the kubernetes.io/egress-bandwidth Pod annotation.
Cilium's bandwidth manager supports the kubernetes.io/egress-bandwidth Pod annotation.

Bandwidth enforcement currently does not work in combination with L7 Cilium Network Policies.
If such policies select the Pod at egress, bandwidth enforcement will be disabled for those Pods.

Bandwidth Manager requires a v5.1.x or more recent Linux kernel.

For further information, make sure to check the official [Cilium documentation](https://docs.cilium.io/en/latest/network/kubernetes/bandwidth-manager/)
For further information, make sure to check the official [Cilium documentation.](https://docs.cilium.io/en/v1.12/gettingstarted/bandwidth-manager/)

To use this function, set the following parameters

@@ -319,26 +185,6 @@ To use this function, set the following parameters

```yml
cilium_enable_bandwidth_manager: true
```
## Host Firewall

Host Firewall enforces security policies for Kubernetes nodes. It is disabled by default, since it can break cluster connectivity.

```yaml
cilium_enable_host_firewall: true
```

For further information, check [host firewall documentation](https://docs.cilium.io/en/latest/security/host-firewall/)

## Policy Audit Mode

When _Policy Audit Mode_ is enabled, no network policy is enforced. This feature helps to validate the impact of host policies before enforcing them.

```yaml
cilium_policy_audit_mode: true
```

It is disabled by default, and should not be enabled in production.

## Install Cilium Hubble

k8s-net-cilium.yml:
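A sketch of the Hubble toggles for k8s-net-cilium.yml, assuming the `cilium_enable_hubble`, `cilium_hubble_install` and `cilium_hubble_tls_generate` variable names:

```yml
cilium_enable_hubble: true       # enable the Hubble observability layer
cilium_hubble_install: true      # deploy Hubble Relay and UI alongside Cilium
cilium_hubble_tls_generate: true # generate TLS certificates for Hubble
```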
@@ -17,22 +17,6 @@ kube_network_plugin_multus: true

will install Multus and Calico and configure Multus to use Calico as the primary network plugin.

Namespace isolation enables a mode where Multus only allows pods to access custom resources (the `NetworkAttachmentDefinitions`) within the namespace where that pod resides. To enable namespace isolation:

```yml
multus_namespace_isolation: true
```

### Cilium compatibility

If you are using `cilium` as the primary CNI, you'll have to set `cilium_cni_exclusive` to `false` to avoid Cilium reverting the Multus config.

```yml
kube_network_plugin: cilium
kube_network_plugin_multus: true
cilium_cni_exclusive: false
```

## Using Multus

Once Multus is installed, you can create CNI configurations (as CRD objects) for additional networks; in this case a macvlan CNI configuration is defined. You may replace the config field with any valid CNI configuration where the CNI binary is available on the nodes.
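A sketch of such a `NetworkAttachmentDefinition`, assuming `eth0` as the host's master interface and an illustrative host-local subnet:

```yml
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
  name: macvlan-conf
spec:
  # Any valid CNI configuration can go here, provided the named
  # CNI binary (here: macvlan) is present on the nodes.
  config: '{
      "cniVersion": "0.4.0",
      "type": "macvlan",
      "master": "eth0",
      "mode": "bridge",
      "ipam": {
        "type": "host-local",
        "subnet": "192.168.1.0/24",
        "rangeStart": "192.168.1.200",
        "rangeEnd": "192.168.1.216"
      }
    }'
```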
Some files were not shown because too many files have changed in this diff.