Compare commits


17 Commits

Author SHA1 Message Date
Florian Ruynat
d28a6d68f9 Add Kubernetes hashes 1.18.9/1.17.12 and set default (#6701) 2020-09-18 02:08:45 -07:00
ahmadali shafiee
f4b6bce69f [2.13] add missing 'Set up proxy environment' tasks (#6591)
* Add proxy_env calculation to reset.yml (#6558)

* * add proxy_env definition to remove_node.yml resolving #6430 (#6431)

Co-authored-by: Florian Ruynat <florian234@hotmail.com>
Co-authored-by: nniehoff <github@nickniehoff.net>
2020-09-17 02:46:45 -07:00
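For reference, the "Set up proxy environment" task these backports add mirrors the set_fact block visible in the cluster.yml hunk near the end of this compare; a trimmed sketch (exact placement in reset.yml and remove_node.yml may differ):

```yaml
# Sketch of the task added by this backport, mirroring the set_fact block
# shown in the cluster.yml hunk later in this compare; placement in
# reset.yml / remove_node.yml may differ.
- name: "Set up proxy environment"
  set_fact:
    proxy_env:
      http_proxy: "{{ http_proxy | default('') }}"
      HTTP_PROXY: "{{ http_proxy | default('') }}"
      https_proxy: "{{ https_proxy | default('') }}"
      HTTPS_PROXY: "{{ https_proxy | default('') }}"
      no_proxy: "{{ no_proxy | default('') }}"
      NO_PROXY: "{{ no_proxy | default('') }}"
  no_log: true
```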
Florian Ruynat
8e53d81146 Add Kubernetes 1.16.15 hashes (#6626) 2020-09-07 01:23:41 -07:00
Maxime Guyot
28ee071bd6 Update base image to v2.12.9 (#6492) 2020-08-04 05:10:19 -07:00
Florian Ruynat
0ba1c8ff0a Fix test if openstack_cacert is a base64 string (#6421) (#6475) 2020-08-01 00:29:41 -07:00
Florian Ruynat
d6f859619c Update hashes and set default version to 1.17.9 (#6435) 2020-07-30 00:50:31 -07:00
Florian Ruynat
8a3bd6ba72 Move healthz check to secure ports (#6446) (#6457) 2020-07-29 05:11:47 -07:00
Lovro Seder
d3fca7d47a Fix shellcheck url (#6463) 2020-07-28 06:03:07 -07:00
LucasBoisserie
587f7b33a7 Modify the populate no_proxy task to use a combine rather than relying on the hash_behaviour setting being set to merge rather than replace (#6112) (#6346)
Co-authored-by: Cody Seavey <seavey.cody@gmail.com>
2020-07-07 08:05:59 -07:00
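Illustrative only: the change described above replaces reliance on Ansible's global `hash_behaviour: merge` with an explicit `combine` filter when building the proxy facts; the task and variable names below are hypothetical.

```yaml
# Hypothetical sketch: merge dictionaries explicitly with combine() instead of
# depending on hash_behaviour: merge in ansible.cfg.
- name: Populate no_proxy (explicit merge)
  set_fact:
    proxy_env: "{{ proxy_env | default({}) | combine({'no_proxy': generated_no_proxy}) }}"
  vars:
    generated_no_proxy: "{{ no_proxy | default('localhost,127.0.0.1') }}"
```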
Florian Ruynat
d3e8fd808e Update hashes and set default version to 1.17.8 (#6336) 2020-06-30 01:58:07 -07:00
Maxime Guyot
6bfc133d6c [2.13] Update base image to v2.12.7 (#6295)
* Use v2.12.7 base image for CI

* Use 19.03.9 in localhost CI (#6201)
2020-06-22 04:32:39 -07:00
Florian Ruynat
3d6b9d6c15 Update hashes and set default to 1.17.7 (#6286) 2020-06-18 23:43:58 -07:00
Etienne Champetier
39fa9503d9 [2.13] Bump CNI plugins to 0.8.6 (#6228)
https://github.com/containernetworking/plugins/releases/tag/v0.8.6
https://github.com/kubernetes/kubernetes/issues/91507

Signed-off-by: Etienne Champetier <champetier.etienne@gmail.com>
(cherry picked from commit 41b44739b1)
2020-06-09 05:23:18 -07:00
spaced
051e08e31c [2.13] Backport CRI-O bugfixes (#6230)
* Enable crio 1.18 (#6197)

* fix CRI-O repos for centos distributions (#6224)

* fix CRI-O repos for centos distributions

* fix CRI-O repos for centos distributions
- revert workarounds

* fix CRI-O repos for centos distributions
- use https for centos repos

* avoid 302 redirects for centos repos

* Use OS packaging default value for apparmor_profile in crio.conf (#6125)

Co-authored-by: jeanfabrice <github@bobo-rousselin.com>
2020-06-04 07:09:15 -07:00
bozzo
dd539cf360 Fix resolv.conf configuration for Fedora CoreOS. (#6155) 2020-05-28 08:02:02 -07:00
Florian Ruynat
31094b1768 Add new Kubernetes version hashes and set default to 1.17.6 (#6183)
* Add new Kubernetes version hashes and set default to 1.17.6

* Do not rebase against master when committing a release branch
2020-05-25 02:43:11 -07:00
Florian Ruynat
1f4ef0e86e [2.13] Backport recent bugfixes and mainly docker-cli issue (#6179)
* Reorder tests in packet file (#6067)

* Skip molecule tests for Ubuntu 18.04 (#6077)

* Fix kubernetes-dashboard template identation (#6066)

The 98e7a07fba commit updates the
dashboard version to 2.0.0 but the enable-skip-login flag wasn't
updated. This change updates its indentation to avoid issues when
dashboard_skip_login is enabled.

* Disable OVH CI (#6114)

* Create namespace when dashboard deployment uses customized namespace. (#6107)

* Create namespace when dashboard deployment uses customized namespace.

* Fix syntax.

* Fix apiserver port when upgrading (#6136)

* Fix docker fedora packages (#6097)

* Match docker-cli version with docker-engine version (when available)

* Disable upgrade jobs to allow release 2.13.1 (docker-cli bug)

Co-authored-by: Maxime Guyot <Miouge1@users.noreply.github.com>
Co-authored-by: Victor Morales <electrocucaracha@gmail.com>
Co-authored-by: petruha <5363545+p37ruh4@users.noreply.github.com>
Co-authored-by: Mateus Caruccio <mateus.caruccio@getupcloud.com>
2020-05-22 07:30:37 -07:00
896 changed files with 11251 additions and 45207 deletions

View File

@@ -2,8 +2,15 @@
parseable: true parseable: true
skip_list: skip_list:
# see https://docs.ansible.com/ansible-lint/rules/default_rules.html for a list of all default rules # see https://docs.ansible.com/ansible-lint/rules/default_rules.html for a list of all default rules
# The following rules throw errors.
# DO NOT add any other rules to this skip_list, instead use local `# noqa` with a comment explaining WHY it is necessary # These either still need to be corrected in the repository and the rules re-enabled or documented why they are skipped on purpose.
- '301'
- '302'
- '303'
- '305'
- '306'
- '404'
- '503'
# These rules are intentionally skipped: # These rules are intentionally skipped:
# #

View File

@@ -1,15 +0,0 @@
root = true
[*.{yaml,yml,yml.j2,yaml.j2}]
indent_style = space
indent_size = 2
trim_trailing_whitespace = true
insert_final_newline = true
charset = utf-8
[{Dockerfile}]
indent_style = space
indent_size = 2
trim_trailing_whitespace = true
insert_final_newline = true
charset = utf-8

View File

@@ -1,7 +1,7 @@
--- ---
name: Support Request name: Support Request
about: Support request or question relating to Kubespray about: Support request or question relating to Kubespray
labels: kind/support labels: triage/support
--- ---

2
.gitignore vendored
View File

@@ -1,7 +1,6 @@
.vagrant .vagrant
*.retry *.retry
**/vagrant_ansible_inventory **/vagrant_ansible_inventory
*.iml
temp temp
.idea .idea
.tox .tox
@@ -15,7 +14,6 @@ contrib/terraform/aws/credentials.tfvars
**/*.sw[pon] **/*.sw[pon]
*~ *~
vagrant/ vagrant/
plugins/mitogen
# Ansible inventory # Ansible inventory
inventory/* inventory/*

View File

@@ -8,14 +8,13 @@ stages:
- deploy-special - deploy-special
variables: variables:
KUBESPRAY_VERSION: v2.15.1 KUBESPRAY_VERSION: v2.12.9
FAILFASTCI_NAMESPACE: 'kargo-ci' FAILFASTCI_NAMESPACE: 'kargo-ci'
GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray' GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
ANSIBLE_FORCE_COLOR: "true" ANSIBLE_FORCE_COLOR: "true"
MAGIC: "ci check this" MAGIC: "ci check this"
TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID" TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml" CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
CI_TEST_REGISTRY_MIRROR: "./tests/common/_docker_hub_registry_mirror.yml"
GS_ACCESS_KEY_ID: $GS_KEY GS_ACCESS_KEY_ID: $GS_KEY
GS_SECRET_ACCESS_KEY: $GS_SECRET GS_SECRET_ACCESS_KEY: $GS_SECRET
CONTAINER_ENGINE: docker CONTAINER_ENGINE: docker
@@ -30,9 +29,7 @@ variables:
MITOGEN_ENABLE: "false" MITOGEN_ENABLE: "false"
ANSIBLE_LOG_LEVEL: "-vv" ANSIBLE_LOG_LEVEL: "-vv"
RECOVER_CONTROL_PLANE_TEST: "false" RECOVER_CONTROL_PLANE_TEST: "false"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]" RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"
TERRAFORM_14_VERSION: 0.14.10
TERRAFORM_13_VERSION: 0.13.6
before_script: before_script:
- ./tests/scripts/rebase.sh - ./tests/scripts/rebase.sh
@@ -45,7 +42,6 @@ before_script:
- packet - packet
image: quay.io/kubespray/kubespray:$KUBESPRAY_VERSION image: quay.io/kubespray/kubespray:$KUBESPRAY_VERSION
artifacts: artifacts:
when: always
paths: paths:
- cluster-dump/ - cluster-dump/

View File

@@ -14,7 +14,7 @@ vagrant-validate:
stage: unit-tests stage: unit-tests
tags: [light] tags: [light]
variables: variables:
VAGRANT_VERSION: 2.2.15 VAGRANT_VERSION: 2.2.4
script: script:
- ./tests/scripts/vagrant-validate.sh - ./tests/scripts/vagrant-validate.sh
except: ['triggers', 'master'] except: ['triggers', 'master']
@@ -64,9 +64,9 @@ markdownlint:
tags: [light] tags: [light]
image: node image: node
before_script: before_script:
- npm install -g markdownlint-cli@0.22.0 - npm install -g markdownlint-cli
script: script:
- markdownlint $(find . -name '*.md' | grep -vF './.git') --ignore docs/_sidebar.md --ignore contrib/dind/README.md - markdownlint README.md docs --ignore docs/_sidebar.md
ci-matrix: ci-matrix:
stage: unit-tests stage: unit-tests

View File

@@ -1,54 +1,43 @@
--- ---
.packet: .packet: &packet
extends: .testcases extends: .testcases
variables: variables:
CI_PLATFORM: packet CI_PLATFORM: "packet"
SSH_USER: kubespray SSH_USER: "kubespray"
tags: tags:
- packet - packet
except: [triggers]
# CI template for PRs
.packet_pr:
only: [/^pr-.*$/] only: [/^pr-.*$/]
extends: .packet except: ['triggers']
# CI template for periodic CI jobs
# Enabled when PERIODIC_CI_ENABLED var is set
.packet_periodic:
only:
variables:
- $PERIODIC_CI_ENABLED
allow_failure: true
extends: .packet
packet_ubuntu18-calico-aio: packet_ubuntu18-calico-aio:
stage: deploy-part1 stage: deploy-part1
extends: .packet_pr extends: .packet
when: on_success when: on_success
# Future AIO job # Future AIO job
packet_ubuntu20-calico-aio: packet_ubuntu20-calico-aio:
stage: deploy-part1 stage: deploy-part1
extends: .packet_pr extends: .packet
when: on_success when: manual
# ### PR JOBS PART2 # ### PR JOBS PART2
packet_centos7-flannel-containerd-addons-ha: packet_centos7-flannel-containerd-addons-ha:
extends: .packet_pr extends: .packet
stage: deploy-part2 stage: deploy-part2
when: on_success when: on_success
variables: variables:
MITOGEN_ENABLE: "true" MITOGEN_ENABLE: "true"
packet_centos8-crio: packet_centos7-crio:
extends: .packet_pr extends: .packet
stage: deploy-part2 stage: deploy-part2
when: on_success when: on_success
variables:
MITOGEN_ENABLE: "true"
packet_ubuntu18-crio: packet_ubuntu18-crio:
extends: .packet_pr extends: .packet
stage: deploy-part2 stage: deploy-part2
when: manual when: manual
variables: variables:
@@ -56,44 +45,34 @@ packet_ubuntu18-crio:
packet_ubuntu16-canal-kubeadm-ha: packet_ubuntu16-canal-kubeadm-ha:
stage: deploy-part2 stage: deploy-part2
extends: .packet_periodic extends: .packet
when: on_success when: on_success
packet_ubuntu16-canal-sep: packet_ubuntu16-canal-sep:
stage: deploy-special stage: deploy-special
extends: .packet_pr extends: .packet
when: manual when: manual
packet_ubuntu16-flannel-ha: packet_ubuntu16-flannel-ha:
stage: deploy-part2 stage: deploy-part2
extends: .packet_pr extends: .packet
when: manual when: manual
packet_ubuntu16-kube-router-sep: packet_ubuntu16-kube-router-sep:
stage: deploy-part2 stage: deploy-part2
extends: .packet_pr extends: .packet
when: manual when: manual
packet_ubuntu16-kube-router-svc-proxy:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_debian10-cilium-svc-proxy:
stage: deploy-part2
extends: .packet_periodic
when: on_success
packet_debian10-containerd: packet_debian10-containerd:
stage: deploy-part2 stage: deploy-part2
extends: .packet_pr extends: .packet
when: on_success when: on_success
variables: variables:
MITOGEN_ENABLE: "true" MITOGEN_ENABLE: "true"
packet_centos7-calico-ha-once-localhost: packet_centos7-calico-ha-once-localhost:
stage: deploy-part2 stage: deploy-part2
extends: .packet_pr extends: .packet
when: on_success when: on_success
variables: variables:
# This will instruct Docker not to start over TLS. # This will instruct Docker not to start over TLS.
@@ -103,132 +82,128 @@ packet_centos7-calico-ha-once-localhost:
packet_centos8-kube-ovn: packet_centos8-kube-ovn:
stage: deploy-part2 stage: deploy-part2
extends: .packet_periodic extends: .packet
when: on_success when: on_success
packet_centos8-calico: packet_centos8-calico:
stage: deploy-part2 stage: deploy-part2
extends: .packet_pr extends: .packet
when: on_success when: on_success
packet_fedora32-weave: packet_fedora30-weave:
stage: deploy-part2 stage: deploy-part2
extends: .packet_pr extends: .packet
when: on_success when: on_success
packet_opensuse-canal: packet_opensuse-canal:
stage: deploy-part2 stage: deploy-part2
extends: .packet_periodic extends: .packet
when: on_success when: on_success
packet_ubuntu18-ovn4nfv: # Contiv does not work in k8s v1.16
stage: deploy-part2 # packet_ubuntu16-contiv-sep:
extends: .packet_periodic # stage: deploy-part2
when: on_success # extends: .packet
# when: on_success
# ### MANUAL JOBS # ### MANUAL JOBS
packet_ubuntu16-weave-sep: packet_ubuntu16-weave-sep:
stage: deploy-part2 stage: deploy-part2
extends: .packet_pr extends: .packet
when: manual when: manual
packet_ubuntu18-cilium-sep: packet_ubuntu18-cilium-sep:
stage: deploy-special stage: deploy-special
extends: .packet_pr extends: .packet
when: manual when: manual
packet_ubuntu18-flannel-containerd-ha: packet_ubuntu18-flannel-containerd-ha:
stage: deploy-part2 stage: deploy-part2
extends: .packet_pr extends: .packet
when: manual when: manual
packet_ubuntu18-flannel-containerd-ha-once: packet_ubuntu18-flannel-containerd-ha-once:
stage: deploy-part2 stage: deploy-part2
extends: .packet_pr extends: .packet
when: manual when: manual
packet_debian9-macvlan: packet_debian9-macvlan:
stage: deploy-part2 stage: deploy-part2
extends: .packet_pr extends: .packet
when: manual when: manual
packet_centos7-calico-ha: packet_centos7-calico-ha:
stage: deploy-part2 stage: deploy-part2
extends: .packet_pr extends: .packet
when: manual when: manual
packet_centos7-kube-router: packet_centos7-kube-router:
stage: deploy-part2 stage: deploy-part2
extends: .packet_pr extends: .packet
when: manual when: manual
packet_centos7-multus-calico: packet_centos7-multus-calico:
stage: deploy-part2 stage: deploy-part2
extends: .packet_pr extends: .packet
when: manual when: manual
packet_oracle7-canal-ha: packet_oracle7-canal-ha:
stage: deploy-part2 stage: deploy-part2
extends: .packet_pr extends: .packet
when: manual when: manual
packet_fedora33-calico: packet_fedora31-flannel:
stage: deploy-part2 stage: deploy-part2
extends: .packet_periodic extends: .packet
when: on_success when: on_success
variables: variables:
MITOGEN_ENABLE: "true" MITOGEN_ENABLE: "true"
packet_amazon-linux-2-aio: packet_amazon-linux-2-aio:
stage: deploy-part2 stage: deploy-part2
extends: .packet_pr extends: .packet
when: manual when: manual
packet_fedora32-kube-ovn-containerd:
stage: deploy-part2
extends: .packet_periodic
when: on_success
# ### PR JOBS PART3 # ### PR JOBS PART3
# Long jobs (45min+) # Long jobs (45min+)
packet_centos7-weave-upgrade-ha: packet_centos7-weave-upgrade-ha:
stage: deploy-part3 stage: deploy-part3
extends: .packet_periodic extends: .packet
when: on_success when: manual
variables: variables:
UPGRADE_TEST: basic UPGRADE_TEST: basic
MITOGEN_ENABLE: "false" MITOGEN_ENABLE: "false"
packet_debian9-calico-upgrade: packet_debian9-calico-upgrade:
stage: deploy-part3 stage: deploy-part3
extends: .packet_pr extends: .packet
when: on_success when: manual
variables: variables:
UPGRADE_TEST: graceful UPGRADE_TEST: graceful
MITOGEN_ENABLE: "false" MITOGEN_ENABLE: "false"
packet_debian9-calico-upgrade-once: packet_debian9-calico-upgrade-once:
stage: deploy-part3 stage: deploy-part3
extends: .packet_periodic extends: .packet
when: on_success when: manual
variables: variables:
UPGRADE_TEST: graceful UPGRADE_TEST: graceful
MITOGEN_ENABLE: "false" MITOGEN_ENABLE: "false"
packet_ubuntu18-calico-ha-recover: packet_ubuntu18-calico-ha-recover:
stage: deploy-part3 stage: deploy-part3
extends: .packet_periodic extends: .packet
when: on_success when: on_success
variables: variables:
RECOVER_CONTROL_PLANE_TEST: "true" RECOVER_CONTROL_PLANE_TEST: "true"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]" RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"
packet_ubuntu18-calico-ha-recover-noquorum: packet_ubuntu18-calico-ha-recover-noquorum:
stage: deploy-part3 stage: deploy-part3
extends: .packet_periodic extends: .packet
when: on_success when: on_success
variables: variables:
RECOVER_CONTROL_PLANE_TEST: "true" RECOVER_CONTROL_PLANE_TEST: "true"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube_control_plane[1:]" RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube-master[1:]"

View File

@@ -4,7 +4,7 @@ shellcheck:
stage: unit-tests stage: unit-tests
tags: [light] tags: [light]
variables: variables:
SHELLCHECK_VERSION: v0.7.1 SHELLCHECK_VERSION: v0.6.0
before_script: before_script:
- ./tests/scripts/rebase.sh - ./tests/scripts/rebase.sh
- curl --silent --location "https://github.com/koalaman/shellcheck/releases/download/"${SHELLCHECK_VERSION}"/shellcheck-"${SHELLCHECK_VERSION}".linux.x86_64.tar.xz" | tar -xJv - curl --silent --location "https://github.com/koalaman/shellcheck/releases/download/"${SHELLCHECK_VERSION}"/shellcheck-"${SHELLCHECK_VERSION}".linux.x86_64.tar.xz" | tar -xJv
@@ -12,5 +12,5 @@ shellcheck:
- shellcheck --version - shellcheck --version
script: script:
# Run shellcheck for all *.sh except contrib/ # Run shellcheck for all *.sh except contrib/
- find . -name '*.sh' -not -path './contrib/*' -not -path './.git/*' | xargs shellcheck --severity error - find . -name '*.sh' -not -path './contrib/*' | xargs shellcheck --severity error
except: ['triggers', 'master'] except: ['triggers', 'master']

View File

@@ -18,9 +18,6 @@
- echo "$PACKET_PRIVATE_KEY" | base64 -d > ~/.ssh/id_rsa - echo "$PACKET_PRIVATE_KEY" | base64 -d > ~/.ssh/id_rsa
- chmod 400 ~/.ssh/id_rsa - chmod 400 ~/.ssh/id_rsa
- echo "$PACKET_PUBLIC_KEY" | base64 -d > ~/.ssh/id_rsa.pub - echo "$PACKET_PUBLIC_KEY" | base64 -d > ~/.ssh/id_rsa.pub
- mkdir -p group_vars
# Random subnet to avoid routing conflicts
- export TF_VAR_subnet_cidr="10.$(( $RANDOM % 256 )).$(( $RANDOM % 256 )).0/24"
.terraform_validate: .terraform_validate:
extends: .terraform_install extends: .terraform_install
@@ -38,7 +35,6 @@
when: manual when: manual
only: [/^pr-.*$/] only: [/^pr-.*$/]
artifacts: artifacts:
when: always
paths: paths:
- cluster-dump/ - cluster-dump/
variables: variables:
@@ -53,92 +49,31 @@
# Cleanup regardless of exit code # Cleanup regardless of exit code
- chronic ./tests/scripts/testcases_cleanup.sh - chronic ./tests/scripts/testcases_cleanup.sh
tf-0.13.x-validate-openstack: tf-validate-openstack:
extends: .terraform_validate extends: .terraform_validate
variables: variables:
TF_VERSION: $TERRAFORM_13_VERSION TF_VERSION: 0.12.24
PROVIDER: openstack PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME CLUSTER: $CI_COMMIT_REF_NAME
tf-0.13.x-validate-packet: tf-validate-packet:
extends: .terraform_validate extends: .terraform_validate
variables: variables:
TF_VERSION: $TERRAFORM_13_VERSION TF_VERSION: 0.12.24
PROVIDER: packet PROVIDER: packet
CLUSTER: $CI_COMMIT_REF_NAME CLUSTER: $CI_COMMIT_REF_NAME
tf-0.13.x-validate-aws: tf-validate-aws:
extends: .terraform_validate extends: .terraform_validate
variables: variables:
TF_VERSION: $TERRAFORM_13_VERSION TF_VERSION: 0.12.24
PROVIDER: aws PROVIDER: aws
CLUSTER: $CI_COMMIT_REF_NAME CLUSTER: $CI_COMMIT_REF_NAME
tf-0.13.x-validate-exoscale:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_13_VERSION
PROVIDER: exoscale
tf-0.13.x-validate-vsphere:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_13_VERSION
PROVIDER: vsphere
CLUSTER: $CI_COMMIT_REF_NAME
tf-0.13.x-validate-upcloud:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_13_VERSION
PROVIDER: upcloud
CLUSTER: $CI_COMMIT_REF_NAME
tf-0.14.x-validate-openstack:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_14_VERSION
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME
tf-0.14.x-validate-packet:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_14_VERSION
PROVIDER: packet
CLUSTER: $CI_COMMIT_REF_NAME
tf-0.14.x-validate-aws:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_14_VERSION
PROVIDER: aws
CLUSTER: $CI_COMMIT_REF_NAME
tf-0.14.x-validate-exoscale:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_14_VERSION
PROVIDER: exoscale
tf-0.14.x-validate-vsphere:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_14_VERSION
PROVIDER: vsphere
CLUSTER: $CI_COMMIT_REF_NAME
tf-0.14.x-validate-upcloud:
extends: .terraform_validate
variables:
TF_VERSION: $TERRAFORM_14_VERSION
PROVIDER: upcloud
CLUSTER: $CI_COMMIT_REF_NAME
# tf-packet-ubuntu16-default: # tf-packet-ubuntu16-default:
# extends: .terraform_apply # extends: .terraform_apply
# variables: # variables:
# TF_VERSION: $TERRAFORM_14_VERSION # TF_VERSION: 0.12.24
# PROVIDER: packet # PROVIDER: packet
# CLUSTER: $CI_COMMIT_REF_NAME # CLUSTER: $CI_COMMIT_REF_NAME
# TF_VAR_number_of_k8s_masters: "1" # TF_VAR_number_of_k8s_masters: "1"
@@ -152,7 +87,7 @@ tf-0.14.x-validate-upcloud:
# tf-packet-ubuntu18-default: # tf-packet-ubuntu18-default:
# extends: .terraform_apply # extends: .terraform_apply
# variables: # variables:
# TF_VERSION: $TERRAFORM_14_VERSION # TF_VERSION: 0.12.24
# PROVIDER: packet # PROVIDER: packet
# CLUSTER: $CI_COMMIT_REF_NAME # CLUSTER: $CI_COMMIT_REF_NAME
# TF_VAR_number_of_k8s_masters: "1" # TF_VAR_number_of_k8s_masters: "1"
@@ -163,116 +98,80 @@ tf-0.14.x-validate-upcloud:
# TF_VAR_public_key_path: "" # TF_VAR_public_key_path: ""
# TF_VAR_operating_system: ubuntu_18_04 # TF_VAR_operating_system: ubuntu_18_04
.ovh_variables: &ovh_variables # .ovh_variables: &ovh_variables
OS_AUTH_URL: https://auth.cloud.ovh.net/v3 # OS_AUTH_URL: https://auth.cloud.ovh.net/v3
OS_PROJECT_ID: 8d3cd5d737d74227ace462dee0b903fe # OS_PROJECT_ID: 8d3cd5d737d74227ace462dee0b903fe
OS_PROJECT_NAME: "9361447987648822" # OS_PROJECT_NAME: "9361447987648822"
OS_USER_DOMAIN_NAME: Default # OS_USER_DOMAIN_NAME: Default
OS_PROJECT_DOMAIN_ID: default # OS_PROJECT_DOMAIN_ID: default
OS_USERNAME: 8XuhBMfkKVrk # OS_USERNAME: 8XuhBMfkKVrk
OS_REGION_NAME: UK1 # OS_REGION_NAME: UK1
OS_INTERFACE: public # OS_INTERFACE: public
OS_IDENTITY_API_VERSION: "3" # OS_IDENTITY_API_VERSION: "3"
# Elastx is generously donating resources for Kubespray on Openstack CI # tf-ovh_cleanup:
# Contacts: @gix @bl0m1 # stage: unit-tests
.elastx_variables: &elastx_variables # tags: [light]
OS_AUTH_URL: https://ops.elastx.cloud:5000 # image: python
OS_PROJECT_ID: 564c6b461c6b44b1bb19cdb9c2d928e4 # variables:
OS_PROJECT_NAME: kubespray_ci # <<: *ovh_variables
OS_USER_DOMAIN_NAME: Default # before_script:
OS_PROJECT_DOMAIN_ID: default # - pip install -r scripts/openstack-cleanup/requirements.txt
OS_USERNAME: kubespray@root314.com # script:
OS_REGION_NAME: se-sto # - ./scripts/openstack-cleanup/main.py
OS_INTERFACE: public
OS_IDENTITY_API_VERSION: "3"
TF_VAR_router_id: "ab95917c-41fb-4881-b507-3a6dfe9403df"
# Since ELASTX is in Stockholm, Mitogen helps with latency
MITOGEN_ENABLE: "false"
# Mitogen doesn't support interpreter discovery yet
ANSIBLE_PYTHON_INTERPRETER: "/usr/bin/python3"
tf-elastx_cleanup: # tf-ovh_ubuntu18-calico:
stage: unit-tests # extends: .terraform_apply
tags: [light] # when: on_success
image: python # variables:
variables: # <<: *ovh_variables
<<: *elastx_variables # TF_VERSION: 0.12.24
before_script: # PROVIDER: openstack
- pip install -r scripts/openstack-cleanup/requirements.txt # CLUSTER: $CI_COMMIT_REF_NAME
script: # ANSIBLE_TIMEOUT: "60"
- ./scripts/openstack-cleanup/main.py # SSH_USER: ubuntu
# TF_VAR_number_of_k8s_masters: "0"
# TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
# TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
# TF_VAR_number_of_etcd: "0"
# TF_VAR_number_of_k8s_nodes: "0"
# TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
# TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
# TF_VAR_number_of_bastions: "0"
# TF_VAR_number_of_k8s_masters_no_etcd: "0"
# TF_VAR_use_neutron: "0"
# TF_VAR_floatingip_pool: "Ext-Net"
# TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
# TF_VAR_network_name: "Ext-Net"
# TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
# TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
# TF_VAR_image: "Ubuntu 18.04"
# TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
tf-elastx_ubuntu18-calico: # tf-ovh_coreos-calico:
extends: .terraform_apply # extends: .terraform_apply
stage: deploy-part3 # when: on_success
when: on_success # variables:
variables: # <<: *ovh_variables
<<: *elastx_variables # TF_VERSION: 0.12.24
TF_VERSION: $TERRAFORM_14_VERSION # PROVIDER: openstack
PROVIDER: openstack # CLUSTER: $CI_COMMIT_REF_NAME
CLUSTER: $CI_COMMIT_REF_NAME # ANSIBLE_TIMEOUT: "60"
ANSIBLE_TIMEOUT: "60" # SSH_USER: core
SSH_USER: ubuntu # TF_VAR_number_of_k8s_masters: "0"
TF_VAR_number_of_k8s_masters: "1" # TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
TF_VAR_number_of_k8s_masters_no_floating_ip: "0" # TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0" # TF_VAR_number_of_etcd: "0"
TF_VAR_number_of_etcd: "0" # TF_VAR_number_of_k8s_nodes: "0"
TF_VAR_number_of_k8s_nodes: "1" # TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
TF_VAR_number_of_k8s_nodes_no_floating_ip: "0" # TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
TF_VAR_number_of_gfs_nodes_no_floating_ip: "0" # TF_VAR_number_of_bastions: "0"
TF_VAR_number_of_bastions: "0" # TF_VAR_number_of_k8s_masters_no_etcd: "0"
TF_VAR_number_of_k8s_masters_no_etcd: "0" # TF_VAR_use_neutron: "0"
TF_VAR_floatingip_pool: "elx-public1" # TF_VAR_floatingip_pool: "Ext-Net"
TF_VAR_dns_nameservers: '["1.1.1.1", "8.8.8.8", "8.8.4.4"]' # TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
TF_VAR_use_access_ip: "0" # TF_VAR_network_name: "Ext-Net"
TF_VAR_external_net: "600b8501-78cb-4155-9c9f-23dfcba88828" # TF_VAR_flavor_k8s_master: "4d4fd037-9493-4f2b-9afe-b542b5248eac" # b2-7
TF_VAR_network_name: "ci-$CI_JOB_ID" # TF_VAR_flavor_k8s_node: "4d4fd037-9493-4f2b-9afe-b542b5248eac" # b2-7
TF_VAR_az_list: '["sto1"]' # TF_VAR_image: "CoreOS Stable"
TF_VAR_az_list_node: '["sto1"]' # TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
TF_VAR_flavor_k8s_master: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
TF_VAR_flavor_k8s_node: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
TF_VAR_image: ubuntu-18.04-server-latest
TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
tf-ovh_cleanup:
stage: unit-tests
tags: [light]
image: python
environment: ovh
variables:
<<: *ovh_variables
before_script:
- pip install -r scripts/openstack-cleanup/requirements.txt
script:
- ./scripts/openstack-cleanup/main.py
tf-ovh_ubuntu18-calico:
extends: .terraform_apply
when: on_success
environment: ovh
variables:
<<: *ovh_variables
TF_VERSION: $TERRAFORM_14_VERSION
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME
ANSIBLE_TIMEOUT: "60"
SSH_USER: ubuntu
TF_VAR_number_of_k8s_masters: "0"
TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
TF_VAR_number_of_etcd: "0"
TF_VAR_number_of_k8s_nodes: "0"
TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
TF_VAR_number_of_bastions: "0"
TF_VAR_number_of_k8s_masters_no_etcd: "0"
TF_VAR_use_neutron: "0"
TF_VAR_floatingip_pool: "Ext-Net"
TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
TF_VAR_network_name: "Ext-Net"
TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
TF_VAR_image: "Ubuntu 18.04"
TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

View File

@@ -20,7 +20,7 @@ molecule_tests:
extends: .testcases extends: .testcases
variables: variables:
CI_PLATFORM: "vagrant" CI_PLATFORM: "vagrant"
SSH_USER: "vagrant" SSH_USER: "kubespray"
VAGRANT_DEFAULT_PROVIDER: "libvirt" VAGRANT_DEFAULT_PROVIDER: "libvirt"
KUBESPRAY_VAGRANT_CONFIG: tests/files/${CI_JOB_NAME}.rb KUBESPRAY_VAGRANT_CONFIG: tests/files/${CI_JOB_NAME}.rb
tags: [c3.small.x86] tags: [c3.small.x86]
@@ -34,14 +34,9 @@ molecule_tests:
- python -m pip install -r tests/requirements.txt - python -m pip install -r tests/requirements.txt
- ./tests/scripts/vagrant_clean.sh - ./tests/scripts/vagrant_clean.sh
script: script:
- ./tests/scripts/testcases_run.sh - vagrant up
after_script: after_script:
- chronic ./tests/scripts/testcases_cleanup.sh - vagrant destroy --force
vagrant_ubuntu18-calico-dual-stack:
stage: deploy-part2
extends: .vagrant
when: on_success
vagrant_ubuntu18-flannel: vagrant_ubuntu18-flannel:
stage: deploy-part2 stage: deploy-part2
@@ -51,9 +46,4 @@ vagrant_ubuntu18-flannel:
vagrant_ubuntu18-weave-medium: vagrant_ubuntu18-weave-medium:
stage: deploy-part2 stage: deploy-part2
extends: .vagrant extends: .vagrant
when: manual when: manual
vagrant_ubuntu20-flannel:
stage: deploy-part2
extends: .vagrant
when: on_success

View File

@@ -1,9 +1,6 @@
--- ---
extends: default extends: default
ignore: |
.git/
rules: rules:
braces: braces:
min-spaces-inside: 0 min-spaces-inside: 0

View File

@@ -10,7 +10,7 @@ To install development dependencies you can use `pip install -r tests/requiremen
#### Linting #### Linting
Kubespray uses `yamllint` and `ansible-lint`. To run them locally use `yamllint .` and `ansible-lint` Kubespray uses `yamllint` and `ansible-lint`. To run them locally use `yamllint .` and `./tests/scripts/ansible-lint.sh`
#### Molecule #### Molecule

View File

@@ -1,30 +1,21 @@
# Use imutable image tags rather than mutable tags (like ubuntu:18.04) FROM ubuntu:18.04
FROM ubuntu:bionic-20200807
RUN apt update -y \
&& apt install -y \
libssl-dev python3-dev sshpass apt-transport-https jq moreutils \
ca-certificates curl gnupg2 software-properties-common python3-pip rsync \
&& rm -rf /var/lib/apt/lists/*
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
&& add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable" \
&& apt update -y && apt-get install --no-install-recommends -y docker-ce \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /kubespray
WORKDIR /kubespray WORKDIR /kubespray
RUN apt update -y && \
apt install -y \
libssl-dev python3-dev sshpass apt-transport-https jq moreutils \
ca-certificates curl gnupg2 software-properties-common python3-pip rsync
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable" \
&& apt update -y && apt-get install docker-ce -y
COPY . . COPY . .
RUN /usr/bin/python3 -m pip install pip -U \ RUN /usr/bin/python3 -m pip install pip -U && /usr/bin/python3 -m pip install -r tests/requirements.txt && python3 -m pip install -r requirements.txt && update-alternatives --install /usr/bin/python python /usr/bin/python3 1
&& /usr/bin/python3 -m pip install -r tests/requirements.txt \ RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.17.5/bin/linux/amd64/kubectl \
&& python3 -m pip install -r requirements.txt \ && chmod a+x kubectl && cp kubectl /usr/local/bin/kubectl
&& update-alternatives --install /usr/bin/python python /usr/bin/python3 1
RUN KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
&& curl -LO https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/amd64/kubectl \
&& chmod a+x kubectl \
&& mv kubectl /usr/local/bin/kubectl
# Some tools like yamllint need this # Some tools like yamllint need this
ENV LANG=C.UTF-8 ENV LANG=C.UTF-8

2
OWNERS
View File

@@ -4,5 +4,3 @@ approvers:
- kubespray-approvers - kubespray-approvers
reviewers: reviewers:
- kubespray-reviewers - kubespray-reviewers
emeritus_approvers:
- kubespray-emeritus_approvers

View File

@@ -1,18 +1,19 @@
aliases: aliases:
kubespray-approvers: kubespray-approvers:
- ant31
- mattymo - mattymo
- atoms
- chadswen - chadswen
- mirwan - mirwan
- miouge1 - miouge1
- riverzhang
- verwilst
- woopstar - woopstar
- luckysb - luckysb
- floryut
kubespray-reviewers: kubespray-reviewers:
- jjungnickel
- archifleks
- holmsten - holmsten
- bozzo - bozzo
- floryut
- eppo - eppo
- oomichi
kubespray-emeritus_approvers:
- riverzhang
- atoms
- ant31

View File

@@ -5,7 +5,7 @@
If you have questions, check the documentation at [kubespray.io](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**. If you have questions, check the documentation at [kubespray.io](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
You can get your invite [here](http://slack.k8s.io/) You can get your invite [here](http://slack.k8s.io/)
- Can be deployed on **[AWS](docs/aws.md), GCE, [Azure](docs/azure.md), [OpenStack](docs/openstack.md), [vSphere](docs/vsphere.md), [Packet](docs/packet.md) (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal** - Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere, Packet (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal**
- **Highly available** cluster - **Highly available** cluster
- **Composable** (Choice of the network plugin for instance) - **Composable** (Choice of the network plugin for instance)
- Supports most popular **Linux distributions** - Supports most popular **Linux distributions**
@@ -32,7 +32,7 @@ CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inv
# Review and change parameters under ``inventory/mycluster/group_vars`` # Review and change parameters under ``inventory/mycluster/group_vars``
cat inventory/mycluster/group_vars/all/all.yml cat inventory/mycluster/group_vars/all/all.yml
cat inventory/mycluster/group_vars/k8s_cluster/k8s_cluster.yml cat inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml
# Deploy Kubespray with Ansible Playbook - run the playbook as root # Deploy Kubespray with Ansible Playbook - run the playbook as root
# The option `--become` is required, as for example writing SSL keys in /etc/, # The option `--become` is required, as for example writing SSL keys in /etc/,
@@ -48,23 +48,11 @@ As a consequence, `ansible-playbook` command will fail with:
ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path. ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path.
``` ```
probably pointing on a task depending on a module present in requirements.txt. probably pointing on a task depending on a module present in requirements.txt (i.e. "unseal vault").
One way of solving this would be to uninstall the Ansible package and then, to install it via pip but it is not always possible. One way of solving this would be to uninstall the Ansible package and then, to install it via pip but it is not always possible.
A workaround consists of setting `ANSIBLE_LIBRARY` and `ANSIBLE_MODULE_UTILS` environment variables respectively to the `ansible/modules` and `ansible/module_utils` subdirectories of pip packages installation location, which can be found in the Location field of the output of `pip show [package]` before executing `ansible-playbook`. A workaround consists of setting `ANSIBLE_LIBRARY` and `ANSIBLE_MODULE_UTILS` environment variables respectively to the `ansible/modules` and `ansible/module_utils` subdirectories of pip packages installation location, which can be found in the Location field of the output of `pip show [package]` before executing `ansible-playbook`.
A simple way to ensure you get all the correct version of Ansible is to use the [pre-built docker image from Quay](https://quay.io/repository/kubespray/kubespray?tab=tags).
You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this:
```ShellSession
docker pull quay.io/kubespray/kubespray:v2.15.1
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
--mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
quay.io/kubespray/kubespray:v2.15.1 bash
# Inside the container you may now run the kubespray playbooks:
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
```
### Vagrant ### Vagrant
For Vagrant we need to install python dependencies for provisioning tasks. For Vagrant we need to install python dependencies for provisioning tasks.
@@ -87,7 +75,6 @@ vagrant up
- [Requirements](#requirements) - [Requirements](#requirements)
- [Kubespray vs ...](docs/comparisons.md) - [Kubespray vs ...](docs/comparisons.md)
- [Getting started](docs/getting-started.md) - [Getting started](docs/getting-started.md)
- [Setting up your first cluster](docs/setting-up-your-first-cluster.md)
- [Ansible inventory and tags](docs/ansible.md) - [Ansible inventory and tags](docs/ansible.md)
- [Integration with existing ansible repo](docs/integration.md) - [Integration with existing ansible repo](docs/integration.md)
- [Deployment data variables](docs/vars.md) - [Deployment data variables](docs/vars.md)
@@ -95,7 +82,7 @@ vagrant up
- [HA mode](docs/ha-mode.md) - [HA mode](docs/ha-mode.md)
- [Network plugins](#network-plugins) - [Network plugins](#network-plugins)
- [Vagrant install](docs/vagrant.md) - [Vagrant install](docs/vagrant.md)
- [Flatcar Container Linux bootstrap](docs/flatcar.md) - [CoreOS bootstrap](docs/coreos.md)
- [Fedora CoreOS bootstrap](docs/fcos.md) - [Fedora CoreOS bootstrap](docs/fcos.md)
- [Debian Jessie setup](docs/debian.md) - [Debian Jessie setup](docs/debian.md)
- [openSUSE setup](docs/opensuse.md) - [openSUSE setup](docs/opensuse.md)
@@ -109,63 +96,55 @@ vagrant up
- [Large deployments](docs/large-deployments.md) - [Large deployments](docs/large-deployments.md)
- [Adding/replacing a node](docs/nodes.md) - [Adding/replacing a node](docs/nodes.md)
- [Upgrades basics](docs/upgrades.md) - [Upgrades basics](docs/upgrades.md)
- [Air-Gap installation](docs/offline-environment.md)
- [Roadmap](docs/roadmap.md) - [Roadmap](docs/roadmap.md)
## Supported Linux Distributions ## Supported Linux Distributions
- **Flatcar Container Linux by Kinvolk** - **Container Linux by CoreOS**
- **Debian** Buster, Jessie, Stretch, Wheezy - **Debian** Buster, Jessie, Stretch, Wheezy
- **Ubuntu** 16.04, 18.04, 20.04 - **Ubuntu** 16.04, 18.04
- **CentOS/RHEL** 7, [8](docs/centos8.md) - **CentOS/RHEL** 7, 8 (experimental: see [centos 8 notes](docs/centos8.md)
- **Fedora** 32, 33 - **Fedora** 30, 31
- **Fedora CoreOS** (experimental: see [fcos Note](docs/fcos.md)) - **Fedora CoreOS** (experimental: see [fcos Note](docs/fcos.md))
- **openSUSE** Leap 15.x/Tumbleweed - **openSUSE** Leap 42.3/Tumbleweed
- **Oracle Linux** 7, [8](docs/centos8.md) - **Oracle Linux** 7
- **Alma Linux** [8](docs/centos8.md)
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md)
Note: Upstart/SysV init based OS types are not supported. Note: Upstart/SysV init based OS types are not supported.
## Supported Components ## Supported Components
- Core - Core
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.20.7 - [kubernetes](https://github.com/kubernetes/kubernetes) v1.17.12
- [etcd](https://github.com/coreos/etcd) v3.4.13 - [etcd](https://github.com/coreos/etcd) v3.3.12
- [docker](https://www.docker.com/) v19.03 (see note) - [docker](https://www.docker.com/) v18.06 (see note)
- [containerd](https://containerd.io/) v1.4.4 - [containerd](https://containerd.io/) v1.2.13
- [cri-o](http://cri-o.io/) v1.20 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS) - [cri-o](http://cri-o.io/) v1.17 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
- Network Plugin - Network Plugin
- [cni-plugins](https://github.com/containernetworking/plugins) v0.9.1 - [cni-plugins](https://github.com/containernetworking/plugins) v0.8.6
- [calico](https://github.com/projectcalico/calico) v3.17.4 - [calico](https://github.com/projectcalico/calico) v3.13.2
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions) - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
- [cilium](https://github.com/cilium/cilium) v1.8.9 - [cilium](https://github.com/cilium/cilium) v1.7.2
- [flanneld](https://github.com/coreos/flannel) v0.13.0 - [contiv](https://github.com/contiv/install) v1.2.1
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.6.2 - [flanneld](https://github.com/coreos/flannel) v0.12.0
- [kube-router](https://github.com/cloudnativelabs/kube-router) v1.2.2 - [kube-router](https://github.com/cloudnativelabs/kube-router) v0.4.0
- [multus](https://github.com/intel/multus-cni) v3.7.0 - [multus](https://github.com/intel/multus-cni) v3.4.1
- [ovn4nfv](https://github.com/opnfv/ovn4nfv-k8s-plugin) v1.1.0 - [weave](https://github.com/weaveworks/weave) v2.6.2
- [weave](https://github.com/weaveworks/weave) v2.8.1
- Application - Application
- [ambassador](https://github.com/datawire/ambassador): v1.5
- [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11 - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
- [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11 - [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
- [cert-manager](https://github.com/jetstack/cert-manager) v0.16.1 - [cert-manager](https://github.com/jetstack/cert-manager) v0.11.1
- [coredns](https://github.com/coredns/coredns) v1.7.0 - [coredns](https://github.com/coredns/coredns) v1.6.5
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.43.0 - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.30.0
## Container Runtime Notes Note: The list of validated [docker versions](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.16.md) was updated to 1.13.1, 17.03, 17.06, 17.09, 18.06, 18.09. kubeadm now properly recognizes Docker 18.09.0 and newer, but still treats 18.06 as the default supported version. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
- The list of available docker version is 18.09, 19.03 and 20.10. The recommended docker version is 19.03. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
- The cri-o version should be aligned with the respective kubernetes version (i.e. kube_version=1.20.x, crio_version=1.20)
## Requirements ## Requirements
- **Minimum required version of Kubernetes is v1.19** - **Minimum required version of Kubernetes is v1.15**
- **Ansible v2.9.x, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands, Ansible 2.10.x is experimentally supported for now** - **Ansible v2.9+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md)) - The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/downloads.md#offline-environment))
- The target servers are configured to allow **IPv4 forwarding**. - The target servers are configured to allow **IPv4 forwarding**.
- If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**. - **Your ssh key must be copied** to all the servers part of your inventory.
- The **firewalls are not managed**, you'll need to implement your own rules the way you used to. - The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
in order to avoid any issue during deployment you should disable your firewall. in order to avoid any issue during deployment you should disable your firewall.
- If kubespray is ran from non-root user account, correct privilege escalation method - If kubespray is ran from non-root user account, correct privilege escalation method
@@ -195,7 +174,8 @@ You can choose between 10 network plugins. (default: `calico`, except Vagrant us
- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic. - [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.
- [ovn4nfv](docs/ovn4nfv.md): [ovn4nfv-k8s-plugins](https://github.com/opnfv/ovn4nfv-k8s-plugin) is the network controller, OVS agent and CNI server to offer basic SFC and OVN overlay networking. - [contiv](docs/contiv.md): supports vlan, vxlan, bgp and Cisco SDN networking. This plugin is able to
apply firewall policies, segregate containers in multiple network and bridging pods onto physical networks.
- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster. - [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
(Please refer to `weave` [troubleshooting documentation](https://www.weave.works/docs/net/latest/troubleshooting/)). (Please refer to `weave` [troubleshooting documentation](https://www.weave.works/docs/net/latest/troubleshooting/)).
@@ -215,14 +195,6 @@ The choice is defined with the variable `kube_network_plugin`. There is also an
option to leverage built-in cloud provider networking instead. option to leverage built-in cloud provider networking instead.
See also [Network checker](docs/netcheck.md). See also [Network checker](docs/netcheck.md).
## Ingress Plugins
- [ambassador](docs/ambassador.md): the Ambassador Ingress Controller and API gateway.
- [nginx](https://kubernetes.github.io/ingress-nginx): the NGINX Ingress Controller.
- [metallb](docs/metallb.md): the MetalLB bare-metal service LoadBalancer provider.
## Community docs and resources ## Community docs and resources
- [kubernetes.io/docs/setup/production-environment/tools/kubespray/](https://kubernetes.io/docs/setup/production-environment/tools/kubespray/) - [kubernetes.io/docs/setup/production-environment/tools/kubespray/](https://kubernetes.io/docs/setup/production-environment/tools/kubespray/)
@@ -237,8 +209,7 @@ See also [Network checker](docs/netcheck.md).
## CI Tests ## CI Tests
[![Build graphs](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/badges/master/pipeline.svg)](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/pipelines) [![Build graphs](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/badges/master/build.svg)](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/pipelines)
CI/end-to-end tests sponsored by: [CNCF](https://cncf.io), [Packet](https://www.packet.com/), [OVHcloud](https://www.ovhcloud.com/), [ELASTX](https://elastx.se/).
CI/end-to-end tests sponsored by Google (GCE)
See the [test matrix](docs/test_cases.md) for details. See the [test matrix](docs/test_cases.md) for details.

85
Vagrantfile vendored
View File

@@ -9,31 +9,32 @@ Vagrant.require_version ">= 2.0.0"
CONFIG = File.join(File.dirname(__FILE__), ENV['KUBESPRAY_VAGRANT_CONFIG'] || 'vagrant/config.rb') CONFIG = File.join(File.dirname(__FILE__), ENV['KUBESPRAY_VAGRANT_CONFIG'] || 'vagrant/config.rb')
COREOS_URL_TEMPLATE = "https://storage.googleapis.com/%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant.json"
FLATCAR_URL_TEMPLATE = "https://%s.release.flatcar-linux.net/amd64-usr/current/flatcar_production_vagrant.json" FLATCAR_URL_TEMPLATE = "https://%s.release.flatcar-linux.net/amd64-usr/current/flatcar_production_vagrant.json"
# Uniq disk UUID for libvirt # Uniq disk UUID for libvirt
DISK_UUID = Time.now.utc.to_i DISK_UUID = Time.now.utc.to_i
SUPPORTED_OS = { SUPPORTED_OS = {
"coreos-stable" => {box: "coreos-stable", user: "core", box_url: COREOS_URL_TEMPLATE % ["stable"]},
"coreos-alpha" => {box: "coreos-alpha", user: "core", box_url: COREOS_URL_TEMPLATE % ["alpha"]},
"coreos-beta" => {box: "coreos-beta", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
"flatcar-stable" => {box: "flatcar-stable", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["stable"]}, "flatcar-stable" => {box: "flatcar-stable", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["stable"]},
"flatcar-beta" => {box: "flatcar-beta", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["beta"]}, "flatcar-beta" => {box: "flatcar-beta", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["beta"]},
"flatcar-alpha" => {box: "flatcar-alpha", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["alpha"]}, "flatcar-alpha" => {box: "flatcar-alpha", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["alpha"]},
"flatcar-edge" => {box: "flatcar-edge", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["edge"]}, "flatcar-edge" => {box: "flatcar-edge", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["edge"]},
"ubuntu1604" => {box: "generic/ubuntu1604", user: "vagrant"}, "ubuntu1604" => {box: "generic/ubuntu1604", user: "vagrant"},
"ubuntu1804" => {box: "generic/ubuntu1804", user: "vagrant"}, "ubuntu1804" => {box: "generic/ubuntu1804", user: "vagrant"},
"ubuntu2004" => {box: "generic/ubuntu2004", user: "vagrant"}, "ubuntu2004" => {box: "geerlingguy/ubuntu2004", user: "vagrant"},
"centos" => {box: "centos/7", user: "vagrant"}, "centos" => {box: "centos/7", user: "vagrant"},
"centos-bento" => {box: "bento/centos-7.6", user: "vagrant"}, "centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
"centos8" => {box: "centos/8", user: "vagrant"}, "centos8" => {box: "centos/8", user: "vagrant"},
"centos8-bento" => {box: "bento/centos-8", user: "vagrant"}, "centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
"fedora32" => {box: "fedora/32-cloud-base", user: "vagrant"}, "fedora30" => {box: "fedora/30-cloud-base", user: "vagrant"},
"fedora33" => {box: "fedora/33-cloud-base", user: "vagrant"}, "fedora31" => {box: "fedora/31-cloud-base", user: "vagrant"},
"opensuse" => {box: "bento/opensuse-leap-15.2", user: "vagrant"}, "opensuse" => {box: "bento/opensuse-leap-15.1", user: "vagrant"},
"opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"}, "opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"}, "oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
"oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
"rhel7" => {box: "generic/rhel7", user: "vagrant"},
"rhel8" => {box: "generic/rhel8", user: "vagrant"},
} }
if File.exist?(CONFIG) if File.exist?(CONFIG)
@@ -45,17 +46,14 @@ $num_instances ||= 3
$instance_name_prefix ||= "k8s" $instance_name_prefix ||= "k8s"
$vm_gui ||= false $vm_gui ||= false
$vm_memory ||= 2048 $vm_memory ||= 2048
$vm_cpus ||= 2 $vm_cpus ||= 1
$shared_folders ||= {} $shared_folders ||= {}
$forwarded_ports ||= {} $forwarded_ports ||= {}
$subnet ||= "172.18.8" $subnet ||= "172.18.8"
$subnet_ipv6 ||= "fd3c:b398:0698:0756"
$os ||= "ubuntu1804" $os ||= "ubuntu1804"
$network_plugin ||= "flannel" $network_plugin ||= "flannel"
# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni # Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
$multi_networking ||= false $multi_networking ||= false
$download_run_once ||= "True"
$download_force_cache ||= "True"
# The first three nodes are etcd servers # The first three nodes are etcd servers
$etcd_instances ||= $num_instances $etcd_instances ||= $num_instances
# The first two nodes are kube masters # The first two nodes are kube masters
@@ -70,9 +68,8 @@ $override_disk_size ||= false
$disk_size ||= "20GB" $disk_size ||= "20GB"
$local_path_provisioner_enabled ||= false $local_path_provisioner_enabled ||= false
$local_path_provisioner_claim_root ||= "/opt/local-path-provisioner/" $local_path_provisioner_claim_root ||= "/opt/local-path-provisioner/"
$libvirt_nested ||= false
$playbook ||= "cluster.yml" $playbook = "cluster.yml"
host_vars = {} host_vars = {}
@@ -86,16 +83,16 @@ $inventory = File.absolute_path($inventory, File.dirname(__FILE__))
if ! File.exist?(File.join(File.dirname($inventory), "hosts.ini")) if ! File.exist?(File.join(File.dirname($inventory), "hosts.ini"))
$vagrant_ansible = File.join(File.dirname(__FILE__), ".vagrant", "provisioners", "ansible") $vagrant_ansible = File.join(File.dirname(__FILE__), ".vagrant", "provisioners", "ansible")
FileUtils.mkdir_p($vagrant_ansible) if ! File.exist?($vagrant_ansible) FileUtils.mkdir_p($vagrant_ansible) if ! File.exist?($vagrant_ansible)
$vagrant_inventory = File.join($vagrant_ansible,"inventory") if ! File.exist?(File.join($vagrant_ansible,"inventory"))
FileUtils.rm_f($vagrant_inventory) FileUtils.ln_s($inventory, File.join($vagrant_ansible,"inventory"))
FileUtils.ln_s($inventory, $vagrant_inventory) end
end end
if Vagrant.has_plugin?("vagrant-proxyconf") if Vagrant.has_plugin?("vagrant-proxyconf")
$no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost" $no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost"
(1..$num_instances).each do |i| (1..$num_instances).each do |i|
$no_proxy += ",#{$subnet}.#{i+100}" $no_proxy += ",#{$subnet}.#{i+100}"
end end
end end
Vagrant.configure("2") do |config| Vagrant.configure("2") do |config|
@@ -145,12 +142,9 @@ Vagrant.configure("2") do |config|
vb.gui = $vm_gui vb.gui = $vm_gui
vb.linked_clone = true vb.linked_clone = true
vb.customize ["modifyvm", :id, "--vram", "8"] # ubuntu defaults to 256 MB which is a waste of precious RAM vb.customize ["modifyvm", :id, "--vram", "8"] # ubuntu defaults to 256 MB which is a waste of precious RAM
vb.customize ["modifyvm", :id, "--audio", "none"]
end end
node.vm.provider :libvirt do |lv| node.vm.provider :libvirt do |lv|
lv.nested = $libvirt_nested
lv.cpu_mode = "host-model"
lv.memory = $vm_memory lv.memory = $vm_memory
lv.cpus = $vm_cpus lv.cpus = $vm_cpus
lv.default_prefix = 'kubespray' lv.default_prefix = 'kubespray'
@@ -180,55 +174,30 @@ Vagrant.configure("2") do |config|
node.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true node.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true
end end
if ["rhel7","rhel8"].include? $os node.vm.synced_folder ".", "/vagrant", disabled: false, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] , rsync__exclude: ['.git','venv']
# Vagrant synced_folder rsync options cannot be used for RHEL boxes as Rsync package cannot $shared_folders.each do |src, dst|
# be installed until the host is registered with a valid Red Hat support subscription node.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
node.vm.synced_folder ".", "/vagrant", disabled: false
$shared_folders.each do |src, dst|
node.vm.synced_folder src, dst
end
else
node.vm.synced_folder ".", "/vagrant", disabled: false, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] , rsync__exclude: ['.git','venv']
$shared_folders.each do |src, dst|
node.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
end
end end
ip = "#{$subnet}.#{i+100}" ip = "#{$subnet}.#{i+100}"
node.vm.network :private_network, ip: ip, node.vm.network :private_network, ip: ip
:libvirt__guest_ipv6 => 'yes',
:libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
:libvirt__ipv6_prefix => "64",
:libvirt__forward_mode => "none",
:libvirt__dhcp_enabled => false
# Disable swap for each vm # Disable swap for each vm
node.vm.provision "shell", inline: "swapoff -a" node.vm.provision "shell", inline: "swapoff -a"
# ubuntu1804 and ubuntu2004 have IPv6 explicitly disabled. This undoes that.
if ["ubuntu1804", "ubuntu2004"].include? $os
node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf"
node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf"
end
# Disable firewalld on oraclelinux/redhat vms
if ["oraclelinux","oraclelinux8","rhel7","rhel8"].include? $os
node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld"
end
host_vars[vm_name] = { host_vars[vm_name] = {
"ip": ip, "ip": ip,
"flannel_interface": "eth1", "flannel_interface": "eth1",
"kube_network_plugin": $network_plugin, "kube_network_plugin": $network_plugin,
"kube_network_plugin_multus": $multi_networking, "kube_network_plugin_multus": $multi_networking,
"download_run_once": $download_run_once, "download_run_once": "True",
"download_localhost": "False", "download_localhost": "False",
"download_cache_dir": ENV['HOME'] + "/kubespray_cache", "download_cache_dir": ENV['HOME'] + "/kubespray_cache",
# Make kubespray cache even when download_run_once is false # Make kubespray cache even when download_run_once is false
"download_force_cache": $download_force_cache, "download_force_cache": "True",
# Keeping the cache on the nodes can improve provisioning speed while debugging kubespray # Keeping the cache on the nodes can improve provisioning speed while debugging kubespray
"download_keep_remote_cache": "False", "download_keep_remote_cache": "False",
"docker_rpm_keepcache": "1", "docker_keepcache": "1",
# These two settings will put kubectl and admin.config in $inventory/artifacts # These two settings will put kubectl and admin.config in $inventory/artifacts
"kubeconfig_localhost": "True", "kubeconfig_localhost": "True",
"kubectl_localhost": "True", "kubectl_localhost": "True",
@@ -253,9 +222,9 @@ Vagrant.configure("2") do |config|
#ansible.tags = ['download'] #ansible.tags = ['download']
ansible.groups = { ansible.groups = {
"etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"], "etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
"kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"], "kube-master" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
"kube_node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"], "kube-node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
"k8s_cluster:children" => ["kube_control_plane", "kube_node"], "k8s-cluster:children" => ["kube-master", "kube-node"],
} }
end end
end end


@@ -1,2 +1,2 @@
--- ---
theme: jekyll-theme-slate theme: jekyll-theme-slate


@@ -3,30 +3,13 @@
gather_facts: false gather_facts: false
become: no become: no
vars: vars:
minimal_ansible_version: 2.9.0 minimal_ansible_version: 2.8.0
maximal_ansible_version: 2.11.0
ansible_connection: local ansible_connection: local
tasks: tasks:
- name: "Check {{ minimal_ansible_version }} <= Ansible version < {{ maximal_ansible_version }}" - name: "Check ansible version >={{ minimal_ansible_version }}"
assert: assert:
msg: "Ansible must be between {{ minimal_ansible_version }} and {{ maximal_ansible_version }}" msg: "Ansible must be {{ minimal_ansible_version }} or higher"
that: that:
- ansible_version.string is version(minimal_ansible_version, ">=") - ansible_version.string is version(minimal_ansible_version, ">=")
- ansible_version.string is version(maximal_ansible_version, "<")
tags:
- check
- name: "Check that python netaddr is installed"
assert:
msg: "Python netaddr is not present"
that: "'127.0.0.1' | ipaddr"
tags:
- check
# CentOS 7 provides too old jinja version
- name: "Check that jinja is not too old (install via pip)"
assert:
msg: "Your Jinja version is too old, install via pip"
that: "{% set test %}It works{% endset %}{{ test == 'It works' }}"
tags: tags:
- check - check


@@ -2,43 +2,50 @@
- name: Check ansible version - name: Check ansible version
import_playbook: ansible_version.yml import_playbook: ansible_version.yml
- name: Ensure compatibility with old groups - hosts: all
import_playbook: legacy_groups.yml gather_facts: false
tasks:
- name: "Set up proxy environment"
set_fact:
proxy_env:
http_proxy: "{{ http_proxy | default ('') }}"
HTTP_PROXY: "{{ http_proxy | default ('') }}"
https_proxy: "{{ https_proxy | default ('') }}"
HTTPS_PROXY: "{{ https_proxy | default ('') }}"
no_proxy: "{{ no_proxy | default ('') }}"
NO_PROXY: "{{ no_proxy | default ('') }}"
no_log: true
- hosts: bastion[0] - hosts: bastion[0]
gather_facts: False gather_facts: False
environment: "{{ proxy_disable_env }}"
roles: roles:
- { role: kubespray-defaults } - { role: kubespray-defaults }
- { role: bastion-ssh-config, tags: ["localhost", "bastion"] } - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
- hosts: k8s_cluster:etcd - hosts: k8s-cluster:etcd
strategy: linear strategy: linear
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
gather_facts: false gather_facts: false
environment: "{{ proxy_disable_env }}"
roles: roles:
- { role: kubespray-defaults } - { role: kubespray-defaults }
- { role: bootstrap-os, tags: bootstrap-os} - { role: bootstrap-os, tags: bootstrap-os}
- name: Gather facts - name: Gather facts
tags: always
import_playbook: facts.yml import_playbook: facts.yml
- hosts: k8s_cluster:etcd - hosts: k8s-cluster:etcd
gather_facts: False gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles: roles:
- { role: kubespray-defaults } - { role: kubespray-defaults }
- { role: kubernetes/preinstall, tags: preinstall } - { role: kubernetes/preinstall, tags: preinstall }
- { role: "container-engine", tags: "container-engine", when: deploy_container_engine|default(true) } - { role: "container-engine", tags: "container-engine", when: deploy_container_engine|default(true) }
- { role: download, tags: download, when: "not skip_downloads" } - { role: download, tags: download, when: "not skip_downloads" }
environment: "{{ proxy_env }}"
- hosts: etcd - hosts: etcd
gather_facts: False gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles: roles:
- { role: kubespray-defaults } - { role: kubespray-defaults }
- role: etcd - role: etcd
@@ -48,10 +55,9 @@
etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}" etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
when: not etcd_kubeadm_enabled| default(false) when: not etcd_kubeadm_enabled| default(false)
- hosts: k8s_cluster - hosts: k8s-cluster
gather_facts: False gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles: roles:
- { role: kubespray-defaults } - { role: kubespray-defaults }
- role: etcd - role: etcd
@@ -61,54 +67,50 @@
etcd_events_cluster_setup: false etcd_events_cluster_setup: false
when: not etcd_kubeadm_enabled| default(false) when: not etcd_kubeadm_enabled| default(false)
- hosts: k8s_cluster - hosts: k8s-cluster
gather_facts: False gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles: roles:
- { role: kubespray-defaults } - { role: kubespray-defaults }
- { role: kubernetes/node, tags: node } - { role: kubernetes/node, tags: node }
environment: "{{ proxy_env }}"
- hosts: kube_control_plane - hosts: kube-master
gather_facts: False gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles: roles:
- { role: kubespray-defaults } - { role: kubespray-defaults }
- { role: kubernetes/control-plane, tags: master } - { role: kubernetes/master, tags: master }
- { role: kubernetes/client, tags: client } - { role: kubernetes/client, tags: client }
- { role: kubernetes-apps/cluster_roles, tags: cluster-roles } - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
- hosts: k8s_cluster - hosts: k8s-cluster
gather_facts: False gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles: roles:
- { role: kubespray-defaults } - { role: kubespray-defaults }
- { role: kubernetes/kubeadm, tags: kubeadm} - { role: kubernetes/kubeadm, tags: kubeadm}
- { role: network_plugin, tags: network } - { role: network_plugin, tags: network }
- { role: kubernetes/node-label, tags: node-label } - { role: kubernetes/node-label, tags: node-label }
- hosts: calico_rr - hosts: calico-rr
gather_facts: False gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles: roles:
- { role: kubespray-defaults } - { role: kubespray-defaults }
- { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] } - { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }
- hosts: kube_control_plane[0] - hosts: kube-master[0]
gather_facts: False gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles: roles:
- { role: kubespray-defaults } - { role: kubespray-defaults }
- { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
- { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] } - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
- hosts: kube_control_plane - hosts: kube-master
gather_facts: False gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles: roles:
- { role: kubespray-defaults } - { role: kubespray-defaults }
- { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller } - { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
@@ -117,18 +119,17 @@
- { role: kubernetes-apps/ingress_controller, tags: ingress-controller } - { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
- { role: kubernetes-apps/external_provisioner, tags: external-provisioner } - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
- hosts: kube_control_plane - hosts: kube-master
gather_facts: False gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles: roles:
- { role: kubespray-defaults } - { role: kubespray-defaults }
- { role: kubernetes-apps, tags: apps } - { role: kubernetes-apps, tags: apps }
environment: "{{ proxy_env }}"
- hosts: k8s_cluster - hosts: k8s-cluster
gather_facts: False gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles: roles:
- { role: kubespray-defaults } - { role: kubespray-defaults }
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true } - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }


@@ -35,7 +35,7 @@ class SearchEC2Tags(object):
hosts['_meta'] = { 'hostvars': {} } hosts['_meta'] = { 'hostvars': {} }
##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value. ##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
for group in ["kube_control_plane", "kube_node", "etcd"]: for group in ["kube-master", "kube-node", "etcd"]:
hosts[group] = [] hosts[group] = []
tag_key = "kubespray-role" tag_key = "kubespray-role"
tag_value = ["*"+group+"*"] tag_value = ["*"+group+"*"]
@@ -70,7 +70,7 @@ class SearchEC2Tags(object):
hosts[group].append(dns_name) hosts[group].append(dns_name)
hosts['_meta']['hostvars'][dns_name] = ansible_host hosts['_meta']['hostvars'][dns_name] = ansible_host
hosts['k8s_cluster'] = {'children':['kube_control_plane', 'kube_node']} hosts['k8s-cluster'] = {'children':['kube-master', 'kube-node']}
print(json.dumps(hosts, sort_keys=True, indent=2)) print(json.dumps(hosts, sort_keys=True, indent=2))
SearchEC2Tags() SearchEC2Tags()


@@ -1 +0,0 @@
boto3 # Apache-2.0


@@ -24,14 +24,14 @@ experience.
You can enable the use of a Bastion Host by changing **use_bastion** in group_vars/all to **true**. The generated You can enable the use of a Bastion Host by changing **use_bastion** in group_vars/all to **true**. The generated
templates will then include an additional bastion VM which can then be used to connect to the masters and nodes. The option templates will then include an additional bastion VM which can then be used to connect to the masters and nodes. The option
also removes all public IPs from all other VMs. also removes all public IPs from all other VMs.
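For illustration, a minimal sketch of the corresponding `group_vars/all` entry; the variable name comes from the paragraph above, and the boolean value is the only assumption:

```yaml
# Hypothetical excerpt of group_vars/all — enables the bastion templates described above
use_bastion: true
```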
## Generating and applying ## Generating and applying
To generate and apply the templates, call: To generate and apply the templates, call:
```shell ```shell
./apply-rg.sh <resource_group_name> $ ./apply-rg.sh <resource_group_name>
``` ```
If you change something in the configuration (e.g. number of nodes) later, you can call this again and Azure will If you change something in the configuration (e.g. number of nodes) later, you can call this again and Azure will
@@ -42,23 +42,24 @@ take care about creating/modifying whatever is needed.
If you need to delete all resources from a resource group, simply call: If you need to delete all resources from a resource group, simply call:
```shell ```shell
./clear-rg.sh <resource_group_name> $ ./clear-rg.sh <resource_group_name>
``` ```
**WARNING** this really deletes everything from your resource group, including everything that was later created by you! **WARNING** this really deletes everything from your resource group, including everything that was later created by you!
## Generating an inventory for kubespray ## Generating an inventory for kubespray
After you have applied the templates, you can generate an inventory with this call: After you have applied the templates, you can generate an inventory with this call:
```shell ```shell
./generate-inventory.sh <resource_group_name> $ ./generate-inventory.sh <resource_group_name>
``` ```
It will create the file ./inventory which can then be used with kubespray, e.g.: It will create the file ./inventory which can then be used with kubespray, e.g.:
```shell ```shell
cd kubespray-root-dir $ cd kubespray-root-dir
sudo pip3 install -r requirements.txt $ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all/all.yml" cluster.yml
ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all/all.yml" cluster.yml
``` ```


@@ -9,11 +9,18 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
exit 1 exit 1
fi fi
ansible-playbook generate-templates.yml if az &>/dev/null; then
echo "azure cli 2.0 found, using it instead of 1.0"
az deployment group create --template-file ./.generated/network.json -g $AZURE_RESOURCE_GROUP ./apply-rg_2.sh "$AZURE_RESOURCE_GROUP"
az deployment group create --template-file ./.generated/storage.json -g $AZURE_RESOURCE_GROUP elif azure &>/dev/null; then
az deployment group create --template-file ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP ansible-playbook generate-templates.yml
az deployment group create --template-file ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/masters.json -g $AZURE_RESOURCE_GROUP azure group deployment create -f ./.generated/network.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/minions.json -g $AZURE_RESOURCE_GROUP azure group deployment create -f ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
else
echo "Azure cli not found"
fi

contrib/azurerm/apply-rg_2.sh Executable file

@@ -0,0 +1,19 @@
#!/usr/bin/env bash
set -e
AZURE_RESOURCE_GROUP="$1"
if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
echo "AZURE_RESOURCE_GROUP is missing"
exit 1
fi
ansible-playbook generate-templates.yml
az deployment group create --template-file ./.generated/network.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/minions.json -g $AZURE_RESOURCE_GROUP


@@ -9,6 +9,10 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
exit 1 exit 1
fi fi
ansible-playbook generate-templates.yml if az &>/dev/null; then
echo "azure cli 2.0 found, using it instead of 1.0"
az group deployment create -g "$AZURE_RESOURCE_GROUP" --template-file ./.generated/clear-rg.json --mode Complete ./clear-rg_2.sh "$AZURE_RESOURCE_GROUP"
else
ansible-playbook generate-templates.yml
azure group deployment create -g "$AZURE_RESOURCE_GROUP" -f ./.generated/clear-rg.json -m Complete
fi

contrib/azurerm/clear-rg_2.sh Executable file

@@ -0,0 +1,14 @@
#!/usr/bin/env bash
set -e
AZURE_RESOURCE_GROUP="$1"
if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
echo "AZURE_RESOURCE_GROUP is missing"
exit 1
fi
ansible-playbook generate-templates.yml
az group deployment create -g "$AZURE_RESOURCE_GROUP" --template-file ./.generated/clear-rg.json --mode Complete


@@ -1,6 +1,6 @@
--- ---
- name: Query Azure VMs # noqa 301 - name: Query Azure VMs
command: azure vm list-ip-address --json {{ azure_resource_group }} command: azure vm list-ip-address --json {{ azure_resource_group }}
register: vm_list_cmd register: vm_list_cmd


@@ -7,9 +7,9 @@
{% endif %} {% endif %}
{% endfor %} {% endfor %}
[kube_control_plane] [kube-master]
{% for vm in vm_list %} {% for vm in vm_list %}
{% if 'kube_control_plane' in vm.tags.roles %} {% if 'kube-master' in vm.tags.roles %}
{{ vm.name }} {{ vm.name }}
{% endif %} {% endif %}
{% endfor %} {% endfor %}
@@ -21,13 +21,13 @@
{% endif %} {% endif %}
{% endfor %} {% endfor %}
[kube_node] [kube-node]
{% for vm in vm_list %} {% for vm in vm_list %}
{% if 'kube_node' in vm.tags.roles %} {% if 'kube-node' in vm.tags.roles %}
{{ vm.name }} {{ vm.name }}
{% endif %} {% endif %}
{% endfor %} {% endfor %}
[k8s_cluster:children] [k8s-cluster:children]
kube_node kube-node
kube_control_plane kube-master


@@ -1,14 +1,14 @@
--- ---
- name: Query Azure VMs IPs # noqa 301 - name: Query Azure VMs IPs
command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }} command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
register: vm_ip_list_cmd register: vm_ip_list_cmd
- name: Query Azure VMs Roles # noqa 301 - name: Query Azure VMs Roles
command: az vm list -o json --resource-group {{ azure_resource_group }} command: az vm list -o json --resource-group {{ azure_resource_group }}
register: vm_list_cmd register: vm_list_cmd
- name: Query Azure Load Balancer Public IP # noqa 301 - name: Query Azure Load Balancer Public IP
command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
register: lb_pubip_cmd register: lb_pubip_cmd


@@ -7,9 +7,9 @@
{% endif %} {% endif %}
{% endfor %} {% endfor %}
[kube_control_plane] [kube-master]
{% for vm in vm_roles_list %} {% for vm in vm_roles_list %}
{% if 'kube_control_plane' in vm.tags.roles %} {% if 'kube-master' in vm.tags.roles %}
{{ vm.name }} {{ vm.name }}
{% endif %} {% endif %}
{% endfor %} {% endfor %}
@@ -21,14 +21,14 @@
{% endif %} {% endif %}
{% endfor %} {% endfor %}
[kube_node] [kube-node]
{% for vm in vm_roles_list %} {% for vm in vm_roles_list %}
{% if 'kube_node' in vm.tags.roles %} {% if 'kube-node' in vm.tags.roles %}
{{ vm.name }} {{ vm.name }}
{% endif %} {% endif %}
{% endfor %} {% endfor %}
[k8s_cluster:children] [k8s-cluster:children]
kube_node kube-node
kube_control_plane kube-master


@@ -144,7 +144,7 @@
"[concat('Microsoft.Network/networkInterfaces/', 'master-{{i}}-nic')]" "[concat('Microsoft.Network/networkInterfaces/', 'master-{{i}}-nic')]"
], ],
"tags": { "tags": {
"roles": "kube_control_plane,etcd" "roles": "kube-master,etcd"
}, },
"apiVersion": "{{apiVersion}}", "apiVersion": "{{apiVersion}}",
"properties": { "properties": {


@@ -61,7 +61,7 @@
"[concat('Microsoft.Network/networkInterfaces/', 'minion-{{i}}-nic')]" "[concat('Microsoft.Network/networkInterfaces/', 'minion-{{i}}-nic')]"
], ],
"tags": { "tags": {
"roles": "kube_node" "roles": "kube-node"
}, },
"apiVersion": "{{apiVersion}}", "apiVersion": "{{apiVersion}}",
"properties": { "properties": {
@@ -112,4 +112,4 @@
} {% if not loop.last %},{% endif %} } {% if not loop.last %},{% endif %}
{% endfor %} {% endfor %}
] ]
} }


@@ -6,7 +6,6 @@ to serve as Kubernetes "nodes", which in turn will run
called DIND (Docker-IN-Docker). called DIND (Docker-IN-Docker).
The playbook has two roles: The playbook has two roles:
- dind-host: creates the "nodes" as containers in localhost, with - dind-host: creates the "nodes" as containers in localhost, with
appropriate settings for DIND (privileged, volume mapping for dind appropriate settings for DIND (privileged, volume mapping for dind
storage, etc). storage, etc).
@@ -28,7 +27,7 @@ See below for a complete successful run:
1. Create the node containers 1. Create the node containers
```shell ~~~~
# From the kubespray root dir # From the kubespray root dir
cd contrib/dind cd contrib/dind
pip install -r requirements.txt pip install -r requirements.txt
@@ -37,15 +36,15 @@ ansible-playbook -i hosts dind-cluster.yaml
# Back to kubespray root # Back to kubespray root
cd ../.. cd ../..
``` ~~~~
NOTE: if the playbook run fails with something like below error NOTE: if the playbook run fails with something like below error
message, you may need to specifically set `ansible_python_interpreter`, message, you may need to specifically set `ansible_python_interpreter`,
see `./hosts` file for an example expanded localhost entry. see `./hosts` file for an example expanded localhost entry.
```shell ~~~
failed: [localhost] (item=kube-node1) => {"changed": false, "item": "kube-node1", "msg": "Failed to import docker or docker-py - No module named requests.exceptions. Try `pip install docker` or `pip install docker-py` (Python 2.6)"} failed: [localhost] (item=kube-node1) => {"changed": false, "item": "kube-node1", "msg": "Failed to import docker or docker-py - No module named requests.exceptions. Try `pip install docker` or `pip install docker-py` (Python 2.6)"}
``` ~~~
2. Customize kubespray-dind.yaml 2. Customize kubespray-dind.yaml
@@ -53,33 +52,33 @@ Note that there's coupling between above created node containers
and `kubespray-dind.yaml` settings, in particular regarding selected `node_distro` and `kubespray-dind.yaml` settings, in particular regarding selected `node_distro`
(as set in `group_vars/all/all.yaml`), and docker settings. (as set in `group_vars/all/all.yaml`), and docker settings.
```shell ~~~
$EDITOR contrib/dind/kubespray-dind.yaml $EDITOR contrib/dind/kubespray-dind.yaml
``` ~~~
3. Prepare the inventory and run the playbook 3. Prepare the inventory and run the playbook
```shell ~~~
INVENTORY_DIR=inventory/local-dind INVENTORY_DIR=inventory/local-dind
mkdir -p ${INVENTORY_DIR} mkdir -p ${INVENTORY_DIR}
rm -f ${INVENTORY_DIR}/hosts.ini rm -f ${INVENTORY_DIR}/hosts.ini
CONFIG_FILE=${INVENTORY_DIR}/hosts.ini /tmp/kubespray.dind.inventory_builder.sh CONFIG_FILE=${INVENTORY_DIR}/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
ansible-playbook --become -e ansible_ssh_user=debian -i ${INVENTORY_DIR}/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml ansible-playbook --become -e ansible_ssh_user=debian -i ${INVENTORY_DIR}/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml
``` ~~~
NOTE: You could also test other distros without editing files by NOTE: You could also test other distros without editing files by
passing `--extra-vars` as per below commandline, passing `--extra-vars` as per below commandline,
replacing `DISTRO` by either `debian`, `ubuntu`, `centos`, `fedora`: replacing `DISTRO` by either `debian`, `ubuntu`, `centos`, `fedora`:
```shell ~~~
cd contrib/dind cd contrib/dind
ansible-playbook -i hosts dind-cluster.yaml --extra-vars node_distro=DISTRO ansible-playbook -i hosts dind-cluster.yaml --extra-vars node_distro=DISTRO
cd ../.. cd ../..
CONFIG_FILE=inventory/local-dind/hosts.ini /tmp/kubespray.dind.inventory_builder.sh CONFIG_FILE=inventory/local-dind/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
ansible-playbook --become -e ansible_ssh_user=DISTRO -i inventory/local-dind/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml --extra-vars bootstrap_os=DISTRO ansible-playbook --become -e ansible_ssh_user=DISTRO -i inventory/local-dind/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml --extra-vars bootstrap_os=DISTRO
``` ~~~
## Resulting deployment ## Resulting deployment
@@ -90,7 +89,7 @@ from the host where you ran kubespray playbooks.
Running from an Ubuntu Xenial host: Running from an Ubuntu Xenial host:
```shell ~~~
$ uname -a $ uname -a
Linux ip-xx-xx-xx-xx 4.4.0-1069-aws #79-Ubuntu SMP Mon Sep 24 Linux ip-xx-xx-xx-xx 4.4.0-1069-aws #79-Ubuntu SMP Mon Sep 24
15:01:41 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux 15:01:41 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
@@ -150,14 +149,14 @@ kube-system weave-net-xr46t 2/2 Running 0
$ docker exec kube-node1 curl -s http://localhost:31081/api/v1/connectivity_check $ docker exec kube-node1 curl -s http://localhost:31081/api/v1/connectivity_check
{"Message":"All 10 pods successfully reported back to the server","Absent":null,"Outdated":null} {"Message":"All 10 pods successfully reported back to the server","Absent":null,"Outdated":null}
``` ~~~
## Using ./run-test-distros.sh ## Using ./run-test-distros.sh
You can use `./run-test-distros.sh` to run a set of tests via DIND, You can use `./run-test-distros.sh` to run a set of tests via DIND,
and excerpt from this script, to get an idea: and excerpt from this script, to get an idea:
```shell ~~~
# The SPEC file(s) must have two arrays as e.g. # The SPEC file(s) must have two arrays as e.g.
# DISTROS=(debian centos) # DISTROS=(debian centos)
# EXTRAS=( # EXTRAS=(
@@ -170,7 +169,7 @@ and excerpt from this script, to get an idea:
# #
# Each $EXTRAS element will be whitespace split, and passed as --extra-vars # Each $EXTRAS element will be whitespace split, and passed as --extra-vars
# to main kubespray ansible-playbook run. # to main kubespray ansible-playbook run.
``` ~~~
See e.g. `test-some_distros-most_CNIs.env` and See e.g. `test-some_distros-most_CNIs.env` and
`test-some_distros-kube_router_combo.env` in particular for a richer `test-some_distros-kube_router_combo.env` in particular for a richer


@@ -69,7 +69,7 @@
# Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian, # Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
# handle manually # handle manually
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave) # noqa 301 - name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)
raw: | raw: |
echo {{ item | hash('sha1') }} > /etc/machine-id.new echo {{ item | hash('sha1') }} > /etc/machine-id.new
mv -b /etc/machine-id.new /etc/machine-id mv -b /etc/machine-id.new /etc/machine-id


@@ -46,7 +46,7 @@ test_distro() {
pass_or_fail "$prefix: netcheck" || return 1 pass_or_fail "$prefix: netcheck" || return 1
} }
NODES=($(egrep ^kube_node hosts)) NODES=($(egrep ^kube-node hosts))
NETCHECKER_HOST=localhost NETCHECKER_HOST=localhost
: ${OUTPUT_DIR:=./out} : ${OUTPUT_DIR:=./out}


@@ -41,11 +41,10 @@ from ruamel.yaml import YAML
import os import os
import re import re
import subprocess
import sys import sys
ROLES = ['all', 'kube_control_plane', 'kube_node', 'etcd', 'k8s_cluster', ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster',
'calico_rr'] 'calico-rr']
PROTECTED_NAMES = ROLES PROTECTED_NAMES = ROLES
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames', AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames',
'load'] 'load']
@@ -63,16 +62,13 @@ def get_var_as_bool(name, default):
CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory/sample/hosts.yaml") CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory/sample/hosts.yaml")
# Remove the reference of KUBE_MASTERS after some deprecation cycles. KUBE_MASTERS = int(os.environ.get("KUBE_MASTERS_MASTERS", 2))
KUBE_CONTROL_HOSTS = int(os.environ.get("KUBE_CONTROL_HOSTS",
os.environ.get("KUBE_MASTERS", 2)))
# Reconfigures cluster distribution at scale # Reconfigures cluster distribution at scale
SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50)) SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
MASSIVE_SCALE_THRESHOLD = int(os.environ.get("MASSIVE_SCALE_THRESHOLD", 200)) MASSIVE_SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 200))
DEBUG = get_var_as_bool("DEBUG", True) DEBUG = get_var_as_bool("DEBUG", True)
HOST_PREFIX = os.environ.get("HOST_PREFIX", "node") HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")
USE_REAL_HOSTNAME = get_var_as_bool("USE_REAL_HOSTNAME", False)
# Configurable as shell vars end # Configurable as shell vars end
@@ -104,11 +100,10 @@ class KubesprayInventory(object):
etcd_hosts_count = 3 if len(self.hosts.keys()) >= 3 else 1 etcd_hosts_count = 3 if len(self.hosts.keys()) >= 3 else 1
self.set_etcd(list(self.hosts.keys())[:etcd_hosts_count]) self.set_etcd(list(self.hosts.keys())[:etcd_hosts_count])
if len(self.hosts) >= SCALE_THRESHOLD: if len(self.hosts) >= SCALE_THRESHOLD:
self.set_kube_control_plane(list(self.hosts.keys())[ self.set_kube_master(list(self.hosts.keys())[
etcd_hosts_count:(etcd_hosts_count + KUBE_CONTROL_HOSTS)]) etcd_hosts_count:(etcd_hosts_count + KUBE_MASTERS)])
else: else:
self.set_kube_control_plane( self.set_kube_master(list(self.hosts.keys())[:KUBE_MASTERS])
list(self.hosts.keys())[:KUBE_CONTROL_HOSTS])
self.set_kube_node(self.hosts.keys()) self.set_kube_node(self.hosts.keys())
if len(self.hosts) >= SCALE_THRESHOLD: if len(self.hosts) >= SCALE_THRESHOLD:
self.set_calico_rr(list(self.hosts.keys())[:etcd_hosts_count]) self.set_calico_rr(list(self.hosts.keys())[:etcd_hosts_count])
@@ -172,7 +167,6 @@ class KubesprayInventory(object):
# FIXME(mattymo): Fix condition where delete then add reuses highest id # FIXME(mattymo): Fix condition where delete then add reuses highest id
next_host_id = highest_host_id + 1 next_host_id = highest_host_id + 1
next_host = ""
all_hosts = existing_hosts.copy() all_hosts = existing_hosts.copy()
for host in changed_hosts: for host in changed_hosts:
@@ -197,14 +191,8 @@ class KubesprayInventory(object):
self.debug("Skipping existing host {0}.".format(ip)) self.debug("Skipping existing host {0}.".format(ip))
continue continue
if USE_REAL_HOSTNAME: next_host = "{0}{1}".format(HOST_PREFIX, next_host_id)
cmd = ("ssh -oStrictHostKeyChecking=no " next_host_id += 1
+ access_ip + " 'hostname -s'")
next_host = subprocess.check_output(cmd, shell=True)
next_host = next_host.strip().decode('ascii')
else:
next_host = "{0}{1}".format(HOST_PREFIX, next_host_id)
next_host_id += 1
all_hosts[next_host] = {'ansible_host': access_ip, all_hosts[next_host] = {'ansible_host': access_ip,
'ip': ip, 'ip': ip,
'access_ip': access_ip} 'access_ip': access_ip}
@@ -241,7 +229,7 @@ class KubesprayInventory(object):
return [ip_address(ip).exploded for ip in range(start, end + 1)] return [ip_address(ip).exploded for ip in range(start, end + 1)]
for host in hosts: for host in hosts:
if '-' in host and not (host.startswith('-') or host[0].isalpha()): if '-' in host and not host.startswith('-'):
start, end = host.strip().split('-') start, end = host.strip().split('-')
try: try:
reworked_hosts.extend(ips(start, end)) reworked_hosts.extend(ips(start, end))
@@ -269,7 +257,7 @@ class KubesprayInventory(object):
def purge_invalid_hosts(self, hostnames, protected_names=[]): def purge_invalid_hosts(self, hostnames, protected_names=[]):
for role in self.yaml_config['all']['children']: for role in self.yaml_config['all']['children']:
if role != 'k8s_cluster' and self.yaml_config['all']['children'][role]['hosts']: # noqa if role != 'k8s-cluster' and self.yaml_config['all']['children'][role]['hosts']: # noqa
all_hosts = self.yaml_config['all']['children'][role]['hosts'].copy() # noqa all_hosts = self.yaml_config['all']['children'][role]['hosts'].copy() # noqa
for host in all_hosts.keys(): for host in all_hosts.keys():
if host not in hostnames and host not in protected_names: if host not in hostnames and host not in protected_names:
@@ -290,54 +278,52 @@ class KubesprayInventory(object):
if self.yaml_config['all']['hosts'] is None: if self.yaml_config['all']['hosts'] is None:
self.yaml_config['all']['hosts'] = {host: None} self.yaml_config['all']['hosts'] = {host: None}
self.yaml_config['all']['hosts'][host] = opts self.yaml_config['all']['hosts'][host] = opts
elif group != 'k8s_cluster:children': elif group != 'k8s-cluster:children':
if self.yaml_config['all']['children'][group]['hosts'] is None: if self.yaml_config['all']['children'][group]['hosts'] is None:
self.yaml_config['all']['children'][group]['hosts'] = { self.yaml_config['all']['children'][group]['hosts'] = {
host: None} host: None}
else: else:
self.yaml_config['all']['children'][group]['hosts'][host] = None # noqa self.yaml_config['all']['children'][group]['hosts'][host] = None # noqa
def set_kube_control_plane(self, hosts): def set_kube_master(self, hosts):
for host in hosts: for host in hosts:
self.add_host_to_group('kube_control_plane', host) self.add_host_to_group('kube-master', host)
def set_all(self, hosts): def set_all(self, hosts):
for host, opts in hosts.items(): for host, opts in hosts.items():
self.add_host_to_group('all', host, opts) self.add_host_to_group('all', host, opts)
def set_k8s_cluster(self): def set_k8s_cluster(self):
k8s_cluster = {'children': {'kube_control_plane': None, k8s_cluster = {'children': {'kube-master': None, 'kube-node': None}}
'kube_node': None}} self.yaml_config['all']['children']['k8s-cluster'] = k8s_cluster
self.yaml_config['all']['children']['k8s_cluster'] = k8s_cluster
def set_calico_rr(self, hosts): def set_calico_rr(self, hosts):
for host in hosts: for host in hosts:
if host in self.yaml_config['all']['children']['kube_control_plane']: # noqa if host in self.yaml_config['all']['children']['kube-master']:
self.debug("Not adding {0} to calico_rr group because it " self.debug("Not adding {0} to calico-rr group because it "
"conflicts with kube_control_plane " "conflicts with kube-master group".format(host))
"group".format(host))
continue continue
if host in self.yaml_config['all']['children']['kube_node']: if host in self.yaml_config['all']['children']['kube-node']:
self.debug("Not adding {0} to calico_rr group because it " self.debug("Not adding {0} to calico-rr group because it "
"conflicts with kube_node group".format(host)) "conflicts with kube-node group".format(host))
continue continue
self.add_host_to_group('calico_rr', host) self.add_host_to_group('calico-rr', host)
def set_kube_node(self, hosts): def set_kube_node(self, hosts):
for host in hosts: for host in hosts:
if len(self.yaml_config['all']['hosts']) >= SCALE_THRESHOLD: if len(self.yaml_config['all']['hosts']) >= SCALE_THRESHOLD:
if host in self.yaml_config['all']['children']['etcd']['hosts']: # noqa if host in self.yaml_config['all']['children']['etcd']['hosts']: # noqa
self.debug("Not adding {0} to kube_node group because of " self.debug("Not adding {0} to kube-node group because of "
"scale deployment and host is in etcd " "scale deployment and host is in etcd "
"group.".format(host)) "group.".format(host))
continue continue
if len(self.yaml_config['all']['hosts']) >= MASSIVE_SCALE_THRESHOLD: # noqa if len(self.yaml_config['all']['hosts']) >= MASSIVE_SCALE_THRESHOLD: # noqa
if host in self.yaml_config['all']['children']['kube_control_plane']['hosts']: # noqa if host in self.yaml_config['all']['children']['kube-master']['hosts']: # noqa
self.debug("Not adding {0} to kube_node group because of " self.debug("Not adding {0} to kube-node group because of "
"scale deployment and host is in " "scale deployment and host is in kube-master "
"kube_control_plane group.".format(host)) "group.".format(host))
continue continue
self.add_host_to_group('kube_node', host) self.add_host_to_group('kube-node', host)
def set_etcd(self, hosts): def set_etcd(self, hosts):
for host in hosts: for host in hosts:
@@ -407,9 +393,8 @@ Configurable env vars:
DEBUG Enable debug printing. Default: True DEBUG Enable debug printing. Default: True
CONFIG_FILE File to write config to Default: ./inventory/sample/hosts.yaml CONFIG_FILE File to write config to Default: ./inventory/sample/hosts.yaml
HOST_PREFIX Host prefix for generated hosts. Default: node HOST_PREFIX Host prefix for generated hosts. Default: node
KUBE_CONTROL_HOSTS Set the number of kube-control-planes. Default: 2
SCALE_THRESHOLD Separate ETCD role if # of nodes >= 50 SCALE_THRESHOLD Separate ETCD role if # of nodes >= 50
MASSIVE_SCALE_THRESHOLD Separate K8s control-plane and ETCD if # of nodes >= 200 MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
''' # noqa ''' # noqa
print(help_text) print(help_text)


@@ -13,8 +13,8 @@
# under the License. # under the License.
import inventory import inventory
import mock
import unittest import unittest
from unittest import mock
from collections import OrderedDict from collections import OrderedDict
import sys import sys
@@ -51,7 +51,7 @@ class TestInventory(unittest.TestCase):
groups = ['group1', 'group2'] groups = ['group1', 'group2']
self.inv.ensure_required_groups(groups) self.inv.ensure_required_groups(groups)
for group in groups: for group in groups:
self.assertIn(group, self.inv.yaml_config['all']['children']) self.assertTrue(group in self.inv.yaml_config['all']['children'])
def test_get_host_id(self): def test_get_host_id(self):
hostnames = ['node99', 'no99de01', '01node01', 'node1.domain', hostnames = ['node99', 'no99de01', '01node01', 'node1.domain',
@@ -209,8 +209,8 @@ class TestInventory(unittest.TestCase):
('doesnotbelong2', {'whateveropts=ilike'})]) ('doesnotbelong2', {'whateveropts=ilike'})])
self.inv.yaml_config['all']['hosts'] = existing_hosts self.inv.yaml_config['all']['hosts'] = existing_hosts
self.inv.purge_invalid_hosts(proper_hostnames) self.inv.purge_invalid_hosts(proper_hostnames)
self.assertNotIn( self.assertTrue(
bad_host, self.inv.yaml_config['all']['hosts'].keys()) bad_host not in self.inv.yaml_config['all']['hosts'].keys())
def test_add_host_to_group(self): def test_add_host_to_group(self):
group = 'etcd' group = 'etcd'
@@ -222,13 +222,13 @@ class TestInventory(unittest.TestCase):
self.inv.yaml_config['all']['children'][group]['hosts'].get(host), self.inv.yaml_config['all']['children'][group]['hosts'].get(host),
None) None)
def test_set_kube_control_plane(self): def test_set_kube_master(self):
group = 'kube_control_plane' group = 'kube-master'
host = 'node1' host = 'node1'
self.inv.set_kube_control_plane([host]) self.inv.set_kube_master([host])
self.assertIn( self.assertTrue(
host, self.inv.yaml_config['all']['children'][group]['hosts']) host in self.inv.yaml_config['all']['children'][group]['hosts'])
def test_set_all(self): def test_set_all(self):
hosts = OrderedDict([ hosts = OrderedDict([
@@ -241,30 +241,30 @@ class TestInventory(unittest.TestCase):
self.inv.yaml_config['all']['hosts'].get(host), opt) self.inv.yaml_config['all']['hosts'].get(host), opt)
def test_set_k8s_cluster(self): def test_set_k8s_cluster(self):
group = 'k8s_cluster' group = 'k8s-cluster'
expected_hosts = ['kube_node', 'kube_control_plane'] expected_hosts = ['kube-node', 'kube-master']
self.inv.set_k8s_cluster() self.inv.set_k8s_cluster()
for host in expected_hosts: for host in expected_hosts:
self.assertIn( self.assertTrue(
host, host in
self.inv.yaml_config['all']['children'][group]['children']) self.inv.yaml_config['all']['children'][group]['children'])
def test_set_kube_node(self): def test_set_kube_node(self):
group = 'kube_node' group = 'kube-node'
host = 'node1' host = 'node1'
self.inv.set_kube_node([host]) self.inv.set_kube_node([host])
self.assertIn( self.assertTrue(
host, self.inv.yaml_config['all']['children'][group]['hosts']) host in self.inv.yaml_config['all']['children'][group]['hosts'])
def test_set_etcd(self): def test_set_etcd(self):
group = 'etcd' group = 'etcd'
host = 'node1' host = 'node1'
self.inv.set_etcd([host]) self.inv.set_etcd([host])
self.assertIn( self.assertTrue(
host, self.inv.yaml_config['all']['children'][group]['hosts']) host in self.inv.yaml_config['all']['children'][group]['hosts'])
def test_scale_scenario_one(self): def test_scale_scenario_one(self):
num_nodes = 50 num_nodes = 50
@@ -275,12 +275,12 @@ class TestInventory(unittest.TestCase):
self.inv.set_all(hosts) self.inv.set_all(hosts)
self.inv.set_etcd(list(hosts.keys())[0:3]) self.inv.set_etcd(list(hosts.keys())[0:3])
self.inv.set_kube_control_plane(list(hosts.keys())[0:2]) self.inv.set_kube_master(list(hosts.keys())[0:2])
self.inv.set_kube_node(hosts.keys()) self.inv.set_kube_node(hosts.keys())
for h in range(3): for h in range(3):
self.assertFalse( self.assertFalse(
list(hosts.keys())[h] in list(hosts.keys())[h] in
self.inv.yaml_config['all']['children']['kube_node']['hosts']) self.inv.yaml_config['all']['children']['kube-node']['hosts'])
def test_scale_scenario_two(self): def test_scale_scenario_two(self):
num_nodes = 500 num_nodes = 500
@@ -291,12 +291,12 @@ class TestInventory(unittest.TestCase):
self.inv.set_all(hosts) self.inv.set_all(hosts)
self.inv.set_etcd(list(hosts.keys())[0:3]) self.inv.set_etcd(list(hosts.keys())[0:3])
self.inv.set_kube_control_plane(list(hosts.keys())[3:5]) self.inv.set_kube_master(list(hosts.keys())[3:5])
self.inv.set_kube_node(hosts.keys()) self.inv.set_kube_node(hosts.keys())
for h in range(5): for h in range(5):
self.assertFalse( self.assertFalse(
list(hosts.keys())[h] in list(hosts.keys())[h] in
self.inv.yaml_config['all']['children']['kube_node']['hosts']) self.inv.yaml_config['all']['children']['kube-node']['hosts'])
def test_range2ips_range(self): def test_range2ips_range(self):
changed_hosts = ['10.90.0.2', '10.90.0.4-10.90.0.6', '10.90.0.8'] changed_hosts = ['10.90.0.2', '10.90.0.4-10.90.0.6', '10.90.0.8']


@@ -5,7 +5,7 @@ deployment on VMs.
This playbook does not create Virtual Machines, nor does it run Kubespray itself. This playbook does not create Virtual Machines, nor does it run Kubespray itself.
## User creation ### User creation
If you want to create a user for running Kubespray deployment, you should specify If you want to create a user for running Kubespray deployment, you should specify
both `k8s_deployment_user` and `k8s_deployment_user_pkey_path`. both `k8s_deployment_user` and `k8s_deployment_user_pkey_path`.
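As a rough sketch only, such an inventory variable block could look like the following; the variable names are taken from the sentence above, while the user name and key path are placeholders:

```yaml
# Hypothetical group_vars entry for the user-creation playbook
k8s_deployment_user: kubespray              # placeholder user name
k8s_deployment_user_pkey_path: /tmp/ssh_key # placeholder path to the key file for that user
```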

contrib/metallb/README.md Normal file

@@ -0,0 +1,12 @@
# Deploy MetalLB into Kubespray/Kubernetes
```
MetalLB hooks into your Kubernetes cluster, and provides a network load-balancer implementation. In short, it allows you to create Kubernetes services of type “LoadBalancer” in clusters that don't run on a cloud provider, and thus cannot simply hook into paid products to provide load-balancers.
```
This playbook aims to automate [this](https://metallb.universe.tf/concepts/layer2/). It deploys MetalLB into kubernetes and sets up a layer 2 loadbalancer.
## Install
```
Defaults can be found in contrib/metallb/roles/provision/defaults/main.yml. You can override the defaults by copying the contents of this file to somewhere in inventory/mycluster/group_vars such as inventory/mycluster/group_vars/k8s-cluster/addons.yml and making any adjustments as required.
ansible-playbook --ask-become -i inventory/sample/hosts.ini contrib/metallb/metallb.yml
```
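As a sketch, such an override file (e.g. inventory/mycluster/group_vars/k8s-cluster/addons.yml) could carry a copy of that dictionary with adjusted values; the address range below is an assumption, and the remaining keys mirror the defaults file shown later in this diff:

```yaml
# Hypothetical override of the MetalLB provision defaults; adjust ip_range to a free range on your L2 segment
metallb:
  ip_range:
    - "192.168.1.200-192.168.1.220"
  protocol: "layer2"
  limits:
    cpu: "100m"
    memory: "100Mi"
  port: "7472"
  version: v0.7.3
```

Copying the whole dictionary is the safer option here, since the role templates below reference every key (port, limits, version).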

contrib/metallb/library Symbolic link

@@ -0,0 +1 @@
../../library


@@ -0,0 +1,12 @@
---
- hosts: bastion[0]
gather_facts: False
roles:
- { role: kubespray-defaults}
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
- hosts: kube-master[0]
tags:
- "provision"
roles:
- { role: kubespray-defaults}
- { role: provision }


@@ -0,0 +1,16 @@
---
metallb:
ip_range:
- "10.5.0.50-10.5.0.99"
protocol: "layer2"
# additional_address_pools:
# kube_service_pool:
# ip_range:
# - 10.5.1.50-10.5.1.99"
# protocol: "layer2"
# auto_assign: false
limits:
cpu: "100m"
memory: "100Mi"
port: "7472"
version: v0.7.3


@@ -0,0 +1,40 @@
---
- name: "Kubernetes Apps | Check cluster settings for MetalLB"
fail:
msg: "MetalLB requires kube_proxy_strict_arp = true, see https://github.com/danderson/metallb/issues/153#issuecomment-518651132"
when:
- "kube_proxy_mode == 'ipvs' and not kube_proxy_strict_arp"
- name: Kubernetes Apps | Check AppArmor status
command: which apparmor_parser
register: apparmor_status
when:
- podsecuritypolicy_enabled
- inventory_hostname == groups['kube-master'][0]
failed_when: false
- name: Kubernetes Apps | Set apparmor_enabled
set_fact:
apparmor_enabled: "{{ apparmor_status.rc == 0 }}"
when:
- podsecuritypolicy_enabled
- inventory_hostname == groups['kube-master'][0]
- name: "Kubernetes Apps | Lay Down MetalLB"
become: true
template: { src: "{{ item }}.j2", dest: "{{ kube_config_dir }}/{{ item }}" }
with_items: ["metallb.yml", "metallb-config.yml"]
register: "rendering"
when:
- "inventory_hostname == groups['kube-master'][0]"
- name: "Kubernetes Apps | Install and configure MetalLB"
kube:
name: "MetalLB"
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/{{ item.item }}"
state: "{{ item.changed | ternary('latest','present') }}"
become: true
with_items: "{{ rendering.results }}"
when:
- "inventory_hostname == groups['kube-master'][0]"


@@ -0,0 +1,25 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
namespace: metallb-system
name: config
data:
config: |
address-pools:
- name: loadbalanced
protocol: {{ metallb.protocol }}
addresses:
{% for ip_range in metallb.ip_range %}
- {{ ip_range }}
{% endfor %}
{% if metallb.additional_address_pools is defined %}{% for pool in metallb.additional_address_pools %}
- name: {{ pool }}
protocol: {{ metallb.additional_address_pools[pool].protocol }}
addresses:
{% for ip_range in metallb.additional_address_pools[pool].ip_range %}
- {{ ip_range }}
{% endfor %}
auto-assign: {{ metallb.additional_address_pools[pool].auto_assign }}
{% endfor %}
{% endif %}


@@ -0,0 +1,263 @@
apiVersion: v1
kind: Namespace
metadata:
name: metallb-system
labels:
app: metallb
---
apiVersion: v1
kind: ServiceAccount
metadata:
namespace: metallb-system
name: controller
labels:
app: metallb
---
apiVersion: v1
kind: ServiceAccount
metadata:
namespace: metallb-system
name: speaker
labels:
app: metallb
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: metallb-system:controller
labels:
app: metallb
rules:
- apiGroups: [""]
resources: ["services"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["services/status"]
verbs: ["update"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: metallb-system:speaker
labels:
app: metallb
rules:
- apiGroups: [""]
resources: ["services", "endpoints", "nodes"]
verbs: ["get", "list", "watch"]
{% if podsecuritypolicy_enabled %}
- apiGroups: ["policy"]
resourceNames: ["metallb"]
resources: ["podsecuritypolicies"]
verbs: ["use"]
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: metallb
annotations:
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
{% if apparmor_enabled %}
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
{% endif %}
labels:
app: metallb
spec:
privileged: true
allowPrivilegeEscalation: false
allowedCapabilities:
- net_raw
volumes:
- secret
hostNetwork: true
hostPorts:
- min: {{ metallb.port }}
max: {{ metallb.port }}
hostIPC: false
hostPID: false
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'RunAsAny'
fsGroup:
rule: 'RunAsAny'
readOnlyRootFilesystem: true
{% endif %}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
namespace: metallb-system
name: config-watcher
labels:
app: metallb
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create"]
---
## Role bindings
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: metallb-system:controller
labels:
app: metallb
subjects:
- kind: ServiceAccount
name: controller
namespace: metallb-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: metallb-system:controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: metallb-system:speaker
labels:
app: metallb
subjects:
- kind: ServiceAccount
name: speaker
namespace: metallb-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: metallb-system:speaker
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
namespace: metallb-system
name: config-watcher
labels:
app: metallb
subjects:
- kind: ServiceAccount
name: controller
- kind: ServiceAccount
name: speaker
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: config-watcher
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
namespace: metallb-system
name: speaker
labels:
app: metallb
component: speaker
spec:
selector:
matchLabels:
app: metallb
component: speaker
template:
metadata:
labels:
app: metallb
component: speaker
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "{{ metallb.port }}"
spec:
serviceAccountName: speaker
terminationGracePeriodSeconds: 0
hostNetwork: true
containers:
- name: speaker
image: metallb/speaker:{{ metallb.version }}
imagePullPolicy: IfNotPresent
args:
- --port={{ metallb.port }}
- --config=config
env:
- name: METALLB_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
ports:
- name: monitoring
containerPort: {{ metallb.port }}
resources:
limits:
cpu: {{ metallb.limits.cpu }}
memory: {{ metallb.limits.memory }}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- all
add:
- net_raw
---
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: metallb-system
name: controller
labels:
app: metallb
component: controller
spec:
revisionHistoryLimit: 3
selector:
matchLabels:
app: metallb
component: controller
template:
metadata:
labels:
app: metallb
component: controller
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "{{ metallb.port }}"
spec:
serviceAccountName: controller
terminationGracePeriodSeconds: 0
securityContext:
runAsNonRoot: true
runAsUser: 65534 # nobody
containers:
- name: controller
image: metallb/controller:{{ metallb.version }}
imagePullPolicy: IfNotPresent
args:
- --port={{ metallb.port }}
- --config=config
ports:
- name: monitoring
containerPort: {{ metallb.port }}
resources:
limits:
cpu: {{ metallb.limits.cpu }}
memory: {{ metallb.limits.memory }}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- all
readOnlyRootFilesystem: true
---


@@ -8,19 +8,19 @@ In the same directory of this ReadMe file you should find a file named `inventor
Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/sample/k8s_gfs_inventory`. Make sure that the settings in `inventory/sample/group_vars/all.yml` make sense with your deployment. Then change to the kubespray root folder and execute (supposing that the machines are all using ubuntu): Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/sample/k8s_gfs_inventory`. Make sure that the settings in `inventory/sample/group_vars/all.yml` make sense with your deployment. Then change to the kubespray root folder and execute (supposing that the machines are all using ubuntu):
```shell ```
ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./cluster.yml ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./cluster.yml
``` ```
This will provision your Kubernetes cluster. Then, to provision and configure the GlusterFS cluster, from the same directory execute: This will provision your Kubernetes cluster. Then, to provision and configure the GlusterFS cluster, from the same directory execute:
```shell ```
ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml
```
If your machines are not using Ubuntu, you need to change the `--user=ubuntu` to the correct user. Alternatively, if your Kubernetes machines are using one OS and your GlusterFS a different one, you can instead specify the `ansible_ssh_user=<correct-user>` variable in the inventory file that you just created, for each machine/VM:
```shell
k8s-master-1 ansible_ssh_host=192.168.0.147 ip=192.168.0.147 ansible_ssh_user=core
k8s-master-node-1 ansible_ssh_host=192.168.0.148 ip=192.168.0.148 ansible_ssh_user=core
k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_user=core
@@ -30,7 +30,7 @@ k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_us
The first step is to fill in a `my-kubespray-gluster-cluster.tfvars` file with the desired specification for your cluster. An example with all required variables would look like:
```ini
cluster_name = "cluster1"
number_of_k8s_masters = "1"
number_of_k8s_masters_no_floating_ip = "2"
@@ -39,7 +39,7 @@ number_of_k8s_nodes = "0"
public_key_path = "~/.ssh/my-desired-key.pub"
image = "Ubuntu 16.04"
ssh_user = "ubuntu"
flavor_k8s_node = "node-flavor-id-in-your-openstack"
flavor_k8s_master = "master-flavor-id-in-your-openstack"
network_name = "k8s-network"
floatingip_pool = "net_external"
@@ -54,7 +54,7 @@ ssh_user_gfs = "ubuntu"
As explained in the general terraform/openstack guide, you need to source your OpenStack credentials file, add your ssh key to the ssh-agent, and set up environment variables for Terraform:
```shell
$ source ~/.stackrc
$ eval $(ssh-agent -s)
$ ssh-add ~/.ssh/my-desired-key
@@ -67,7 +67,7 @@ $ echo Setting up Terraform creds && \
Then, from the kubespray directory (the root of the Git checkout), issue the following terraform command to create the VMs for the cluster:
```shell
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
```
@@ -75,18 +75,18 @@ This will create both your Kubernetes and Gluster VMs. Make sure that the ansibl
Then, provision your Kubernetes (kubespray) cluster with the following ansible call:
```shell
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml
```
Finally, provision the glusterfs nodes and add the Persistent Volume setup for GlusterFS in Kubernetes through the following ansible call:
```shell
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
```
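Once that last playbook finishes, a quick check from any machine with `kubectl` access can confirm the GlusterFS objects were registered. This is only a sketch: it assumes the defaults from the bundled templates, in particular a PersistentVolume named `glusterfs`.
```shell
# Sketch of a post-run sanity check (assumes kubectl points at the new cluster and
# the playbook created the default "glusterfs" PersistentVolume)
kubectl get pv glusterfs
kubectl get endpoints | grep -i gluster
```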
If you need to destroy the cluster, you can run:
```shell
terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
```

View File

@@ -15,10 +15,10 @@
roles: roles:
- { role: glusterfs/server } - { role: glusterfs/server }
- hosts: k8s_cluster - hosts: k8s-cluster
roles: roles:
- { role: glusterfs/client } - { role: glusterfs/client }
- hosts: kube_control_plane[0] - hosts: kube-master[0]
roles: roles:
- { role: kubernetes-pv } - { role: kubernetes-pv }

View File

@@ -14,7 +14,7 @@
# gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8 # gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8
# gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9 # gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9
# [kube_control_plane] # [kube-master]
# node1 # node1
# node2 # node2
@@ -23,16 +23,16 @@
# node2 # node2
# node3 # node3
# [kube_node] # [kube-node]
# node2 # node2
# node3 # node3
# node4 # node4
# node5 # node5
# node6 # node6
# [k8s_cluster:children] # [k8s-cluster:children]
# kube_node # kube-node
# kube_control_plane # kube-master
# [gfs-cluster] # [gfs-cluster]
# gfs_node1 # gfs_node1

View File

@@ -8,7 +8,7 @@ Installs and configures GlusterFS on Linux.
For GlusterFS to connect between servers, TCP ports `24007`, `24008`, and `24009`/`49152`+ (that port, plus an additional incremented port for each additional server in the cluster; the latter if GlusterFS is version 3.4+), and TCP/UDP port `111` must be open. You can open these using whatever firewall you wish (this can easily be configured using the `geerlingguy.firewall` role).
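For example, on a host managed by firewalld the ports could be opened roughly as follows; this is a sketch only, and the brick-port range you actually need depends on your GlusterFS version and the number of bricks per server:
```shell
# Hypothetical firewalld example; adapt the ranges, or use the geerlingguy.firewall role instead
firewall-cmd --permanent --add-port=24007-24008/tcp                # glusterd / management
firewall-cmd --permanent --add-port=49152-49251/tcp                # brick ports (GlusterFS 3.4+), one per brick
firewall-cmd --permanent --add-port=111/tcp --add-port=111/udp     # portmapper
firewall-cmd --reload
```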
This role performs basic installation and setup of Gluster, but it does not configure or mount bricks (volumes), since that step is easier to do in a series of plays in your own playbook. Ansible 1.9+ includes the [`gluster_volume`](https://docs.ansible.com/ansible/latest/collections/gluster/gluster/gluster_volume_module.html) module to ease the management of Gluster volumes.
## Role Variables

View File

@@ -7,7 +7,7 @@
register: glusterfs_ppa_added register: glusterfs_ppa_added
when: glusterfs_ppa_use when: glusterfs_ppa_use
- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa 503 - name: Ensure GlusterFS client will reinstall if the PPA was just added.
apt: apt:
name: "{{ item }}" name: "{{ item }}"
state: absent state: absent
@@ -18,7 +18,7 @@
- name: Ensure GlusterFS client is installed. - name: Ensure GlusterFS client is installed.
apt: apt:
name: "{{ item }}" name: "{{ item }}"
state: present state: installed
default_release: "{{ glusterfs_default_release }}" default_release: "{{ glusterfs_default_release }}"
with_items: with_items:
- glusterfs-client - glusterfs-client

View File

@@ -7,7 +7,7 @@
register: glusterfs_ppa_added register: glusterfs_ppa_added
when: glusterfs_ppa_use when: glusterfs_ppa_use
- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa 503 - name: Ensure GlusterFS will reinstall if the PPA was just added.
apt: apt:
name: "{{ item }}" name: "{{ item }}"
state: absent state: absent
@@ -19,7 +19,7 @@
- name: Ensure GlusterFS is installed. - name: Ensure GlusterFS is installed.
apt: apt:
name: "{{ item }}" name: "{{ item }}"
state: present state: installed
default_release: "{{ glusterfs_default_release }}" default_release: "{{ glusterfs_default_release }}"
with_items: with_items:
- glusterfs-server - glusterfs-server

View File

@@ -8,7 +8,7 @@
- { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml} - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
- { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json} - { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json}
register: gluster_pv register: gluster_pv
when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
- name: Kubernetes Apps | Set GlusterFS endpoint and PV - name: Kubernetes Apps | Set GlusterFS endpoint and PV
kube: kube:
@@ -19,4 +19,4 @@
filename: "{{ kube_config_dir }}/{{ item.item.dest }}" filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
state: "{{ item.changed | ternary('latest','present') }}" state: "{{ item.changed | ternary('latest','present') }}"
with_items: "{{ gluster_pv.results }}" with_items: "{{ gluster_pv.results }}"
when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined

View File

@@ -8,7 +8,7 @@
{% for host in groups['gfs-cluster'] %} {% for host in groups['gfs-cluster'] %}
{ {
"addresses": [ "addresses": [
{ {
"ip": "{{hostvars[host]['ip']|default(hostvars[host].ansible_default_ipv4['address'])}}" "ip": "{{hostvars[host]['ip']|default(hostvars[host].ansible_default_ipv4['address'])}}"
} }
], ],

View File

@@ -1,7 +1,7 @@
apiVersion: v1 apiVersion: v1
kind: PersistentVolume kind: PersistentVolume
metadata: metadata:
name: glusterfs name: glusterfs
spec: spec:
capacity: capacity:
storage: "{{ hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb }}Gi" storage: "{{ hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb }}Gi"

View File

@@ -1,26 +1,17 @@
# Deploy Heketi/Glusterfs into Kubespray/Kubernetes
This playbook aims to automate [this](https://github.com/heketi/heketi/blob/master/docs/admin/install-kubernetes.md) tutorial. It deploys Heketi/GlusterFS into Kubernetes and sets up a StorageClass.
## Important notice
> Due to resource limits on the current project maintainers and general lack of contributions we are considering placing Heketi into a [near-maintenance mode](https://github.com/heketi/heketi#important-notice)
## Client Setup
Heketi provides a CLI that gives users a means to administer the deployment and configuration of GlusterFS in Kubernetes. [Download and install the heketi-cli](https://github.com/heketi/heketi/releases) on your client machine.
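Once the CLI is installed you can point it at the Heketi service to verify connectivity. A minimal sketch, assuming a reachable service URL and the admin key you configured in the inventory (both values below are placeholders):
```shell
# Placeholder URL and key; substitute the values exposed by your deployment
export HEKETI_CLI_SERVER=http://heketi.example.local:8080
heketi-cli --user admin --secret "<heketi_admin_key>" cluster list
```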
## Install
Copy the inventory.yml.sample over to inventory/sample/k8s_heketi_inventory.yml and change it according to your setup.
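A sketch of that copy step, assuming the sample file ships next to the playbook in `contrib/network-storage/heketi/` (adjust the source path to your checkout):
```shell
# Assumed location of the sample inventory; edit the copy to match your nodes and disks
cp contrib/network-storage/heketi/inventory.yml.sample inventory/sample/k8s_heketi_inventory.yml
${EDITOR:-vi} inventory/sample/k8s_heketi_inventory.yml
```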
```shell
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi.yml
```
## Tear down
```shell
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi-tear-down.yml
```

View File

@@ -1,5 +1,5 @@
--- ---
- hosts: kube_control_plane[0] - hosts: kube-master[0]
roles: roles:
- { role: tear-down } - { role: tear-down }

View File

@@ -3,7 +3,7 @@
roles: roles:
- { role: prepare } - { role: prepare }
- hosts: kube_control_plane[0] - hosts: kube-master[0]
tags: tags:
- "provision" - "provision"
roles: roles:

View File

@@ -3,17 +3,17 @@ all:
heketi_admin_key: "11elfeinhundertundelf" heketi_admin_key: "11elfeinhundertundelf"
heketi_user_key: "!!einseinseins" heketi_user_key: "!!einseinseins"
children: children:
k8s_cluster: k8s-cluster:
vars: vars:
kubelet_fail_swap_on: false kubelet_fail_swap_on: false
children: children:
kube_control_plane: kube-master:
hosts: hosts:
node1: node1:
etcd: etcd:
hosts: hosts:
node2: node2:
kube_node: kube-node:
hosts: &kube_nodes hosts: &kube_nodes
node1: node1:
node2: node2:

View File

@@ -6,7 +6,7 @@
- name: "Delete bootstrap Heketi." - name: "Delete bootstrap Heketi."
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\"" command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0" when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0"
- name: "Ensure there is nothing left over." # noqa 301 - name: "Ensure there is nothing left over."
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json" command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
register: "heketi_result" register: "heketi_result"
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0" until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"

View File

@@ -13,7 +13,7 @@
- name: "Copy topology configuration into container." - name: "Copy topology configuration into container."
changed_when: false changed_when: false
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json" command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
- name: "Load heketi topology." # noqa 503 - name: "Load heketi topology."
when: "render.changed" when: "render.changed"
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json" command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
register: "load_heketi" register: "load_heketi"

View File

@@ -18,7 +18,7 @@
- name: "Provision database volume." - name: "Provision database volume."
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage" command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
when: "heketi_database_volume_exists is undefined" when: "heketi_database_volume_exists is undefined"
- name: "Copy configuration from pod." # noqa 301 - name: "Copy configuration from pod."
become: true become: true
command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json" command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"
- name: "Get heketi volume ids." - name: "Get heketi volume ids."

View File

@@ -10,10 +10,10 @@
template: template:
src: "topology.json.j2" src: "topology.json.j2"
dest: "{{ kube_config_dir }}/topology.json" dest: "{{ kube_config_dir }}/topology.json"
- name: "Copy topology configuration into container." # noqa 503 - name: "Copy topology configuration into container."
when: "rendering.changed" when: "rendering.changed"
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json" command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
- name: "Load heketi topology." # noqa 503 - name: "Load heketi topology."
when: "rendering.changed" when: "rendering.changed"
command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json" command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
- name: "Get heketi topology." - name: "Get heketi topology."

View File

@@ -22,7 +22,7 @@
ignore_errors: true ignore_errors: true
changed_when: false changed_when: false
- name: "Remove volume groups." # noqa 301 - name: "Remove volume groups."
environment: environment:
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
become: true become: true
@@ -30,7 +30,7 @@
with_items: "{{ volume_groups.stdout_lines }}" with_items: "{{ volume_groups.stdout_lines }}"
loop_control: { loop_var: "volume_group" } loop_control: { loop_var: "volume_group" }
- name: "Remove physical volume from cluster disks." # noqa 301 - name: "Remove physical volume from cluster disks."
environment: environment:
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
become: true become: true

View File

@@ -1,43 +1,43 @@
--- ---
- name: "Remove storage class." # noqa 301 - name: "Remove storage class."
command: "{{ bin_dir }}/kubectl delete storageclass gluster" command: "{{ bin_dir }}/kubectl delete storageclass gluster"
ignore_errors: true ignore_errors: true
- name: "Tear down heketi." # noqa 301 - name: "Tear down heketi."
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\"" command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
ignore_errors: true ignore_errors: true
- name: "Tear down heketi." # noqa 301 - name: "Tear down heketi."
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\"" command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
ignore_errors: true ignore_errors: true
- name: "Tear down bootstrap." - name: "Tear down bootstrap."
include_tasks: "../../provision/tasks/bootstrap/tear-down.yml" include_tasks: "../provision/tasks/bootstrap/tear-down.yml"
- name: "Ensure there is nothing left over." # noqa 301 - name: "Ensure there is nothing left over."
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json" command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
register: "heketi_result" register: "heketi_result"
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0" until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
retries: 60 retries: 60
delay: 5 delay: 5
- name: "Ensure there is nothing left over." # noqa 301 - name: "Ensure there is nothing left over."
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json" command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
register: "heketi_result" register: "heketi_result"
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0" until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
retries: 60 retries: 60
delay: 5 delay: 5
- name: "Tear down glusterfs." # noqa 301 - name: "Tear down glusterfs."
command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs" command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
ignore_errors: true ignore_errors: true
- name: "Remove heketi storage service." # noqa 301 - name: "Remove heketi storage service."
command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints" command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
ignore_errors: true ignore_errors: true
- name: "Remove heketi gluster role binding" # noqa 301 - name: "Remove heketi gluster role binding"
command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin" command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
ignore_errors: true ignore_errors: true
- name: "Remove heketi config secret" # noqa 301 - name: "Remove heketi config secret"
command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret" command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
ignore_errors: true ignore_errors: true
- name: "Remove heketi db backup" # noqa 301 - name: "Remove heketi db backup"
command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup" command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
ignore_errors: true ignore_errors: true
- name: "Remove heketi service account" # noqa 301 - name: "Remove heketi service account"
command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account" command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
ignore_errors: true ignore_errors: true
- name: "Get secrets" - name: "Get secrets"

View File

@@ -1,43 +0,0 @@
# Offline deployment
## manage-offline-container-images.sh
Container image collecting script for offline deployment
This script has two features:
(1) Get container images from an environment which is deployed online.
(2) Deploy local container registry and register the container images to the registry.
Step (1) should be done at an online site as preparation; we then bring the collected images
to the target offline environment.
Then we run step (2) to register the images to the local registry.
Step(1) can be operated with:
```shell
manage-offline-container-images.sh create
```
Step(2) can be operated with:
```shell
manage-offline-container-images.sh register
```
## generate_list.sh
This script generates the list of downloaded files and the list of container images from the `roles/download/defaults/main.yml` file.
Running it generates three files: all download URLs in files.list, all container images in images.list, and all component versions in generate.sh.
```shell
bash generate_list.sh
tree temp
temp
├── files.list
├── generate.sh
└── images.list
0 directories, 3 files
```
In some cases you may want to update a component version: edit the `generate.sh` file, then run `bash generate.sh | grep 'https' > files.list` to update files.list, or `bash generate.sh | grep -v 'https' > images.list` to update images.list.
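As a concrete illustration of that workflow (the version pinned below is purely hypothetical), run from inside the generated `temp` directory:
```shell
# Hypothetical example: pin a different etcd version, then regenerate both lists
sed -i 's/^etcd_version=.*/etcd_version=v3.4.13/' generate.sh
bash generate.sh | grep 'https' > files.list
bash generate.sh | grep -v 'https' > images.list
```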

View File

@@ -1 +0,0 @@
{ "insecure-registries":["HOSTNAME:5000"] }

View File

@@ -1,57 +0,0 @@
#!/bin/bash
set -eo pipefail
CURRENT_DIR=$(cd $(dirname $0); pwd)
TEMP_DIR="${CURRENT_DIR}/temp"
REPO_ROOT_DIR="${CURRENT_DIR%/contrib/offline}"
: ${IMAGE_ARCH:="amd64"}
: ${ANSIBLE_SYSTEM:="linux"}
: ${ANSIBLE_ARCHITECTURE:="x86_64"}
: ${DOWNLOAD_YML:="roles/download/defaults/main.yml"}
: ${KUBE_VERSION_YAML:="roles/kubespray-defaults/defaults/main.yaml"}
mkdir -p ${TEMP_DIR}
# ARCH used in convert {%- if image_arch != 'amd64' -%}-{{ image_arch }}{%- endif -%} to {{arch}}
if [ "${IMAGE_ARCH}" != "amd64" ]; then ARCH="${IMAGE_ARCH}"; fi
cat > ${TEMP_DIR}/generate.sh << EOF
arch=${ARCH}
image_arch=${IMAGE_ARCH}
ansible_system=${ANSIBLE_SYSTEM}
ansible_architecture=${ANSIBLE_ARCHITECTURE}
EOF
# generate all component version by $DOWNLOAD_YML
grep 'kube_version:' ${REPO_ROOT_DIR}/${KUBE_VERSION_YAML} \
| sed 's/: /=/g' >> ${TEMP_DIR}/generate.sh
grep '_version:' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
| sed 's/: /=/g;s/{{/${/g;s/}}/}/g' | tr -d ' ' >> ${TEMP_DIR}/generate.sh
sed -i 's/kube_major_version=.*/kube_major_version=${kube_version%.*}/g' ${TEMP_DIR}/generate.sh
sed -i 's/crictl_version=.*/crictl_version=${kube_version%.*}.0/g' ${TEMP_DIR}/generate.sh
# generate all download files url
grep 'download_url:' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
| sed 's/: /=/g;s/ //g;s/{{/${/g;s/}}/}/g;s/|lower//g;s/^.*_url=/echo /g' >> ${TEMP_DIR}/generate.sh
# generate all images list
grep -E '_repo:|_tag:' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
| sed "s#{%- if image_arch != 'amd64' -%}-{{ image_arch }}{%- endif -%}#{{arch}}#g" \
| sed 's/: /=/g;s/{{/${/g;s/}}/}/g' | tr -d ' ' >> ${TEMP_DIR}/generate.sh
sed -n '/^downloads:/,/download_defaults:/p' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
| sed -n "s/repo: //p;s/tag: //p" | tr -d ' ' | sed 's/{{/${/g;s/}}/}/g' \
| sed 'N;s#\n# #g' | tr ' ' ':' | sed 's/^/echo /g' >> ${TEMP_DIR}/generate.sh
# special handling for https://github.com/kubernetes-sigs/kubespray/pull/7570
sed -i 's#^coredns_image_repo=.*#coredns_image_repo=${kube_image_repo}$(if printf "%s\\n%s\\n" v1.21 ${kube_version%.*} | sort --check=quiet --version-sort; then echo -n /coredns/coredns;else echo -n /coredns; fi)#' ${TEMP_DIR}/generate.sh
sed -i 's#^coredns_image_tag=.*#coredns_image_tag=$(if printf "%s\\n%s\\n" v1.21 ${kube_version%.*} | sort --check=quiet --version-sort; then echo -n ${coredns_version};else echo -n ${coredns_version/v/}; fi)#' ${TEMP_DIR}/generate.sh
# add kube-* images to images list
KUBE_IMAGES="kube-apiserver kube-controller-manager kube-scheduler kube-proxy"
echo "${KUBE_IMAGES}" | tr ' ' '\n' | xargs -L1 -I {} \
echo 'echo ${kube_image_repo}/{}:${kube_version}' >> ${TEMP_DIR}/generate.sh
# print files.list and images.list
bash ${TEMP_DIR}/generate.sh | grep 'https' | sort > ${TEMP_DIR}/files.list
bash ${TEMP_DIR}/generate.sh | grep -v 'https' | sort > ${TEMP_DIR}/images.list

View File

@@ -1,150 +0,0 @@
#!/bin/bash
OPTION=$1
CURRENT_DIR=$(cd $(dirname $0); pwd)
TEMP_DIR="${CURRENT_DIR}/temp"
IMAGE_TAR_FILE="${CURRENT_DIR}/container-images.tar.gz"
IMAGE_DIR="${CURRENT_DIR}/container-images"
IMAGE_LIST="${IMAGE_DIR}/container-images.txt"
RETRY_COUNT=5
function create_container_image_tar() {
set -e
IMAGES=$(kubectl describe pods --all-namespaces | grep " Image:" | awk '{print $2}' | sort | uniq)
# NOTE: etcd and pause cannot be seen as pods.
# The pause image is used for --pod-infra-container-image option of kubelet.
EXT_IMAGES=$(kubectl cluster-info dump | egrep "quay.io/coreos/etcd:|k8s.gcr.io/pause:" | sed s@\"@@g)
IMAGES="${IMAGES} ${EXT_IMAGES}"
rm -f ${IMAGE_TAR_FILE}
rm -rf ${IMAGE_DIR}
mkdir ${IMAGE_DIR}
cd ${IMAGE_DIR}
sudo docker pull registry:latest
sudo docker save -o registry-latest.tar registry:latest
for image in ${IMAGES}
do
FILE_NAME="$(echo ${image} | sed s@"/"@"-"@g | sed s/":"/"-"/g)".tar
set +e
for step in $(seq 1 ${RETRY_COUNT})
do
sudo docker pull ${image}
if [ $? -eq 0 ]; then
break
fi
echo "Failed to pull ${image} at step ${step}"
if [ ${step} -eq ${RETRY_COUNT} ]; then
exit 1
fi
done
set -e
sudo docker save -o ${FILE_NAME} ${image}
# NOTE: Here removes the following repo parts from each image
# so that these parts will be replaced with Kubespray.
# - kube_image_repo: "k8s.gcr.io"
# - gcr_image_repo: "gcr.io"
# - docker_image_repo: "docker.io"
# - quay_image_repo: "quay.io"
FIRST_PART=$(echo ${image} | awk -F"/" '{print $1}')
if [ "${FIRST_PART}" = "k8s.gcr.io" ] ||
[ "${FIRST_PART}" = "gcr.io" ] ||
[ "${FIRST_PART}" = "docker.io" ] ||
[ "${FIRST_PART}" = "quay.io" ]; then
image=$(echo ${image} | sed s@"${FIRST_PART}/"@@)
fi
echo "${FILE_NAME} ${image}" >> ${IMAGE_LIST}
done
cd ..
sudo chown ${USER} ${IMAGE_DIR}/*
tar -zcvf ${IMAGE_TAR_FILE} ./container-images
rm -rf ${IMAGE_DIR}
echo ""
echo "${IMAGE_TAR_FILE} is created to contain your container images."
echo "Please keep this file and bring it to your offline environment."
}
function register_container_images() {
if [ ! -f ${IMAGE_TAR_FILE} ]; then
echo "${IMAGE_TAR_FILE} should exist."
exit 1
fi
if [ ! -d ${TEMP_DIR} ]; then
mkdir ${TEMP_DIR}
fi
# To avoid "http: server gave http response to https client" error.
LOCALHOST_NAME=$(hostname)
if [ -d /etc/docker/ ]; then
set -e
# Ubuntu18.04, RHEL7/CentOS7
cp ${CURRENT_DIR}/docker-daemon.json ${TEMP_DIR}/docker-daemon.json
sed -i s@"HOSTNAME"@"${LOCALHOST_NAME}"@ ${TEMP_DIR}/docker-daemon.json
sudo cp ${TEMP_DIR}/docker-daemon.json /etc/docker/daemon.json
elif [ -d /etc/containers/ ]; then
set -e
# RHEL8/CentOS8
cp ${CURRENT_DIR}/registries.conf ${TEMP_DIR}/registries.conf
sed -i s@"HOSTNAME"@"${LOCALHOST_NAME}"@ ${TEMP_DIR}/registries.conf
sudo cp ${TEMP_DIR}/registries.conf /etc/containers/registries.conf
else
echo "docker package(docker-ce, etc.) should be installed"
exit 1
fi
tar -zxvf ${IMAGE_TAR_FILE}
sudo docker load -i ${IMAGE_DIR}/registry-latest.tar
sudo docker run --restart=always -d -p 5000:5000 --name registry registry:latest
set +e
set -e
while read -r line; do
file_name=$(echo ${line} | awk '{print $1}')
org_image=$(echo ${line} | awk '{print $2}')
new_image="${LOCALHOST_NAME}:5000/${org_image}"
image_id=$(tar -tf ${IMAGE_DIR}/${file_name} | grep "\.json" | grep -v manifest.json | sed s/"\.json"//)
sudo docker load -i ${IMAGE_DIR}/${file_name}
sudo docker tag ${image_id} ${new_image}
sudo docker push ${new_image}
done <<< "$(cat ${IMAGE_LIST})"
echo "Succeeded to register container images to local registry."
echo "Please specify ${LOCALHOST_NAME}:5000 for the following options in your inventry:"
echo "- kube_image_repo"
echo "- gcr_image_repo"
echo "- docker_image_repo"
echo "- quay_image_repo"
}
if [ "${OPTION}" == "create" ]; then
create_container_image_tar
elif [ "${OPTION}" == "register" ]; then
register_container_images
else
echo "This script has two features:"
echo "(1) Get container images from an environment which is deployed online."
echo "(2) Deploy local container registry and register the container images to the registry."
echo ""
echo "Step(1) should be done online site as a preparation, then we bring"
echo "the gotten images to the target offline environment."
echo "Then we will run step(2) for registering the images to local registry."
echo ""
echo "${IMAGE_TAR_FILE} is created to contain your container images."
echo "Please keep this file and bring it to your offline environment."
echo ""
echo "Step(1) can be operated with:"
echo " $ ./manage-offline-container-images.sh create"
echo ""
echo "Step(2) can be operated with:"
echo " $ ./manage-offline-container-images.sh register"
echo ""
echo "Please specify 'create' or 'register'."
echo ""
exit 1
fi

View File

@@ -1,8 +0,0 @@
[registries.search]
registries = ['registry.access.redhat.com', 'registry.redhat.io', 'docker.io']
[registries.insecure]
registries = ['HOSTNAME:5000']
[registries.block]
registries = []

View File

@@ -1,4 +0,0 @@
---
- hosts: all
roles:
- { role: prepare }

View File

@@ -1,2 +0,0 @@
---
disable_service_firewall: false

View File

@@ -1,23 +0,0 @@
---
- block:
- name: List services
service_facts:
- name: Disable service firewalld
systemd:
name: firewalld
state: stopped
enabled: no
when:
"'firewalld.service' in services"
- name: Disable service ufw
systemd:
name: ufw
state: stopped
enabled: no
when:
"'ufw.service' in services"
when:
- disable_service_firewall

View File

@@ -51,7 +51,7 @@ export SKIP_PIP_INSTALL=1
%doc %{_docdir}/%{name}/inventory/sample/hosts.ini %doc %{_docdir}/%{name}/inventory/sample/hosts.ini
%config %{_sysconfdir}/%{name}/ansible.cfg %config %{_sysconfdir}/%{name}/ansible.cfg
%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/all.yml %config %{_sysconfdir}/%{name}/inventory/sample/group_vars/all.yml
%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s_cluster.yml %config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s-cluster.yml
%license %{_docdir}/%{name}/LICENSE %license %{_docdir}/%{name}/LICENSE
%{python2_sitelib}/%{srcname}-%{release}-py%{python2_version}.egg-info %{python2_sitelib}/%{srcname}-%{release}-py%{python2_version}.egg-info
%{_datarootdir}/%{name}/roles/ %{_datarootdir}/%{name}/roles/

View File

@@ -1,64 +1,55 @@
# Kubernetes on AWS with Terraform
## Overview
This project will create:
- VPC with Public and Private Subnets in # Availability Zones
- Bastion Hosts and NAT Gateways in the Public Subnet
- A dynamic number of masters, etcd, and worker nodes in the Private Subnet
- even distributed over the # of Availability Zones
- AWS ELB in the Public Subnet for accessing the Kubernetes API from the internet
## Requirements
- Terraform 0.12.0 or newer
## How to Use
- Export the variables for your AWS credentials or edit `credentials.tfvars`:
```commandline
export TF_VAR_AWS_ACCESS_KEY_ID="www"
export TF_VAR_AWS_SECRET_ACCESS_KEY="xxx"
export TF_VAR_AWS_SSH_KEY_NAME="yyy"
export TF_VAR_AWS_DEFAULT_REGION="zzz"
```
- Update `contrib/terraform/aws/terraform.tfvars` with your data. By default, the Terraform scripts use CoreOS as base image. If you want to change this behaviour, see note "Using other distrib than CoreOs" below.
- Update `contrib/terraform/aws/terraform.tfvars` with your data. By default, the Terraform scripts use Ubuntu 18.04 LTS (Bionic) as base image. If you want to change this behaviour, see note "Using other distrib than Ubuntu" below.
- Create an AWS EC2 SSH Key
- Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply`, depending on whether you exported your AWS credentials
Example:
```commandline
terraform apply -var-file=credentials.tfvars
```
- Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`
- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts with ssh through the bastion host, use the generated ssh-bastion.conf.
Ansible automatically detects bastion and changes ssh_args
```commandline
ssh -F ./ssh-bastion.conf user@$ip
```
- Once the infrastructure is created, you can run the kubespray playbooks and supply inventory/hosts with the `-i` flag.
Example (this one assumes you are using Ubuntu) Example (this one assumes you are using CoreOS)
```commandline ```commandline
ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=ubuntu -b --become-user=root --flush-cache ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=core -b --become-user=root --flush-cache
``` ```
***Using other distrib than CoreOs***
***Using other distrib than Ubuntu*** If you want to use another distribution than CoreOS, you can modify the search filters of the 'data "aws_ami" "distro"' in variables.tf.
If you want to use another distribution than Ubuntu 18.04 (Bionic) LTS, you can modify the search filters of the 'data "aws_ami" "distro"' in variables.tf.
For example, to use:
- Debian Jessie, replace 'data "aws_ami" "distro"' in variables.tf with
```ini
data "aws_ami" "distro" {
most_recent = true
@@ -74,11 +65,8 @@ data "aws_ami" "distro" {
owners = ["379101102735"] owners = ["379101102735"]
} }
```
- Ubuntu 16.04, replace 'data "aws_ami" "distro"' in variables.tf with - Ubuntu 16.04, replace 'data "aws_ami" "distro"' in variables.tf with
```ini
data "aws_ami" "distro" { data "aws_ami" "distro" {
most_recent = true most_recent = true
@@ -94,11 +82,8 @@ data "aws_ami" "distro" {
owners = ["099720109477"] owners = ["099720109477"]
} }
```
- Centos 7, replace 'data "aws_ami" "distro"' in variables.tf with - Centos 7, replace 'data "aws_ami" "distro"' in variables.tf with
```ini
data "aws_ami" "distro" { data "aws_ami" "distro" {
most_recent = true most_recent = true
@@ -114,49 +99,23 @@ data "aws_ami" "distro" {
owners = ["688023202711"] owners = ["688023202711"]
} }
```
## Connecting to Kubernetes
You can use the following set of commands to get the kubeconfig file from your newly created cluster. Before running the commands, make sure you are in the project's root folder.
```commandline
# Get the controller's IP address.
CONTROLLER_HOST_NAME=$(cat ./inventory/hosts | grep "\[kube_control_plane\]" -A 1 | tail -n 1)
CONTROLLER_IP=$(cat ./inventory/hosts | grep $CONTROLLER_HOST_NAME | grep ansible_host | cut -d'=' -f2)
# Get the hostname of the load balancer.
LB_HOST=$(cat inventory/hosts | grep apiserver_loadbalancer_domain_name | cut -d'"' -f2)
# Get the controller's SSH fingerprint.
ssh-keygen -R $CONTROLLER_IP > /dev/null 2>&1
ssh-keyscan -H $CONTROLLER_IP >> ~/.ssh/known_hosts 2>/dev/null
# Get the kubeconfig from the controller.
mkdir -p ~/.kube
ssh -F ssh-bastion.conf centos@$CONTROLLER_IP "sudo chmod 644 /etc/kubernetes/admin.conf"
scp -F ssh-bastion.conf centos@$CONTROLLER_IP:/etc/kubernetes/admin.conf ~/.kube/config
sed -i "s^server:.*^server: https://$LB_HOST:6443^" ~/.kube/config
kubectl get nodes
```
## Troubleshooting
### Remaining AWS IAM Instance Profile
If the cluster was destroyed without using Terraform it is possible that
the AWS IAM Instance Profiles still remain. To delete them you can use
the `AWS CLI` with the following command:
```commandline
aws iam delete-instance-profile --region <region_name> --instance-profile-name <profile_name>
```
### Ansible Inventory doesn't get created
It could happen that Terraform doesn't create an Ansible Inventory file automatically. If this is the case, copy the output after `inventory=`, create a file named `hosts` in the directory `inventory`, and paste the inventory into the file.
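A sketch of that manual workaround, assuming you run it from the Terraform project folder; the editor step is where you paste the text printed after `inventory=`:
```shell
mkdir -p inventory
${EDITOR:-vi} inventory/hosts    # paste the generated inventory here
# confirm Ansible can parse the pasted file before running the playbooks
ansible-inventory -i ./inventory/hosts --list > /dev/null && echo "inventory parses OK"
```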
## Architecture
Pictured is an AWS Infrastructure created with this Terraform project distributed over two Availability Zones.

View File

@@ -3,9 +3,9 @@ terraform {
} }
provider "aws" { provider "aws" {
access_key = var.AWS_ACCESS_KEY_ID access_key = "${var.AWS_ACCESS_KEY_ID}"
secret_key = var.AWS_SECRET_ACCESS_KEY secret_key = "${var.AWS_SECRET_ACCESS_KEY}"
region = var.AWS_DEFAULT_REGION region = "${var.AWS_DEFAULT_REGION}"
} }
data "aws_availability_zones" "available" {} data "aws_availability_zones" "available" {}
@@ -18,30 +18,30 @@ data "aws_availability_zones" "available" {}
module "aws-vpc" { module "aws-vpc" {
source = "./modules/vpc" source = "./modules/vpc"
aws_cluster_name = var.aws_cluster_name aws_cluster_name = "${var.aws_cluster_name}"
aws_vpc_cidr_block = var.aws_vpc_cidr_block aws_vpc_cidr_block = "${var.aws_vpc_cidr_block}"
aws_avail_zones = slice(data.aws_availability_zones.available.names, 0, 2) aws_avail_zones = "${slice(data.aws_availability_zones.available.names, 0, 2)}"
aws_cidr_subnets_private = var.aws_cidr_subnets_private aws_cidr_subnets_private = "${var.aws_cidr_subnets_private}"
aws_cidr_subnets_public = var.aws_cidr_subnets_public aws_cidr_subnets_public = "${var.aws_cidr_subnets_public}"
default_tags = var.default_tags default_tags = "${var.default_tags}"
} }
module "aws-elb" { module "aws-elb" {
source = "./modules/elb" source = "./modules/elb"
aws_cluster_name = var.aws_cluster_name aws_cluster_name = "${var.aws_cluster_name}"
aws_vpc_id = module.aws-vpc.aws_vpc_id aws_vpc_id = "${module.aws-vpc.aws_vpc_id}"
aws_avail_zones = slice(data.aws_availability_zones.available.names, 0, 2) aws_avail_zones = "${slice(data.aws_availability_zones.available.names, 0, 2)}"
aws_subnet_ids_public = module.aws-vpc.aws_subnet_ids_public aws_subnet_ids_public = "${module.aws-vpc.aws_subnet_ids_public}"
aws_elb_api_port = var.aws_elb_api_port aws_elb_api_port = "${var.aws_elb_api_port}"
k8s_secure_api_port = var.k8s_secure_api_port k8s_secure_api_port = "${var.k8s_secure_api_port}"
default_tags = var.default_tags default_tags = "${var.default_tags}"
} }
module "aws-iam" { module "aws-iam" {
source = "./modules/iam" source = "./modules/iam"
aws_cluster_name = var.aws_cluster_name aws_cluster_name = "${var.aws_cluster_name}"
} }
/* /*
@@ -50,22 +50,22 @@ module "aws-iam" {
*/ */
resource "aws_instance" "bastion-server" { resource "aws_instance" "bastion-server" {
ami = data.aws_ami.distro.id ami = "${data.aws_ami.distro.id}"
instance_type = var.aws_bastion_size instance_type = "${var.aws_bastion_size}"
count = length(var.aws_cidr_subnets_public) count = "${length(var.aws_cidr_subnets_public)}"
associate_public_ip_address = true associate_public_ip_address = true
availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index) availability_zone = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}"
subnet_id = element(module.aws-vpc.aws_subnet_ids_public, count.index) subnet_id = "${element(module.aws-vpc.aws_subnet_ids_public, count.index)}"
vpc_security_group_ids = module.aws-vpc.aws_security_group vpc_security_group_ids = "${module.aws-vpc.aws_security_group}"
key_name = var.AWS_SSH_KEY_NAME key_name = "${var.AWS_SSH_KEY_NAME}"
tags = merge(var.default_tags, tomap({ tags = "${merge(var.default_tags, map(
Name = "kubernetes-${var.aws_cluster_name}-bastion-${count.index}" "Name", "kubernetes-${var.aws_cluster_name}-bastion-${count.index}",
Cluster = var.aws_cluster_name "Cluster", "${var.aws_cluster_name}",
Role = "bastion-${var.aws_cluster_name}-${count.index}" "Role", "bastion-${var.aws_cluster_name}-${count.index}"
})) ))}"
} }
/* /*
@@ -74,71 +74,71 @@ resource "aws_instance" "bastion-server" {
*/ */
resource "aws_instance" "k8s-master" { resource "aws_instance" "k8s-master" {
ami = data.aws_ami.distro.id ami = "${data.aws_ami.distro.id}"
instance_type = var.aws_kube_master_size instance_type = "${var.aws_kube_master_size}"
count = var.aws_kube_master_num count = "${var.aws_kube_master_num}"
availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index) availability_zone = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}"
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index) subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private, count.index)}"
vpc_security_group_ids = module.aws-vpc.aws_security_group vpc_security_group_ids = "${module.aws-vpc.aws_security_group}"
iam_instance_profile = module.aws-iam.kube_control_plane-profile iam_instance_profile = "${module.aws-iam.kube-master-profile}"
key_name = var.AWS_SSH_KEY_NAME key_name = "${var.AWS_SSH_KEY_NAME}"
tags = merge(var.default_tags, tomap({ tags = "${merge(var.default_tags, map(
Name = "kubernetes-${var.aws_cluster_name}-master${count.index}" "Name", "kubernetes-${var.aws_cluster_name}-master${count.index}",
"kubernetes.io/cluster/${var.aws_cluster_name}" = "member" "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
Role = "master" "Role", "master"
})) ))}"
} }
resource "aws_elb_attachment" "attach_master_nodes" { resource "aws_elb_attachment" "attach_master_nodes" {
count = var.aws_kube_master_num count = "${var.aws_kube_master_num}"
elb = module.aws-elb.aws_elb_api_id elb = "${module.aws-elb.aws_elb_api_id}"
instance = element(aws_instance.k8s-master.*.id, count.index) instance = "${element(aws_instance.k8s-master.*.id, count.index)}"
} }
resource "aws_instance" "k8s-etcd" { resource "aws_instance" "k8s-etcd" {
ami = data.aws_ami.distro.id ami = "${data.aws_ami.distro.id}"
instance_type = var.aws_etcd_size instance_type = "${var.aws_etcd_size}"
count = var.aws_etcd_num count = "${var.aws_etcd_num}"
availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index) availability_zone = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}"
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index) subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private, count.index)}"
vpc_security_group_ids = module.aws-vpc.aws_security_group vpc_security_group_ids = "${module.aws-vpc.aws_security_group}"
key_name = var.AWS_SSH_KEY_NAME key_name = "${var.AWS_SSH_KEY_NAME}"
tags = merge(var.default_tags, tomap({ tags = "${merge(var.default_tags, map(
Name = "kubernetes-${var.aws_cluster_name}-etcd${count.index}" "Name", "kubernetes-${var.aws_cluster_name}-etcd${count.index}",
"kubernetes.io/cluster/${var.aws_cluster_name}" = "member" "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
Role = "etcd" "Role", "etcd"
})) ))}"
} }
resource "aws_instance" "k8s-worker" { resource "aws_instance" "k8s-worker" {
ami = data.aws_ami.distro.id ami = "${data.aws_ami.distro.id}"
instance_type = var.aws_kube_worker_size instance_type = "${var.aws_kube_worker_size}"
count = var.aws_kube_worker_num count = "${var.aws_kube_worker_num}"
availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index) availability_zone = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}"
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index) subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private, count.index)}"
vpc_security_group_ids = module.aws-vpc.aws_security_group vpc_security_group_ids = "${module.aws-vpc.aws_security_group}"
iam_instance_profile = module.aws-iam.kube-worker-profile iam_instance_profile = "${module.aws-iam.kube-worker-profile}"
key_name = var.AWS_SSH_KEY_NAME key_name = "${var.AWS_SSH_KEY_NAME}"
tags = merge(var.default_tags, tomap({ tags = "${merge(var.default_tags, map(
Name = "kubernetes-${var.aws_cluster_name}-worker${count.index}" "Name", "kubernetes-${var.aws_cluster_name}-worker${count.index}",
"kubernetes.io/cluster/${var.aws_cluster_name}" = "member" "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
Role = "worker" "Role", "worker"
})) ))}"
} }
/* /*
@@ -146,16 +146,16 @@ resource "aws_instance" "k8s-worker" {
* *
*/ */
data "template_file" "inventory" { data "template_file" "inventory" {
template = file("${path.module}/templates/inventory.tpl") template = "${file("${path.module}/templates/inventory.tpl")}"
vars = { vars = {
public_ip_address_bastion = join("\n", formatlist("bastion ansible_host=%s", aws_instance.bastion-server.*.public_ip)) public_ip_address_bastion = "${join("\n", formatlist("bastion ansible_host=%s", aws_instance.bastion-server.*.public_ip))}"
connection_strings_master = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-master.*.private_dns, aws_instance.k8s-master.*.private_ip)) connection_strings_master = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-master.*.private_dns, aws_instance.k8s-master.*.private_ip))}"
connection_strings_node = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.private_dns, aws_instance.k8s-worker.*.private_ip)) connection_strings_node = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.private_dns, aws_instance.k8s-worker.*.private_ip))}"
connection_strings_etcd = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.private_dns, aws_instance.k8s-etcd.*.private_ip)) connection_strings_etcd = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.private_dns, aws_instance.k8s-etcd.*.private_ip))}"
list_master = join("\n", aws_instance.k8s-master.*.private_dns) list_master = "${join("\n", aws_instance.k8s-master.*.private_dns)}"
list_node = join("\n", aws_instance.k8s-worker.*.private_dns) list_node = "${join("\n", aws_instance.k8s-worker.*.private_dns)}"
list_etcd = join("\n", aws_instance.k8s-etcd.*.private_dns) list_etcd = "${join("\n", aws_instance.k8s-etcd.*.private_dns)}"
elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\"" elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
} }
} }
@@ -166,6 +166,6 @@ resource "null_resource" "inventories" {
} }
triggers = { triggers = {
template = data.template_file.inventory.rendered template = "${data.template_file.inventory.rendered}"
} }
} }

View File

@@ -1,19 +1,19 @@
resource "aws_security_group" "aws-elb" { resource "aws_security_group" "aws-elb" {
name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb" name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
vpc_id = var.aws_vpc_id vpc_id = "${var.aws_vpc_id}"
tags = merge(var.default_tags, tomap({ tags = "${merge(var.default_tags, map(
Name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb" "Name", "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
})) ))}"
} }
resource "aws_security_group_rule" "aws-allow-api-access" { resource "aws_security_group_rule" "aws-allow-api-access" {
type = "ingress" type = "ingress"
from_port = var.aws_elb_api_port from_port = "${var.aws_elb_api_port}"
to_port = var.k8s_secure_api_port to_port = "${var.k8s_secure_api_port}"
protocol = "TCP" protocol = "TCP"
cidr_blocks = ["0.0.0.0/0"] cidr_blocks = ["0.0.0.0/0"]
security_group_id = aws_security_group.aws-elb.id security_group_id = "${aws_security_group.aws-elb.id}"
} }
resource "aws_security_group_rule" "aws-allow-api-egress" { resource "aws_security_group_rule" "aws-allow-api-egress" {
@@ -22,19 +22,19 @@ resource "aws_security_group_rule" "aws-allow-api-egress" {
to_port = 65535 to_port = 65535
protocol = "TCP" protocol = "TCP"
cidr_blocks = ["0.0.0.0/0"] cidr_blocks = ["0.0.0.0/0"]
security_group_id = aws_security_group.aws-elb.id security_group_id = "${aws_security_group.aws-elb.id}"
} }
# Create a new AWS ELB for K8S API # Create a new AWS ELB for K8S API
resource "aws_elb" "aws-elb-api" { resource "aws_elb" "aws-elb-api" {
name = "kubernetes-elb-${var.aws_cluster_name}" name = "kubernetes-elb-${var.aws_cluster_name}"
subnets = var.aws_subnet_ids_public subnets = var.aws_subnet_ids_public
security_groups = [aws_security_group.aws-elb.id] security_groups = ["${aws_security_group.aws-elb.id}"]
listener { listener {
instance_port = var.k8s_secure_api_port instance_port = "${var.k8s_secure_api_port}"
instance_protocol = "tcp" instance_protocol = "tcp"
lb_port = var.aws_elb_api_port lb_port = "${var.aws_elb_api_port}"
lb_protocol = "tcp" lb_protocol = "tcp"
} }
@@ -42,7 +42,7 @@ resource "aws_elb" "aws-elb-api" {
healthy_threshold = 2 healthy_threshold = 2
unhealthy_threshold = 2 unhealthy_threshold = 2
timeout = 3 timeout = 3
target = "HTTPS:${var.k8s_secure_api_port}/healthz" target = "TCP:${var.k8s_secure_api_port}"
interval = 30 interval = 30
} }
@@ -51,7 +51,7 @@ resource "aws_elb" "aws-elb-api" {
connection_draining = true connection_draining = true
connection_draining_timeout = 400 connection_draining_timeout = 400
tags = merge(var.default_tags, tomap({ tags = "${merge(var.default_tags, map(
Name = "kubernetes-${var.aws_cluster_name}-elb-api" "Name", "kubernetes-${var.aws_cluster_name}-elb-api"
})) ))}"
} }

View File

@@ -1,7 +1,7 @@
output "aws_elb_api_id" { output "aws_elb_api_id" {
value = aws_elb.aws-elb-api.id value = "${aws_elb.aws-elb-api.id}"
} }
output "aws_elb_api_fqdn" { output "aws_elb_api_fqdn" {
value = aws_elb.aws-elb-api.dns_name value = "${aws_elb.aws-elb-api.dns_name}"
} }

View File

@@ -16,15 +16,15 @@ variable "k8s_secure_api_port" {
variable "aws_avail_zones" { variable "aws_avail_zones" {
description = "Availability Zones Used" description = "Availability Zones Used"
type = list(string) type = "list"
} }
variable "aws_subnet_ids_public" { variable "aws_subnet_ids_public" {
description = "IDs of Public Subnets" description = "IDs of Public Subnets"
type = list(string) type = "list"
} }
variable "default_tags" { variable "default_tags" {
description = "Tags for all resources" description = "Tags for all resources"
type = map(string) type = "map"
} }

View File

@@ -1,6 +1,6 @@
#Add AWS Roles for Kubernetes #Add AWS Roles for Kubernetes
resource "aws_iam_role" "kube_control_plane" { resource "aws_iam_role" "kube-master" {
name = "kubernetes-${var.aws_cluster_name}-master" name = "kubernetes-${var.aws_cluster_name}-master"
assume_role_policy = <<EOF assume_role_policy = <<EOF
@@ -40,9 +40,9 @@ EOF
#Add AWS Policies for Kubernetes #Add AWS Policies for Kubernetes
resource "aws_iam_role_policy" "kube_control_plane" { resource "aws_iam_role_policy" "kube-master" {
name = "kubernetes-${var.aws_cluster_name}-master" name = "kubernetes-${var.aws_cluster_name}-master"
role = aws_iam_role.kube_control_plane.id role = "${aws_iam_role.kube-master.id}"
policy = <<EOF policy = <<EOF
{ {
@@ -77,7 +77,7 @@ EOF
resource "aws_iam_role_policy" "kube-worker" { resource "aws_iam_role_policy" "kube-worker" {
name = "kubernetes-${var.aws_cluster_name}-node" name = "kubernetes-${var.aws_cluster_name}-node"
role = aws_iam_role.kube-worker.id role = "${aws_iam_role.kube-worker.id}"
policy = <<EOF policy = <<EOF
{ {
@@ -130,12 +130,12 @@ EOF
#Create AWS Instance Profiles #Create AWS Instance Profiles
resource "aws_iam_instance_profile" "kube_control_plane" { resource "aws_iam_instance_profile" "kube-master" {
name = "kube_${var.aws_cluster_name}_master_profile" name = "kube_${var.aws_cluster_name}_master_profile"
role = aws_iam_role.kube_control_plane.name role = "${aws_iam_role.kube-master.name}"
} }
resource "aws_iam_instance_profile" "kube-worker" { resource "aws_iam_instance_profile" "kube-worker" {
name = "kube_${var.aws_cluster_name}_node_profile" name = "kube_${var.aws_cluster_name}_node_profile"
role = aws_iam_role.kube-worker.name role = "${aws_iam_role.kube-worker.name}"
} }

View File

@@ -1,7 +1,7 @@
output "kube_control_plane-profile" { output "kube-master-profile" {
value = aws_iam_instance_profile.kube_control_plane.name value = "${aws_iam_instance_profile.kube-master.name}"
} }
output "kube-worker-profile" { output "kube-worker-profile" {
value = aws_iam_instance_profile.kube-worker.name value = "${aws_iam_instance_profile.kube-worker.name}"
} }

View File

@@ -1,55 +1,55 @@
resource "aws_vpc" "cluster-vpc" { resource "aws_vpc" "cluster-vpc" {
cidr_block = var.aws_vpc_cidr_block cidr_block = "${var.aws_vpc_cidr_block}"
#DNS Related Entries #DNS Related Entries
enable_dns_support = true enable_dns_support = true
enable_dns_hostnames = true enable_dns_hostnames = true
tags = merge(var.default_tags, tomap({ tags = "${merge(var.default_tags, map(
Name = "kubernetes-${var.aws_cluster_name}-vpc" "Name", "kubernetes-${var.aws_cluster_name}-vpc"
})) ))}"
} }
resource "aws_eip" "cluster-nat-eip" { resource "aws_eip" "cluster-nat-eip" {
count = length(var.aws_cidr_subnets_public) count = "${length(var.aws_cidr_subnets_public)}"
vpc = true vpc = true
} }
resource "aws_internet_gateway" "cluster-vpc-internetgw" { resource "aws_internet_gateway" "cluster-vpc-internetgw" {
vpc_id = aws_vpc.cluster-vpc.id vpc_id = "${aws_vpc.cluster-vpc.id}"
tags = merge(var.default_tags, tomap({ tags = "${merge(var.default_tags, map(
Name = "kubernetes-${var.aws_cluster_name}-internetgw" "Name", "kubernetes-${var.aws_cluster_name}-internetgw"
})) ))}"
} }
resource "aws_subnet" "cluster-vpc-subnets-public" { resource "aws_subnet" "cluster-vpc-subnets-public" {
vpc_id = aws_vpc.cluster-vpc.id vpc_id = "${aws_vpc.cluster-vpc.id}"
count = length(var.aws_avail_zones) count = "${length(var.aws_avail_zones)}"
availability_zone = element(var.aws_avail_zones, count.index) availability_zone = "${element(var.aws_avail_zones, count.index)}"
cidr_block = element(var.aws_cidr_subnets_public, count.index) cidr_block = "${element(var.aws_cidr_subnets_public, count.index)}"
tags = merge(var.default_tags, tomap({ tags = "${merge(var.default_tags, map(
Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public" "Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public",
"kubernetes.io/cluster/${var.aws_cluster_name}" = "member" "kubernetes.io/cluster/${var.aws_cluster_name}", "member"
})) ))}"
} }
resource "aws_nat_gateway" "cluster-nat-gateway" { resource "aws_nat_gateway" "cluster-nat-gateway" {
count = length(var.aws_cidr_subnets_public) count = "${length(var.aws_cidr_subnets_public)}"
allocation_id = element(aws_eip.cluster-nat-eip.*.id, count.index) allocation_id = "${element(aws_eip.cluster-nat-eip.*.id, count.index)}"
subnet_id = element(aws_subnet.cluster-vpc-subnets-public.*.id, count.index) subnet_id = "${element(aws_subnet.cluster-vpc-subnets-public.*.id, count.index)}"
} }
resource "aws_subnet" "cluster-vpc-subnets-private" { resource "aws_subnet" "cluster-vpc-subnets-private" {
vpc_id = aws_vpc.cluster-vpc.id vpc_id = "${aws_vpc.cluster-vpc.id}"
count = length(var.aws_avail_zones) count = "${length(var.aws_avail_zones)}"
availability_zone = element(var.aws_avail_zones, count.index) availability_zone = "${element(var.aws_avail_zones, count.index)}"
cidr_block = element(var.aws_cidr_subnets_private, count.index) cidr_block = "${element(var.aws_cidr_subnets_private, count.index)}"
tags = merge(var.default_tags, tomap({ tags = "${merge(var.default_tags, map(
Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private" "Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
})) ))}"
} }
#Routing in VPC #Routing in VPC
@@ -57,53 +57,53 @@ resource "aws_subnet" "cluster-vpc-subnets-private" {
#TODO: Do we need two routing tables for each subnet for redundancy or is one enough?
resource "aws_route_table" "kubernetes-public" {
-  vpc_id = aws_vpc.cluster-vpc.id
+  vpc_id = "${aws_vpc.cluster-vpc.id}"
  route {
    cidr_block = "0.0.0.0/0"
-    gateway_id = aws_internet_gateway.cluster-vpc-internetgw.id
+    gateway_id = "${aws_internet_gateway.cluster-vpc-internetgw.id}"
  }
-  tags = merge(var.default_tags, tomap({
-    Name = "kubernetes-${var.aws_cluster_name}-routetable-public"
-  }))
+  tags = "${merge(var.default_tags, map(
+    "Name", "kubernetes-${var.aws_cluster_name}-routetable-public"
+  ))}"
}
resource "aws_route_table" "kubernetes-private" {
-  count = length(var.aws_cidr_subnets_private)
-  vpc_id = aws_vpc.cluster-vpc.id
+  count = "${length(var.aws_cidr_subnets_private)}"
+  vpc_id = "${aws_vpc.cluster-vpc.id}"
  route {
    cidr_block = "0.0.0.0/0"
-    nat_gateway_id = element(aws_nat_gateway.cluster-nat-gateway.*.id, count.index)
+    nat_gateway_id = "${element(aws_nat_gateway.cluster-nat-gateway.*.id, count.index)}"
  }
-  tags = merge(var.default_tags, tomap({
-    Name = "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
-  }))
+  tags = "${merge(var.default_tags, map(
+    "Name", "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
+  ))}"
}
resource "aws_route_table_association" "kubernetes-public" {
-  count = length(var.aws_cidr_subnets_public)
-  subnet_id = element(aws_subnet.cluster-vpc-subnets-public.*.id, count.index)
-  route_table_id = aws_route_table.kubernetes-public.id
+  count = "${length(var.aws_cidr_subnets_public)}"
+  subnet_id = "${element(aws_subnet.cluster-vpc-subnets-public.*.id, count.index)}"
+  route_table_id = "${aws_route_table.kubernetes-public.id}"
}
resource "aws_route_table_association" "kubernetes-private" {
-  count = length(var.aws_cidr_subnets_private)
-  subnet_id = element(aws_subnet.cluster-vpc-subnets-private.*.id, count.index)
-  route_table_id = element(aws_route_table.kubernetes-private.*.id, count.index)
+  count = "${length(var.aws_cidr_subnets_private)}"
+  subnet_id = "${element(aws_subnet.cluster-vpc-subnets-private.*.id, count.index)}"
+  route_table_id = "${element(aws_route_table.kubernetes-private.*.id, count.index)}"
}
#Kubernetes Security Groups
resource "aws_security_group" "kubernetes" {
  name = "kubernetes-${var.aws_cluster_name}-securitygroup"
-  vpc_id = aws_vpc.cluster-vpc.id
-  tags = merge(var.default_tags, tomap({
-    Name = "kubernetes-${var.aws_cluster_name}-securitygroup"
-  }))
+  vpc_id = "${aws_vpc.cluster-vpc.id}"
+  tags = "${merge(var.default_tags, map(
+    "Name", "kubernetes-${var.aws_cluster_name}-securitygroup"
+  ))}"
}
resource "aws_security_group_rule" "allow-all-ingress" {
@@ -111,8 +111,8 @@ resource "aws_security_group_rule" "allow-all-ingress" {
  from_port = 0
  to_port = 65535
  protocol = "-1"
-  cidr_blocks = [var.aws_vpc_cidr_block]
-  security_group_id = aws_security_group.kubernetes.id
+  cidr_blocks = ["${var.aws_vpc_cidr_block}"]
+  security_group_id = "${aws_security_group.kubernetes.id}"
}
resource "aws_security_group_rule" "allow-all-egress" {
@@ -121,7 +121,7 @@ resource "aws_security_group_rule" "allow-all-egress" {
  to_port = 65535
  protocol = "-1"
  cidr_blocks = ["0.0.0.0/0"]
-  security_group_id = aws_security_group.kubernetes.id
+  security_group_id = "${aws_security_group.kubernetes.id}"
}
resource "aws_security_group_rule" "allow-ssh-connections" {
@@ -130,5 +130,5 @@ resource "aws_security_group_rule" "allow-ssh-connections" {
  to_port = 22
  protocol = "TCP"
  cidr_blocks = ["0.0.0.0/0"]
-  security_group_id = aws_security_group.kubernetes.id
+  security_group_id = "${aws_security_group.kubernetes.id}"
}


@@ -1,5 +1,5 @@
output "aws_vpc_id" { output "aws_vpc_id" {
value = aws_vpc.cluster-vpc.id value = "${aws_vpc.cluster-vpc.id}"
} }
output "aws_subnet_ids_private" { output "aws_subnet_ids_private" {
@@ -15,5 +15,5 @@ output "aws_security_group" {
}
output "default_tags" {
-  value = var.default_tags
+  value = "${var.default_tags}"
}


@@ -8,20 +8,20 @@ variable "aws_cluster_name" {
variable "aws_avail_zones" { variable "aws_avail_zones" {
description = "AWS Availability Zones Used" description = "AWS Availability Zones Used"
type = list(string) type = "list"
} }
variable "aws_cidr_subnets_private" { variable "aws_cidr_subnets_private" {
description = "CIDR Blocks for private subnets in Availability zones" description = "CIDR Blocks for private subnets in Availability zones"
type = list(string) type = "list"
} }
variable "aws_cidr_subnets_public" { variable "aws_cidr_subnets_public" {
description = "CIDR Blocks for public subnets in Availability zones" description = "CIDR Blocks for public subnets in Availability zones"
type = list(string) type = "list"
} }
variable "default_tags" { variable "default_tags" {
description = "Default tags for all resources" description = "Default tags for all resources"
type = map(string) type = "map"
} }


@@ -1,17 +1,17 @@
output "bastion_ip" { output "bastion_ip" {
value = join("\n", aws_instance.bastion-server.*.public_ip) value = "${join("\n", aws_instance.bastion-server.*.public_ip)}"
} }
output "masters" { output "masters" {
value = join("\n", aws_instance.k8s-master.*.private_ip) value = "${join("\n", aws_instance.k8s-master.*.private_ip)}"
} }
output "workers" { output "workers" {
value = join("\n", aws_instance.k8s-worker.*.private_ip) value = "${join("\n", aws_instance.k8s-worker.*.private_ip)}"
} }
output "etcd" { output "etcd" {
value = join("\n", aws_instance.k8s-etcd.*.private_ip) value = "${join("\n", aws_instance.k8s-etcd.*.private_ip)}"
} }
output "aws_elb_api_fqdn" { output "aws_elb_api_fqdn" {
@@ -19,9 +19,9 @@ output "aws_elb_api_fqdn" {
}
output "inventory" {
-  value = data.template_file.inventory.rendered
+  value = "${data.template_file.inventory.rendered}"
}
output "default_tags" {
-  value = var.default_tags
+  value = "${var.default_tags}"
}


@@ -7,11 +7,11 @@ ${public_ip_address_bastion}
[bastion]
${public_ip_address_bastion}
-[kube_control_plane]
+[kube-master]
${list_master}
-[kube_node]
+[kube-node]
${list_node}
@@ -19,10 +19,10 @@ ${list_node}
${list_etcd}
-[k8s_cluster:children]
-kube_node
-kube_control_plane
-[k8s_cluster:vars]
+[k8s-cluster:children]
+kube-node
+kube-master
+[k8s-cluster:vars]
${elb_api_fqdn}


@@ -25,7 +25,7 @@ data "aws_ami" "distro" {
  filter {
    name = "name"
-    values = ["ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*"]
+    values = ["CoreOS-stable-*"]
  }
  filter {
@@ -33,7 +33,7 @@ data "aws_ami" "distro" {
values = ["hvm"] values = ["hvm"]
} }
owners = ["099720109477"] # Canonical owners = ["595879546273"] #CoreOS
} }
//AWS VPC Variables //AWS VPC Variables
@@ -44,12 +44,12 @@ variable "aws_vpc_cidr_block" {
variable "aws_cidr_subnets_private" { variable "aws_cidr_subnets_private" {
description = "CIDR Blocks for private subnets in Availability Zones" description = "CIDR Blocks for private subnets in Availability Zones"
type = list(string) type = "list"
} }
variable "aws_cidr_subnets_public" { variable "aws_cidr_subnets_public" {
description = "CIDR Blocks for public subnets in Availability Zones" description = "CIDR Blocks for public subnets in Availability Zones"
type = list(string) type = "list"
} }
//AWS EC2 Settings //AWS EC2 Settings
@@ -101,7 +101,7 @@ variable "k8s_secure_api_port" {
variable "default_tags" { variable "default_tags" {
description = "Default tags for all resources" description = "Default tags for all resources"
type = map(string) type = "map"
} }
variable "inventory_file" { variable "inventory_file" {


@@ -1,154 +0,0 @@
# Kubernetes on Exoscale with Terraform
Provision a Kubernetes cluster on [Exoscale](https://www.exoscale.com/) using Terraform and Kubespray
## Overview
The setup looks like the following:
```text
Kubernetes cluster
+-----------------------+
+---------------+ | +--------------+ |
| | | | +--------------+ |
| API server LB +---------> | | | |
| | | | | Master/etcd | |
+---------------+ | | | node(s) | |
| +-+ | |
| +--------------+ |
| ^ |
| | |
| v |
+---------------+ | +--------------+ |
| | | | +--------------+ |
| Ingress LB +---------> | | | |
| | | | | Worker | |
+---------------+ | | | node(s) | |
| +-+ | |
| +--------------+ |
+-----------------------+
```
## Requirements
* Terraform 0.13.0 or newer
*Terraform 0.12 also works if you modify the provider block to include a version constraint (see the sketch below) and remove all `versions.tf` files*
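A minimal sketch of that 0.12 workaround, assuming you keep the version constraint that `versions.tf` declares (`>= 0.21`): carry the constraint in the provider block itself (for example, in place of the empty `provider "exoscale" {}` block in `main.tf`), since the `required_providers` `source` syntax used in `versions.tf` needs Terraform 0.13 or newer.
```hcl
# Sketch for Terraform 0.12 only: pin the provider version in the provider
# block and delete the versions.tf files (their required_providers "source"
# attribute requires Terraform >= 0.13).
provider "exoscale" {
  version = ">= 0.21"
}
```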
## Quickstart
NOTE: *Assumes you are at the root of the kubespray repo*
Copy the sample inventory for your cluster and the default Terraform variables:
```bash
CLUSTER=my-exoscale-cluster
cp -r inventory/sample inventory/$CLUSTER
cp contrib/terraform/exoscale/default.tfvars inventory/$CLUSTER/
cd inventory/$CLUSTER
```
Edit `default.tfvars` to match your setup. You MUST, at the very least, change `ssh_public_keys`.
```bash
# Ensure $EDITOR points to your favorite editor, e.g., vim, emacs, VS Code, etc.
$EDITOR default.tfvars
```
For authentication you can use the credentials file `~/.cloudstack.ini` or `./cloudstack.ini`.
The file should look something like this:
```ini
[cloudstack]
key = <API key>
secret = <API secret>
```
Follow the [Exoscale IAM Quick-start](https://community.exoscale.com/documentation/iam/quick-start/) to learn how to generate API keys.
### Encrypted credentials
To have the credentials encrypted at rest, you can use [sops](https://github.com/mozilla/sops) and only decrypt the credentials at runtime.
```bash
cat << EOF > cloudstack.ini
[cloudstack]
key =
secret =
EOF
sops --encrypt --in-place --pgp <PGP key fingerprint> cloudstack.ini
sops cloudstack.ini
```
Run Terraform to create the infrastructure:
```bash
terraform init ../../contrib/terraform/exoscale
terraform apply -var-file default.tfvars ../../contrib/terraform/exoscale
```
If your CloudStack credentials file is encrypted with sops, run the following instead:
```bash
terraform init ../../contrib/terraform/exoscale
sops exec-file -no-fifo cloudstack.ini 'CLOUDSTACK_CONFIG={} terraform apply -var-file default.tfvars ../../contrib/terraform/exoscale'
```
You should now have an inventory file named `inventory.ini` that you can use with kubespray.
You can copy this inventory file and use it with kubespray to set up a cluster.
You can run `terraform output` to find out the IP addresses of the nodes, as well as the control-plane and data-plane (ingress) load-balancers.
It is a good idea to check that you have basic SSH connectivity to the nodes. You can do that by running:
```bash
ansible -i inventory.ini -m ping all
```
Example of using this with the default sample inventory:
```bash
ansible-playbook -i inventory.ini ../../cluster.yml -b -v
```
## Teardown
Since the Kubernetes cluster cannot create any load-balancers or disks on its own, teardown is as simple as running `terraform destroy`:
```bash
terraform destroy -var-file default.tfvars ../../contrib/terraform/exoscale
```
## Variables
### Required
* `ssh_public_keys`: List of public SSH keys to install on all machines
* `zone`: The zone where to run the cluster
* `machines`: Machines to provision. The key of each entry is used as the machine name
* `node_type`: The role of this node *(master|worker)*
* `size`: The size to use
* `boot_disk`: The boot disk to use
* `image_name`: Name of the image
* `root_partition_size`: Size *(in GB)* for the root partition
* `ceph_partition_size`: Size *(in GB)* for the partition for rook to use as ceph storage. *(Set to 0 to disable)*
* `node_local_partition_size`: Size *(in GB)* for the partition for node-local-storage. *(Set to 0 to disable)*
* `ssh_whitelist`: List of IP ranges (CIDR) that will be allowed to SSH to the nodes
* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server
* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the Kubernetes nodes on ports 30000-32767 (Kubernetes NodePorts)
### Optional
* `prefix`: Prefix to use for all resources, required to be unique for all clusters in the same project *(Defaults to `default`)*
An example variables file can be found in `default.tfvars`
## Known limitations
### Only single disk
Since Exoscale doesn't support mounting additional disks onto an instance, this script can instead create extra partitions on the boot disk for [Rook](https://rook.io/) and [node-local-storage](https://kubernetes.io/docs/concepts/storage/volumes/#local).
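As a rough illustration (the sizes below are placeholders, not recommendations), a `machines` entry in `default.tfvars` that reserves space for both could look like this:
```hcl
"worker-0" : {
  "node_type" : "worker",
  "size" : "Large",
  "boot_disk" : {
    "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
    "root_partition_size" : 50,
    # non-zero sizes make the cloud-init template carve extra partitions out of /dev/vda
    "node_local_partition_size" : 10,
    "ceph_partition_size" : 100
  }
}
```
With these set, the cloud-init template formats and mounts the node-local partition under `/mnt/disks/node-local-storage` and leaves the ceph partition unformatted for Rook to consume.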
### No Kubernetes API
The current solution doesn't use the [Exoscale Kubernetes cloud controller](https://github.com/exoscale/exoscale-cloud-controller-manager).
This means that we need to set up an HTTP(S) load-balancer in front of all workers and run the Ingress controller as a DaemonSet.


@@ -1,65 +0,0 @@
prefix = "default"
zone = "ch-gva-2"
inventory_file = "inventory.ini"
ssh_public_keys = [
# Put your public SSH key here
"ssh-rsa I-did-not-read-the-docs",
"ssh-rsa I-did-not-read-the-docs 2",
]
machines = {
"master-0" : {
"node_type" : "master",
"size" : "Medium",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,
"node_local_partition_size" : 0,
"ceph_partition_size" : 0
}
},
"worker-0" : {
"node_type" : "worker",
"size" : "Large",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,
"node_local_partition_size" : 0,
"ceph_partition_size" : 0
}
},
"worker-1" : {
"node_type" : "worker",
"size" : "Large",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,
"node_local_partition_size" : 0,
"ceph_partition_size" : 0
}
},
"worker-2" : {
"node_type" : "worker",
"size" : "Large",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,
"node_local_partition_size" : 0,
"ceph_partition_size" : 0
}
}
}
nodeport_whitelist = [
"0.0.0.0/0"
]
ssh_whitelist = [
"0.0.0.0/0"
]
api_server_whitelist = [
"0.0.0.0/0"
]


@@ -1,49 +0,0 @@
provider "exoscale" {}
module "kubernetes" {
source = "./modules/kubernetes-cluster"
prefix = var.prefix
machines = var.machines
ssh_public_keys = var.ssh_public_keys
ssh_whitelist = var.ssh_whitelist
api_server_whitelist = var.api_server_whitelist
nodeport_whitelist = var.nodeport_whitelist
}
#
# Generate ansible inventory
#
data "template_file" "inventory" {
template = file("${path.module}/templates/inventory.tpl")
vars = {
connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s etcd_member_name=etcd%d",
keys(module.kubernetes.master_ip_addresses),
values(module.kubernetes.master_ip_addresses).*.public_ip,
values(module.kubernetes.master_ip_addresses).*.private_ip,
range(1, length(module.kubernetes.master_ip_addresses) + 1)))
connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s",
keys(module.kubernetes.worker_ip_addresses),
values(module.kubernetes.worker_ip_addresses).*.public_ip,
values(module.kubernetes.worker_ip_addresses).*.private_ip))
list_master = join("\n", keys(module.kubernetes.master_ip_addresses))
list_worker = join("\n", keys(module.kubernetes.worker_ip_addresses))
api_lb_ip_address = module.kubernetes.control_plane_lb_ip_address
}
}
resource "null_resource" "inventories" {
provisioner "local-exec" {
command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}"
}
triggers = {
template = data.template_file.inventory.rendered
}
}


@@ -1,193 +0,0 @@
data "exoscale_compute_template" "os_image" {
for_each = var.machines
zone = var.zone
name = each.value.boot_disk.image_name
}
data "exoscale_compute" "master_nodes" {
for_each = exoscale_compute.master
id = each.value.id
# Since private IP address is not assigned until the nics are created we need this
depends_on = [exoscale_nic.master_private_network_nic]
}
data "exoscale_compute" "worker_nodes" {
for_each = exoscale_compute.worker
id = each.value.id
# Since private IP address is not assigned until the nics are created we need this
depends_on = [exoscale_nic.worker_private_network_nic]
}
resource "exoscale_network" "private_network" {
zone = var.zone
name = "${var.prefix}-network"
start_ip = cidrhost(var.private_network_cidr, 1)
# cidr -1 = Broadcast address
# cidr -2 = DHCP server address (exoscale specific)
end_ip = cidrhost(var.private_network_cidr, -3)
netmask = cidrnetmask(var.private_network_cidr)
}
resource "exoscale_compute" "master" {
for_each = {
for name, machine in var.machines :
name => machine
if machine.node_type == "master"
}
display_name = "${var.prefix}-${each.key}"
template_id = data.exoscale_compute_template.os_image[each.key].id
size = each.value.size
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
state = "Running"
zone = var.zone
security_groups = [exoscale_security_group.master_sg.name]
user_data = templatefile(
"${path.module}/templates/cloud-init.tmpl",
{
eip_ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
node_local_partition_size = each.value.boot_disk.node_local_partition_size
ceph_partition_size = each.value.boot_disk.ceph_partition_size
root_partition_size = each.value.boot_disk.root_partition_size
node_type = "master"
ssh_public_keys = var.ssh_public_keys
}
)
}
resource "exoscale_compute" "worker" {
for_each = {
for name, machine in var.machines :
name => machine
if machine.node_type == "worker"
}
display_name = "${var.prefix}-${each.key}"
template_id = data.exoscale_compute_template.os_image[each.key].id
size = each.value.size
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
state = "Running"
zone = var.zone
security_groups = [exoscale_security_group.worker_sg.name]
user_data = templatefile(
"${path.module}/templates/cloud-init.tmpl",
{
eip_ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
node_local_partition_size = each.value.boot_disk.node_local_partition_size
ceph_partition_size = each.value.boot_disk.ceph_partition_size
root_partition_size = each.value.boot_disk.root_partition_size
node_type = "worker"
ssh_public_keys = var.ssh_public_keys
}
)
}
resource "exoscale_nic" "master_private_network_nic" {
for_each = exoscale_compute.master
compute_id = each.value.id
network_id = exoscale_network.private_network.id
}
resource "exoscale_nic" "worker_private_network_nic" {
for_each = exoscale_compute.worker
compute_id = each.value.id
network_id = exoscale_network.private_network.id
}
resource "exoscale_security_group" "master_sg" {
name = "${var.prefix}-master-sg"
description = "Security group for Kubernetes masters"
}
resource "exoscale_security_group_rules" "master_sg_rules" {
security_group_id = exoscale_security_group.master_sg.id
# SSH
ingress {
protocol = "TCP"
cidr_list = var.ssh_whitelist
ports = ["22"]
}
# Kubernetes API
ingress {
protocol = "TCP"
cidr_list = var.api_server_whitelist
ports = ["6443"]
}
}
resource "exoscale_security_group" "worker_sg" {
name = "${var.prefix}-worker-sg"
description = "security group for kubernetes worker nodes"
}
resource "exoscale_security_group_rules" "worker_sg_rules" {
security_group_id = exoscale_security_group.worker_sg.id
# SSH
ingress {
protocol = "TCP"
cidr_list = var.ssh_whitelist
ports = ["22"]
}
# HTTP(S)
ingress {
protocol = "TCP"
cidr_list = ["0.0.0.0/0"]
ports = ["80", "443"]
}
# Kubernetes Nodeport
ingress {
protocol = "TCP"
cidr_list = var.nodeport_whitelist
ports = ["30000-32767"]
}
}
resource "exoscale_ipaddress" "ingress_controller_lb" {
zone = var.zone
healthcheck_mode = "http"
healthcheck_port = 80
healthcheck_path = "/healthz"
healthcheck_interval = 10
healthcheck_timeout = 2
healthcheck_strikes_ok = 2
healthcheck_strikes_fail = 3
}
resource "exoscale_secondary_ipaddress" "ingress_controller_lb" {
for_each = exoscale_compute.worker
compute_id = each.value.id
ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
}
resource "exoscale_ipaddress" "control_plane_lb" {
zone = var.zone
healthcheck_mode = "tcp"
healthcheck_port = 6443
healthcheck_interval = 10
healthcheck_timeout = 2
healthcheck_strikes_ok = 2
healthcheck_strikes_fail = 3
}
resource "exoscale_secondary_ipaddress" "control_plane_lb" {
for_each = exoscale_compute.master
compute_id = each.value.id
ip_address = exoscale_ipaddress.control_plane_lb.ip_address
}


@@ -1,31 +0,0 @@
output "master_ip_addresses" {
value = {
for key, instance in exoscale_compute.master :
instance.name => {
"private_ip" = contains(keys(data.exoscale_compute.master_nodes), key) ? data.exoscale_compute.master_nodes[key].private_network_ip_addresses[0] : ""
"public_ip" = exoscale_compute.master[key].ip_address
}
}
}
output "worker_ip_addresses" {
value = {
for key, instance in exoscale_compute.worker :
instance.name => {
"private_ip" = contains(keys(data.exoscale_compute.worker_nodes), key) ? data.exoscale_compute.worker_nodes[key].private_network_ip_addresses[0] : ""
"public_ip" = exoscale_compute.worker[key].ip_address
}
}
}
output "cluster_private_network_cidr" {
value = var.private_network_cidr
}
output "ingress_controller_lb_ip_address" {
value = exoscale_ipaddress.ingress_controller_lb.ip_address
}
output "control_plane_lb_ip_address" {
value = exoscale_ipaddress.control_plane_lb.ip_address
}


@@ -1,52 +0,0 @@
#cloud-config
%{ if ceph_partition_size > 0 || node_local_partition_size > 0}
bootcmd:
- [ cloud-init-per, once, move-second-header, sgdisk, --move-second-header, /dev/vda ]
%{ if node_local_partition_size > 0 }
# Create partition for node local storage
- [ cloud-init-per, once, create-node-local-part, parted, --script, /dev/vda, 'mkpart extended ext4 ${root_partition_size}GB %{ if ceph_partition_size == 0 }-1%{ else }${root_partition_size + node_local_partition_size}GB%{ endif }' ]
- [ cloud-init-per, once, create-fs-node-local-part, mkfs.ext4, /dev/vda2 ]
%{ endif }
%{ if ceph_partition_size > 0 }
# Create partition for rook to use for ceph
- [ cloud-init-per, once, create-ceph-part, parted, --script, /dev/vda, 'mkpart extended ${root_partition_size + node_local_partition_size}GB -1' ]
%{ endif }
%{ endif }
ssh_authorized_keys:
%{ for ssh_public_key in ssh_public_keys ~}
- ${ssh_public_key}
%{ endfor ~}
write_files:
- path: /etc/netplan/eth1.yaml
content: |
network:
version: 2
ethernets:
eth1:
dhcp4: true
%{ if node_type == "worker" }
# TODO: When a VM is seen as healthy and is added to the EIP loadbalancer
# pool it no longer can send traffic back to itself via the EIP IP
# address.
# Remove this if it ever gets solved.
- path: /etc/netplan/20-eip-fix.yaml
content: |
network:
version: 2
ethernets:
"lo:0":
match:
name: lo
dhcp4: false
addresses:
- ${eip_ip_address}/32
%{ endif }
runcmd:
- netplan apply
%{ if node_local_partition_size > 0 }
- mkdir -p /mnt/disks/node-local-storage
- chown nobody:nogroup /mnt/disks/node-local-storage
- mount /dev/vda2 /mnt/disks/node-local-storage
%{ endif }


@@ -1,42 +0,0 @@
variable "zone" {
type = string
# This is currently the only zone that is supposed to be supporting
# so called "managed private networks".
# See: https://www.exoscale.com/syslog/introducing-managed-private-networks
default = "ch-gva-2"
}
variable "prefix" {}
variable "machines" {
type = map(object({
node_type = string
size = string
boot_disk = object({
image_name = string
root_partition_size = number
ceph_partition_size = number
node_local_partition_size = number
})
}))
}
variable "ssh_public_keys" {
type = list(string)
}
variable "ssh_whitelist" {
type = list(string)
}
variable "api_server_whitelist" {
type = list(string)
}
variable "nodeport_whitelist" {
type = list(string)
}
variable "private_network_cidr" {
default = "172.0.10.0/24"
}


@@ -1,9 +0,0 @@
terraform {
required_providers {
exoscale = {
source = "exoscale/exoscale"
version = ">= 0.21"
}
}
required_version = ">= 0.13"
}


@@ -1,15 +0,0 @@
output "master_ips" {
value = module.kubernetes.master_ip_addresses
}
output "worker_ips" {
value = module.kubernetes.worker_ip_addresses
}
output "ingress_controller_lb_ip_address" {
value = module.kubernetes.ingress_controller_lb_ip_address
}
output "control_plane_lb_ip_address" {
value = module.kubernetes.control_plane_lb_ip_address
}


@@ -1,65 +0,0 @@
prefix = "default"
zone = "ch-gva-2"
inventory_file = "inventory.ini"
ssh_public_keys = [
# Put your public SSH key here
"ssh-rsa I-did-not-read-the-docs",
"ssh-rsa I-did-not-read-the-docs 2",
]
machines = {
"master-0" : {
"node_type" : "master",
"size" : "Small",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,
"node_local_partition_size" : 0,
"ceph_partition_size" : 0
}
},
"worker-0" : {
"node_type" : "worker",
"size" : "Large",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,
"node_local_partition_size" : 0,
"ceph_partition_size" : 0
}
},
"worker-1" : {
"node_type" : "worker",
"size" : "Large",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,
"node_local_partition_size" : 0,
"ceph_partition_size" : 0
}
},
"worker-2" : {
"node_type" : "worker",
"size" : "Large",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,
"node_local_partition_size" : 0,
"ceph_partition_size" : 0
}
}
}
nodeport_whitelist = [
"0.0.0.0/0"
]
ssh_whitelist = [
"0.0.0.0/0"
]
api_server_whitelist = [
"0.0.0.0/0"
]


@@ -1 +0,0 @@
../../../../inventory/sample/group_vars


@@ -1,19 +0,0 @@
[all]
${connection_strings_master}
${connection_strings_worker}
[kube_control_plane]
${list_master}
[kube_control_plane:vars]
supplementary_addresses_in_ssl_keys = [ "${api_lb_ip_address}" ]
[etcd]
${list_master}
[kube_node]
${list_worker}
[k8s_cluster:children]
kube_control_plane
kube_node


@@ -1,46 +0,0 @@
variable "zone" {
description = "The zone where to run the cluster"
}
variable "prefix" {
description = "Prefix for resource names"
default = "default"
}
variable "machines" {
description = "Cluster machines"
type = map(object({
node_type = string
size = string
boot_disk = object({
image_name = string
root_partition_size = number
ceph_partition_size = number
node_local_partition_size = number
})
}))
}
variable "ssh_public_keys" {
description = "List of public SSH keys which are injected into the VMs."
type = list(string)
}
variable "ssh_whitelist" {
description = "List of IP ranges (CIDR) to whitelist for ssh"
type = list(string)
}
variable "api_server_whitelist" {
description = "List of IP ranges (CIDR) to whitelist for kubernetes api server"
type = list(string)
}
variable "nodeport_whitelist" {
description = "List of IP ranges (CIDR) to whitelist for kubernetes nodeports"
type = list(string)
}
variable "inventory_file" {
description = "Where to store the generated inventory file"
}

Some files were not shown because too many files have changed in this diff.