Compare commits


14 Commits

Author SHA1 Message Date
Maxime Guyot
28ee071bd6 Update base image to v2.12.9 (#6492) 2020-08-04 05:10:19 -07:00
Florian Ruynat
0ba1c8ff0a Fix test if openstack_cacert is a base64 string (#6421) (#6475) 2020-08-01 00:29:41 -07:00
Florian Ruynat
d6f859619c Update hashes and set default version to 1.17.9 (#6435) 2020-07-30 00:50:31 -07:00
Florian Ruynat
8a3bd6ba72 Move healthz check to secure ports (#6446) (#6457) 2020-07-29 05:11:47 -07:00
Lovro Seder
d3fca7d47a Fix shellcheck url (#6463) 2020-07-28 06:03:07 -07:00
LucasBoisserie
587f7b33a7 Modify the populate no_proxy task to use the combine filter rather than relying on the hash_behaviour setting being set to merge instead of replace (#6112) (#6346)
Co-authored-by: Cody Seavey <seavey.cody@gmail.com>
2020-07-07 08:05:59 -07:00
Florian Ruynat
d3e8fd808e Update hashes and set default version to 1.17.8 (#6336) 2020-06-30 01:58:07 -07:00
Maxime Guyot
6bfc133d6c [2.13] Update base image to v2.12.7 (#6295)
* Use v2.12.7 base image for CI

* Use 19.03.9 in localhost CI (#6201)
2020-06-22 04:32:39 -07:00
Florian Ruynat
3d6b9d6c15 Update hashes and set default to 1.17.7 (#6286) 2020-06-18 23:43:58 -07:00
Etienne Champetier
39fa9503d9 [2.13] Bump CNI plugins to 0.8.6 (#6228)
https://github.com/containernetworking/plugins/releases/tag/v0.8.6
https://github.com/kubernetes/kubernetes/issues/91507

Signed-off-by: Etienne Champetier <champetier.etienne@gmail.com>
(cherry picked from commit 41b44739b1)
2020-06-09 05:23:18 -07:00
spaced
051e08e31c [2.13] Backport CRI-O bugfixes (#6230)
* Enable crio 1.18 (#6197)

* fix CRI-O repos for centos distributions (#6224)

* fix CRI-O repos for centos distributions

* fix CRI-O repos for centos distributions
- revert workarounds

* fix CRI-O repos for centos distributions
- use https for centos repos

* avoid 302 redirects for centos repos

* Use OS packaging default value for apparmor_profile in crio.conf (#6125)

Co-authored-by: jeanfabrice <github@bobo-rousselin.com>
2020-06-04 07:09:15 -07:00
bozzo
dd539cf360 Fix resolv.conf configuration for Fedora CoreOS. (#6155) 2020-05-28 08:02:02 -07:00
Florian Ruynat
31094b1768 Add new Kubernetes version hashes and set default to 1.17.6 (#6183)
* Add new Kubernetes version hashes and set default to 1.17.6

* Do not rebase against master when committing a release branch
2020-05-25 02:43:11 -07:00
Florian Ruynat
1f4ef0e86e [2.13] Backport recent bugfixes and mainly docker-cli issue (#6179)
* Reorder tests in packet file (#6067)

* Skip molecule tests for Ubuntu 18.04 (#6077)

* Fix kubernetes-dashboard template identation (#6066)

The 98e7a07fba commit updates the
dashboard version to 2.0.0, but the enable-skip-login flag wasn't
updated. This change fixes its indentation to avoid issues when
dashboard_skip_login is enabled.

* Disable OVH CI (#6114)

* Create namespace when dashboard deployment uses customized namespace. (#6107)

* Create namespace when dashboard deployment uses customized namespace.

* Fix syntax.

* Fix apiserver port when upgrading (#6136)

* Fix docker fedora packages (#6097)

* Match docker-cli version with docker-engine version (when available)

* Disable upgrade jobs to allow release 2.13.1 (docker-cli bug)

Co-authored-by: Maxime Guyot <Miouge1@users.noreply.github.com>
Co-authored-by: Victor Morales <electrocucaracha@gmail.com>
Co-authored-by: petruha <5363545+p37ruh4@users.noreply.github.com>
Co-authored-by: Mateus Caruccio <mateus.caruccio@getupcloud.com>
2020-05-22 07:30:37 -07:00
482 changed files with 3521 additions and 31075 deletions


@@ -2,8 +2,15 @@
parseable: true
skip_list:
# see https://docs.ansible.com/ansible-lint/rules/default_rules.html for a list of all default rules
# DO NOT add any other rules to this skip_list, instead use local `# noqa` with a comment explaining WHY it is necessary
# The following rules throw errors.
# These either still need to be corrected in the repository and the rules re-enabled or documented why they are skipped on purpose.
- '301'
- '302'
- '303'
- '305'
- '306'
- '404'
- '503'
# These rules are intentionally skipped:
#


@@ -1,15 +0,0 @@
root = true
[*.{yaml,yml,yml.j2,yaml.j2}]
indent_style = space
indent_size = 2
trim_trailing_whitespace = true
insert_final_newline = true
charset = utf-8
[{Dockerfile}]
indent_style = space
indent_size = 2
trim_trailing_whitespace = true
insert_final_newline = true
charset = utf-8

.gitignore vendored

@@ -1,7 +1,6 @@
.vagrant
*.retry
**/vagrant_ansible_inventory
*.iml
temp
.idea
.tox


@@ -8,7 +8,7 @@ stages:
- deploy-special
variables:
KUBESPRAY_VERSION: v2.13.3
KUBESPRAY_VERSION: v2.12.9
FAILFASTCI_NAMESPACE: 'kargo-ci'
GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
ANSIBLE_FORCE_COLOR: "true"
@@ -42,7 +42,6 @@ before_script:
- packet
image: quay.io/kubespray/kubespray:$KUBESPRAY_VERSION
artifacts:
when: always
paths:
- cluster-dump/


@@ -18,7 +18,7 @@ packet_ubuntu18-calico-aio:
packet_ubuntu20-calico-aio:
stage: deploy-part1
extends: .packet
when: on_success
when: manual
# ### PR JOBS PART2
@@ -63,16 +63,6 @@ packet_ubuntu16-kube-router-sep:
extends: .packet
when: manual
packet_ubuntu16-kube-router-svc-proxy:
stage: deploy-part2
extends: .packet
when: manual
packet_debian10-cilium-svc-proxy:
stage: deploy-part2
extends: .packet
when: manual
packet_debian10-containerd:
stage: deploy-part2
extends: .packet
@@ -100,7 +90,7 @@ packet_centos8-calico:
extends: .packet
when: on_success
packet_fedora32-weave:
packet_fedora30-weave:
stage: deploy-part2
extends: .packet
when: on_success
@@ -110,11 +100,6 @@ packet_opensuse-canal:
extends: .packet
when: on_success
packet_ubuntu18-ovn4nfv:
stage: deploy-part2
extends: .packet
when: on_success
# Contiv does not work in k8s v1.16
# packet_ubuntu16-contiv-sep:
# stage: deploy-part2
@@ -180,18 +165,13 @@ packet_amazon-linux-2-aio:
extends: .packet
when: manual
packet_fedora32-kube-ovn-containerd:
stage: deploy-part2
extends: .packet
when: on_success
# ### PR JOBS PART3
# Long jobs (45min+)
packet_centos7-weave-upgrade-ha:
stage: deploy-part3
extends: .packet
when: on_success
when: manual
variables:
UPGRADE_TEST: basic
MITOGEN_ENABLE: "false"
@@ -199,7 +179,7 @@ packet_centos7-weave-upgrade-ha:
packet_debian9-calico-upgrade:
stage: deploy-part3
extends: .packet
when: on_success
when: manual
variables:
UPGRADE_TEST: graceful
MITOGEN_ENABLE: "false"
@@ -207,7 +187,7 @@ packet_debian9-calico-upgrade:
packet_debian9-calico-upgrade-once:
stage: deploy-part3
extends: .packet
when: on_success
when: manual
variables:
UPGRADE_TEST: graceful
MITOGEN_ENABLE: "false"


@@ -18,9 +18,6 @@
- echo "$PACKET_PRIVATE_KEY" | base64 -d > ~/.ssh/id_rsa
- chmod 400 ~/.ssh/id_rsa
- echo "$PACKET_PUBLIC_KEY" | base64 -d > ~/.ssh/id_rsa.pub
- mkdir -p group_vars
# Random subnet to avoid routing conflicts
- export TF_VAR_subnet_cidr="10.$(( $RANDOM % 256 )).$(( $RANDOM % 256 )).0/24"
.terraform_validate:
extends: .terraform_install
@@ -38,7 +35,6 @@
when: manual
only: [/^pr-.*$/]
artifacts:
when: always
paths:
- cluster-dump/
variables:
@@ -102,116 +98,80 @@ tf-validate-aws:
# TF_VAR_public_key_path: ""
# TF_VAR_operating_system: ubuntu_18_04
.ovh_variables: &ovh_variables
OS_AUTH_URL: https://auth.cloud.ovh.net/v3
OS_PROJECT_ID: 8d3cd5d737d74227ace462dee0b903fe
OS_PROJECT_NAME: "9361447987648822"
OS_USER_DOMAIN_NAME: Default
OS_PROJECT_DOMAIN_ID: default
OS_USERNAME: 8XuhBMfkKVrk
OS_REGION_NAME: UK1
OS_INTERFACE: public
OS_IDENTITY_API_VERSION: "3"
# .ovh_variables: &ovh_variables
# OS_AUTH_URL: https://auth.cloud.ovh.net/v3
# OS_PROJECT_ID: 8d3cd5d737d74227ace462dee0b903fe
# OS_PROJECT_NAME: "9361447987648822"
# OS_USER_DOMAIN_NAME: Default
# OS_PROJECT_DOMAIN_ID: default
# OS_USERNAME: 8XuhBMfkKVrk
# OS_REGION_NAME: UK1
# OS_INTERFACE: public
# OS_IDENTITY_API_VERSION: "3"
# Elastx is generously donating resources for Kubespray on Openstack CI
# Contacts: @gix @bl0m1
.elastx_variables: &elastx_variables
OS_AUTH_URL: https://ops.elastx.cloud:5000
OS_PROJECT_ID: 564c6b461c6b44b1bb19cdb9c2d928e4
OS_PROJECT_NAME: kubespray_ci
OS_USER_DOMAIN_NAME: Default
OS_PROJECT_DOMAIN_ID: default
OS_USERNAME: kubespray@root314.com
OS_REGION_NAME: se-sto
OS_INTERFACE: public
OS_IDENTITY_API_VERSION: "3"
TF_VAR_router_id: "ab95917c-41fb-4881-b507-3a6dfe9403df"
# Since ELASTX is in Stockholm, Mitogen helps with latency
MITOGEN_ENABLE: "false"
# Mitogen doesn't support interpreter discovery yet
ANSIBLE_PYTHON_INTERPRETER: "/usr/bin/python3"
# tf-ovh_cleanup:
# stage: unit-tests
# tags: [light]
# image: python
# variables:
# <<: *ovh_variables
# before_script:
# - pip install -r scripts/openstack-cleanup/requirements.txt
# script:
# - ./scripts/openstack-cleanup/main.py
tf-elastx_cleanup:
stage: unit-tests
tags: [light]
image: python
variables:
<<: *elastx_variables
before_script:
- pip install -r scripts/openstack-cleanup/requirements.txt
script:
- ./scripts/openstack-cleanup/main.py
# tf-ovh_ubuntu18-calico:
# extends: .terraform_apply
# when: on_success
# variables:
# <<: *ovh_variables
# TF_VERSION: 0.12.24
# PROVIDER: openstack
# CLUSTER: $CI_COMMIT_REF_NAME
# ANSIBLE_TIMEOUT: "60"
# SSH_USER: ubuntu
# TF_VAR_number_of_k8s_masters: "0"
# TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
# TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
# TF_VAR_number_of_etcd: "0"
# TF_VAR_number_of_k8s_nodes: "0"
# TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
# TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
# TF_VAR_number_of_bastions: "0"
# TF_VAR_number_of_k8s_masters_no_etcd: "0"
# TF_VAR_use_neutron: "0"
# TF_VAR_floatingip_pool: "Ext-Net"
# TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
# TF_VAR_network_name: "Ext-Net"
# TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
# TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
# TF_VAR_image: "Ubuntu 18.04"
# TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
tf-elastx_ubuntu18-calico:
extends: .terraform_apply
stage: deploy-part3
when: on_success
variables:
<<: *elastx_variables
TF_VERSION: 0.12.24
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME
ANSIBLE_TIMEOUT: "60"
SSH_USER: ubuntu
TF_VAR_number_of_k8s_masters: "1"
TF_VAR_number_of_k8s_masters_no_floating_ip: "0"
TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
TF_VAR_number_of_etcd: "0"
TF_VAR_number_of_k8s_nodes: "1"
TF_VAR_number_of_k8s_nodes_no_floating_ip: "0"
TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
TF_VAR_number_of_bastions: "0"
TF_VAR_number_of_k8s_masters_no_etcd: "0"
TF_VAR_floatingip_pool: "elx-public1"
TF_VAR_dns_nameservers: '["1.1.1.1", "8.8.8.8", "8.8.4.4"]'
TF_VAR_use_access_ip: "0"
TF_VAR_external_net: "600b8501-78cb-4155-9c9f-23dfcba88828"
TF_VAR_network_name: "ci-$CI_JOB_ID"
TF_VAR_az_list: '["sto1"]'
TF_VAR_az_list_node: '["sto1"]'
TF_VAR_flavor_k8s_master: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
TF_VAR_flavor_k8s_node: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
TF_VAR_image: ubuntu-18.04-server-latest
TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
tf-ovh_cleanup:
stage: unit-tests
tags: [light]
image: python
environment: ovh
variables:
<<: *ovh_variables
before_script:
- pip install -r scripts/openstack-cleanup/requirements.txt
script:
- ./scripts/openstack-cleanup/main.py
tf-ovh_ubuntu18-calico:
extends: .terraform_apply
when: on_success
environment: ovh
variables:
<<: *ovh_variables
TF_VERSION: 0.12.24
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME
ANSIBLE_TIMEOUT: "60"
SSH_USER: ubuntu
TF_VAR_number_of_k8s_masters: "0"
TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
TF_VAR_number_of_etcd: "0"
TF_VAR_number_of_k8s_nodes: "0"
TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
TF_VAR_number_of_bastions: "0"
TF_VAR_number_of_k8s_masters_no_etcd: "0"
TF_VAR_use_neutron: "0"
TF_VAR_floatingip_pool: "Ext-Net"
TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
TF_VAR_network_name: "Ext-Net"
TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
TF_VAR_image: "Ubuntu 18.04"
TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
# tf-ovh_coreos-calico:
# extends: .terraform_apply
# when: on_success
# variables:
# <<: *ovh_variables
# TF_VERSION: 0.12.24
# PROVIDER: openstack
# CLUSTER: $CI_COMMIT_REF_NAME
# ANSIBLE_TIMEOUT: "60"
# SSH_USER: core
# TF_VAR_number_of_k8s_masters: "0"
# TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
# TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
# TF_VAR_number_of_etcd: "0"
# TF_VAR_number_of_k8s_nodes: "0"
# TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
# TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
# TF_VAR_number_of_bastions: "0"
# TF_VAR_number_of_k8s_masters_no_etcd: "0"
# TF_VAR_use_neutron: "0"
# TF_VAR_floatingip_pool: "Ext-Net"
# TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
# TF_VAR_network_name: "Ext-Net"
# TF_VAR_flavor_k8s_master: "4d4fd037-9493-4f2b-9afe-b542b5248eac" # b2-7
# TF_VAR_flavor_k8s_node: "4d4fd037-9493-4f2b-9afe-b542b5248eac" # b2-7
# TF_VAR_image: "CoreOS Stable"
# TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'


@@ -20,7 +20,7 @@ molecule_tests:
extends: .testcases
variables:
CI_PLATFORM: "vagrant"
SSH_USER: "vagrant"
SSH_USER: "kubespray"
VAGRANT_DEFAULT_PROVIDER: "libvirt"
KUBESPRAY_VAGRANT_CONFIG: tests/files/${CI_JOB_NAME}.rb
tags: [c3.small.x86]
@@ -34,9 +34,9 @@ molecule_tests:
- python -m pip install -r tests/requirements.txt
- ./tests/scripts/vagrant_clean.sh
script:
- ./tests/scripts/testcases_run.sh
- vagrant up
after_script:
- chronic ./tests/scripts/testcases_cleanup.sh
- vagrant destroy --force
vagrant_ubuntu18-flannel:
stage: deploy-part2
@@ -46,9 +46,4 @@ vagrant_ubuntu18-flannel:
vagrant_ubuntu18-weave-medium:
stage: deploy-part2
extends: .vagrant
when: manual
vagrant_ubuntu20-flannel:
stage: deploy-part2
extends: .vagrant
when: on_success
when: manual


@@ -6,12 +6,12 @@ RUN apt update -y && \
apt install -y \
libssl-dev python3-dev sshpass apt-transport-https jq moreutils \
ca-certificates curl gnupg2 software-properties-common python3-pip rsync
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable" \
&& apt update -y && apt-get install docker-ce -y
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable" \
&& apt update -y && apt-get install docker-ce -y
COPY . .
RUN /usr/bin/python3 -m pip install pip -U && /usr/bin/python3 -m pip install -r tests/requirements.txt && python3 -m pip install -r requirements.txt && update-alternatives --install /usr/bin/python python /usr/bin/python3 1
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.17.5/bin/linux/amd64/kubectl \


@@ -5,7 +5,7 @@
If you have questions, check the documentation at [kubespray.io](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
You can get your invite [here](http://slack.k8s.io/)
- Can be deployed on **[AWS](docs/aws.md), GCE, [Azure](docs/azure.md), [OpenStack](docs/openstack.md), [vSphere](docs/vsphere.md), [Packet](docs/packet.md) (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal**
- Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere, Packet (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal**
- **Highly available** cluster
- **Composable** (Choice of the network plugin for instance)
- Supports most popular **Linux distributions**
@@ -75,7 +75,6 @@ vagrant up
- [Requirements](#requirements)
- [Kubespray vs ...](docs/comparisons.md)
- [Getting started](docs/getting-started.md)
- [Setting up your first cluster](docs/setting-up-your-first-cluster.md)
- [Ansible inventory and tags](docs/ansible.md)
- [Integration with existing ansible repo](docs/integration.md)
- [Deployment data variables](docs/vars.md)
@@ -83,7 +82,7 @@ vagrant up
- [HA mode](docs/ha-mode.md)
- [Network plugins](#network-plugins)
- [Vagrant install](docs/vagrant.md)
- [Flatcar Container Linux bootstrap](docs/flatcar.md)
- [CoreOS bootstrap](docs/coreos.md)
- [Fedora CoreOS bootstrap](docs/fcos.md)
- [Debian Jessie setup](docs/debian.md)
- [openSUSE setup](docs/opensuse.md)
@@ -97,57 +96,53 @@ vagrant up
- [Large deployments](docs/large-deployments.md)
- [Adding/replacing a node](docs/nodes.md)
- [Upgrades basics](docs/upgrades.md)
- [Air-Gap installation](docs/offline-environment.md)
- [Roadmap](docs/roadmap.md)
## Supported Linux Distributions
- **Flatcar Container Linux by Kinvolk**
- **Container Linux by CoreOS**
- **Debian** Buster, Jessie, Stretch, Wheezy
- **Ubuntu** 16.04, 18.04, 20.04
- **CentOS/RHEL** 7, 8 (experimental: see [centos 8 notes](docs/centos8.md))
- **Fedora** 31, 32
- **Ubuntu** 16.04, 18.04
- **CentOS/RHEL** 7, 8 (experimental: see [centos 8 notes](docs/centos8.md)
- **Fedora** 30, 31
- **Fedora CoreOS** (experimental: see [fcos Note](docs/fcos.md))
- **openSUSE** Leap 42.3/Tumbleweed
- **Oracle Linux** 7, 8 (experimental: [centos 8 notes](docs/centos8.md) apply)
- **Oracle Linux** 7
Note: Upstart/SysV init based OS types are not supported.
## Supported Components
- Core
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.18.9
- [etcd](https://github.com/coreos/etcd) v3.4.3
- [docker](https://www.docker.com/) v19.03 (see note)
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.17.9
- [etcd](https://github.com/coreos/etcd) v3.3.12
- [docker](https://www.docker.com/) v18.06 (see note)
- [containerd](https://containerd.io/) v1.2.13
- [cri-o](http://cri-o.io/) v1.17 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
- Network Plugin
- [cni-plugins](https://github.com/containernetworking/plugins) v0.8.6
- [calico](https://github.com/projectcalico/calico) v3.15.2
- [calico](https://github.com/projectcalico/calico) v3.13.2
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
- [cilium](https://github.com/cilium/cilium) v1.8.3
- [cilium](https://github.com/cilium/cilium) v1.7.2
- [contiv](https://github.com/contiv/install) v1.2.1
- [flanneld](https://github.com/coreos/flannel) v0.12.0
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.3.0
- [kube-router](https://github.com/cloudnativelabs/kube-router) v1.0.1
- [multus](https://github.com/intel/multus-cni) v3.6.0
- [ovn4nfv](https://github.com/opnfv/ovn4nfv-k8s-plugin) v1.1.0
- [weave](https://github.com/weaveworks/weave) v2.7.0
- [kube-router](https://github.com/cloudnativelabs/kube-router) v0.4.0
- [multus](https://github.com/intel/multus-cni) v3.4.1
- [weave](https://github.com/weaveworks/weave) v2.6.2
- Application
- [ambassador](https://github.com/datawire/ambassador): v1.5
- [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
- [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
- [cert-manager](https://github.com/jetstack/cert-manager) v0.16.1
- [coredns](https://github.com/coredns/coredns) v1.6.7
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.35.0
- [cert-manager](https://github.com/jetstack/cert-manager) v0.11.1
- [coredns](https://github.com/coredns/coredns) v1.6.5
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.30.0
Note: The list of validated [docker versions](https://kubernetes.io/docs/setup/production-environment/container-runtimes/#docker) is 1.13.1, 17.03, 17.06, 17.09, 18.06, 18.09 and 19.03. The recommended docker version is 19.03. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
Note: The list of validated [docker versions](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.16.md) was updated to 1.13.1, 17.03, 17.06, 17.09, 18.06, 18.09. kubeadm now properly recognizes Docker 18.09.0 and newer, but still treats 18.06 as the default supported version. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
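To illustrate the pinning suggestion in the note above, here is a minimal, hedged sketch (assuming Docker CE installed from Docker's own repositories; package names may differ on your distribution):

```shell
# Debian/Ubuntu: hold the Docker packages at their currently installed version
sudo apt-mark hold docker-ce docker-ce-cli containerd.io

# CentOS/RHEL: lock the versions with the yum versionlock plugin
sudo yum install -y yum-plugin-versionlock
sudo yum versionlock add docker-ce docker-ce-cli containerd.io
```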
## Requirements
- **Minimum required version of Kubernetes is v1.17**
- **Minimum required version of Kubernetes is v1.15**
- **Ansible v2.9+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/downloads.md#offline-environment))
- The target servers are configured to allow **IPv4 forwarding**.
- **Your ssh key must be copied** to all the servers part of your inventory.
- The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
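As a quick, hedged illustration of the IPv4-forwarding and SSH-key requirements above (the user and host names are placeholders):

```shell
# Enable IPv4 forwarding on each target server and persist it
echo 'net.ipv4.ip_forward = 1' | sudo tee /etc/sysctl.d/99-kubespray.conf
sudo sysctl --system

# Copy your SSH public key to every host in the inventory
ssh-copy-id ubuntu@node1
```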
@@ -182,8 +177,6 @@ You can choose between 10 network plugins. (default: `calico`, except Vagrant us
- [contiv](docs/contiv.md): supports vlan, vxlan, bgp and Cisco SDN networking. This plugin is able to
apply firewall policies, segregate containers in multiple network and bridging pods onto physical networks.
- [ovn4nfv](docs/ovn4nfv.md): [ovn4nfv-k8s-plugins](https://github.com/opnfv/ovn4nfv-k8s-plugin) is the network controller, OVS agent and CNI server to offer basic SFC and OVN overlay networking.
- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
(Please refer to `weave` [troubleshooting documentation](https://www.weave.works/docs/net/latest/troubleshooting/)).
@@ -202,12 +195,6 @@ The choice is defined with the variable `kube_network_plugin`. There is also an
option to leverage built-in cloud provider networking instead.
See also [Network checker](docs/netcheck.md).
## Ingress Plugins
- [ambassador](docs/ambassador.md): the Ambassador Ingress Controller and API gateway.
- [nginx](https://kubernetes.github.io/ingress-nginx): the NGINX Ingress Controller.
## Community docs and resources
- [kubernetes.io/docs/setup/production-environment/tools/kubespray/](https://kubernetes.io/docs/setup/production-environment/tools/kubespray/)
@@ -222,8 +209,7 @@ See also [Network checker](docs/netcheck.md).
## CI Tests
[![Build graphs](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/badges/master/pipeline.svg)](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/pipelines)
CI/end-to-end tests sponsored by: [CNCF](https://cncf.io), [Packet](https://www.packet.com/), [OVHcloud](https://www.ovhcloud.com/), [ELASTX](https://elastx.se/).
[![Build graphs](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/badges/master/build.svg)](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/pipelines)
CI/end-to-end tests sponsored by Google (GCE)
See the [test matrix](docs/test_cases.md) for details.

Vagrantfile vendored

@@ -9,29 +9,32 @@ Vagrant.require_version ">= 2.0.0"
CONFIG = File.join(File.dirname(__FILE__), ENV['KUBESPRAY_VAGRANT_CONFIG'] || 'vagrant/config.rb')
COREOS_URL_TEMPLATE = "https://storage.googleapis.com/%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant.json"
FLATCAR_URL_TEMPLATE = "https://%s.release.flatcar-linux.net/amd64-usr/current/flatcar_production_vagrant.json"
# Uniq disk UUID for libvirt
DISK_UUID = Time.now.utc.to_i
SUPPORTED_OS = {
"coreos-stable" => {box: "coreos-stable", user: "core", box_url: COREOS_URL_TEMPLATE % ["stable"]},
"coreos-alpha" => {box: "coreos-alpha", user: "core", box_url: COREOS_URL_TEMPLATE % ["alpha"]},
"coreos-beta" => {box: "coreos-beta", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
"flatcar-stable" => {box: "flatcar-stable", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["stable"]},
"flatcar-beta" => {box: "flatcar-beta", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["beta"]},
"flatcar-alpha" => {box: "flatcar-alpha", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["alpha"]},
"flatcar-edge" => {box: "flatcar-edge", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["edge"]},
"ubuntu1604" => {box: "generic/ubuntu1604", user: "vagrant"},
"ubuntu1804" => {box: "generic/ubuntu1804", user: "vagrant"},
"ubuntu2004" => {box: "generic/ubuntu2004", user: "vagrant"},
"ubuntu2004" => {box: "geerlingguy/ubuntu2004", user: "vagrant"},
"centos" => {box: "centos/7", user: "vagrant"},
"centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
"centos8" => {box: "centos/8", user: "vagrant"},
"centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
"fedora30" => {box: "fedora/30-cloud-base", user: "vagrant"},
"fedora31" => {box: "fedora/31-cloud-base", user: "vagrant"},
"fedora32" => {box: "fedora/32-cloud-base", user: "vagrant"},
"opensuse" => {box: "bento/opensuse-leap-15.1", user: "vagrant"},
"opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
"oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
}
if File.exist?(CONFIG)
@@ -43,7 +46,7 @@ $num_instances ||= 3
$instance_name_prefix ||= "k8s"
$vm_gui ||= false
$vm_memory ||= 2048
$vm_cpus ||= 2
$vm_cpus ||= 1
$shared_folders ||= {}
$forwarded_ports ||= {}
$subnet ||= "172.18.8"
@@ -51,8 +54,6 @@ $os ||= "ubuntu1804"
$network_plugin ||= "flannel"
# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
$multi_networking ||= false
$download_run_once ||= "True"
$download_force_cache ||= "True"
# The first three nodes are etcd servers
$etcd_instances ||= $num_instances
# The first two nodes are kube masters
@@ -67,9 +68,8 @@ $override_disk_size ||= false
$disk_size ||= "20GB"
$local_path_provisioner_enabled ||= false
$local_path_provisioner_claim_root ||= "/opt/local-path-provisioner/"
$libvirt_nested ||= false
$playbook ||= "cluster.yml"
$playbook = "cluster.yml"
host_vars = {}
@@ -145,8 +145,6 @@ Vagrant.configure("2") do |config|
end
node.vm.provider :libvirt do |lv|
lv.nested = $libvirt_nested
lv.cpu_mode = "host-model"
lv.memory = $vm_memory
lv.cpus = $vm_cpus
lv.default_prefix = 'kubespray'
@@ -187,24 +185,19 @@ Vagrant.configure("2") do |config|
# Disable swap for each vm
node.vm.provision "shell", inline: "swapoff -a"
# Disable firewalld on oraclelinux vms
if ["oraclelinux","oraclelinux8"].include? $os
node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld"
end
host_vars[vm_name] = {
"ip": ip,
"flannel_interface": "eth1",
"kube_network_plugin": $network_plugin,
"kube_network_plugin_multus": $multi_networking,
"download_run_once": $download_run_once,
"download_run_once": "True",
"download_localhost": "False",
"download_cache_dir": ENV['HOME'] + "/kubespray_cache",
# Make kubespray cache even when download_run_once is false
"download_force_cache": $download_force_cache,
"download_force_cache": "True",
# Keeping the cache on the nodes can improve provisioning speed while debugging kubespray
"download_keep_remote_cache": "False",
"docker_rpm_keepcache": "1",
"docker_keepcache": "1",
# These two settings will put kubectl and admin.config in $inventory/artifacts
"kubeconfig_localhost": "True",
"kubectl_localhost": "True",


@@ -1,2 +1,2 @@
---
theme: jekyll-theme-slate
theme: jekyll-theme-slate


@@ -4,7 +4,6 @@
- hosts: all
gather_facts: false
tags: always
tasks:
- name: "Set up proxy environment"
set_fact:
@@ -32,7 +31,6 @@
- { role: bootstrap-os, tags: bootstrap-os}
- name: Gather facts
tags: always
import_playbook: facts.yml
- hosts: k8s-cluster:etcd


@@ -60,7 +60,6 @@ It will create the file ./inventory which can then be used with kubespray, e.g.:
```shell
$ cd kubespray-root-dir
$ sudo pip3 install -r requirements.txt
$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all/all.yml" cluster.yml
```


@@ -9,11 +9,18 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
exit 1
fi
ansible-playbook generate-templates.yml
az deployment group create --template-file ./.generated/network.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
if az &>/dev/null; then
echo "azure cli 2.0 found, using it instead of 1.0"
./apply-rg_2.sh "$AZURE_RESOURCE_GROUP"
elif azure &>/dev/null; then
ansible-playbook generate-templates.yml
azure group deployment create -f ./.generated/network.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
else
echo "Azure cli not found"
fi

contrib/azurerm/apply-rg_2.sh Executable file

@@ -0,0 +1,19 @@
#!/usr/bin/env bash
set -e
AZURE_RESOURCE_GROUP="$1"
if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
echo "AZURE_RESOURCE_GROUP is missing"
exit 1
fi
ansible-playbook generate-templates.yml
az deployment group create --template-file ./.generated/network.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
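Usage note for the new helper (a hedged example; the resource group name is a placeholder and must already exist):

```shell
# Generates the ARM templates and deploys them into the given resource group
./apply-rg_2.sh my-kubespray-rg
```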


@@ -9,6 +9,10 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
exit 1
fi
ansible-playbook generate-templates.yml
az group deployment create -g "$AZURE_RESOURCE_GROUP" --template-file ./.generated/clear-rg.json --mode Complete
if az &>/dev/null; then
echo "azure cli 2.0 found, using it instead of 1.0"
./clear-rg_2.sh "$AZURE_RESOURCE_GROUP"
else
ansible-playbook generate-templates.yml
azure group deployment create -g "$AZURE_RESOURCE_GROUP" -f ./.generated/clear-rg.json -m Complete
fi

contrib/azurerm/clear-rg_2.sh Executable file

@@ -0,0 +1,14 @@
#!/usr/bin/env bash
set -e
AZURE_RESOURCE_GROUP="$1"
if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
echo "AZURE_RESOURCE_GROUP is missing"
exit 1
fi
ansible-playbook generate-templates.yml
az group deployment create -g "$AZURE_RESOURCE_GROUP" --template-file ./.generated/clear-rg.json --mode Complete


@@ -1,6 +1,6 @@
---
- name: Query Azure VMs # noqa 301
- name: Query Azure VMs
command: azure vm list-ip-address --json {{ azure_resource_group }}
register: vm_list_cmd


@@ -1,14 +1,14 @@
---
- name: Query Azure VMs IPs # noqa 301
- name: Query Azure VMs IPs
command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
register: vm_ip_list_cmd
- name: Query Azure VMs Roles # noqa 301
- name: Query Azure VMs Roles
command: az vm list -o json --resource-group {{ azure_resource_group }}
register: vm_list_cmd
- name: Query Azure Load Balancer Public IP # noqa 301
- name: Query Azure Load Balancer Public IP
command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
register: lb_pubip_cmd


@@ -69,7 +69,7 @@
# Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
# handle manually
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave) # noqa 301
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)
raw: |
echo {{ item | hash('sha1') }} > /etc/machine-id.new
mv -b /etc/machine-id.new /etc/machine-id


@@ -41,7 +41,6 @@ from ruamel.yaml import YAML
import os
import re
import subprocess
import sys
ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster',
@@ -70,7 +69,6 @@ MASSIVE_SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 200))
DEBUG = get_var_as_bool("DEBUG", True)
HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")
USE_REAL_HOSTNAME = get_var_as_bool("USE_REAL_HOSTNAME", False)
# Configurable as shell vars end
@@ -169,7 +167,6 @@ class KubesprayInventory(object):
# FIXME(mattymo): Fix condition where delete then add reuses highest id
next_host_id = highest_host_id + 1
next_host = ""
all_hosts = existing_hosts.copy()
for host in changed_hosts:
@@ -194,14 +191,8 @@ class KubesprayInventory(object):
self.debug("Skipping existing host {0}.".format(ip))
continue
if USE_REAL_HOSTNAME:
cmd = ("ssh -oStrictHostKeyChecking=no "
+ access_ip + " 'hostname -s'")
next_host = subprocess.check_output(cmd, shell=True)
next_host = next_host.strip().decode('ascii')
else:
next_host = "{0}{1}".format(HOST_PREFIX, next_host_id)
next_host_id += 1
next_host = "{0}{1}".format(HOST_PREFIX, next_host_id)
next_host_id += 1
all_hosts[next_host] = {'ansible_host': access_ip,
'ip': ip,
'access_ip': access_ip}
@@ -238,7 +229,7 @@ class KubesprayInventory(object):
return [ip_address(ip).exploded for ip in range(start, end + 1)]
for host in hosts:
if '-' in host and not (host.startswith('-') or host[0].isalpha()):
if '-' in host and not host.startswith('-'):
start, end = host.strip().split('-')
try:
reworked_hosts.extend(ips(start, end))
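For context, this inventory builder (assumed here to be contrib/inventory_builder/inventory.py) is normally driven from the shell; a hedged usage sketch with placeholder paths and addresses:

```shell
# Build or extend a hosts file from a list of node IPs; a dash denotes an inclusive range
CONFIG_FILE=inventory/mycluster/hosts.yml HOST_PREFIX=node \
  python3 contrib/inventory_builder/inventory.py 10.10.1.3 10.10.1.4 10.10.1.10-10.10.1.12
```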

contrib/metallb/README.md Normal file

@@ -0,0 +1,12 @@
# Deploy MetalLB into Kubespray/Kubernetes
```
MetalLB hooks into your Kubernetes cluster, and provides a network load-balancer implementation. In short, it allows you to create Kubernetes services of type “LoadBalancer” in clusters that dont run on a cloud provider, and thus cannot simply hook into paid products to provide load-balancers.
```
This playbook aims to automate [this](https://metallb.universe.tf/concepts/layer2/). It deploys MetalLB into kubernetes and sets up a layer 2 loadbalancer.
## Install
```
Defaults can be found in contrib/metallb/roles/provision/defaults/main.yml. You can override the defaults by copying the contents of this file to somewhere in inventory/mycluster/group_vars such as inventory/mycluster/groups_vars/k8s-cluster/addons.yml and making any adjustments as required.
ansible-playbook --ask-become -i inventory/sample/hosts.ini contrib/metallb/metallb.yml
```
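A minimal walk-through of the install steps above (hedged; the inventory path is a placeholder):

```shell
# Copy the provision defaults into your inventory's group_vars and adjust as needed
cp contrib/metallb/roles/provision/defaults/main.yml \
   inventory/mycluster/group_vars/k8s-cluster/addons.yml

# Deploy MetalLB in layer 2 mode
ansible-playbook --ask-become -i inventory/mycluster/hosts.ini contrib/metallb/metallb.yml
```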

contrib/metallb/library Symbolic link

@@ -0,0 +1 @@
../../library


@@ -0,0 +1,12 @@
---
- hosts: bastion[0]
gather_facts: False
roles:
- { role: kubespray-defaults}
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
- hosts: kube-master[0]
tags:
- "provision"
roles:
- { role: kubespray-defaults}
- { role: provision }


@@ -0,0 +1,16 @@
---
metallb:
ip_range:
- "10.5.0.50-10.5.0.99"
protocol: "layer2"
# additional_address_pools:
# kube_service_pool:
# ip_range:
# - 10.5.1.50-10.5.1.99"
# protocol: "layer2"
# auto_assign: false
limits:
cpu: "100m"
memory: "100Mi"
port: "7472"
version: v0.7.3


@@ -5,18 +5,6 @@
when:
- "kube_proxy_mode == 'ipvs' and not kube_proxy_strict_arp"
- name: Kubernetes Apps | Check cluster settings for MetalLB
fail:
msg: "metallb_ip_range is mandatory to be specified for MetalLB"
when:
- metallb_ip_range is not defined or not metallb_ip_range
- name: Kubernetes Apps | Check BGP peers for MetalLB
fail:
msg: "metallb_peers is mandatory when metallb_protocol is bgp"
when:
- metallb_protocol == 'bgp' and metallb_peers is not defined
- name: Kubernetes Apps | Check AppArmor status
command: which apparmor_parser
register: apparmor_status
@@ -50,25 +38,3 @@
with_items: "{{ rendering.results }}"
when:
- "inventory_hostname == groups['kube-master'][0]"
- name: Kubernetes Apps | Check existing secret of MetalLB
command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf -n metallb-system get secret memberlist"
register: metallb_secret
become: true
ignore_errors: yes
when:
- inventory_hostname == groups['kube-master'][0]
- name: Kubernetes Apps | Create random bytes for MetalLB
command: "openssl rand -base64 32"
register: metallb_rand
when:
- inventory_hostname == groups['kube-master'][0]
- metallb_secret.rc != 0
- name: Kubernetes Apps | Install secret of MetalLB if not existing
command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf -n metallb-system create secret generic memberlist --from-literal=secretkey={{ metallb_rand.stdout }}"
become: true
when:
- inventory_hostname == groups['kube-master'][0]
- metallb_secret.rc != 0


@@ -0,0 +1,25 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
namespace: metallb-system
name: config
data:
config: |
address-pools:
- name: loadbalanced
protocol: {{ metallb.protocol }}
addresses:
{% for ip_range in metallb.ip_range %}
- {{ ip_range }}
{% endfor %}
{% if metallb.additional_address_pools is defined %}{% for pool in metallb.additional_address_pools %}
- name: {{ pool }}
protocol: {{ metallb.additional_address_pools[pool].protocol }}
addresses:
{% for ip_range in metallb.additional_address_pools[pool].ip_range %}
- {{ ip_range }}
{% endfor %}
auto-assign: {{ metallb.additional_address_pools[pool].auto_assign }}
{% endfor %}
{% endif %}


@@ -0,0 +1,263 @@
apiVersion: v1
kind: Namespace
metadata:
name: metallb-system
labels:
app: metallb
---
apiVersion: v1
kind: ServiceAccount
metadata:
namespace: metallb-system
name: controller
labels:
app: metallb
---
apiVersion: v1
kind: ServiceAccount
metadata:
namespace: metallb-system
name: speaker
labels:
app: metallb
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: metallb-system:controller
labels:
app: metallb
rules:
- apiGroups: [""]
resources: ["services"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["services/status"]
verbs: ["update"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: metallb-system:speaker
labels:
app: metallb
rules:
- apiGroups: [""]
resources: ["services", "endpoints", "nodes"]
verbs: ["get", "list", "watch"]
{% if podsecuritypolicy_enabled %}
- apiGroups: ["policy"]
resourceNames: ["metallb"]
resources: ["podsecuritypolicies"]
verbs: ["use"]
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: metallb
annotations:
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
{% if apparmor_enabled %}
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
{% endif %}
labels:
app: metallb
spec:
privileged: true
allowPrivilegeEscalation: false
allowedCapabilities:
- net_raw
volumes:
- secret
hostNetwork: true
hostPorts:
- min: {{ metallb.port }}
max: {{ metallb.port }}
hostIPC: false
hostPID: false
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'RunAsAny'
fsGroup:
rule: 'RunAsAny'
readOnlyRootFilesystem: true
{% endif %}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
namespace: metallb-system
name: config-watcher
labels:
app: metallb
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create"]
---
## Role bindings
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: metallb-system:controller
labels:
app: metallb
subjects:
- kind: ServiceAccount
name: controller
namespace: metallb-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: metallb-system:controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: metallb-system:speaker
labels:
app: metallb
subjects:
- kind: ServiceAccount
name: speaker
namespace: metallb-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: metallb-system:speaker
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
namespace: metallb-system
name: config-watcher
labels:
app: metallb
subjects:
- kind: ServiceAccount
name: controller
- kind: ServiceAccount
name: speaker
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: config-watcher
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
namespace: metallb-system
name: speaker
labels:
app: metallb
component: speaker
spec:
selector:
matchLabels:
app: metallb
component: speaker
template:
metadata:
labels:
app: metallb
component: speaker
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "{{ metallb.port }}"
spec:
serviceAccountName: speaker
terminationGracePeriodSeconds: 0
hostNetwork: true
containers:
- name: speaker
image: metallb/speaker:{{ metallb.version }}
imagePullPolicy: IfNotPresent
args:
- --port={{ metallb.port }}
- --config=config
env:
- name: METALLB_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
ports:
- name: monitoring
containerPort: {{ metallb.port }}
resources:
limits:
cpu: {{ metallb.limits.cpu }}
memory: {{ metallb.limits.memory }}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- all
add:
- net_raw
---
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: metallb-system
name: controller
labels:
app: metallb
component: controller
spec:
revisionHistoryLimit: 3
selector:
matchLabels:
app: metallb
component: controller
template:
metadata:
labels:
app: metallb
component: controller
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "{{ metallb.port }}"
spec:
serviceAccountName: controller
terminationGracePeriodSeconds: 0
securityContext:
runAsNonRoot: true
runAsUser: 65534 # nobody
containers:
- name: controller
image: metallb/controller:{{ metallb.version }}
imagePullPolicy: IfNotPresent
args:
- --port={{ metallb.port }}
- --config=config
ports:
- name: monitoring
containerPort: {{ metallb.port }}
resources:
limits:
cpu: {{ metallb.limits.cpu }}
memory: {{ metallb.limits.memory }}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- all
readOnlyRootFilesystem: true
---


@@ -7,7 +7,7 @@
register: glusterfs_ppa_added
when: glusterfs_ppa_use
- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa 503
- name: Ensure GlusterFS client will reinstall if the PPA was just added.
apt:
name: "{{ item }}"
state: absent
@@ -18,7 +18,7 @@
- name: Ensure GlusterFS client is installed.
apt:
name: "{{ item }}"
state: present
state: installed
default_release: "{{ glusterfs_default_release }}"
with_items:
- glusterfs-client


@@ -7,7 +7,7 @@
register: glusterfs_ppa_added
when: glusterfs_ppa_use
- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa 503
- name: Ensure GlusterFS will reinstall if the PPA was just added.
apt:
name: "{{ item }}"
state: absent
@@ -19,7 +19,7 @@
- name: Ensure GlusterFS is installed.
apt:
name: "{{ item }}"
state: present
state: installed
default_release: "{{ glusterfs_default_release }}"
with_items:
- glusterfs-server


@@ -8,7 +8,7 @@
{% for host in groups['gfs-cluster'] %}
{
"addresses": [
{
{
"ip": "{{hostvars[host]['ip']|default(hostvars[host].ansible_default_ipv4['address'])}}"
}
],


@@ -1,7 +1,7 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: glusterfs
name: glusterfs
spec:
capacity:
storage: "{{ hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb }}Gi"


@@ -6,7 +6,7 @@
- name: "Delete bootstrap Heketi."
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0"
- name: "Ensure there is nothing left over." # noqa 301
- name: "Ensure there is nothing left over."
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
register: "heketi_result"
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"


@@ -13,7 +13,7 @@
- name: "Copy topology configuration into container."
changed_when: false
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
- name: "Load heketi topology." # noqa 503
- name: "Load heketi topology."
when: "render.changed"
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
register: "load_heketi"


@@ -18,7 +18,7 @@
- name: "Provision database volume."
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
when: "heketi_database_volume_exists is undefined"
- name: "Copy configuration from pod." # noqa 301
- name: "Copy configuration from pod."
become: true
command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"
- name: "Get heketi volume ids."


@@ -10,10 +10,10 @@
template:
src: "topology.json.j2"
dest: "{{ kube_config_dir }}/topology.json"
- name: "Copy topology configuration into container." # noqa 503
- name: "Copy topology configuration into container."
when: "rendering.changed"
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
- name: "Load heketi topology." # noqa 503
- name: "Load heketi topology."
when: "rendering.changed"
command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
- name: "Get heketi topology."


@@ -22,7 +22,7 @@
ignore_errors: true
changed_when: false
- name: "Remove volume groups." # noqa 301
- name: "Remove volume groups."
environment:
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
become: true
@@ -30,7 +30,7 @@
with_items: "{{ volume_groups.stdout_lines }}"
loop_control: { loop_var: "volume_group" }
- name: "Remove physical volume from cluster disks." # noqa 301
- name: "Remove physical volume from cluster disks."
environment:
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
become: true


@@ -1,43 +1,43 @@
---
- name: "Remove storage class." # noqa 301
- name: "Remove storage class."
command: "{{ bin_dir }}/kubectl delete storageclass gluster"
ignore_errors: true
- name: "Tear down heketi." # noqa 301
- name: "Tear down heketi."
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
ignore_errors: true
- name: "Tear down heketi." # noqa 301
- name: "Tear down heketi."
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
ignore_errors: true
- name: "Tear down bootstrap."
include_tasks: "../../provision/tasks/bootstrap/tear-down.yml"
- name: "Ensure there is nothing left over." # noqa 301
include_tasks: "../provision/tasks/bootstrap/tear-down.yml"
- name: "Ensure there is nothing left over."
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
register: "heketi_result"
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
retries: 60
delay: 5
- name: "Ensure there is nothing left over." # noqa 301
- name: "Ensure there is nothing left over."
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
register: "heketi_result"
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
retries: 60
delay: 5
- name: "Tear down glusterfs." # noqa 301
- name: "Tear down glusterfs."
command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
ignore_errors: true
- name: "Remove heketi storage service." # noqa 301
- name: "Remove heketi storage service."
command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
ignore_errors: true
- name: "Remove heketi gluster role binding" # noqa 301
- name: "Remove heketi gluster role binding"
command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
ignore_errors: true
- name: "Remove heketi config secret" # noqa 301
- name: "Remove heketi config secret"
command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
ignore_errors: true
- name: "Remove heketi db backup" # noqa 301
- name: "Remove heketi db backup"
command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
ignore_errors: true
- name: "Remove heketi service account" # noqa 301
- name: "Remove heketi service account"
command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
ignore_errors: true
- name: "Get secrets"


@@ -22,7 +22,7 @@ export TF_VAR_AWS_SECRET_ACCESS_KEY ="xxx"
export TF_VAR_AWS_SSH_KEY_NAME="yyy"
export TF_VAR_AWS_DEFAULT_REGION="zzz"
```
- Update `contrib/terraform/aws/terraform.tfvars` with your data. By default, the Terraform scripts use Ubuntu 18.04 LTS (Bionic) as base image. If you want to change this behaviour, see note "Using other distrib than Ubuntu" below.
- Update `contrib/terraform/aws/terraform.tfvars` with your data. By default, the Terraform scripts use CoreOS as base image. If you want to change this behaviour, see note "Using other distrib than CoreOs" below.
- Create an AWS EC2 SSH Key
- Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply` depending if you exported your AWS credentials
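Putting the credential exports and the apply step together, a minimal sketch (values are placeholders; `terraform init` is assumed for a fresh checkout):

```shell
cd contrib/terraform/aws
export TF_VAR_AWS_ACCESS_KEY_ID="xxx"
export TF_VAR_AWS_SECRET_ACCESS_KEY="xxx"
export TF_VAR_AWS_SSH_KEY_NAME="yyy"
export TF_VAR_AWS_DEFAULT_REGION="zzz"
terraform init
terraform apply   # or: terraform apply --var-file="credentials.tfvars"
```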
@@ -41,12 +41,12 @@ ssh -F ./ssh-bastion.conf user@$ip
- Once the infrastructure is created, you can run the kubespray playbooks and supply inventory/hosts with the `-i` flag.
Example (this one assumes you are using Ubuntu)
Example (this one assumes you are using CoreOS)
```commandline
ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=ubuntu -b --become-user=root --flush-cache
ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=core -b --become-user=root --flush-cache
```
***Using other distrib than Ubuntu***
If you want to use another distribution than Ubuntu 18.04 (Bionic) LTS, you can modify the search filters of the 'data "aws_ami" "distro"' in variables.tf.
***Using other distrib than CoreOs***
If you want to use another distribution than CoreOS, you can modify the search filters of the 'data "aws_ami" "distro"' in variables.tf.
For example, to use:
- Debian Jessie, replace 'data "aws_ami" "distro"' in variables.tf with


@@ -3,9 +3,9 @@ terraform {
}
provider "aws" {
access_key = var.AWS_ACCESS_KEY_ID
secret_key = var.AWS_SECRET_ACCESS_KEY
region = var.AWS_DEFAULT_REGION
access_key = "${var.AWS_ACCESS_KEY_ID}"
secret_key = "${var.AWS_SECRET_ACCESS_KEY}"
region = "${var.AWS_DEFAULT_REGION}"
}
data "aws_availability_zones" "available" {}
@@ -18,30 +18,30 @@ data "aws_availability_zones" "available" {}
module "aws-vpc" {
source = "./modules/vpc"
aws_cluster_name = var.aws_cluster_name
aws_vpc_cidr_block = var.aws_vpc_cidr_block
aws_avail_zones = slice(data.aws_availability_zones.available.names, 0, 2)
aws_cidr_subnets_private = var.aws_cidr_subnets_private
aws_cidr_subnets_public = var.aws_cidr_subnets_public
default_tags = var.default_tags
aws_cluster_name = "${var.aws_cluster_name}"
aws_vpc_cidr_block = "${var.aws_vpc_cidr_block}"
aws_avail_zones = "${slice(data.aws_availability_zones.available.names, 0, 2)}"
aws_cidr_subnets_private = "${var.aws_cidr_subnets_private}"
aws_cidr_subnets_public = "${var.aws_cidr_subnets_public}"
default_tags = "${var.default_tags}"
}
module "aws-elb" {
source = "./modules/elb"
aws_cluster_name = var.aws_cluster_name
aws_vpc_id = module.aws-vpc.aws_vpc_id
aws_avail_zones = slice(data.aws_availability_zones.available.names, 0, 2)
aws_subnet_ids_public = module.aws-vpc.aws_subnet_ids_public
aws_elb_api_port = var.aws_elb_api_port
k8s_secure_api_port = var.k8s_secure_api_port
default_tags = var.default_tags
aws_cluster_name = "${var.aws_cluster_name}"
aws_vpc_id = "${module.aws-vpc.aws_vpc_id}"
aws_avail_zones = "${slice(data.aws_availability_zones.available.names, 0, 2)}"
aws_subnet_ids_public = "${module.aws-vpc.aws_subnet_ids_public}"
aws_elb_api_port = "${var.aws_elb_api_port}"
k8s_secure_api_port = "${var.k8s_secure_api_port}"
default_tags = "${var.default_tags}"
}
module "aws-iam" {
source = "./modules/iam"
aws_cluster_name = var.aws_cluster_name
aws_cluster_name = "${var.aws_cluster_name}"
}
/*
@@ -50,22 +50,22 @@ module "aws-iam" {
*/
resource "aws_instance" "bastion-server" {
ami = data.aws_ami.distro.id
instance_type = var.aws_bastion_size
count = length(var.aws_cidr_subnets_public)
ami = "${data.aws_ami.distro.id}"
instance_type = "${var.aws_bastion_size}"
count = "${length(var.aws_cidr_subnets_public)}"
associate_public_ip_address = true
availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
subnet_id = element(module.aws-vpc.aws_subnet_ids_public, count.index)
availability_zone = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}"
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_public, count.index)}"
vpc_security_group_ids = module.aws-vpc.aws_security_group
vpc_security_group_ids = "${module.aws-vpc.aws_security_group}"
key_name = var.AWS_SSH_KEY_NAME
key_name = "${var.AWS_SSH_KEY_NAME}"
tags = merge(var.default_tags, map(
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-bastion-${count.index}",
"Cluster", "${var.aws_cluster_name}",
"Role", "bastion-${var.aws_cluster_name}-${count.index}"
))
))}"
}
/*
@@ -74,71 +74,71 @@ resource "aws_instance" "bastion-server" {
*/
resource "aws_instance" "k8s-master" {
ami = data.aws_ami.distro.id
instance_type = var.aws_kube_master_size
ami = "${data.aws_ami.distro.id}"
instance_type = "${var.aws_kube_master_size}"
count = var.aws_kube_master_num
count = "${var.aws_kube_master_num}"
availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
availability_zone = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}"
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private, count.index)}"
vpc_security_group_ids = module.aws-vpc.aws_security_group
vpc_security_group_ids = "${module.aws-vpc.aws_security_group}"
iam_instance_profile = module.aws-iam.kube-master-profile
key_name = var.AWS_SSH_KEY_NAME
iam_instance_profile = "${module.aws-iam.kube-master-profile}"
key_name = "${var.AWS_SSH_KEY_NAME}"
tags = merge(var.default_tags, map(
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-master${count.index}",
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
"Role", "master"
))
))}"
}
resource "aws_elb_attachment" "attach_master_nodes" {
count = var.aws_kube_master_num
elb = module.aws-elb.aws_elb_api_id
instance = element(aws_instance.k8s-master.*.id, count.index)
count = "${var.aws_kube_master_num}"
elb = "${module.aws-elb.aws_elb_api_id}"
instance = "${element(aws_instance.k8s-master.*.id, count.index)}"
}
resource "aws_instance" "k8s-etcd" {
ami = data.aws_ami.distro.id
instance_type = var.aws_etcd_size
ami = "${data.aws_ami.distro.id}"
instance_type = "${var.aws_etcd_size}"
count = var.aws_etcd_num
count = "${var.aws_etcd_num}"
availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
availability_zone = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}"
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private, count.index)}"
vpc_security_group_ids = module.aws-vpc.aws_security_group
vpc_security_group_ids = "${module.aws-vpc.aws_security_group}"
key_name = var.AWS_SSH_KEY_NAME
key_name = "${var.AWS_SSH_KEY_NAME}"
tags = merge(var.default_tags, map(
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-etcd${count.index}",
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
"Role", "etcd"
))
))}"
}
resource "aws_instance" "k8s-worker" {
ami = data.aws_ami.distro.id
instance_type = var.aws_kube_worker_size
ami = "${data.aws_ami.distro.id}"
instance_type = "${var.aws_kube_worker_size}"
count = var.aws_kube_worker_num
count = "${var.aws_kube_worker_num}"
availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
availability_zone = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}"
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private, count.index)}"
vpc_security_group_ids = module.aws-vpc.aws_security_group
vpc_security_group_ids = "${module.aws-vpc.aws_security_group}"
iam_instance_profile = module.aws-iam.kube-worker-profile
key_name = var.AWS_SSH_KEY_NAME
iam_instance_profile = "${module.aws-iam.kube-worker-profile}"
key_name = "${var.AWS_SSH_KEY_NAME}"
tags = merge(var.default_tags, map(
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-worker${count.index}",
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
"Role", "worker"
))
))}"
}
/*
@@ -146,16 +146,16 @@ resource "aws_instance" "k8s-worker" {
*
*/
data "template_file" "inventory" {
template = file("${path.module}/templates/inventory.tpl")
template = "${file("${path.module}/templates/inventory.tpl")}"
vars = {
public_ip_address_bastion = join("\n", formatlist("bastion ansible_host=%s", aws_instance.bastion-server.*.public_ip))
connection_strings_master = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-master.*.private_dns, aws_instance.k8s-master.*.private_ip))
connection_strings_node = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.private_dns, aws_instance.k8s-worker.*.private_ip))
connection_strings_etcd = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.private_dns, aws_instance.k8s-etcd.*.private_ip))
list_master = join("\n", aws_instance.k8s-master.*.private_dns)
list_node = join("\n", aws_instance.k8s-worker.*.private_dns)
list_etcd = join("\n", aws_instance.k8s-etcd.*.private_dns)
public_ip_address_bastion = "${join("\n", formatlist("bastion ansible_host=%s", aws_instance.bastion-server.*.public_ip))}"
connection_strings_master = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-master.*.private_dns, aws_instance.k8s-master.*.private_ip))}"
connection_strings_node = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.private_dns, aws_instance.k8s-worker.*.private_ip))}"
connection_strings_etcd = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.private_dns, aws_instance.k8s-etcd.*.private_ip))}"
list_master = "${join("\n", aws_instance.k8s-master.*.private_dns)}"
list_node = "${join("\n", aws_instance.k8s-worker.*.private_dns)}"
list_etcd = "${join("\n", aws_instance.k8s-etcd.*.private_dns)}"
elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
}
}
@@ -166,6 +166,6 @@ resource "null_resource" "inventories" {
}
triggers = {
template = data.template_file.inventory.rendered
template = "${data.template_file.inventory.rendered}"
}
}
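The template_file data source above feeds these values into templates/inventory.tpl to render an Ansible inventory. That template is not part of this compare, so the following is only a rough sketch of a compatible template, with group names assumed from the kubespray_groups metadata used elsewhere in this repository:
```
# Hypothetical templates/inventory.tpl sketch -- not the project's actual file.
# Each ${...} placeholder matches a key in the template_file vars map above.
${connection_strings_master}
${connection_strings_node}
${connection_strings_etcd}
${public_ip_address_bastion}

[kube-master]
${list_master}

[kube-node]
${list_node}

[etcd]
${list_etcd}

[k8s-cluster:children]
kube-master
kube-node

[k8s-cluster:vars]
${elb_api_fqdn}
```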


@@ -1,19 +1,19 @@
resource "aws_security_group" "aws-elb" {
name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
vpc_id = var.aws_vpc_id
vpc_id = "${var.aws_vpc_id}"
tags = merge(var.default_tags, map(
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
))
))}"
}
resource "aws_security_group_rule" "aws-allow-api-access" {
type = "ingress"
from_port = var.aws_elb_api_port
to_port = var.k8s_secure_api_port
from_port = "${var.aws_elb_api_port}"
to_port = "${var.k8s_secure_api_port}"
protocol = "TCP"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = aws_security_group.aws-elb.id
security_group_id = "${aws_security_group.aws-elb.id}"
}
resource "aws_security_group_rule" "aws-allow-api-egress" {
@@ -22,19 +22,19 @@ resource "aws_security_group_rule" "aws-allow-api-egress" {
to_port = 65535
protocol = "TCP"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = aws_security_group.aws-elb.id
security_group_id = "${aws_security_group.aws-elb.id}"
}
# Create a new AWS ELB for K8S API
resource "aws_elb" "aws-elb-api" {
name = "kubernetes-elb-${var.aws_cluster_name}"
subnets = var.aws_subnet_ids_public
security_groups = [aws_security_group.aws-elb.id]
security_groups = ["${aws_security_group.aws-elb.id}"]
listener {
instance_port = var.k8s_secure_api_port
instance_port = "${var.k8s_secure_api_port}"
instance_protocol = "tcp"
lb_port = var.aws_elb_api_port
lb_port = "${var.aws_elb_api_port}"
lb_protocol = "tcp"
}
@@ -51,7 +51,7 @@ resource "aws_elb" "aws-elb-api" {
connection_draining = true
connection_draining_timeout = 400
tags = merge(var.default_tags, map(
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-elb-api"
))
))}"
}


@@ -1,7 +1,7 @@
output "aws_elb_api_id" {
value = aws_elb.aws-elb-api.id
value = "${aws_elb.aws-elb-api.id}"
}
output "aws_elb_api_fqdn" {
value = aws_elb.aws-elb-api.dns_name
value = "${aws_elb.aws-elb-api.dns_name}"
}


@@ -42,7 +42,7 @@ EOF
resource "aws_iam_role_policy" "kube-master" {
name = "kubernetes-${var.aws_cluster_name}-master"
role = aws_iam_role.kube-master.id
role = "${aws_iam_role.kube-master.id}"
policy = <<EOF
{
@@ -77,7 +77,7 @@ EOF
resource "aws_iam_role_policy" "kube-worker" {
name = "kubernetes-${var.aws_cluster_name}-node"
role = aws_iam_role.kube-worker.id
role = "${aws_iam_role.kube-worker.id}"
policy = <<EOF
{
@@ -132,10 +132,10 @@ EOF
resource "aws_iam_instance_profile" "kube-master" {
name = "kube_${var.aws_cluster_name}_master_profile"
role = aws_iam_role.kube-master.name
role = "${aws_iam_role.kube-master.name}"
}
resource "aws_iam_instance_profile" "kube-worker" {
name = "kube_${var.aws_cluster_name}_node_profile"
role = aws_iam_role.kube-worker.name
role = "${aws_iam_role.kube-worker.name}"
}


@@ -1,7 +1,7 @@
output "kube-master-profile" {
value = aws_iam_instance_profile.kube-master.name
value = "${aws_iam_instance_profile.kube-master.name}"
}
output "kube-worker-profile" {
value = aws_iam_instance_profile.kube-worker.name
value = "${aws_iam_instance_profile.kube-worker.name}"
}


@@ -1,55 +1,55 @@
resource "aws_vpc" "cluster-vpc" {
cidr_block = var.aws_vpc_cidr_block
cidr_block = "${var.aws_vpc_cidr_block}"
#DNS Related Entries
enable_dns_support = true
enable_dns_hostnames = true
tags = merge(var.default_tags, map(
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-vpc"
))
))}"
}
resource "aws_eip" "cluster-nat-eip" {
count = length(var.aws_cidr_subnets_public)
count = "${length(var.aws_cidr_subnets_public)}"
vpc = true
}
resource "aws_internet_gateway" "cluster-vpc-internetgw" {
vpc_id = aws_vpc.cluster-vpc.id
vpc_id = "${aws_vpc.cluster-vpc.id}"
tags = merge(var.default_tags, map(
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-internetgw"
))
))}"
}
resource "aws_subnet" "cluster-vpc-subnets-public" {
vpc_id = aws_vpc.cluster-vpc.id
count = length(var.aws_avail_zones)
availability_zone = element(var.aws_avail_zones, count.index)
cidr_block = element(var.aws_cidr_subnets_public, count.index)
vpc_id = "${aws_vpc.cluster-vpc.id}"
count = "${length(var.aws_avail_zones)}"
availability_zone = "${element(var.aws_avail_zones, count.index)}"
cidr_block = "${element(var.aws_cidr_subnets_public, count.index)}"
tags = merge(var.default_tags, map(
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public",
"kubernetes.io/cluster/${var.aws_cluster_name}", "member"
))
))}"
}
resource "aws_nat_gateway" "cluster-nat-gateway" {
count = length(var.aws_cidr_subnets_public)
allocation_id = element(aws_eip.cluster-nat-eip.*.id, count.index)
subnet_id = element(aws_subnet.cluster-vpc-subnets-public.*.id, count.index)
count = "${length(var.aws_cidr_subnets_public)}"
allocation_id = "${element(aws_eip.cluster-nat-eip.*.id, count.index)}"
subnet_id = "${element(aws_subnet.cluster-vpc-subnets-public.*.id, count.index)}"
}
resource "aws_subnet" "cluster-vpc-subnets-private" {
vpc_id = aws_vpc.cluster-vpc.id
count = length(var.aws_avail_zones)
availability_zone = element(var.aws_avail_zones, count.index)
cidr_block = element(var.aws_cidr_subnets_private, count.index)
vpc_id = "${aws_vpc.cluster-vpc.id}"
count = "${length(var.aws_avail_zones)}"
availability_zone = "${element(var.aws_avail_zones, count.index)}"
cidr_block = "${element(var.aws_cidr_subnets_private, count.index)}"
tags = merge(var.default_tags, map(
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
))
))}"
}
#Routing in VPC
@@ -57,53 +57,53 @@ resource "aws_subnet" "cluster-vpc-subnets-private" {
#TODO: Do we need two routing tables for each subnet for redundancy or is one enough?
resource "aws_route_table" "kubernetes-public" {
vpc_id = aws_vpc.cluster-vpc.id
vpc_id = "${aws_vpc.cluster-vpc.id}"
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.cluster-vpc-internetgw.id
gateway_id = "${aws_internet_gateway.cluster-vpc-internetgw.id}"
}
tags = merge(var.default_tags, map(
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-routetable-public"
))
))}"
}
resource "aws_route_table" "kubernetes-private" {
count = length(var.aws_cidr_subnets_private)
vpc_id = aws_vpc.cluster-vpc.id
count = "${length(var.aws_cidr_subnets_private)}"
vpc_id = "${aws_vpc.cluster-vpc.id}"
route {
cidr_block = "0.0.0.0/0"
nat_gateway_id = element(aws_nat_gateway.cluster-nat-gateway.*.id, count.index)
nat_gateway_id = "${element(aws_nat_gateway.cluster-nat-gateway.*.id, count.index)}"
}
tags = merge(var.default_tags, map(
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
))
))}"
}
resource "aws_route_table_association" "kubernetes-public" {
count = length(var.aws_cidr_subnets_public)
subnet_id = element(aws_subnet.cluster-vpc-subnets-public.*.id, count.index)
route_table_id = aws_route_table.kubernetes-public.id
count = "${length(var.aws_cidr_subnets_public)}"
subnet_id = "${element(aws_subnet.cluster-vpc-subnets-public.*.id, count.index)}"
route_table_id = "${aws_route_table.kubernetes-public.id}"
}
resource "aws_route_table_association" "kubernetes-private" {
count = length(var.aws_cidr_subnets_private)
subnet_id = element(aws_subnet.cluster-vpc-subnets-private.*.id, count.index)
route_table_id = element(aws_route_table.kubernetes-private.*.id, count.index)
count = "${length(var.aws_cidr_subnets_private)}"
subnet_id = "${element(aws_subnet.cluster-vpc-subnets-private.*.id, count.index)}"
route_table_id = "${element(aws_route_table.kubernetes-private.*.id, count.index)}"
}
#Kubernetes Security Groups
resource "aws_security_group" "kubernetes" {
name = "kubernetes-${var.aws_cluster_name}-securitygroup"
vpc_id = aws_vpc.cluster-vpc.id
vpc_id = "${aws_vpc.cluster-vpc.id}"
tags = merge(var.default_tags, map(
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-securitygroup"
))
))}"
}
resource "aws_security_group_rule" "allow-all-ingress" {
@@ -111,8 +111,8 @@ resource "aws_security_group_rule" "allow-all-ingress" {
from_port = 0
to_port = 65535
protocol = "-1"
cidr_blocks = [var.aws_vpc_cidr_block]
security_group_id = aws_security_group.kubernetes.id
cidr_blocks = ["${var.aws_vpc_cidr_block}"]
security_group_id = "${aws_security_group.kubernetes.id}"
}
resource "aws_security_group_rule" "allow-all-egress" {
@@ -121,7 +121,7 @@ resource "aws_security_group_rule" "allow-all-egress" {
to_port = 65535
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = aws_security_group.kubernetes.id
security_group_id = "${aws_security_group.kubernetes.id}"
}
resource "aws_security_group_rule" "allow-ssh-connections" {
@@ -130,5 +130,5 @@ resource "aws_security_group_rule" "allow-ssh-connections" {
to_port = 22
protocol = "TCP"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = aws_security_group.kubernetes.id
security_group_id = "${aws_security_group.kubernetes.id}"
}


@@ -1,5 +1,5 @@
output "aws_vpc_id" {
value = aws_vpc.cluster-vpc.id
value = "${aws_vpc.cluster-vpc.id}"
}
output "aws_subnet_ids_private" {
@@ -15,5 +15,5 @@ output "aws_security_group" {
}
output "default_tags" {
value = var.default_tags
value = "${var.default_tags}"
}


@@ -1,17 +1,17 @@
output "bastion_ip" {
value = join("\n", aws_instance.bastion-server.*.public_ip)
value = "${join("\n", aws_instance.bastion-server.*.public_ip)}"
}
output "masters" {
value = join("\n", aws_instance.k8s-master.*.private_ip)
value = "${join("\n", aws_instance.k8s-master.*.private_ip)}"
}
output "workers" {
value = join("\n", aws_instance.k8s-worker.*.private_ip)
value = "${join("\n", aws_instance.k8s-worker.*.private_ip)}"
}
output "etcd" {
value = join("\n", aws_instance.k8s-etcd.*.private_ip)
value = "${join("\n", aws_instance.k8s-etcd.*.private_ip)}"
}
output "aws_elb_api_fqdn" {
@@ -19,9 +19,9 @@ output "aws_elb_api_fqdn" {
}
output "inventory" {
value = data.template_file.inventory.rendered
value = "${data.template_file.inventory.rendered}"
}
output "default_tags" {
value = var.default_tags
value = "${var.default_tags}"
}


@@ -25,7 +25,7 @@ data "aws_ami" "distro" {
filter {
name = "name"
values = ["ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*"]
values = ["CoreOS-stable-*"]
}
filter {
@@ -33,7 +33,7 @@ data "aws_ami" "distro" {
values = ["hvm"]
}
owners = ["099720109477"] # Canonical
owners = ["595879546273"] #CoreOS
}
//AWS VPC Variables


@@ -1,11 +1,11 @@
# Kubernetes on OpenStack with Terraform
# Kubernetes on Openstack with Terraform
Provision a Kubernetes cluster with [Terraform](https://www.terraform.io) on
OpenStack.
Openstack.
## Status
This will install a Kubernetes cluster on an OpenStack Cloud. It should work on
This will install a Kubernetes cluster on an Openstack Cloud. It should work on
most modern installs of OpenStack that support the basic services.
### Known compatible public clouds
@@ -72,9 +72,9 @@ specify:
- Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks
- Other properties related to provisioning the hosts
Even if you are using Flatcar Container Linux by Kinvolk for your cluster, you will still
Even if you are using Container Linux by CoreOS for your cluster, you will still
need the GlusterFS VMs to be based on either Debian or RedHat based images.
Flatcar Container Linux by Kinvolk cannot serve GlusterFS, but can connect to it through
Container Linux by CoreOS cannot serve GlusterFS, but can connect to it through
binaries available on hyperkube v1.4.3_coreos.0 or higher.
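The GlusterFS settings above map onto variables that appear later in this compare (number_of_gfs_nodes_no_floating_ip, gfs_volume_size_in_gb, image_gfs, ssh_user_gfs, flavor_gfs_node). A hedged cluster.tfvars sketch, with every value illustrative rather than a recommendation:
```
# Illustrative values only -- variable names come from this contrib's Terraform configuration.
number_of_gfs_nodes_no_floating_ip = 2            # GlusterFS VMs, internal network only
gfs_volume_size_in_gb              = 75           # size of each non-ephemeral brick volume
image_gfs                          = "Debian-10"  # assumed Debian/RedHat based image name
ssh_user_gfs                       = "debian"     # assumed login user for that image
flavor_gfs_node                    = "3"          # assumed OpenStack flavor id
```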
## Requirements
@@ -482,7 +482,7 @@ So, either a bastion host, or at least master/node with a floating IP are requir
#### Test access
Make sure you can connect to the hosts. Note that Flatcar Container Linux by Kinvolk will have a state `FAILED` due to Python not being present. This is okay, because Python will be installed during bootstrapping, so long as the hosts are not `UNREACHABLE`.
Make sure you can connect to the hosts. Note that Container Linux by CoreOS will have a state `FAILED` due to Python not being present. This is okay, because Python will be installed during bootstrapping, so long as the hosts are not `UNREACHABLE`.
```
$ ansible -i inventory/$CLUSTER/hosts -m ping all
@@ -510,7 +510,7 @@ Edit `inventory/$CLUSTER/group_vars/all/all.yml`:
# Directory where the binaries will be installed
# Default:
# bin_dir: /usr/local/bin
# For Flatcar Container Linux by Kinvolk:
# For Container Linux by CoreOS:
bin_dir: /opt/bin
```
- and **cloud_provider**:
@@ -531,7 +531,7 @@ kube_network_plugin: flannel
# Can be docker_dns, host_resolvconf or none
# Default:
# resolvconf_mode: docker_dns
# For Flatcar Container Linux by Kinvolk:
# For Container Linux by CoreOS:
resolvconf_mode: host_resolvconf
```
- Set max amount of attached cinder volume per host (default 256)


@@ -5,104 +5,102 @@ provider "openstack" {
module "network" {
source = "./modules/network"
external_net = var.external_net
network_name = var.network_name
subnet_cidr = var.subnet_cidr
cluster_name = var.cluster_name
dns_nameservers = var.dns_nameservers
network_dns_domain = var.network_dns_domain
use_neutron = var.use_neutron
router_id = var.router_id
external_net = "${var.external_net}"
network_name = "${var.network_name}"
subnet_cidr = "${var.subnet_cidr}"
cluster_name = "${var.cluster_name}"
dns_nameservers = "${var.dns_nameservers}"
network_dns_domain = "${var.network_dns_domain}"
use_neutron = "${var.use_neutron}"
router_id = "${var.router_id}"
}
module "ips" {
source = "./modules/ips"
number_of_k8s_masters = var.number_of_k8s_masters
number_of_k8s_masters_no_etcd = var.number_of_k8s_masters_no_etcd
number_of_k8s_nodes = var.number_of_k8s_nodes
floatingip_pool = var.floatingip_pool
number_of_bastions = var.number_of_bastions
external_net = var.external_net
network_name = var.network_name
router_id = module.network.router_id
k8s_nodes = var.k8s_nodes
number_of_k8s_masters = "${var.number_of_k8s_masters}"
number_of_k8s_masters_no_etcd = "${var.number_of_k8s_masters_no_etcd}"
number_of_k8s_nodes = "${var.number_of_k8s_nodes}"
floatingip_pool = "${var.floatingip_pool}"
number_of_bastions = "${var.number_of_bastions}"
external_net = "${var.external_net}"
network_name = "${var.network_name}"
router_id = "${module.network.router_id}"
k8s_nodes = "${var.k8s_nodes}"
}
module "compute" {
source = "./modules/compute"
cluster_name = var.cluster_name
az_list = var.az_list
az_list_node = var.az_list_node
number_of_k8s_masters = var.number_of_k8s_masters
number_of_k8s_masters_no_etcd = var.number_of_k8s_masters_no_etcd
number_of_etcd = var.number_of_etcd
number_of_k8s_masters_no_floating_ip = var.number_of_k8s_masters_no_floating_ip
number_of_k8s_masters_no_floating_ip_no_etcd = var.number_of_k8s_masters_no_floating_ip_no_etcd
number_of_k8s_nodes = var.number_of_k8s_nodes
number_of_bastions = var.number_of_bastions
number_of_k8s_nodes_no_floating_ip = var.number_of_k8s_nodes_no_floating_ip
number_of_gfs_nodes_no_floating_ip = var.number_of_gfs_nodes_no_floating_ip
k8s_nodes = var.k8s_nodes
bastion_root_volume_size_in_gb = var.bastion_root_volume_size_in_gb
etcd_root_volume_size_in_gb = var.etcd_root_volume_size_in_gb
master_root_volume_size_in_gb = var.master_root_volume_size_in_gb
node_root_volume_size_in_gb = var.node_root_volume_size_in_gb
gfs_root_volume_size_in_gb = var.gfs_root_volume_size_in_gb
gfs_volume_size_in_gb = var.gfs_volume_size_in_gb
master_volume_type = var.master_volume_type
public_key_path = var.public_key_path
image = var.image
image_gfs = var.image_gfs
ssh_user = var.ssh_user
ssh_user_gfs = var.ssh_user_gfs
flavor_k8s_master = var.flavor_k8s_master
flavor_k8s_node = var.flavor_k8s_node
flavor_etcd = var.flavor_etcd
flavor_gfs_node = var.flavor_gfs_node
network_name = var.network_name
flavor_bastion = var.flavor_bastion
k8s_master_fips = module.ips.k8s_master_fips
k8s_master_no_etcd_fips = module.ips.k8s_master_no_etcd_fips
k8s_node_fips = module.ips.k8s_node_fips
k8s_nodes_fips = module.ips.k8s_nodes_fips
bastion_fips = module.ips.bastion_fips
bastion_allowed_remote_ips = var.bastion_allowed_remote_ips
master_allowed_remote_ips = var.master_allowed_remote_ips
k8s_allowed_remote_ips = var.k8s_allowed_remote_ips
k8s_allowed_egress_ips = var.k8s_allowed_egress_ips
supplementary_master_groups = var.supplementary_master_groups
supplementary_node_groups = var.supplementary_node_groups
master_allowed_ports = var.master_allowed_ports
worker_allowed_ports = var.worker_allowed_ports
wait_for_floatingip = var.wait_for_floatingip
use_access_ip = var.use_access_ip
use_server_groups = var.use_server_groups
cluster_name = "${var.cluster_name}"
az_list = "${var.az_list}"
az_list_node = "${var.az_list_node}"
number_of_k8s_masters = "${var.number_of_k8s_masters}"
number_of_k8s_masters_no_etcd = "${var.number_of_k8s_masters_no_etcd}"
number_of_etcd = "${var.number_of_etcd}"
number_of_k8s_masters_no_floating_ip = "${var.number_of_k8s_masters_no_floating_ip}"
number_of_k8s_masters_no_floating_ip_no_etcd = "${var.number_of_k8s_masters_no_floating_ip_no_etcd}"
number_of_k8s_nodes = "${var.number_of_k8s_nodes}"
number_of_bastions = "${var.number_of_bastions}"
number_of_k8s_nodes_no_floating_ip = "${var.number_of_k8s_nodes_no_floating_ip}"
number_of_gfs_nodes_no_floating_ip = "${var.number_of_gfs_nodes_no_floating_ip}"
k8s_nodes = "${var.k8s_nodes}"
bastion_root_volume_size_in_gb = "${var.bastion_root_volume_size_in_gb}"
etcd_root_volume_size_in_gb = "${var.etcd_root_volume_size_in_gb}"
master_root_volume_size_in_gb = "${var.master_root_volume_size_in_gb}"
node_root_volume_size_in_gb = "${var.node_root_volume_size_in_gb}"
gfs_root_volume_size_in_gb = "${var.gfs_root_volume_size_in_gb}"
gfs_volume_size_in_gb = "${var.gfs_volume_size_in_gb}"
public_key_path = "${var.public_key_path}"
image = "${var.image}"
image_gfs = "${var.image_gfs}"
ssh_user = "${var.ssh_user}"
ssh_user_gfs = "${var.ssh_user_gfs}"
flavor_k8s_master = "${var.flavor_k8s_master}"
flavor_k8s_node = "${var.flavor_k8s_node}"
flavor_etcd = "${var.flavor_etcd}"
flavor_gfs_node = "${var.flavor_gfs_node}"
network_name = "${var.network_name}"
flavor_bastion = "${var.flavor_bastion}"
k8s_master_fips = "${module.ips.k8s_master_fips}"
k8s_master_no_etcd_fips = "${module.ips.k8s_master_no_etcd_fips}"
k8s_node_fips = "${module.ips.k8s_node_fips}"
k8s_nodes_fips = "${module.ips.k8s_nodes_fips}"
bastion_fips = "${module.ips.bastion_fips}"
bastion_allowed_remote_ips = "${var.bastion_allowed_remote_ips}"
master_allowed_remote_ips = "${var.master_allowed_remote_ips}"
k8s_allowed_remote_ips = "${var.k8s_allowed_remote_ips}"
k8s_allowed_egress_ips = "${var.k8s_allowed_egress_ips}"
supplementary_master_groups = "${var.supplementary_master_groups}"
supplementary_node_groups = "${var.supplementary_node_groups}"
worker_allowed_ports = "${var.worker_allowed_ports}"
wait_for_floatingip = "${var.wait_for_floatingip}"
use_access_ip = "${var.use_access_ip}"
use_server_groups = "${var.use_server_groups}"
network_id = module.network.router_id
network_id = "${module.network.router_id}"
}
output "private_subnet_id" {
value = module.network.subnet_id
value = "${module.network.subnet_id}"
}
output "floating_network_id" {
value = var.external_net
value = "${var.external_net}"
}
output "router_id" {
value = module.network.router_id
value = "${module.network.router_id}"
}
output "k8s_master_fips" {
value = concat(module.ips.k8s_master_fips, module.ips.k8s_master_no_etcd_fips)
value = "${concat(module.ips.k8s_master_fips, module.ips.k8s_master_no_etcd_fips)}"
}
output "k8s_node_fips" {
value = var.number_of_k8s_nodes > 0 ? module.ips.k8s_node_fips : [for key, value in module.ips.k8s_nodes_fips : value.address]
value = "${var.number_of_k8s_nodes > 0 ? module.ips.k8s_node_fips : [for key, value in module.ips.k8s_nodes_fips : value.address]}"
}
output "bastion_fips" {
value = module.ips.bastion_fips
value = "${module.ips.bastion_fips}"
}


@@ -1,14 +1,14 @@
data "openstack_images_image_v2" "vm_image" {
name = var.image
name = "${var.image}"
}
data "openstack_images_image_v2" "gfs_image" {
name = var.image_gfs == "" ? var.image : var.image_gfs
name = "${var.image_gfs == "" ? var.image : var.image_gfs}"
}
resource "openstack_compute_keypair_v2" "k8s" {
name = "kubernetes-${var.cluster_name}"
public_key = chomp(file(var.public_key_path))
public_key = "${chomp(file(var.public_key_path))}"
}
resource "openstack_networking_secgroup_v2" "k8s_master" {
@@ -18,43 +18,32 @@ resource "openstack_networking_secgroup_v2" "k8s_master" {
}
resource "openstack_networking_secgroup_rule_v2" "k8s_master" {
count = length(var.master_allowed_remote_ips)
count = "${length(var.master_allowed_remote_ips)}"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = "6443"
port_range_max = "6443"
remote_ip_prefix = var.master_allowed_remote_ips[count.index]
security_group_id = openstack_networking_secgroup_v2.k8s_master.id
}
resource "openstack_networking_secgroup_rule_v2" "k8s_master_ports" {
count = length(var.master_allowed_ports)
direction = "ingress"
ethertype = "IPv4"
protocol = lookup(var.master_allowed_ports[count.index], "protocol", "tcp")
port_range_min = lookup(var.master_allowed_ports[count.index], "port_range_min")
port_range_max = lookup(var.master_allowed_ports[count.index], "port_range_max")
remote_ip_prefix = lookup(var.master_allowed_ports[count.index], "remote_ip_prefix", "0.0.0.0/0")
security_group_id = openstack_networking_secgroup_v2.k8s_master.id
remote_ip_prefix = "${var.master_allowed_remote_ips[count.index]}"
security_group_id = "${openstack_networking_secgroup_v2.k8s_master.id}"
}
resource "openstack_networking_secgroup_v2" "bastion" {
name = "${var.cluster_name}-bastion"
count = var.number_of_bastions != "" ? 1 : 0
count = "${var.number_of_bastions != "" ? 1 : 0}"
description = "${var.cluster_name} - Bastion Server"
delete_default_rules = true
}
resource "openstack_networking_secgroup_rule_v2" "bastion" {
count = var.number_of_bastions != "" ? length(var.bastion_allowed_remote_ips) : 0
count = "${var.number_of_bastions != "" ? length(var.bastion_allowed_remote_ips) : 0}"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = "22"
port_range_max = "22"
remote_ip_prefix = var.bastion_allowed_remote_ips[count.index]
security_group_id = openstack_networking_secgroup_v2.bastion[0].id
remote_ip_prefix = "${var.bastion_allowed_remote_ips[count.index]}"
security_group_id = "${openstack_networking_secgroup_v2.bastion[0].id}"
}
resource "openstack_networking_secgroup_v2" "k8s" {
@@ -66,27 +55,27 @@ resource "openstack_networking_secgroup_v2" "k8s" {
resource "openstack_networking_secgroup_rule_v2" "k8s" {
direction = "ingress"
ethertype = "IPv4"
remote_group_id = openstack_networking_secgroup_v2.k8s.id
security_group_id = openstack_networking_secgroup_v2.k8s.id
remote_group_id = "${openstack_networking_secgroup_v2.k8s.id}"
security_group_id = "${openstack_networking_secgroup_v2.k8s.id}"
}
resource "openstack_networking_secgroup_rule_v2" "k8s_allowed_remote_ips" {
count = length(var.k8s_allowed_remote_ips)
count = "${length(var.k8s_allowed_remote_ips)}"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = "22"
port_range_max = "22"
remote_ip_prefix = var.k8s_allowed_remote_ips[count.index]
security_group_id = openstack_networking_secgroup_v2.k8s.id
remote_ip_prefix = "${var.k8s_allowed_remote_ips[count.index]}"
security_group_id = "${openstack_networking_secgroup_v2.k8s.id}"
}
resource "openstack_networking_secgroup_rule_v2" "egress" {
count = length(var.k8s_allowed_egress_ips)
count = "${length(var.k8s_allowed_egress_ips)}"
direction = "egress"
ethertype = "IPv4"
remote_ip_prefix = var.k8s_allowed_egress_ips[count.index]
security_group_id = openstack_networking_secgroup_v2.k8s.id
remote_ip_prefix = "${var.k8s_allowed_egress_ips[count.index]}"
security_group_id = "${openstack_networking_secgroup_v2.k8s.id}"
}
resource "openstack_networking_secgroup_v2" "worker" {
@@ -96,14 +85,14 @@ resource "openstack_networking_secgroup_v2" "worker" {
}
resource "openstack_networking_secgroup_rule_v2" "worker" {
count = length(var.worker_allowed_ports)
count = "${length(var.worker_allowed_ports)}"
direction = "ingress"
ethertype = "IPv4"
protocol = lookup(var.worker_allowed_ports[count.index], "protocol", "tcp")
port_range_min = lookup(var.worker_allowed_ports[count.index], "port_range_min")
port_range_max = lookup(var.worker_allowed_ports[count.index], "port_range_max")
remote_ip_prefix = lookup(var.worker_allowed_ports[count.index], "remote_ip_prefix", "0.0.0.0/0")
security_group_id = openstack_networking_secgroup_v2.worker.id
protocol = "${lookup(var.worker_allowed_ports[count.index], "protocol", "tcp")}"
port_range_min = "${lookup(var.worker_allowed_ports[count.index], "port_range_min")}"
port_range_max = "${lookup(var.worker_allowed_ports[count.index], "port_range_max")}"
remote_ip_prefix = "${lookup(var.worker_allowed_ports[count.index], "remote_ip_prefix", "0.0.0.0/0")}"
security_group_id = "${openstack_networking_secgroup_v2.worker.id}"
}
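The worker port rule above reads its settings from a list of maps via lookup(), so each entry can carry protocol, port_range_min, port_range_max and an optional remote_ip_prefix. A hedged tfvars sketch, with illustrative values (e.g. a NodePort-style range):
```
# Illustrative entry -- keys match the lookup() calls in the security group rule above.
worker_allowed_ports = [
  {
    protocol         = "tcp"
    port_range_min   = 30000       # assumed NodePort-style range
    port_range_max   = 32767
    remote_ip_prefix = "0.0.0.0/0" # optional; the lookup() falls back to 0.0.0.0/0
  },
]
```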
resource "openstack_compute_servergroup_v2" "k8s_master" {
@@ -126,17 +115,17 @@ resource "openstack_compute_servergroup_v2" "k8s_etcd" {
resource "openstack_compute_instance_v2" "bastion" {
name = "${var.cluster_name}-bastion-${count.index + 1}"
count = var.number_of_bastions
image_name = var.image
flavor_id = var.flavor_bastion
key_pair = openstack_compute_keypair_v2.k8s.name
count = "${var.number_of_bastions}"
image_name = "${var.image}"
flavor_id = "${var.flavor_bastion}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
dynamic "block_device" {
for_each = var.bastion_root_volume_size_in_gb > 0 ? [var.image] : []
content {
uuid = data.openstack_images_image_v2.vm_image.id
uuid = "${data.openstack_images_image_v2.vm_image.id}"
source_type = "image"
volume_size = var.bastion_root_volume_size_in_gb
volume_size = "${var.bastion_root_volume_size_in_gb}"
boot_index = 0
destination_type = "volume"
delete_on_termination = true
@@ -144,18 +133,18 @@ resource "openstack_compute_instance_v2" "bastion" {
}
network {
name = var.network_name
name = "${var.network_name}"
}
security_groups = [openstack_networking_secgroup_v2.k8s.name,
element(openstack_networking_secgroup_v2.bastion.*.name, count.index),
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
"${element(openstack_networking_secgroup_v2.bastion.*.name, count.index)}",
]
metadata = {
ssh_user = var.ssh_user
ssh_user = "${var.ssh_user}"
kubespray_groups = "bastion"
depends_on = var.network_id
use_access_ip = var.use_access_ip
depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
}
provisioner "local-exec" {
@@ -165,20 +154,19 @@ resource "openstack_compute_instance_v2" "bastion" {
resource "openstack_compute_instance_v2" "k8s_master" {
name = "${var.cluster_name}-k8s-master-${count.index + 1}"
count = var.number_of_k8s_masters
availability_zone = element(var.az_list, count.index)
image_name = var.image
flavor_id = var.flavor_k8s_master
key_pair = openstack_compute_keypair_v2.k8s.name
count = "${var.number_of_k8s_masters}"
availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
dynamic "block_device" {
for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
content {
uuid = data.openstack_images_image_v2.vm_image.id
uuid = "${data.openstack_images_image_v2.vm_image.id}"
source_type = "image"
volume_size = var.master_root_volume_size_in_gb
volume_type = var.master_volume_type
volume_size = "${var.master_root_volume_size_in_gb}"
boot_index = 0
destination_type = "volume"
delete_on_termination = true
@@ -186,25 +174,25 @@ resource "openstack_compute_instance_v2" "k8s_master" {
}
network {
name = var.network_name
name = "${var.network_name}"
}
security_groups = [openstack_networking_secgroup_v2.k8s_master.name,
openstack_networking_secgroup_v2.k8s.name,
security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
"${openstack_networking_secgroup_v2.k8s.name}",
]
dynamic "scheduler_hints" {
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
content {
group = openstack_compute_servergroup_v2.k8s_master[0].id
group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
}
}
metadata = {
ssh_user = var.ssh_user
ssh_user = "${var.ssh_user}"
kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
depends_on = var.network_id
use_access_ip = var.use_access_ip
depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
}
provisioner "local-exec" {
@@ -214,20 +202,19 @@ resource "openstack_compute_instance_v2" "k8s_master" {
resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
name = "${var.cluster_name}-k8s-master-ne-${count.index + 1}"
count = var.number_of_k8s_masters_no_etcd
availability_zone = element(var.az_list, count.index)
image_name = var.image
flavor_id = var.flavor_k8s_master
key_pair = openstack_compute_keypair_v2.k8s.name
count = "${var.number_of_k8s_masters_no_etcd}"
availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
dynamic "block_device" {
for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
content {
uuid = data.openstack_images_image_v2.vm_image.id
uuid = "${data.openstack_images_image_v2.vm_image.id}"
source_type = "image"
volume_size = var.master_root_volume_size_in_gb
volume_type = var.master_volume_type
volume_size = "${var.master_root_volume_size_in_gb}"
boot_index = 0
destination_type = "volume"
delete_on_termination = true
@@ -235,25 +222,25 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
}
network {
name = var.network_name
name = "${var.network_name}"
}
security_groups = [openstack_networking_secgroup_v2.k8s_master.name,
openstack_networking_secgroup_v2.k8s.name,
security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
"${openstack_networking_secgroup_v2.k8s.name}",
]
dynamic "scheduler_hints" {
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
content {
group = openstack_compute_servergroup_v2.k8s_master[0].id
group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
}
}
metadata = {
ssh_user = var.ssh_user
ssh_user = "${var.ssh_user}"
kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
depends_on = var.network_id
use_access_ip = var.use_access_ip
depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
}
provisioner "local-exec" {
@@ -263,18 +250,18 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
resource "openstack_compute_instance_v2" "etcd" {
name = "${var.cluster_name}-etcd-${count.index + 1}"
count = var.number_of_etcd
availability_zone = element(var.az_list, count.index)
image_name = var.image
flavor_id = var.flavor_etcd
key_pair = openstack_compute_keypair_v2.k8s.name
count = "${var.number_of_etcd}"
availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_etcd}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
dynamic "block_device" {
for_each = var.etcd_root_volume_size_in_gb > 0 ? [var.image] : []
content {
uuid = data.openstack_images_image_v2.vm_image.id
uuid = "${data.openstack_images_image_v2.vm_image.id}"
source_type = "image"
volume_size = var.etcd_root_volume_size_in_gb
volume_size = "${var.etcd_root_volume_size_in_gb}"
boot_index = 0
destination_type = "volume"
delete_on_termination = true
@@ -282,41 +269,40 @@ resource "openstack_compute_instance_v2" "etcd" {
}
network {
name = var.network_name
name = "${var.network_name}"
}
security_groups = [openstack_networking_secgroup_v2.k8s.name]
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]
dynamic "scheduler_hints" {
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : []
content {
group = openstack_compute_servergroup_v2.k8s_etcd[0].id
group = "${openstack_compute_servergroup_v2.k8s_etcd[0].id}"
}
}
metadata = {
ssh_user = var.ssh_user
ssh_user = "${var.ssh_user}"
kubespray_groups = "etcd,vault,no-floating"
depends_on = var.network_id
use_access_ip = var.use_access_ip
depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
}
}
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
name = "${var.cluster_name}-k8s-master-nf-${count.index + 1}"
count = var.number_of_k8s_masters_no_floating_ip
availability_zone = element(var.az_list, count.index)
image_name = var.image
flavor_id = var.flavor_k8s_master
key_pair = openstack_compute_keypair_v2.k8s.name
count = "${var.number_of_k8s_masters_no_floating_ip}"
availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
dynamic "block_device" {
for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
content {
uuid = data.openstack_images_image_v2.vm_image.id
uuid = "${data.openstack_images_image_v2.vm_image.id}"
source_type = "image"
volume_size = var.master_root_volume_size_in_gb
volume_type = var.master_volume_type
volume_size = "${var.master_root_volume_size_in_gb}"
boot_index = 0
destination_type = "volume"
delete_on_termination = true
@@ -324,43 +310,42 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
}
network {
name = var.network_name
name = "${var.network_name}"
}
security_groups = [openstack_networking_secgroup_v2.k8s_master.name,
openstack_networking_secgroup_v2.k8s.name,
security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
"${openstack_networking_secgroup_v2.k8s.name}",
]
dynamic "scheduler_hints" {
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
content {
group = openstack_compute_servergroup_v2.k8s_master[0].id
group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
}
}
metadata = {
ssh_user = var.ssh_user
ssh_user = "${var.ssh_user}"
kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
depends_on = var.network_id
use_access_ip = var.use_access_ip
depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
}
}
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
name = "${var.cluster_name}-k8s-master-ne-nf-${count.index + 1}"
count = var.number_of_k8s_masters_no_floating_ip_no_etcd
availability_zone = element(var.az_list, count.index)
image_name = var.image
flavor_id = var.flavor_k8s_master
key_pair = openstack_compute_keypair_v2.k8s.name
count = "${var.number_of_k8s_masters_no_floating_ip_no_etcd}"
availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
dynamic "block_device" {
for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
content {
uuid = data.openstack_images_image_v2.vm_image.id
uuid = "${data.openstack_images_image_v2.vm_image.id}"
source_type = "image"
volume_size = var.master_root_volume_size_in_gb
volume_type = var.master_volume_type
volume_size = "${var.master_root_volume_size_in_gb}"
boot_index = 0
destination_type = "volume"
delete_on_termination = true
@@ -368,42 +353,42 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
}
network {
name = var.network_name
name = "${var.network_name}"
}
security_groups = [openstack_networking_secgroup_v2.k8s_master.name,
openstack_networking_secgroup_v2.k8s.name,
security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
"${openstack_networking_secgroup_v2.k8s.name}",
]
dynamic "scheduler_hints" {
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
content {
group = openstack_compute_servergroup_v2.k8s_master[0].id
group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
}
}
metadata = {
ssh_user = var.ssh_user
ssh_user = "${var.ssh_user}"
kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
depends_on = var.network_id
use_access_ip = var.use_access_ip
depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
}
}
resource "openstack_compute_instance_v2" "k8s_node" {
name = "${var.cluster_name}-k8s-node-${count.index + 1}"
count = var.number_of_k8s_nodes
availability_zone = element(var.az_list_node, count.index)
image_name = var.image
flavor_id = var.flavor_k8s_node
key_pair = openstack_compute_keypair_v2.k8s.name
count = "${var.number_of_k8s_nodes}"
availability_zone = "${element(var.az_list_node, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_node}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
dynamic "block_device" {
for_each = var.node_root_volume_size_in_gb > 0 ? [var.image] : []
content {
uuid = data.openstack_images_image_v2.vm_image.id
uuid = "${data.openstack_images_image_v2.vm_image.id}"
source_type = "image"
volume_size = var.node_root_volume_size_in_gb
volume_size = "${var.node_root_volume_size_in_gb}"
boot_index = 0
destination_type = "volume"
delete_on_termination = true
@@ -411,25 +396,25 @@ resource "openstack_compute_instance_v2" "k8s_node" {
}
network {
name = var.network_name
name = "${var.network_name}"
}
security_groups = [openstack_networking_secgroup_v2.k8s.name,
openstack_networking_secgroup_v2.worker.name,
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
"${openstack_networking_secgroup_v2.worker.name}",
]
dynamic "scheduler_hints" {
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
content {
group = openstack_compute_servergroup_v2.k8s_node[0].id
group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
}
}
metadata = {
ssh_user = var.ssh_user
ssh_user = "${var.ssh_user}"
kubespray_groups = "kube-node,k8s-cluster,${var.supplementary_node_groups}"
depends_on = var.network_id
use_access_ip = var.use_access_ip
depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
}
provisioner "local-exec" {
@@ -439,18 +424,18 @@ resource "openstack_compute_instance_v2" "k8s_node" {
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
name = "${var.cluster_name}-k8s-node-nf-${count.index + 1}"
count = var.number_of_k8s_nodes_no_floating_ip
availability_zone = element(var.az_list_node, count.index)
image_name = var.image
flavor_id = var.flavor_k8s_node
key_pair = openstack_compute_keypair_v2.k8s.name
count = "${var.number_of_k8s_nodes_no_floating_ip}"
availability_zone = "${element(var.az_list_node, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_node}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
dynamic "block_device" {
for_each = var.node_root_volume_size_in_gb > 0 ? [var.image] : []
content {
uuid = data.openstack_images_image_v2.vm_image.id
uuid = "${data.openstack_images_image_v2.vm_image.id}"
source_type = "image"
volume_size = var.node_root_volume_size_in_gb
volume_size = "${var.node_root_volume_size_in_gb}"
boot_index = 0
destination_type = "volume"
delete_on_termination = true
@@ -458,42 +443,42 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
}
network {
name = var.network_name
name = "${var.network_name}"
}
security_groups = [openstack_networking_secgroup_v2.k8s.name,
openstack_networking_secgroup_v2.worker.name,
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
"${openstack_networking_secgroup_v2.worker.name}",
]
dynamic "scheduler_hints" {
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
content {
group = openstack_compute_servergroup_v2.k8s_node[0].id
group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
}
}
metadata = {
ssh_user = var.ssh_user
ssh_user = "${var.ssh_user}"
kubespray_groups = "kube-node,k8s-cluster,no-floating,${var.supplementary_node_groups}"
depends_on = var.network_id
use_access_ip = var.use_access_ip
depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
}
}
resource "openstack_compute_instance_v2" "k8s_nodes" {
for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {}
name = "${var.cluster_name}-k8s-node-${each.key}"
availability_zone = each.value.az
image_name = var.image
flavor_id = each.value.flavor
key_pair = openstack_compute_keypair_v2.k8s.name
availability_zone = "${each.value.az}"
image_name = "${var.image}"
flavor_id = "${each.value.flavor}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
dynamic "block_device" {
for_each = var.node_root_volume_size_in_gb > 0 ? [var.image] : []
content {
uuid = data.openstack_images_image_v2.vm_image.id
uuid = "${data.openstack_images_image_v2.vm_image.id}"
source_type = "image"
volume_size = var.node_root_volume_size_in_gb
volume_size = "${var.node_root_volume_size_in_gb}"
boot_index = 0
destination_type = "volume"
delete_on_termination = true
@@ -501,25 +486,25 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
}
network {
name = var.network_name
name = "${var.network_name}"
}
security_groups = [openstack_networking_secgroup_v2.k8s.name,
openstack_networking_secgroup_v2.worker.name,
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
"${openstack_networking_secgroup_v2.worker.name}",
]
dynamic "scheduler_hints" {
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
content {
group = openstack_compute_servergroup_v2.k8s_node[0].id
group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
}
}
metadata = {
ssh_user = var.ssh_user
ssh_user = "${var.ssh_user}"
kubespray_groups = "kube-node,k8s-cluster,%{if each.value.floating_ip == false}no-floating,%{endif}${var.supplementary_node_groups}"
depends_on = var.network_id
use_access_ip = var.use_access_ip
depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
}
provisioner "local-exec" {
@@ -529,18 +514,18 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
name = "${var.cluster_name}-gfs-node-nf-${count.index + 1}"
count = var.number_of_gfs_nodes_no_floating_ip
availability_zone = element(var.az_list, count.index)
image_name = var.image_gfs
flavor_id = var.flavor_gfs_node
key_pair = openstack_compute_keypair_v2.k8s.name
count = "${var.number_of_gfs_nodes_no_floating_ip}"
availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image_gfs}"
flavor_id = "${var.flavor_gfs_node}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
dynamic "block_device" {
for_each = var.gfs_root_volume_size_in_gb > 0 ? [var.image] : []
content {
uuid = data.openstack_images_image_v2.vm_image.id
uuid = "${data.openstack_images_image_v2.vm_image.id}"
source_type = "image"
volume_size = var.gfs_root_volume_size_in_gb
volume_size = "${var.gfs_root_volume_size_in_gb}"
boot_index = 0
destination_type = "volume"
delete_on_termination = true
@@ -548,70 +533,70 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
}
network {
name = var.network_name
name = "${var.network_name}"
}
security_groups = [openstack_networking_secgroup_v2.k8s.name]
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]
dynamic "scheduler_hints" {
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
content {
group = openstack_compute_servergroup_v2.k8s_node[0].id
group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
}
}
metadata = {
ssh_user = var.ssh_user_gfs
ssh_user = "${var.ssh_user_gfs}"
kubespray_groups = "gfs-cluster,network-storage,no-floating"
depends_on = var.network_id
use_access_ip = var.use_access_ip
depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
}
}
resource "openstack_compute_floatingip_associate_v2" "bastion" {
count = var.number_of_bastions
floating_ip = var.bastion_fips[count.index]
instance_id = element(openstack_compute_instance_v2.bastion.*.id, count.index)
wait_until_associated = var.wait_for_floatingip
count = "${var.number_of_bastions}"
floating_ip = "${var.bastion_fips[count.index]}"
instance_id = "${element(openstack_compute_instance_v2.bastion.*.id, count.index)}"
wait_until_associated = "${var.wait_for_floatingip}"
}
resource "openstack_compute_floatingip_associate_v2" "k8s_master" {
count = var.number_of_k8s_masters
instance_id = element(openstack_compute_instance_v2.k8s_master.*.id, count.index)
floating_ip = var.k8s_master_fips[count.index]
wait_until_associated = var.wait_for_floatingip
count = "${var.number_of_k8s_masters}"
instance_id = "${element(openstack_compute_instance_v2.k8s_master.*.id, count.index)}"
floating_ip = "${var.k8s_master_fips[count.index]}"
wait_until_associated = "${var.wait_for_floatingip}"
}
resource "openstack_compute_floatingip_associate_v2" "k8s_master_no_etcd" {
count = var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_etcd : 0
instance_id = element(openstack_compute_instance_v2.k8s_master_no_etcd.*.id, count.index)
floating_ip = var.k8s_master_no_etcd_fips[count.index]
count = "${var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_etcd : 0}"
instance_id = "${element(openstack_compute_instance_v2.k8s_master_no_etcd.*.id, count.index)}"
floating_ip = "${var.k8s_master_no_etcd_fips[count.index]}"
}
resource "openstack_compute_floatingip_associate_v2" "k8s_node" {
count = var.node_root_volume_size_in_gb == 0 ? var.number_of_k8s_nodes : 0
floating_ip = var.k8s_node_fips[count.index]
instance_id = element(openstack_compute_instance_v2.k8s_node[*].id, count.index)
wait_until_associated = var.wait_for_floatingip
count = "${var.node_root_volume_size_in_gb == 0 ? var.number_of_k8s_nodes : 0}"
floating_ip = "${var.k8s_node_fips[count.index]}"
instance_id = "${element(openstack_compute_instance_v2.k8s_node[*].id, count.index)}"
wait_until_associated = "${var.wait_for_floatingip}"
}
resource "openstack_compute_floatingip_associate_v2" "k8s_nodes" {
for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? { for key, value in var.k8s_nodes : key => value if value.floating_ip } : {}
floating_ip = var.k8s_nodes_fips[each.key].address
instance_id = openstack_compute_instance_v2.k8s_nodes[each.key].id
wait_until_associated = var.wait_for_floatingip
floating_ip = "${var.k8s_nodes_fips[each.key].address}"
instance_id = "${openstack_compute_instance_v2.k8s_nodes[each.key].id}"
wait_until_associated = "${var.wait_for_floatingip}"
}
resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
name = "${var.cluster_name}-glusterfs_volume-${count.index + 1}"
count = var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0
count = "${var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0}"
description = "Non-ephemeral volume for GlusterFS"
size = var.gfs_volume_size_in_gb
size = "${var.gfs_volume_size_in_gb}"
}
resource "openstack_compute_volume_attach_v2" "glusterfs_volume" {
count = var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0
instance_id = element(openstack_compute_instance_v2.glusterfs_node_no_floating_ip.*.id, count.index)
volume_id = element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index)
count = "${var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0}"
instance_id = "${element(openstack_compute_instance_v2.glusterfs_node_no_floating_ip.*.id, count.index)}"
volume_id = "${element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index)}"
}
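The k8s_nodes resources above are driven by for_each and only apply when both number_of_k8s_nodes and number_of_k8s_nodes_no_floating_ip are 0; each map entry supplies az, flavor and floating_ip, and entries with floating_ip = true also receive an address from k8s_nodes_fips. A hedged tfvars sketch with assumed values:
```
# Illustrative map -- key names follow the each.value references above.
number_of_k8s_nodes                = 0
number_of_k8s_nodes_no_floating_ip = 0

k8s_nodes = {
  "node-1" = {
    az          = "nova"   # assumed availability zone
    flavor      = "3"      # assumed OpenStack flavor id
    floating_ip = true
  }
  "node-2" = {
    az          = "nova"
    flavor      = "3"
    floating_ip = false    # joins the no-floating kubespray group
  }
}
```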


@@ -38,8 +38,6 @@ variable "gfs_root_volume_size_in_gb" {}
variable "gfs_volume_size_in_gb" {}
variable "master_volume_type" {}
variable "public_key_path" {}
variable "image" {}
@@ -67,39 +65,39 @@ variable "network_id" {
}
variable "k8s_master_fips" {
type = list
type = "list"
}
variable "k8s_master_no_etcd_fips" {
type = list
type = "list"
}
variable "k8s_node_fips" {
type = list
type = "list"
}
variable "k8s_nodes_fips" {
type = map
type = "map"
}
variable "bastion_fips" {
type = list
type = "list"
}
variable "bastion_allowed_remote_ips" {
type = list
type = "list"
}
variable "master_allowed_remote_ips" {
type = list
type = "list"
}
variable "k8s_allowed_remote_ips" {
type = list
type = "list"
}
variable "k8s_allowed_egress_ips" {
type = list
type = "list"
}
variable "k8s_nodes" {}
@@ -114,12 +112,8 @@ variable "supplementary_node_groups" {
default = ""
}
variable "master_allowed_ports" {
type = list
}
variable "worker_allowed_ports" {
type = list
type = "list"
}
variable "use_access_ip" {}


@@ -1,36 +1,36 @@
resource "null_resource" "dummy_dependency" {
triggers = {
dependency_id = var.router_id
dependency_id = "${var.router_id}"
}
}
resource "openstack_networking_floatingip_v2" "k8s_master" {
count = var.number_of_k8s_masters
pool = var.floatingip_pool
depends_on = [null_resource.dummy_dependency]
count = "${var.number_of_k8s_masters}"
pool = "${var.floatingip_pool}"
depends_on = ["null_resource.dummy_dependency"]
}
resource "openstack_networking_floatingip_v2" "k8s_master_no_etcd" {
count = var.number_of_k8s_masters_no_etcd
pool = var.floatingip_pool
depends_on = [null_resource.dummy_dependency]
count = "${var.number_of_k8s_masters_no_etcd}"
pool = "${var.floatingip_pool}"
depends_on = ["null_resource.dummy_dependency"]
}
resource "openstack_networking_floatingip_v2" "k8s_node" {
count = var.number_of_k8s_nodes
pool = var.floatingip_pool
depends_on = [null_resource.dummy_dependency]
count = "${var.number_of_k8s_nodes}"
pool = "${var.floatingip_pool}"
depends_on = ["null_resource.dummy_dependency"]
}
resource "openstack_networking_floatingip_v2" "bastion" {
count = var.number_of_bastions
pool = var.floatingip_pool
depends_on = [null_resource.dummy_dependency]
count = "${var.number_of_bastions}"
pool = "${var.floatingip_pool}"
depends_on = ["null_resource.dummy_dependency"]
}
resource "openstack_networking_floatingip_v2" "k8s_nodes" {
for_each = var.number_of_k8s_nodes == 0 ? { for key, value in var.k8s_nodes : key => value if value.floating_ip } : {}
pool = var.floatingip_pool
depends_on = [null_resource.dummy_dependency]
pool = "${var.floatingip_pool}"
depends_on = ["null_resource.dummy_dependency"]
}


@@ -1,19 +1,19 @@
output "k8s_master_fips" {
value = openstack_networking_floatingip_v2.k8s_master[*].address
value = "${openstack_networking_floatingip_v2.k8s_master[*].address}"
}
output "k8s_master_no_etcd_fips" {
value = openstack_networking_floatingip_v2.k8s_master_no_etcd[*].address
value = "${openstack_networking_floatingip_v2.k8s_master_no_etcd[*].address}"
}
output "k8s_node_fips" {
value = openstack_networking_floatingip_v2.k8s_node[*].address
value = "${openstack_networking_floatingip_v2.k8s_node[*].address}"
}
output "k8s_nodes_fips" {
value = openstack_networking_floatingip_v2.k8s_nodes
value = "${openstack_networking_floatingip_v2.k8s_nodes}"
}
output "bastion_fips" {
value = openstack_networking_floatingip_v2.bastion[*].address
value = "${openstack_networking_floatingip_v2.bastion[*].address}"
}


@@ -1,33 +1,33 @@
resource "openstack_networking_router_v2" "k8s" {
name = "${var.cluster_name}-router"
count = var.use_neutron == 1 && var.router_id == null ? 1 : 0
count = "${var.use_neutron}" == 1 && "${var.router_id}" == null ? 1 : 0
admin_state_up = "true"
external_network_id = var.external_net
external_network_id = "${var.external_net}"
}
data "openstack_networking_router_v2" "k8s" {
router_id = var.router_id
count = var.use_neutron == 1 && var.router_id != null ? 1 : 0
router_id = "${var.router_id}"
count = "${var.use_neutron}" == 1 && "${var.router_id}" != null ? 1 : 0
}
resource "openstack_networking_network_v2" "k8s" {
name = var.network_name
count = var.use_neutron
dns_domain = var.network_dns_domain != null ? var.network_dns_domain : null
name = "${var.network_name}"
count = "${var.use_neutron}"
dns_domain = var.network_dns_domain != null ? "${var.network_dns_domain}" : null
admin_state_up = "true"
}
resource "openstack_networking_subnet_v2" "k8s" {
name = "${var.cluster_name}-internal-network"
count = var.use_neutron
network_id = openstack_networking_network_v2.k8s[count.index].id
cidr = var.subnet_cidr
count = "${var.use_neutron}"
network_id = "${openstack_networking_network_v2.k8s[count.index].id}"
cidr = "${var.subnet_cidr}"
ip_version = 4
dns_nameservers = var.dns_nameservers
dns_nameservers = "${var.dns_nameservers}"
}
resource "openstack_networking_router_interface_v2" "k8s" {
count = var.use_neutron
count = "${var.use_neutron}"
router_id = "%{if openstack_networking_router_v2.k8s != []}${openstack_networking_router_v2.k8s[count.index].id}%{else}${var.router_id}%{endif}"
subnet_id = openstack_networking_subnet_v2.k8s[count.index].id
subnet_id = "${openstack_networking_subnet_v2.k8s[count.index].id}"
}


@@ -3,9 +3,9 @@ output "router_id" {
}
output "router_internal_port_id" {
value = element(concat(openstack_networking_router_interface_v2.k8s.*.id, [""]), 0)
value = "${element(concat(openstack_networking_router_interface_v2.k8s.*.id, [""]), 0)}"
}
output "subnet_id" {
value = element(concat(openstack_networking_subnet_v2.k8s.*.id, [""]), 0)
value = "${element(concat(openstack_networking_subnet_v2.k8s.*.id, [""]), 0)}"
}

View File

@@ -7,7 +7,7 @@ variable "network_dns_domain" {}
variable "cluster_name" {}
variable "dns_nameservers" {
type = list
type = "list"
}
variable "subnet_cidr" {}

View File

@@ -74,10 +74,6 @@ variable "gfs_volume_size_in_gb" {
default = 75
}
variable "master_volume_type" {
default = "Default"
}
variable "public_key_path" {
description = "The path of the ssh pub key"
default = "~/.ssh/id_rsa.pub"
@@ -135,7 +131,7 @@ variable "network_name" {
variable "network_dns_domain" {
description = "dns_domain for the internal network"
type = string
type = "string"
default = null
}
@@ -146,13 +142,13 @@ variable "use_neutron" {
variable "subnet_cidr" {
description = "Subnet CIDR block."
type = string
type = "string"
default = "10.0.0.0/24"
}
variable "dns_nameservers" {
description = "An array of DNS name server names used by hosts in this subnet."
type = list
type = "list"
default = []
}
@@ -182,36 +178,30 @@ variable "supplementary_node_groups" {
variable "bastion_allowed_remote_ips" {
description = "An array of CIDRs allowed to SSH to hosts"
type = list(string)
type = "list"
default = ["0.0.0.0/0"]
}
variable "master_allowed_remote_ips" {
description = "An array of CIDRs allowed to access API of masters"
type = list(string)
type = "list"
default = ["0.0.0.0/0"]
}
variable "k8s_allowed_remote_ips" {
description = "An array of CIDRs allowed to SSH to hosts"
type = list(string)
type = "list"
default = []
}
variable "k8s_allowed_egress_ips" {
description = "An array of CIDRs allowed for egress traffic"
type = list(string)
type = "list"
default = ["0.0.0.0/0"]
}
variable "master_allowed_ports" {
type = list
default = []
}
variable "worker_allowed_ports" {
type = list
type = "list"
default = [
{

View File

@@ -176,7 +176,7 @@ If you have deployed and destroyed a previous iteration of your cluster, you wil
#### Test access
Make sure you can connect to the hosts. Note that Flatcar Container Linux by Kinvolk will have a state `FAILED` due to Python not being present. This is okay, because Python will be installed during bootstrapping, so long as the hosts are not `UNREACHABLE`.
Make sure you can connect to the hosts. Note that Container Linux by CoreOS will have a state `FAILED` due to Python not being present. This is okay, because Python will be installed during bootstrapping, so long as the hosts are not `UNREACHABLE`.
```
$ ansible -i inventory/$CLUSTER/hosts -m ping all

View File

@@ -223,8 +223,8 @@ def packet_device(resource, tfvars=None):
'provider': 'packet',
}
if raw_attrs['operating_system'] == 'flatcar_stable':
# For Flatcar set the ssh_user to core
if raw_attrs['operating_system'] == 'coreos_stable':
# For CoreOS set the ssh_user to core
attrs.update({'ansible_ssh_user': 'core'})
# add groups based on attrs
@@ -319,7 +319,9 @@ def openstack_host(resource, module_name):
# attrs specific to Mantl
attrs.update({
'role': attrs['metadata'].get('role', 'none')
'consul_dc': _clean_dc(attrs['metadata'].get('dc', module_name)),
'role': attrs['metadata'].get('role', 'none'),
'ansible_python_interpreter': attrs['metadata'].get('python_bin','python')
})
# add groups based on attrs
@@ -329,6 +331,10 @@ def openstack_host(resource, module_name):
for item in list(attrs['metadata'].items()))
groups.append('os_region=' + attrs['region'])
# groups specific to Mantl
groups.append('role=' + attrs['metadata'].get('role', 'none'))
groups.append('dc=' + attrs['consul_dc'])
# groups specific to kubespray
for group in attrs['metadata'].get('kubespray_groups', "").split(","):
groups.append(group)

View File

@@ -13,7 +13,7 @@
/usr/local/share/ca-certificates/vault-ca.crt
{%- elif ansible_os_family == "RedHat" -%}
/etc/pki/ca-trust/source/anchors/vault-ca.crt
{%- elif ansible_os_family in ["Flatcar Container Linux by Kinvolk"] -%}
{%- elif ansible_os_family in ["Coreos", "Container Linux by CoreOS", "Flatcar", "Flatcar Container Linux by Kinvolk"] -%}
/etc/ssl/certs/vault-ca.pem
{%- endif %}
@@ -23,9 +23,9 @@
dest: "{{ ca_cert_path }}"
register: vault_ca_cert
- name: bootstrap/ca_trust | update ca-certificates (Debian/Ubuntu/Flatcar)
- name: bootstrap/ca_trust | update ca-certificates (Debian/Ubuntu/CoreOS)
command: update-ca-certificates
when: vault_ca_cert.changed and ansible_os_family in ["Debian", "Flatcar Container Linux by Kinvolk"]
when: vault_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Coreos", "Container Linux by CoreOS", "Flatcar", "Flatcar Container Linux by Kinvolk"]
- name: bootstrap/ca_trust | update ca-certificates (RedHat)
command: update-ca-trust extract

View File

@@ -6,11 +6,11 @@
- name: bootstrap/start_vault_temp | Start single node Vault with file backend
command: >
docker run -d --cap-add=IPC_LOCK --name {{ vault_temp_container_name }}
-p {{ vault_port }}:{{ vault_port }}
-e 'VAULT_LOCAL_CONFIG={{ vault_temp_config|to_json }}'
-v /etc/vault:/etc/vault
{{ vault_image_repo }}:{{ vault_version }} server
docker run -d --cap-add=IPC_LOCK --name {{ vault_temp_container_name }}
-p {{ vault_port }}:{{ vault_port }}
-e 'VAULT_LOCAL_CONFIG={{ vault_temp_config|to_json }}'
-v /etc/vault:/etc/vault
{{ vault_image_repo }}:{{ vault_version }} server
- name: bootstrap/start_vault_temp | Start again single node Vault with file backend
command: docker start {{ vault_temp_container_name }}

View File

@@ -21,9 +21,9 @@
- name: bootstrap/sync_secrets | Print out warning message if secrets are not available and vault is initialized
pause:
prompt: >
Vault orchestration may not be able to proceed. The Vault cluster is initialized, but
'root_token' or 'unseal_keys' were not found in {{ vault_secrets_dir }}. These are
needed for many vault orchestration steps.
Vault orchestration may not be able to proceed. The Vault cluster is initialized, but
'root_token' or 'unseal_keys' were not found in {{ vault_secrets_dir }}. These are
needed for many vault orchestration steps.
when: vault_cluster_is_initialized and not vault_secrets_available
- name: bootstrap/sync_secrets | Cat root_token from a vault host

View File

@@ -25,6 +25,6 @@
- name: check_etcd | Fail if etcd is not available and needed
fail:
msg: >
Unable to start Vault cluster! Etcd is not available at
{{ vault_etcd_url.split(',') | first }} however it is needed by Vault as a backend.
Unable to start Vault cluster! Etcd is not available at
{{ vault_etcd_url.split(',') | first }} however it is needed by Vault as a backend.
when: vault_etcd_needed|d() and not vault_etcd_available

View File

@@ -46,7 +46,7 @@
set_fact:
vault_cluster_is_initialized: >-
{{ vault_is_initialized or
hostvars[item]['vault_is_initialized'] or
('value' in vault_etcd_exists.stdout|default('')) }}
hostvars[item]['vault_is_initialized'] or
('value' in vault_etcd_exists.stdout|default('')) }}
with_items: "{{ groups.vault }}"
run_once: true

View File

@@ -6,9 +6,9 @@
ca_cert: "{{ vault_cert_dir }}/ca.pem"
name: "{{ create_role_name }}"
rules: >-
{%- if create_role_policy_rules|d("default") == "default" -%}
{{
{ 'path': {
{%- if create_role_policy_rules|d("default") == "default" -%}
{{
{ 'path': {
create_role_mount_path + '/issue/' + create_role_name: {'policy': 'write'},
create_role_mount_path + '/roles/' + create_role_name: {'policy': 'read'}
}} | to_json + '\n'
@@ -24,13 +24,13 @@
ca_cert: "{{ vault_cert_dir }}/ca.pem"
secret: "{{ create_role_mount_path }}/roles/{{ create_role_name }}"
data: |
{%- if create_role_options|d("default") == "default" -%}
{
allow_any_name: true
}
{%- else -%}
{{ create_role_options | to_json }}
{%- endif -%}
{%- if create_role_options|d("default") == "default" -%}
{
allow_any_name: true
}
{%- else -%}
{{ create_role_options | to_json }}
{%- endif -%}
## Userpass based auth method

View File

@@ -18,8 +18,8 @@
- name: shared/gen_userpass | Copy credentials to all hosts in the group
copy:
content: >
{{
{'username': gen_userpass_username,
'password': gen_userpass_password} | to_nice_json(indent=4)
}}
{{
{'username': gen_userpass_username,
'password': gen_userpass_password} | to_nice_json(indent=4)
}}
dest: "{{ vault_roles_dir }}/{{ gen_userpass_role }}/userpass"

View File

@@ -1,2 +1,2 @@
[Service]
Environment={% if http_proxy %}"HTTP_PROXY={{ http_proxy }}"{% endif %} {% if https_proxy %}"HTTPS_PROXY={{ https_proxy }}"{% endif %} {% if no_proxy %}"NO_PROXY={{ no_proxy }}"{% endif %}
Environment={% if http_proxy %}"HTTP_PROXY={{ http_proxy }}"{% endif %} {% if https_proxy %}"HTTPS_PROXY={{ https_proxy }}"{% endif %} {% if no_proxy %}"NO_PROXY={{ no_proxy }}"{% endif %}
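For reference, a minimal sketch of the inventory variables this systemd drop-in template consumes; the file path and proxy addresses below are illustrative, not values from the source:

```yaml
# group_vars/all/all.yml (path and values illustrative)
http_proxy: "http://proxy.example.com:3128"
https_proxy: "http://proxy.example.com:3128"
no_proxy: "localhost,127.0.0.1,10.233.0.0/18,.svc,.cluster.local"
```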

View File

@@ -9,7 +9,6 @@
* [HA Mode](docs/ha-mode.md)
* [Adding/replacing a node](docs/nodes.md)
* [Large deployments](docs/large-deployments.md)
* [Air-Gap Installation](docs/offline-environment.md)
* CNI
* [Calico](docs/calico.md)
* [Contiv](docs/contiv.md)
@@ -17,8 +16,6 @@
* [Kube Router](docs/kube-router.md)
* [Weave](docs/weave.md)
* [Multus](docs/multus.md)
* Ingress
* [Ambassador](docs/ambassador.md)
* [Cloud providers](docs/cloud.md)
* [AWS](docs/aws.md)
* [Azure](docs/azure.md)
@@ -27,8 +24,8 @@
* [vSphere](/docs/vsphere.md)
* Operating Systems
* [Debian](docs/debian.md)
* [Flatcar Container Linux](docs/flatcar.md)
* [Fedora CoreOS](docs/fcos.md)
* [Coreos](docs/coreos.md)
* [Fedora CoreOS](docs/fcos.md)
* [OpenSUSE](docs/opensuse.md)
* Advanced
* [Proxy](/docs/proxy.md)
@@ -41,5 +38,4 @@
* [Test cases](docs/test_cases.md)
* [Vagrant](docs/vagrant.md)
* [CI Matrix](docs/ci.md)
* [CI Setup](docs/ci-setup.md)
* [Roadmap](docs/roadmap.md)

View File

@@ -1,87 +0,0 @@
# Ambassador
The [Ambassador API Gateway](https://github.com/datawire/ambassador) provides all the functionality of a traditional ingress controller
(e.g., path-based routing) while exposing many additional capabilities such as authentication,
URL rewriting, CORS, rate limiting, and automatic metrics collection.
## Installation
### Configuration
* `ingress_ambassador_namespace` (default `ambassador`): namespace for installing Ambassador.
* `ingress_ambassador_update_window` (default `0 0 * * SUN`): _crontab_-like expression
for specifying when the Operator should try to update the Ambassador API Gateway.
* `ingress_ambassador_version` (default: `*`): SemVer rule for versions allowed for
installation/updates.
* `ingress_ambassador_secure_port` (default: 443): HTTPS port to listen at.
* `ingress_ambassador_insecure_port` (default: 80): HTTP port to listen at.
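A group_vars sketch collecting the settings listed above, using the documented defaults; the `ingress_ambassador_enabled` toggle is an assumption and is not shown in this excerpt:

```yaml
# Addon settings sketch (ingress_ambassador_enabled is assumed)
ingress_ambassador_enabled: true
ingress_ambassador_namespace: "ambassador"
ingress_ambassador_update_window: "0 0 * * SUN"
ingress_ambassador_version: "*"
ingress_ambassador_secure_port: 443
ingress_ambassador_insecure_port: 80
```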
### Ambassador Operator
This Ambassador addon deploys the Ambassador Operator, which in turn will install
the [Ambassador API Gateway](https://github.com/datawire/ambassador) in
a Kubernetes cluster.
The Ambassador Operator is a Kubernetes Operator that controls Ambassador's complete lifecycle
in your cluster, automating many of the repeatable tasks you would otherwise have to perform
yourself. Once installed, the Operator will complete installations and seamlessly upgrade to new
versions of Ambassador as they become available.
## Usage
The following example creates simple http-echo services and an `Ingress` object
to route to these services.
Note well that the [Ambassador API Gateway](https://github.com/datawire/ambassador) will automatically load balance `Ingress` resources
that include the annotation `kubernetes.io/ingress.class=ambassador`. All the other
resources will be just ignored.
```yaml
kind: Pod
apiVersion: v1
metadata:
name: foo-app
labels:
app: foo
spec:
containers:
- name: foo-app
image: hashicorp/http-echo
args:
- "-text=foo"
---
kind: Service
apiVersion: v1
metadata:
name: foo-service
spec:
selector:
app: foo
ports:
# Default port used by the image
- port: 5678
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: example-ingress
annotations:
kubernetes.io/ingress.class: ambassador
spec:
rules:
- http:
paths:
- path: /foo
backend:
serviceName: foo-service
servicePort: 5678
```
Now you can test that the ingress is working with curl:
```console
$ export AMB_IP=$(kubectl get service ambassador -n ambassador -o 'go-template={{range .status.loadBalancer.ingress}}{{print .ip "\n"}}{{end}}')
$ curl $AMB_IP/foo
foo
```

View File

@@ -138,7 +138,6 @@ The following tags are defined in playbooks:
| upload | Distributing images/binaries across hosts
| weave | Network plugin Weave
| ingress_alb | AWS ALB Ingress Controller
| ambassador | Ambassador Ingress Controller
Note: Use the ``bash scripts/gen_tags.sh`` command to generate a list of all
tags found in the codebase. New tags will be listed with the empty "Used for"

View File

@@ -13,13 +13,6 @@ Before creating the instances you must first set the `azure_` variables in the `
All of the values can be retrieved using the azure cli tool which can be downloaded here: <https://docs.microsoft.com/en-gb/azure/xplat-cli-install>
After installation you have to run `az login` to get access to your account.
### azure_cloud
Azure Stack has different API endpoints, depending on the Azure Stack deployment. These need to be provided to the Azure SDK.
Possible values are: `AzureChinaCloud`, `AzureGermanCloud`, `AzurePublicCloud` and `AzureUSGovernmentCloud`.
The full list of existing settings for the AzureChinaCloud, AzureGermanCloud, AzurePublicCloud and AzureUSGovernmentCloud
is available in the source code [here](https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/docs/cloud-provider-config.md)
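A one-line sketch of this setting, picking one of the values listed above (where exactly it lives in your inventory depends on your setup):

```yaml
# Inventory sketch for the azure_cloud setting described above
azure_cloud: AzurePublicCloud
```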
### azure\_tenant\_id + azure\_subscription\_id
run `az account show` to retrieve your subscription id and tenant id:

View File

@@ -38,7 +38,7 @@ or for versions prior to *v1.0.0*:
calicoctl.sh pool show
```
* Show the workloads (ip addresses of containers and their location)
* Show the workloads (ip addresses of containers and their located)
```ShellSession
calicoctl.sh get workloadEndpoint -o wide
@@ -235,15 +235,6 @@ Note that in OpenStack you must allow `ipip` traffic in your security groups,
otherwise you will experience timeouts.
To do this you must add a rule which allows it, for example:
### Optional : Felix configuration via extraenvs of calico node
Possible environment variable parameters for [configuring Felix](https://docs.projectcalico.org/reference/felix/configuration)
```yml
calico_node_extra_envs:
FELIX_DEVICEROUTESOURCEADDRESS: 172.17.0.1
```
```ShellSession
neutron security-group-rule-create --protocol 4 --direction egress k8s-a0tp4t
neutron security-group-rule-create --protocol 4 --direction ingress k8s-a0tp4t

View File

@@ -6,4 +6,4 @@ You need to use K8S 1.17+ and to add `calico_iptables_backend: "NFT"` to your co
If you have containers that are using iptables in the host network namespace (`hostNetwork=true`),
you need to ensure they are using iptables-nft.
An example how k8s do the autodetection can be found [in this PR](https://github.com/kubernetes/kubernetes/pull/82966)
An exemple how k8s do the autodetection can be found [in this PR](https://github.com/kubernetes/kubernetes/pull/82966)
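A minimal sketch of the configuration change described above (the file path is illustrative):

```yaml
# e.g. group_vars/k8s-cluster/k8s-net-calico.yml
calico_iptables_backend: "NFT"
```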

View File

@@ -1,20 +0,0 @@
# CI Setup
## Pipeline
1. unit-tests: fast jobs for fast feedback (linting, etc...)
2. deploy-part1: small number of jobs to test if the PR works with default settings
3. deploy-part2: slow jobs testing different platforms, OS, settings, CNI, etc...
4. deploy-part3: very slow jobs (upgrades, etc...)
## Runners
Kubespray has 3 types of GitLab runners:
- packet runners: used for E2E jobs (usually long)
- light runners: used for short lived jobs
- auto scaling runners: used for on-demand resources, see [GitLab docs](https://docs.gitlab.com/runner/configuration/autoscale.html) for more info
## Vagrant
Vagrant jobs are using the [quay.io/kubespray/vagrant](/test-infra/vagrant-docker/Dockerfile) docker image with `/var/run/libvirt/libvirt-sock` exposed from the host, allowing the container to boot VMs on the host.

View File

@@ -4,51 +4,51 @@ To generate this Matrix run `./tests/scripts/md-table/main.py`
## docker
| OS / CNI | calico | canal | cilium | contiv | flannel | kube-ovn | kube-router | macvlan | ovn4nfv | weave |
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
amazon | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :white_check_mark: |
centos8 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
debian10 | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: |
fedora31 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
fedora32 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
oracle7 | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :white_check_mark: | :x: | :white_check_mark: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :white_check_mark: |
ubuntu18 | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :white_check_mark: |
ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
| OS / CNI | calico | canal | cilium | contiv | flannel | kube-ovn | kube-router | macvlan | weave |
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- |
amazon | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :white_check_mark: |
centos8 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: |
fedora30 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
fedora31 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
oracle7 | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :white_check_mark: | :x: | :white_check_mark: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: |
ubuntu18 | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
## crio
| OS / CNI | calico | canal | cilium | contiv | flannel | kube-ovn | kube-router | macvlan | ovn4nfv | weave |
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora31 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora32 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
oracle7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
| OS / CNI | calico | canal | cilium | contiv | flannel | kube-ovn | kube-router | macvlan | weave |
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- |
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora30 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora31 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
oracle7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
## containerd
| OS / CNI | calico | canal | cilium | contiv | flannel | kube-ovn | kube-router | macvlan | ovn4nfv | weave |
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
centos8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian10 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora31 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora32 | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
oracle7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu18 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
| OS / CNI | calico | canal | cilium | contiv | flannel | kube-ovn | kube-router | macvlan | weave |
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- |
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
centos8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian10 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora30 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora31 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
oracle7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu18 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |

View File

@@ -1,13 +0,0 @@
# Cilium
## Kube-proxy replacement with Cilium
Cilium can run without kube-proxy by setting `cilium_kube_proxy_replacement`
to `strict`.
Without kube-proxy, cilium needs to know the address of the kube-apiserver
and this must be set globally for all cilium components (agents and operators).
Hence, in this configuration in Kubespray, Cilium will always contact
the external load balancer (even from a node in the control plane).
If there is no external load balancer, it will ignore any local load
balancer deployed by Kubespray and **only contact the first master**.
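A minimal sketch of enabling this mode; the `kube_network_plugin` line is the usual way to select Cilium and is an assumption here, only `cilium_kube_proxy_replacement` comes from the text above:

```yaml
# group_vars/k8s-cluster sketch
kube_network_plugin: cilium            # assumed selector, not shown in this excerpt
cilium_kube_proxy_replacement: strict
```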

View File

@@ -40,6 +40,8 @@ is not set, a default resolver is chosen (depending on cloud provider or 8.8.8.8
DNS servers to be added *after* the cluster DNS. Used by all ``resolvconf_mode`` modes. These serve as backup
DNS servers in early cluster deployment when no cluster DNS is available yet.
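A hedged sketch of such backup resolvers; the variable name `upstream_dns_servers` is assumed here (the excerpt cuts off before naming it) and the addresses are examples only:

```yaml
# Assumed variable name; addresses are examples only
upstream_dns_servers:
  - 8.8.8.8
  - 1.1.1.1
```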
## DNS modes supported by Kubespray
### coredns_external_zones
Array of optional external zones to coredns forward queries to. It's injected into
@@ -67,23 +69,9 @@ coredns_external_zones:
or as INI
```ini
coredns_external_zones='[{"cache": 30,"zones":["example.com","example.io:453"],"nameservers":["1.1.1.1","2.2.2.2"]}]'
coredns_external_zones=[{"cache": 30,"zones":["example.com","example.io:453"],"nameservers":["1.1.1.1","2.2.2.2"]}]'
```
### dns_etchosts (coredns)
Optional hosts file content to coredns use as /etc/hosts file. This will also be used by nodelocaldns, if enabled.
Example:
```yaml
dns_etchosts: |
192.168.0.100 api.example.com
192.168.0.200 ingress.example.com
```
## DNS modes supported by Kubespray
You can modify how Kubespray sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``.
### dns_mode
@@ -137,16 +125,6 @@ The following dns options are added to the docker daemon
* timeout:2
* attempts:2
These dns options can be overridden by setting a different list:
```yaml
docker_dns_options:
- ndots:{{ ndots }}
- timeout:2
- attempts:2
- rotate
```
For normal PODs, k8s will ignore these options and setup its own DNS settings for the PODs, taking
the --cluster_dns (either coredns or coredns_dual, depending on dns_mode) kubelet option into account.
For ``hostNetwork: true`` PODs however, k8s will let docker setup DNS settings. Docker containers which
@@ -204,10 +182,6 @@ nodelocaldns_external_zones:
- 192.168.0.53
```
### dns_etchosts (nodelocaldns)
See [dns_etchosts](#dns_etchosts-coredns) above.
## Limitations
* Kubespray has yet ways to configure Kubedns addon to forward requests SkyDns can

View File

@@ -41,3 +41,15 @@ dnsmasq_image_tag: '2.72'
The full list of available vars may be found in the download's ansible role defaults. Those also allow to specify custom urls and local repositories for binaries and container
images as well. See also the DNS stack docs for the related intranet configuration,
so the hosts can resolve those urls and repos.
## Offline environment
In case your servers don't have access to the internet (for example when deploying on premises with security constraints), you first have to set up the appropriate proxies/caches/mirrors and/or internal repositories and registries, and then adapt the following variables to fit your environment before deploying (a short sketch follows the list):
* At least `foo_image_repo` and `foo_download_url` as described before (i.e. in case of use of proxies to registries and binaries repositories, checksums and versions do not necessarily need to be changed).
NOTE: Regarding `foo_image_repo`, when using insecure registries/proxies, you will certainly have to append them to the `docker_insecure_registries` variable in group_vars/all/docker.yml
* `pyrepo_index` (and optionally `pyrepo_cert`)
* Depending on the `container_manager`
* When `container_manager=docker`, `docker_foo_repo_base_url`, `docker_foo_repo_gpgkey`, `dockerproject_bar_repo_base_url` and `dockerproject_bar_repo_gpgkey` (where `foo` is the distribution and `bar` is system package manager)
* When `container_manager=crio`, `crio_rhel_repo_base_url`
* When using Helm, `helm_stable_repo_url`
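A partial sketch of such overrides, reusing variable names that appear elsewhere in this document; every URL below is an illustrative placeholder for your internal mirrors:

```yaml
# Partial sketch only; adjust to your internal mirrors
gcr_image_repo: "registry.internal.example.com"
docker_image_repo: "registry.internal.example.com"
pyrepo_index: "https://pypi.internal.example.com/simple"
# e.g. for container_manager=docker on CentOS/RedHat:
docker_rh_repo_base_url: "https://yum.internal.example.com/docker-ce/$releasever/$basearch"
docker_rh_repo_gpgkey: "https://yum.internal.example.com/docker-ce/gpg"
# when helm_enabled=true:
helm_stable_repo_url: "https://charts.internal.example.com"
```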

View File

@@ -1,14 +0,0 @@
Flatcar Container Linux bootstrap
===============
Example with Ansible:
Before running the cluster playbook you must satisfy the following requirements:
General Flatcar Pre-Installation Notes:
- Ensure that the bin_dir is set to `/opt/bin`
- ansible_python_interpreter should be `/opt/bin/python`. This will be laid down by the bootstrap task.
- The default resolvconf_mode setting of `docker_dns` **does not** work for Flatcar. This is because we do not edit the systemd service file for docker on Flatcar nodes. Instead, just use the `host_resolvconf` mode. It should work out of the box.
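A sketch of the inventory settings implied by the notes above:

```yaml
# group_vars sketch for Flatcar hosts
bin_dir: /opt/bin
ansible_python_interpreter: /opt/bin/python
resolvconf_mode: host_resolvconf
```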
Then you can proceed to [cluster deployment](#run-deployment)

View File

@@ -12,7 +12,7 @@ to create or modify an Ansible inventory. Currently, it is limited in
functionality and is only used for configuring a basic Kubespray cluster inventory, but it does
support creating inventory file for large clusters as well. It now supports
separated ETCD and Kubernetes master roles from node role if the size exceeds a
certain threshold. Run `python3 contrib/inventory_builder/inventory.py help` for more information.
certain threshold. Run `python3 contrib/inventory_builder/inventory.py help` help for more information.
Example inventory generator usage:
@@ -36,7 +36,7 @@ ansible-playbook -i inventory/mycluster/hosts.yml cluster.yml -b -v \
--private-key=~/.ssh/private_key
```
See more details in the [ansible guide](/docs/ansible.md).
See more details in the [ansible guide](docs/ansible.md).
### Adding nodes
@@ -81,12 +81,12 @@ kube-apiserver via port 8080. A kubeconfig file is not necessary in this case,
because kubectl will use <http://localhost:8080> to connect. The kubeconfig files
generated will point to localhost (on kube-masters) and kube-node hosts will
connect either to a localhost nginx proxy or to a loadbalancer if configured.
More details on this process are in the [HA guide](/docs/ha-mode.md).
More details on this process are in the [HA guide](docs/ha-mode.md).
Kubespray permits connecting to the cluster remotely on any IP of any
kube-master host on port 6443 by default. However, this requires
authentication. One can get a kubeconfig from kube-master hosts
(see [below](#accessing-kubernetes-api)) or connect with a [username and password](/docs/vars.md#user-accounts).
(see [below](#accessing-kubernetes-api)) or connect with a [username and password](vars.md#user-accounts).
For more information on kubeconfig and accessing a Kubernetes cluster, refer to
the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/).
@@ -96,10 +96,10 @@ the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-applicati
Supported version is kubernetes-dashboard v2.0.x :
- Login options are : token/kubeconfig by default, basic can be enabled with `kube_basic_auth: true` inventory variable - not recommended because this requires ABAC api-server which is not tested by kubespray team
- Deployed by default in "kube-system" namespace, can be overridden with `dashboard_namespace: kubernetes-dashboard` in inventory,
- Deployed by default in "kube-system" namespace, can be overriden with `dashboard_namespace: kubernetes-dashboard` in inventory,
- Only serves over https
Access is described in [dashboard docs](https://github.com/kubernetes/dashboard/tree/master/docs/user/accessing-dashboard). With kubespray's default deployment in kube-system namespace, instead of kubernetes-dashboard:
Access is described in [dashboard docs](https://github.com/kubernetes/dashboard/blob/master/docs/user/accessing-dashboard/1.7.x-and-above.md). With kubespray's default deployment in kube-system namespace, instead of kubernetes-dashboard:
- Proxy URL is <http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#/login>
- kubectl commands must be run with "-n kube-system"
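A sketch of the inventory overrides mentioned above; only enable basic auth if you accept the ABAC caveat:

```yaml
# Inventory overrides for the dashboard section above
dashboard_namespace: kubernetes-dashboard
# kube_basic_auth: true   # possible but discouraged above (requires ABAC api-server)
```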
@@ -135,8 +135,3 @@ cd inventory/mycluster/artifacts
```
If desired, copy admin.conf to ~/.kube/config.
## Setting up your first cluster
[Setting up your first cluster](/docs/setting-up-your-first-cluster.md) is an
applied step-by-step guide for setting up your first cluster with Kubespray.

View File

@@ -1,68 +0,0 @@
# Kata Containers
[Kata Containers](https://katacontainers.io) is a secure container runtime with lightweight virtual machines that supports multiple hypervisor solutions.
## Hypervisors
_Qemu_ is the only hypervisor supported by Kubespray.
## Installation
To use Kata Containers, set the following variables:
**k8s-cluster.yml**:
```yaml
container_manager: containerd
kata_containers_enabled: true
```
**etcd.yml**:
```yaml
etcd_deployment_type: host
```
## Configuration
### Recommended : Pod Overhead
[Pod Overhead](https://kubernetes.io/docs/concepts/configuration/pod-overhead/) is a feature for accounting for the resources consumed by the Runtime Class used by the Pod.
When this feature is enabled, Kubernetes will count the fixed amount of CPU and memory set in the configuration as used by the virtual machine and not by the containers running in the Pod.
Pod Overhead is mandatory if you run Pods with Kata Containers that use [resources limits](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits).
**Set cgroup driver**:
To enable the Pod Overhead feature you have to configure the kubelet with the appropriate cgroup driver, using the following configuration:
```yaml
kubelet_cgroup_driver: cgroupfs
```
**Qemu hypervisor configuration**:
The configuration for the Qemu hypervisor uses the following values:
```yaml
kata_containers_qemu_overhead: true
kata_containers_qemu_overhead_fixed_cpu: 10m
kata_containers_qemu_overhead_fixed_memory: 290Mi
```
### Optional : Select Kata Containers version
Optionally you can select the Kata Containers release version to be installed. The available releases are published in [GitHub](https://github.com/kata-containers/runtime/releases).
```yaml
kata_containers_version: 1.11.1
```
### Optional : Debug
Debug is disabled by default for all the components of Kata Containers. You can change this behaviour with the following configuration:
```yaml
kata_containers_qemu_debug: 'false'
```

View File

@@ -29,7 +29,7 @@ metadata:
name: macvlan-conf
spec:
config: '{
"cniVersion": "0.4.0",
"cniVersion": "0.3.0",
"type": "macvlan",
"master": "eth0",
"mode": "bridge",

View File

@@ -2,58 +2,6 @@
Modified from [comments in #3471](https://github.com/kubernetes-sigs/kubespray/issues/3471#issuecomment-530036084)
## Limitation: Removal of first kube-master and etcd-master
Currently you can't remove the first node in your kube-master and etcd-master list. If you still want to remove this node you have to:
### 1) Change order of current masters
Modify the order of your master list by pushing your first entry to any other position. E.g. if you want to remove `node-1` of the following example:
```yaml
children:
kube-master:
hosts:
node-1:
node-2:
node-3:
kube-node:
hosts:
node-1:
node-2:
node-3:
etcd:
hosts:
node-1:
node-2:
node-3:
```
change your inventory to:
```yaml
children:
kube-master:
hosts:
node-2:
node-3:
node-1:
kube-node:
hosts:
node-2:
node-3:
node-1:
etcd:
hosts:
node-2:
node-3:
node-1:
```
## 2) Upgrade the cluster
run `upgrade-cluster.yml` or `cluster.yml`. Now you are good to go on with the removal.
## Adding/replacing a worker node
This should be the easiest.
@@ -62,16 +10,19 @@ This should be the easiest.
### 2) Run `scale.yml`
You can use `--limit=NODE_NAME` to limit Kubespray to avoid disturbing other nodes in the cluster.
You can use `--limit=node1` to limit Kubespray to avoid disturbing other nodes in the cluster.
Before using `--limit` run playbook `facts.yml` without the limit to refresh facts cache for all nodes.
### 3) Remove an old node with remove-node.yml
### 3) Drain the node that will be removed
```sh
kubectl drain NODE_NAME
```
### 4) Run the remove-node.yml playbook
With the old node still in the inventory, run `remove-node.yml`. You need to pass `-e node=NODE_NAME` to the playbook to limit the execution to the node being removed.
If the node you want to remove is not online, you should add `reset_nodes=false` to your extra-vars: `-e node=NODE_NAME reset_nodes=false`.
Use this flag even when you remove other types of nodes like a master or etcd nodes.
### 5) Remove the node from the inventory
@@ -79,9 +30,32 @@ That's it.
## Adding/replacing a master node
### 1) Run `cluster.yml`
### 1) Recreate apiserver certs manually to include the new master node in the cert SAN field
Append the new host to the inventory and run `cluster.yml`. You can NOT use `scale.yml` for that.
For some reason, Kubespray will not update the apiserver certificate.
Edit `/etc/kubernetes/kubeadm-config.yaml`, include new host in `certSANs` list.
Use kubeadm to recreate the certs.
```sh
cd /etc/kubernetes/ssl
mv apiserver.crt apiserver.crt.old
mv apiserver.key apiserver.key.old
cd /etc/kubernetes
kubeadm init phase certs apiserver --config kubeadm-config.yaml
```
Check the certificate, new host needs to be there.
```sh
openssl x509 -text -noout -in /etc/kubernetes/ssl/apiserver.crt
```
### 2) Run `cluster.yml`
Add the new host to the inventory and run cluster.yml.
### 3) Restart kube-system/nginx-proxy
@@ -94,42 +68,64 @@ docker ps | grep k8s_nginx-proxy_nginx-proxy | awk '{print $1}' | xargs docker r
### 4) Remove old master nodes
With the old node still in the inventory, run `remove-node.yml`. You need to pass `-e node=NODE_NAME` to the playbook to limit the execution to the node being removed.
If the node you want to remove is not online, you should add `reset_nodes=false` to your extra-vars.
If you are replacing a node, remove the old one from the inventory, and remove from the cluster runtime.
## Adding an etcd node
```sh
kubectl drain NODE_NAME
kubectl delete node NODE_NAME
```
After that, the old node can be safely shutdown. Also, make sure to restart nginx-proxy in all remaining nodes (step 3)
From any active master that remains in the cluster, re-upload `kubeadm-config.yaml`
```sh
kubeadm config upload from-file --config /etc/kubernetes/kubeadm-config.yaml
```
## Adding/Replacing an etcd node
You need to make sure there are always an odd number of etcd nodes in the cluster. In such a way, this is always a replace or scale up operation. Either add two new nodes or remove an old one.
### 1) Add the new node running cluster.yml
Update the inventory and run `cluster.yml` passing `--limit=etcd,kube-master -e ignore_assert_errors=yes`.
If the node you want to add as an etcd node is already a worker or master node in your cluster, you have to remove it first using `remove-node.yml`.
Run `upgrade-cluster.yml` also passing `--limit=etcd,kube-master -e ignore_assert_errors=yes`. This is necessary to update all etcd configuration in the cluster.
Run `upgrade-cluster.yml` also passing `--limit=etcd,kube-master -e ignore_assert_errors=yes`. This is necessary to update all etcd configuration in the cluster.
At this point, you will have an even number of nodes.
Everything should still be working, and you should only have problems if the cluster decides to elect a new etcd leader before you remove a node.
Even so, running applications should continue to be available.
At this point, you will have an even number of nodes. Everything should still be working, and you should only have problems if the cluster decides to elect a new etcd leader before you remove a node. Even so, running applications should continue to be available.
If you add multiple etcd nodes with one run, you might want to append `-e etcd_retries=10` to increase the number of retries between each etcd node join.
Otherwise the etcd cluster might still be processing the first join and fail on subsequent nodes. `etcd_retries=10` might work to join 3 new nodes.
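A one-line sketch of keeping the same retry setting in the inventory instead of passing it as an extra-var; roughly equivalent, assuming nothing else in your setup sets this variable:

```yaml
# group_vars sketch equivalent to `-e etcd_retries=10`
etcd_retries: 10
```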
## Removing an etcd node
### 1) Remove an old etcd node
### 2) Remove an old etcd node
With the node still in the inventory, run `remove-node.yml` passing `-e node=NODE_NAME` as the name of the node that should be removed.
If the node you want to remove is not online, you should add `reset_nodes=false` to your extra-vars.
### 2) Make sure only remaining nodes are in your inventory
### 3) Make sure the remaining etcd members have their config updated
Remove `NODE_NAME` from your inventory file.
In each etcd host that remains in the cluster:
### 3) Update kubernetes and network configuration files with the valid list of etcd members
```sh
cat /etc/etcd.env | grep ETCD_INITIAL_CLUSTER
```
Run `cluster.yml` to regenerate the configuration files on all remaining nodes.
Only active etcd members should be in that list.
### 4) Shutdown the old instance
### 4) Remove old etcd members from the cluster runtime
That's it.
Acquire a shell prompt into one of the etcd containers and use etcdctl to remove the old member.
```sh
# list all members
etcdctl member list
# remove old member
etcdctl member remove MEMBER_ID
# careful!!! if you remove a wrong member you will be in trouble
# note: these command lines are actually much bigger, since you need to pass all certificates to etcdctl.
```
### 5) Make sure the apiserver config is correctly updated
In every master node, edit `/etc/kubernetes/manifests/kube-apiserver.yaml`. Make sure only active etcd nodes are still present in the apiserver command line parameter `--etcd-servers=...`.
### 6) Shutdown the old instance

View File

@@ -1,105 +0,0 @@
# Offline environment
In case your servers don't have access to the internet (for example when deploying on premises with security constraints), you need to set up:
* an HTTP reverse proxy/cache/mirror to serve some static files (zips and binaries)
* an internal Yum/Deb repository for OS packages
* an internal container image registry that needs to be populated with all container images used by Kubespray (the exhaustive list depends on your setup)
* [Optional] an internal PyPi server for Kubespray python packages (only required if your OS doesn't provide all python packages/versions listed in `requirements.txt`)
* [Optional] an internal Helm registry (only required if `helm_enabled=true`)
## Configure Inventory
Once all artifacts are accessible from your internal network, **adjust** the following variables in your inventory to match your environment:
```yaml
# Registry overrides
gcr_image_repo: "{{ registry_host }}"
docker_image_repo: "{{ registry_host }}"
quay_image_repo: "{{ registry_host }}"
kubeadm_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubeadm"
kubectl_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubectl"
kubelet_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubelet"
# etcd is optional if you **DON'T** use etcd_deployment=host
etcd_download_url: "{{ files_repo }}/kubernetes/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz"
cni_download_url: "{{ files_repo }}/kubernetes/cni/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
crictl_download_url: "{{ files_repo }}/kubernetes/cri-tools/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
# If using Calico
calicoctl_download_url: "{{ files_repo }}/kubernetes/calico/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
# CentOS/Redhat
## Docker
docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch"
docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
## Containerd
extras_rh_repo_base_url: "{{ yum_repo }}/centos/$releasever/extras/$basearch"
extras_rh_repo_gpgkey: "{{ yum_repo }}/containerd/gpg"
# Fedora
## Docker
docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}"
docker_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
## Containerd
containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd"
containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
# Debian
## Docker
docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce"
docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg"
## Containerd
containerd_debian_repo_base_url: "{{ ubuntu_repo }}/containerd"
containerd_debian_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg"
containerd_debian_repo_repokey: 'YOURREPOKEY'
# Ubuntu
## Docker
docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce"
docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg"
## Containerd
containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd"
containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg"
containerd_ubuntu_repo_repokey: 'YOURREPOKEY'
# If using helm
helm_stable_repo_url: "{{ helm_registry }}"
```
For the OS specific settings, just define the one matching your OS.
If you use the settings like the one above, you'll need to define in your inventory the following variables:
* `registry_host`: Container image registry. If you _don't_ use the same repository path for the container images as the ones defined in [Download's role defaults](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/download/defaults/main.yml), you need to override the `*_image_repo` for these container images. If you want to make your life easier, use the same repository path and you won't have to override anything else.
* `files_repo`: HTTP webserver or reverse proxy that is able to serve the files listed above. The path is not important, you can store them anywhere as long as it's accessible by kubespray. It's recommended to use `*_version` in the path so that you don't need to modify this setting every time kubespray upgrades one of these components.
* `yum_repo`/`debian_repo`/`ubuntu_repo`: OS package repository depending of your OS, should point to your internal repository. Adjust the path accordingly.
* `helm_registry`: Helm Registry to use for `stable` Helm Charts if `helm_enabled: true`
## Install Kubespray Python Packages
Look at the `requirements.txt` file and check if your OS provides all packages out-of-the-box (Using the OS package manager). For those missing, you need to either use a proxy that has Internet access (typically from a DMZ) or setup a PyPi server in your network that will host these packages.
If you're using a HTTP(S) proxy to download your python packages:
```bash
sudo pip install --proxy=https://[username:password@]proxyserver:port -r requirements.txt
```
When using an internal PyPi server:
```bash
# If you host all required packages
pip install -i https://pypiserver/pypi -r requirements.txt
# If you only need the ones missing from the OS package manager
pip install -i https://pypiserver/pypi package_you_miss
```
## Run Kubespray as usual
Once all artifacts are in place and your inventory properly set up, you can run kubespray with the regular `cluster.yaml` command:
```bash
ansible-playbook -i inventory/my_airgap_cluster/hosts.yaml -b cluster.yml
```
## Please Note: Offline installation doesn't support CRI-O container runtime at the moment (see [this issue](https://github.com/kubernetes-sigs/kubespray/issues/6233))

View File

@@ -1,27 +1,7 @@
OpenStack
===============
# OpenStack
## Known compatible public clouds
Kubespray has been tested on a number of OpenStack Public Clouds including (in alphabetical order):
- [Auro](https://auro.io/)
- [Betacloud](https://www.betacloud.io/)
- [CityCloud](https://www.citycloud.com/)
- [DreamHost](https://www.dreamhost.com/cloud/computing/)
- [ELASTX](https://elastx.se/)
- [EnterCloudSuite](https://www.entercloudsuite.com/)
- [FugaCloud](https://fuga.cloud/)
- [Open Telekom Cloud](https://cloud.telekom.de/) : requires to set the variable `wait_for_floatingip = "true"` in your cluster.tfvars
- [OVHcloud](https://www.ovhcloud.com/)
- [Rackspace](https://www.rackspace.com/)
- [Ultimum](https://ultimum.io/)
- [VexxHost](https://vexxhost.com/)
- [Zetta](https://www.zetta.io/)
## The in-tree cloud provider
To deploy Kubespray on [OpenStack](https://www.openstack.org/) uncomment the `cloud_provider` option in `group_vars/all/all.yml` and set it to `openstack`.
To deploy kubespray on [OpenStack](https://www.openstack.org/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'openstack'`.
After that make sure to source in your OpenStack credentials like you would do when using `nova-client` or `neutron-client` by using `source path/to/your/openstack-rc` or `. path/to/your/openstack-rc`.
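A minimal sketch of that change:

```yaml
# group_vars/all/all.yml
cloud_provider: openstack
```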
@@ -71,7 +51,7 @@ Given the port ids on the left, you can set the two `allowed-address`(es) in Ope
openstack port set e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
```
If all the VMs in the tenant correspond to Kubespray deployment, you can "sweep run" above with:
If all the VMs in the tenant correspond to kubespray deployment, you can "sweep run" above with:
```bash
openstack port list --device-owner=compute:nova -c ID -f value | xargs -tI@ openstack port set @ --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
@@ -79,20 +59,21 @@ If all the VMs in the tenant correspond to Kubespray deployment, you can "sweep
Now you can finally run the playbook.
## The external cloud provider
Upgrade from the in-tree to the external cloud provider
---------------
The in-tree cloud provider is deprecated and will be removed in a future version of Kubernetes. The target release for removing all remaining in-tree cloud providers is set to 1.21.
The in-tree cloud provider is deprecated and will be removed in a future version of Kubernetes. The target release for removing all remaining in-tree cloud providers is set to 1.21
The new cloud provider is configured to have Octavia by default in Kubespray.
- Enable the new external cloud provider in `group_vars/all/all.yml`:
- Change cloud provider from `cloud_provider: openstack` to the new external Cloud provider:
```yaml
cloud_provider: external
external_cloud_provider: openstack
```
- Enable Cinder CSI in `group_vars/all/openstack.yml`:
- Enable Cinder CSI:
```yaml
cinder_csi_enabled: true
@@ -114,21 +95,6 @@ The new cloud provider is configured to have Octavia by default in Kubespray.
- ExpandCSIVolumes=true
```
- If you are in a case of a multi-nic OpenStack VMs (see [kubernetes/cloud-provider-openstack#407](https://github.com/kubernetes/cloud-provider-openstack/issues/407) and [#6083](https://github.com/kubernetes-sigs/kubespray/issues/6083) for explanation), you should override the default OpenStack networking configuration:
```yaml
external_openstack_network_ipv6_disabled: false
external_openstack_network_internal_networks:
- ""
external_openstack_network_public_networks:
- ""
```
- You can override the default OpenStack metadata configuration (see [#6338](https://github.com/kubernetes-sigs/kubespray/issues/6338) for explanation):
```yaml
external_openstack_metadata_search_order: "configDrive,metadataService"
```
- Run `source path/to/your/openstack-rc` to read your OpenStack credentials like `OS_AUTH_URL`, `OS_USERNAME`, `OS_PASSWORD`, etc. Those variables are used for accessing OpenStack from the external cloud provider.
- Run the `cluster.yml` playbook
- Run the `upgrade-cluster.yml` playbook
- Run the cleanup playbook located under extra_playbooks `extra_playbooks/migrate_openstack_provider.yml` (this will clean up all resources used by the old cloud provider)
- You can remove the feature gates for Volume migration. If you want to enable the possibility to expand CSI volumes you could leave the `ExpandCSIVolumes=true` feature gate

View File

@@ -1,49 +0,0 @@
# OVN4NFV-k8S-Plugin
Intro to [ovn4nfv-k8s-plugin](https://github.com/opnfv/ovn4nfv-k8s-plugin)
## How to use it
* Enable ovn4nfv in `group_vars/k8s-cluster/k8s-cluster.yml`
```yml
...
kube_network_plugin: ovn4nfv
...
```
## Verifying ovn4nfv kube network plugin
* ovn4nfv installs the OVN control plane on the master and an OVN daemonset on all nodes
* The Network Function Networking (nfn) operator is installed on the master and the nfn agent is installed on all nodes
* ovn4nfv installs the `ovn4nfvk8s-cni` CNI shim binary in `/opt/cni/bin/`, and the nfn agent acts as the CNI server
* All ovn4nfv pods are installed in the kube-system namespace
```ShellSession
# From K8s client
# kubectl get pods -n kube-system -l app=ovn-control-plane -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
ovn-control-plane-5f8b7bcc65-w759g 1/1 Running 0 3d18h 192.168.121.25 master <none> <none>
# kubectl get pods -n kube-system -l app=ovn-controller -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
ovn-controller-54zzj 1/1 Running 0 3d18h 192.168.121.24 minion01 <none> <none>
ovn-controller-7cljt 1/1 Running 0 3d18h 192.168.121.25 master <none> <none>
ovn-controller-cx46g 1/1 Running 0 3d18h 192.168.121.15 minion02 <none> <none>
# kubectl get pods -n kube-system -l name=nfn-operator -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nfn-operator-6dc44dbf48-xk9zl 1/1 Running 0 3d18h 192.168.121.25 master <none> <none>
# kubectl get pods -n kube-system -l app=nfn-agent -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nfn-agent-dzlpp 1/1 Running 0 3d18h 192.168.121.15 minion02 <none> <none>
nfn-agent-jcdbn 1/1 Running 0 3d18h 192.168.121.25 master <none> <none>
nfn-agent-lrkzk 1/1 Running 0 3d18h 192.168.121.24 minion01 <none> <none>
# kubectl get pods -n kube-system -l app=ovn4nfv -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
ovn4nfv-cni-5zdz2 1/1 Running 0 3d18h 192.168.121.24 minion01 <none> <none>
ovn4nfv-cni-k5wjp 1/1 Running 0 3d18h 192.168.121.25 master <none> <none>
ovn4nfv-cni-t6z5b 1/1 Running 0 3d18h 192.168.121.15 minion02 <none> <none>
```

View File

@@ -13,7 +13,7 @@
- [ ] Terraform to provision instances on:
- [ ] GCE
- [x] AWS (contrib/terraform/aws)
- [x] OpenStack (contrib/terraform/openstack)
- [x] Openstack (contrib/terraform/openstack)
- [x] Packet
- [ ] Digital Ocean
- [ ] Azure

View File

@@ -1,646 +0,0 @@
# Setting up your first cluster with Kubespray
This tutorial walks you through the detailed steps for setting up Kubernetes
with [Kubespray](https://kubespray.io/).
The guide is inspired by the tutorial [Kubernetes The Hard Way](https://github.com/kelseyhightower/kubernetes-the-hard-way), with the
difference that here we want to showcase how to spin up a Kubernetes cluster
in a more managed fashion with Kubespray.
## Target Audience
The target audience for this tutorial is someone looking for a
hands-on guide to get started with Kubespray.
## Cluster Details
* [kubespray](https://github.com/kubernetes-sigs/kubespray) v2.13.x
* [kubernetes](https://github.com/kubernetes/kubernetes) v1.17.9
## Prerequisites
* Google Cloud Platform: This tutorial leverages the [Google Cloud Platform](https://cloud.google.com/) to streamline provisioning of the compute infrastructure required to bootstrap a Kubernetes cluster from the ground up. [Sign up](https://cloud.google.com/free/) for $300 in free credits.
* Google Cloud Platform SDK: Follow the Google Cloud SDK [documentation](https://cloud.google.com/sdk/) to install and configure the `gcloud` command
line utility. Make sure to set a default compute region and compute zone.
* The [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) command line utility is used to interact with the Kubernetes
API Server.
* Linux or Mac environment with Python 3
## Provisioning Compute Resources
Kubernetes requires a set of machines to host the Kubernetes control plane and the worker nodes where containers are ultimately run. In this lab you will provision the compute resources required for running a secure and highly available Kubernetes cluster across a single [compute zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones).
### Networking
The Kubernetes [networking model](https://kubernetes.io/docs/concepts/cluster-administration/networking/#kubernetes-model) assumes a flat network in which containers and nodes can communicate with each other. In cases where this is not desired [network policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) can limit how groups of containers are allowed to communicate with each other and external network endpoints.
> Setting up network policies is out of scope for this tutorial.
#### Virtual Private Cloud Network
In this section a dedicated [Virtual Private Cloud](https://cloud.google.com/compute/docs/networks-and-firewalls#networks) (VPC) network will be setup to host the Kubernetes cluster.
Create the `kubernetes-the-kubespray-way` custom VPC network:
```ShellSession
gcloud compute networks create kubernetes-the-kubespray-way --subnet-mode custom
```
A [subnet](https://cloud.google.com/compute/docs/vpc/#vpc_networks_and_subnets) must be provisioned with an IP address range large enough to assign a private IP address to each node in the Kubernetes cluster.
Create the `kubernetes` subnet in the `kubernetes-the-hard-way` VPC network:
```ShellSession
gcloud compute networks subnets create kubernetes \
--network kubernetes-the-kubespray-way \
--range 10.240.0.0/24
```
> The `10.240.0.0/24` IP address range can host up to 254 compute instances.
#### Firewall Rules
Create a firewall rule that allows internal communication across all protocols.
It is important to note that the ipip protocol has to be allowed in order for
the calico (see later) networking plugin to work.
```ShellSession
gcloud compute firewall-rules create kubernetes-the-kubespray-way-allow-internal \
--allow tcp,udp,icmp,ipip \
--network kubernetes-the-kubespray-way \
--source-ranges 10.240.0.0/24
```
Create a firewall rule that allows external SSH, ICMP, and HTTPS:
```ShellSession
gcloud compute firewall-rules create kubernetes-the-kubespray-way-allow-external \
--allow tcp:80,tcp:6443,tcp:443,tcp:22,icmp \
--network kubernetes-the-kubespray-way \
--source-ranges 0.0.0.0/0
```
It is not feasible to restrict this firewall rule to the specific IP address from
which you access the cluster, because the nodes also communicate with each other
over their public IP addresses and would otherwise be blocked by this rule.
Technically you could limit the rule to the (fixed) public IP addresses of the
cluster nodes plus the remote IP addresses from which you access the cluster.
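To double-check which rules now apply to the `kubernetes-the-kubespray-way` network, you can list them:
```ShellSession
gcloud compute firewall-rules list --filter="network:kubernetes-the-kubespray-way"
```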
### Compute Instances
The compute instances in this lab will be provisioned using [Ubuntu Server](https://www.ubuntu.com/server) 18.04.
Each compute instance will be provisioned with a fixed private IP address and
a public IP address (which can be made static - see [guide](https://cloud.google.com/compute/docs/ip-addresses/reserve-static-external-ip-address)).
Using fixed public IP addresses has the advantage that the cluster node
configuration does not need to be updated with new public IP addresses every
time the machines are shut down and restarted.
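One way to fix a public IP address is to promote an instance's ephemeral external IP to a static address once the instance (created below) exists. A minimal sketch for controller-0, assuming a default compute region is configured and using the instance name as the address name:
```ShellSession
EPHEMERAL_IP=$(gcloud compute instances describe controller-0 \
  --format 'value(networkInterfaces[0].accessConfigs[0].natIP)')
gcloud compute addresses create controller-0 \
  --addresses ${EPHEMERAL_IP} \
  --region $(gcloud config get-value compute/region)
```
Repeat this for the other controller and worker instances if you want all public IP addresses to survive a shutdown.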
Create three compute instances which will host the Kubernetes control plane:
```ShellSession
for i in 0 1 2; do
gcloud compute instances create controller-${i} \
--async \
--boot-disk-size 200GB \
--can-ip-forward \
--image-family ubuntu-1804-lts \
--image-project ubuntu-os-cloud \
--machine-type e2-standard-2 \
--private-network-ip 10.240.0.1${i} \
--scopes compute-rw,storage-ro,service-management,service-control,logging-write,monitoring \
--subnet kubernetes \
--tags kubernetes-the-kubespray-way,controller
done
```
> Do not forget to fix the IP addresses if you plan on re-using the cluster
after temporarily shutting down the VMs - see [guide](https://cloud.google.com/compute/docs/ip-addresses/reserve-static-external-ip-address)
Create three compute instances which will host the Kubernetes worker nodes:
```ShellSession
for i in 0 1 2; do
gcloud compute instances create worker-${i} \
--async \
--boot-disk-size 200GB \
--can-ip-forward \
--image-family ubuntu-1804-lts \
--image-project ubuntu-os-cloud \
--machine-type e2-standard-2 \
--private-network-ip 10.240.0.2${i} \
--scopes compute-rw,storage-ro,service-management,service-control,logging-write,monitoring \
--subnet kubernetes \
--tags kubernetes-the-kubespray-way,worker
done
```
> Do not forget to fix the IP addresses if you plan on re-using the cluster
after temporarily shutting down the VMs - see [guide](https://cloud.google.com/compute/docs/ip-addresses/reserve-static-external-ip-address)
List the compute instances in your default compute zone:
```ShellSession
gcloud compute instances list --filter="tags.items=kubernetes-the-kubespray-way"
```
> Output
```ShellSession
NAME ZONE MACHINE_TYPE PREEMPTIBLE INTERNAL_IP EXTERNAL_IP STATUS
controller-0 us-west1-c e2-standard-2 10.240.0.10 XX.XX.XX.XXX RUNNING
controller-1 us-west1-c e2-standard-2 10.240.0.11 XX.XXX.XXX.XX RUNNING
controller-2 us-west1-c e2-standard-2 10.240.0.12 XX.XXX.XX.XXX RUNNING
worker-0 us-west1-c e2-standard-2 10.240.0.20 XX.XX.XXX.XXX RUNNING
worker-1 us-west1-c e2-standard-2 10.240.0.21 XX.XX.XX.XXX RUNNING
worker-2 us-west1-c e2-standard-2 10.240.0.22 XX.XXX.XX.XX RUNNING
```
### Configuring SSH Access
Kubespray relies on SSH to configure the controller and worker instances.
Test SSH access to the `controller-0` compute instance:
```ShellSession
IP_CONTROLLER_0=$(gcloud compute instances list --filter="tags.items=kubernetes-the-kubespray-way AND name:controller-0" --format="value(EXTERNAL_IP)")
USERNAME=$(whoami)
ssh $USERNAME@$IP_CONTROLLER_0
```
If this is your first time connecting to a compute instance, SSH keys will be
generated for you. In that case you will need to enter a passphrase at the
prompt to continue.
> If you get a 'Remote host identification changed!' warning, you probably
already connected to that IP address in the past with another host key. You
can remove the old host key by running `ssh-keygen -R $IP_CONTROLLER_0`
Repeat this procedure for all controller and worker nodes to ensure that SSH
access works for every node.
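To test all six nodes in one go, a small loop along these lines can help (a sketch that reuses the tag-based lookup shown above):
```ShellSession
for instance in controller-0 controller-1 controller-2 worker-0 worker-1 worker-2; do
  IP=$(gcloud compute instances list \
    --filter="tags.items=kubernetes-the-kubespray-way AND name:${instance}" \
    --format="value(EXTERNAL_IP)")
  ssh ${USERNAME}@${IP} hostname
done
```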
## Set-up Kubespray
The following set of instructions is based on the [Quick Start](https://github.com/kubernetes-sigs/kubespray) but slightly altered for our
set-up.
As Ansible is a Python application, we will create a fresh virtual
environment to install the dependencies for the Kubespray playbook:
```ShellSession
python3 -m venv venv
source venv/bin/activate
```
Next, we will git clone the Kubespray code into our working directory:
```ShellSession
git clone https://github.com/kubernetes-sigs/kubespray.git
cd kubespray
git checkout release-2.13
```
Now we need to install the dependencies for Ansible to run the Kubespray
playbook:
```ShellSession
pip install -r requirements.txt
```
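You can quickly check that the virtual environment now provides the Ansible version pinned by Kubespray's `requirements.txt`:
```ShellSession
ansible --version
```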
Copy ``inventory/sample`` as ``inventory/mycluster``:
```ShellSession
cp -rfp inventory/sample inventory/mycluster
```
Update Ansible inventory file with inventory builder:
```ShellSession
declare -a IPS=($(gcloud compute instances list --filter="tags.items=kubernetes-the-kubespray-way" --format="value(EXTERNAL_IP)" | tr '\n' ' '))
CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py ${IPS[@]}
```
Open the generated `inventory/mycluster/hosts.yaml` file and adjust it so
that controller-0, controller-1 and controller-2 are control plane nodes and
worker-0, worker-1 and worker-2 are worker nodes. Also update each host's `ip`
to its private VPC IP address and remove the `access_ip` entries.
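For reference, the adjusted inventory could look roughly like the sketch below (group names follow the release-2.13 sample inventory; the `ansible_host` values are placeholders for the external IP addresses and the `ip` values are the private VPC addresses):
```ShellSession
all:
  hosts:
    controller-0:
      ansible_host: XX.XX.XX.XXX
      ip: 10.240.0.10
    controller-1:
      ansible_host: XX.XXX.XXX.XX
      ip: 10.240.0.11
    controller-2:
      ansible_host: XX.XXX.XX.XXX
      ip: 10.240.0.12
    worker-0:
      ansible_host: XX.XX.XXX.XXX
      ip: 10.240.0.20
    worker-1:
      ansible_host: XX.XX.XX.XXX
      ip: 10.240.0.21
    worker-2:
      ansible_host: XX.XXX.XX.XX
      ip: 10.240.0.22
  children:
    kube-master:
      hosts:
        controller-0:
        controller-1:
        controller-2:
    kube-node:
      hosts:
        worker-0:
        worker-1:
        worker-2:
    etcd:
      hosts:
        controller-0:
        controller-1:
        controller-2:
    k8s-cluster:
      children:
        kube-master:
        kube-node:
    calico-rr:
      hosts: {}
```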
The main configuration for the cluster is stored in
`inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml`. In this file we
will update `supplementary_addresses_in_ssl_keys` with a list of the external
IP addresses of the controller nodes. That way we can access the Kubernetes
API server as an administrator from outside the VPC network. You can also see
that `kube_network_plugin` is set to 'calico' by default; setting it to
'cloud' did not work on GCP at the time of testing.
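Coming back to `supplementary_addresses_in_ssl_keys`: with the controller external IP addresses from the instance listing above (placeholders here), the line in `k8s-cluster.yml` would look roughly like this:
```ShellSession
supplementary_addresses_in_ssl_keys: [XX.XX.XX.XXX, XX.XXX.XXX.XX, XX.XXX.XX.XXX]
```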
Kubespray also makes it easy to enable popular Kubernetes add-ons. You can
modify the list of add-ons in
`inventory/mycluster/group_vars/k8s-cluster/addons.yml`.
Let's enable the metrics server, as it is a crucial monitoring element for
the Kubernetes cluster: change 'false' to 'true' for
`metrics_server_enabled`.
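You can either edit the file by hand or use a quick substitution (a sketch assuming GNU sed and the default `metrics_server_enabled: false` line):
```ShellSession
sed -i 's/^metrics_server_enabled: false/metrics_server_enabled: true/' \
  inventory/mycluster/group_vars/k8s-cluster/addons.yml
```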
Now we will deploy the configuration:
```ShellSession
ansible-playbook -i inventory/mycluster/hosts.yaml -u $USERNAME -b -v --private-key=~/.ssh/id_rsa cluster.yml
```
Ansible will now execute the playbook; this can take up to 20 minutes.
## Access the kubernetes cluster
We will leverage a kubeconfig file from one of the controller nodes to access
the cluster as administrator from our local workstation.
> In this simplified set-up we did not include a load balancer, which would
usually sit in front of the three controller nodes to provide a highly
available API server endpoint. Instead, we connect directly to one of the
three controllers.
First, we need to change the ownership of the kubeconfig file on one of the
controller nodes so that our user can read it:
```ShellSession
ssh $USERNAME@$IP_CONTROLLER_0
USERNAME=$(whoami)
sudo chown -R $USERNAME:$USERNAME /etc/kubernetes/admin.conf
exit
```
Now we will copy over the kubeconfig file:
```ShellSession
scp $USERNAME@$IP_CONTROLLER_0:/etc/kubernetes/admin.conf kubespray-do.conf
```
This kubeconfig file uses the internal IP address of the controller node to
access the API server, so it will not work from outside the VPC network. We
need to change the API server address to the external IP address of the
controller node. That external IP address will be accepted during the TLS
negotiation because we added the controllers' external IP addresses to the
SSL certificate configuration.
Open the file and modify the server IP address from the local IP to the
external IP address of controller-0, as stored in $IP_CONTROLLER_0.
> Example
```ShellSession
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: XXX
server: https://35.205.205.80:6443
name: cluster.local
...
```
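If you prefer not to edit the file by hand, a substitution along these lines works too (a sketch assuming GNU sed; it rewrites whatever server address is currently in the file):
```ShellSession
sed -i "s|server: https://.*:6443|server: https://${IP_CONTROLLER_0}:6443|" kubespray-do.conf
```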
Now, we load the configuration for `kubectl`:
```ShellSession
export KUBECONFIG=$PWD/kubespray-do.conf
```
We should be all set to communicate with our cluster from our local workstation:
```ShellSession
kubectl get nodes
```
> Output
```ShellSession
NAME STATUS ROLES AGE VERSION
controller-0 Ready master 47m v1.17.9
controller-1 Ready master 46m v1.17.9
controller-2 Ready master 46m v1.17.9
worker-0 Ready <none> 45m v1.17.9
worker-1 Ready <none> 45m v1.17.9
worker-2 Ready <none> 45m v1.17.9
```
## Smoke tests
### Metrics
Verify that the metrics server add-on was correctly installed and works:
```ShellSession
kubectl top nodes
```
> Output
```ShellSession
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
controller-0 191m 10% 1956Mi 26%
controller-1 190m 10% 1828Mi 24%
controller-2 182m 10% 1839Mi 24%
worker-0 87m 4% 1265Mi 16%
worker-1 102m 5% 1268Mi 16%
worker-2 108m 5% 1299Mi 17%
```
Note that metrics might not be available right away; it can take a couple of
minutes before you can actually retrieve them.
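If the metrics remain unavailable for longer, it is worth checking that the metrics-server pod is running (the label below assumes the upstream `k8s-app=metrics-server` labelling used by the add-on):
```ShellSession
kubectl -n kube-system get pods -l k8s-app=metrics-server
```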
### Network
Let's verify that the network layer is functioning properly and that pods can
reach each other:
```ShellSession
kubectl run myshell1 -it --rm --image busybox -- sh
hostname -i
# launch myshell2 in a separate terminal (see next code block) and ping the IP address it reports via hostname -i
ping <IP of myshell2>
```
```ShellSession
kubectl run myshell2 -it --rm --image busybox -- sh
hostname -i
ping <IP of myshell1>
```
> Output
```ShellSession
PING 10.233.108.2 (10.233.108.2): 56 data bytes
64 bytes from 10.233.108.2: seq=0 ttl=62 time=2.876 ms
64 bytes from 10.233.108.2: seq=1 ttl=62 time=0.398 ms
64 bytes from 10.233.108.2: seq=2 ttl=62 time=0.378 ms
^C
--- 10.233.108.2 ping statistics ---
3 packets transmitted, 3 packets received, 0% packet loss
round-trip min/avg/max = 0.378/1.217/2.876 ms
```
### Deployments
In this section you will verify the ability to create and manage [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/).
Create a deployment for the [nginx](https://nginx.org/en/) web server:
```ShellSession
kubectl create deployment nginx --image=nginx
```
List the pod created by the `nginx` deployment:
```ShellSession
kubectl get pods -l app=nginx
```
> Output
```ShellSession
NAME READY STATUS RESTARTS AGE
nginx-86c57db685-bmtt8 1/1 Running 0 18s
```
#### Port Forwarding
In this section you will verify the ability to access applications remotely using [port forwarding](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/).
Retrieve the full name of the `nginx` pod:
```ShellSession
POD_NAME=$(kubectl get pods -l app=nginx -o jsonpath="{.items[0].metadata.name}")
```
Forward port `8080` on your local machine to port `80` of the `nginx` pod:
```ShellSession
kubectl port-forward $POD_NAME 8080:80
```
> Output
```ShellSession
Forwarding from 127.0.0.1:8080 -> 80
Forwarding from [::1]:8080 -> 80
```
In a new terminal make an HTTP request using the forwarding address:
```ShellSession
curl --head http://127.0.0.1:8080
```
> Output
```ShellSession
HTTP/1.1 200 OK
Server: nginx/1.19.1
Date: Thu, 13 Aug 2020 11:12:04 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 07 Jul 2020 15:52:25 GMT
Connection: keep-alive
ETag: "5f049a39-264"
Accept-Ranges: bytes
```
Switch back to the previous terminal and stop the port forwarding to the `nginx` pod:
```ShellSession
Forwarding from 127.0.0.1:8080 -> 80
Forwarding from [::1]:8080 -> 80
Handling connection for 8080
^C
```
#### Logs
In this section you will verify the ability to [retrieve container logs](https://kubernetes.io/docs/concepts/cluster-administration/logging/).
Print the `nginx` pod logs:
```ShellSession
kubectl logs $POD_NAME
```
> Output
```ShellSession
...
127.0.0.1 - - [13/Aug/2020:11:12:04 +0000] "HEAD / HTTP/1.1" 200 0 "-" "curl/7.64.1" "-"
```
#### Exec
In this section you will verify the ability to [execute commands in a container](https://kubernetes.io/docs/tasks/debug-application-cluster/get-shell-running-container/#running-individual-commands-in-a-container).
Print the nginx version by executing the `nginx -v` command in the `nginx` container:
```ShellSession
kubectl exec -ti $POD_NAME -- nginx -v
```
> Output
```ShellSession
nginx version: nginx/1.19.1
```
### Kubernetes services
#### Expose outside of the cluster
In this section you will verify the ability to expose applications using a [Service](https://kubernetes.io/docs/concepts/services-networking/service/).
Expose the `nginx` deployment using a [NodePort](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport) service:
```ShellSession
kubectl expose deployment nginx --port 80 --type NodePort
```
> The LoadBalancer service type can not be used because your cluster is not configured with [cloud provider integration](https://kubernetes.io/docs/getting-started-guides/scratch/#cloud-provider). Setting up cloud provider integration is out of scope for this tutorial.
Retrieve the node port assigned to the `nginx` service:
```ShellSession
NODE_PORT=$(kubectl get svc nginx \
  --output=jsonpath='{.spec.ports[0].nodePort}')
```
Create a firewall rule that allows remote access to the `nginx` node port:
```ShellSession
gcloud compute firewall-rules create kubernetes-the-kubespray-way-allow-nginx-service \
--allow=tcp:${NODE_PORT} \
--network kubernetes-the-kubespray-way
```
Retrieve the external IP address of a worker instance:
```ShellSession
EXTERNAL_IP=$(gcloud compute instances describe worker-0 \
--format 'value(networkInterfaces[0].accessConfigs[0].natIP)')
```
Make an HTTP request using the external IP address and the `nginx` node port:
```ShellSession
curl -I http://${EXTERNAL_IP}:${NODE_PORT}
```
> Output
```ShellSession
HTTP/1.1 200 OK
Server: nginx/1.19.1
Date: Thu, 13 Aug 2020 11:15:02 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 07 Jul 2020 15:52:25 GMT
Connection: keep-alive
ETag: "5f049a39-264"
Accept-Ranges: bytes
```
#### Local DNS
We will now also verify that the Kubernetes built-in DNS works across namespaces.
Create a namespace:
```ShellSession
kubectl create namespace dev
```
Create an nginx deployment and expose it within the cluster:
```ShellSession
kubectl create deployment nginx --image=nginx -n dev
kubectl expose deployment nginx --port 80 --type ClusterIP -n dev
```
Run a temporary container to see if we can reach the service from the default
namespace:
```ShellSession
kubectl run curly -it --rm --image curlimages/curl:7.70.0 -- /bin/sh
curl --head http://nginx.dev:80
```
> Output
```ShellSession
HTTP/1.1 200 OK
Server: nginx/1.19.1
Date: Thu, 13 Aug 2020 11:15:59 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 07 Jul 2020 15:52:25 GMT
Connection: keep-alive
ETag: "5f049a39-264"
Accept-Ranges: bytes
```
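While still inside the temporary shell, you can optionally repeat the request with the fully qualified service name (assuming the default `cluster.local` cluster domain):
```ShellSession
curl --head http://nginx.dev.svc.cluster.local:80
```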
Type `exit` to leave the shell.
## Cleaning Up
### Kubernetes resources
Delete the dev namespace, the nginx deployment and service:
```ShellSession
kubectl delete namespace dev
kubectl delete deployment nginx
kubectl delete svc/nginx
```
### Kubernetes state
Note: you can skip this step if you want to entirely remove the machines.
If you want to keep the VMs and just remove the cluster state, you can simply
run another Ansible playbook:
```ShellSession
ansible-playbook -i inventory/mycluster/hosts.yaml -u $USERNAME -b -v --private-key=~/.ssh/id_rsa reset.yml
```
Resetting the cluster to the VMs' original state usually takes a couple of
minutes.
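As a quick sanity check: the API server is gone after the reset, so a `kubectl` call from your workstation should now fail to connect:
```ShellSession
kubectl get nodes
```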
### Compute instances
Delete the controller and worker compute instances:
```ShellSession
gcloud -q compute instances delete \
controller-0 controller-1 controller-2 \
worker-0 worker-1 worker-2 \
--zone $(gcloud config get-value compute/zone)
```
<!-- markdownlint-disable no-duplicate-heading -->
### Network
<!-- markdownlint-enable no-duplicate-heading -->
Delete the fixed IP addresses (assuming you named them after the VM names),
if you reserved any:
```ShellSession
gcloud -q compute addresses delete controller-0 controller-1 controller-2 \
worker-0 worker-1 worker-2
```
Delete the `kubernetes-the-kubespray-way` firewall rules:
```ShellSession
gcloud -q compute firewall-rules delete \
kubernetes-the-kubespray-way-allow-nginx-service \
kubernetes-the-kubespray-way-allow-internal \
kubernetes-the-kubespray-way-allow-external
```
Delete the `kubernetes-the-kubespray-way` VPC network:
```ShellSession
gcloud -q compute networks subnets delete kubernetes
gcloud -q compute networks delete kubernetes-the-kubespray-way
```
View File
@@ -11,23 +11,26 @@ and the `etcd` group merged with the `kube-master`.
`ha` layout consists of two etcd nodes, two masters and a single worker node,
with role intersection.
`scale` layout can be combined with above layouts (`ha-scale`, `separate-scale`). It includes 200 fake hosts
`scale` layout can be combined with above layouts. It includes 200 fake hosts
in the Ansible inventory. This helps test TLS certificate generation at scale
to prevent regressions and profile certain long-running tasks. These nodes are
never actually deployed, but certificates are generated for them.
Note, the canal network plugin deploys flannel as well plus calico policy controller.
## Test cases
## GCE instances
The [CI Matrix](/docs/ci.md) displays OS, Network Plugin and Container Manager tested.
| Stage| Network plugin| OS type| GCE region| Nodes layout
|--------------------|--------------------|--------------------|--------------------|--------------------|
| part1| calico| coreos-stable| us-west1-b| separate|
| part1| canal| debian-8-kubespray| us-east1-b| ha|
| part1| weave| rhel-7| europe-west1-b| default|
| part2| flannel| centos-7| us-west1-a| default|
| part2| calico| debian-8-kubespray| us-central1-b| default|
| part2| canal| coreos-stable| us-east1-b| default|
| special| canal| rhel-7| us-east1-b| separate|
| special| weave| ubuntu-1604-xenial| us-central1-b| default|
| special| calico| centos-7| europe-west1-b| ha-scale|
| special| weave| coreos-alpha| us-west1-a| ha-scale|
All tests are breakdown into 3 "stages" ("Stage" means a build step of the build pipeline) as follows:
- _unit_tests_: Linting, markdown, vagrant & terraform validation etc...
- _part1_: Molecule and AIO tests
- _part2_: Standard tests with different layouts and OS/Runtime/Network
- _part3_: Upgrade jobs, terraform jobs and recover control plane tests
- _special_: Other jobs (manuals)
The steps are ordered as `unit_tests->part1->part2->part3->special`.
The "Stage" means a build step of the build pipeline. The steps are ordered as `part1->part2->special`.
View File
@@ -17,14 +17,14 @@ By default, Vagrant uses Ubuntu 18.04 box to provision a local cluster. You may
Customize `$os` variable in `Vagrantfile` or as override, e.g.,:
```ShellSession
echo '$os = "flatcar-stable"' >> vagrant/config.rb
echo '$os = "coreos-stable"' >> vagrant/config.rb
```
The supported operating systems for vagrant are defined in the `SUPPORTED_OS` constant in the `Vagrantfile`.
## File and image caching
Kubespray can take quite a while to start on a laptop. To improve provisioning speed, the variable 'download_run_once' is set. This will make kubespray download all files and containers just once and then redistributes them to the other nodes and as a bonus, also cache all downloads locally and re-use them on the next provisioning run. For more information on download settings see [download documentation](/docs/downloads.md).
Kubespray can take quite a while to start on a laptop. To improve provisioning speed, the variable 'download_run_once' is set. This will make kubespray download all files and containers just once and then redistributes them to the other nodes and as a bonus, also cache all downloads locally and re-use them on the next provisioning run. For more information on download settings see [download documentation](downloads.md).
## Example use of Vagrant
View File
@@ -38,11 +38,11 @@ Some variables of note include:
* *loadbalancer_apiserver* - If defined, all hosts will connect to this
address instead of localhost for kube-masters and kube-master[0] for
kube-nodes. See more details in the
[HA guide](/docs/ha-mode.md).
[HA guide](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/ha-mode.md).
* *loadbalancer_apiserver_localhost* - makes all hosts to connect to
the apiserver internally load balanced endpoint. Mutual exclusive to the
`loadbalancer_apiserver`. See more details in the
[HA guide](/docs/ha-mode.md).
[HA guide](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/ha-mode.md).
## Cluster variables
@@ -72,6 +72,8 @@ following default cluster parameters:
on the CoreDNS service.
* *cloud_provider* - Enable extra Kubelet option if operating inside GCE or
OpenStack (default is unset)
* *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in
Kubernetes
* *kube_feature_gates* - A list of key=value pairs that describe feature gates for
alpha/experimental Kubernetes features. (defaults is `[]`)
* *authorization_modes* - A list of [authorization mode](
@@ -97,7 +99,6 @@ variables to match your requirements.
addition to Kubespray deployed DNS
* *nameservers* - Array of DNS servers configured for use by hosts
* *searchdomains* - Array of up to 4 search domains
* *dns_etchosts* - Content of hosts file for coredns and nodelocaldns
For more information, see [DNS
Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.md).
@@ -117,16 +118,15 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m
is unlikely to work on newer releases. Starting with Kubernetes v1.7
series, this now defaults to ``host``. Before v1.7, the default was Docker.
This is because of cgroup [issues](https://github.com/kubernetes/kubernetes/issues/43704).
* *kubelet_load_modules* - For some things, kubelet needs to load kernel modules. For example,
dynamic kernel services are needed for mounting persistent volumes into containers. These may not be
loaded by preinstall kubernetes processes. For example, ceph and rbd backed volumes. Set this variable to
true to let kubelet load kernel modules.
* *kubelet_cgroup_driver* - Allows manual override of the
cgroup-driver option for Kubelet. By default autodetection is used
to match Docker configuration.
* *kubelet_rotate_certificates* - Auto rotate the kubelet client certificates by requesting new certificates
from the kube-apiserver when the certificate expiration approaches.
* *kubelet_rotate_server_certificates* - Auto rotate the kubelet server certificates by requesting new certificates
from the kube-apiserver when the certificate expiration approaches.
**Note** that server certificates are **not** approved automatically. Approve them manually
(`kubectl get csr`, `kubectl certificate approve`) or implement custom approving controller like
[kubelet-rubber-stamp](https://github.com/kontena/kubelet-rubber-stamp).
* *node_labels* - Labels applied to nodes via kubelet --node-labels parameter.
For example, labels can be set in the inventory as variables or more widely in group_vars.
*node_labels* can be defined either as a dict or a comma-separated labels string:
@@ -169,12 +169,12 @@ in the form of dicts of key-value pairs of configuration parameters that will be
```yml
kubelet_config_extra_args:
evictionHard:
memory.available: "100Mi"
evictionSoftGracePeriod:
EvictionHard:
memory.available: "<100Mi"
EvictionSoftGracePeriod:
memory.available: "30s"
evictionSoft:
memory.available: "300Mi"
EvictionSoft:
memory.available: "<300Mi"
```
The possible vars are:
@@ -182,7 +182,7 @@ The possible vars are:
* *kubelet_config_extra_args*
* *kubelet_node_config_extra_args*
Previously, the same parameters could be passed as flags to kubelet binary with the following vars:
Previously, the same paramaters could be passed as flags to kubelet binary with the following vars:
* *kubelet_custom_flags*
* *kubelet_node_custom_flags*
View File
@@ -0,0 +1,57 @@
---
- hosts: localhost
tasks:
- name: CephFS Provisioner | Install pip packages
pip:
name: "{{ item.name }}"
version: "{{ item.version }}"
state: "{{ item.state }}"
extra_args: "{{ pip_extra_args | default(omit) }}"
with_items:
- { state: "present", name: "docker", version: "3.4.1" }
- { state: "present", name: "docker-compose", version: "1.21.2" }
- name: CephFS Provisioner | Check Go version
shell: |
go version
ignore_errors: yes
register: go_version_result
- name: CephFS Provisioner | Install Go 1.9
shell: |
add-apt-repository -y ppa:gophers/archive
apt-get update
apt-get install -y golang-1.9
ln -fs /usr/lib/go-1.9/bin/* /usr/local/bin/
when: 'go_version_result.rc != 0 or "go version go1.9" not in go_version_result.stdout'
- name: CephFS Provisioner | Check if image exists
shell: |
docker image list | grep 'cephfs-provisioner'
ignore_errors: yes
register: check_image_result
- block:
- name: CephFS Provisioner | Clone repo
git:
repo: https://github.com/kubernetes-incubator/external-storage.git
dest: "~/go/src/github.com/kubernetes-incubator/external-storage"
version: 06fddbe2
clone: yes
update: yes
- name: CephFS Provisioner | Build image
shell: |
cd ~/go/src/github.com/kubernetes-incubator/external-storage
REGISTRY=quay.io/kubespray/ VERSION=06fddbe2 make ceph/cephfs
- name: CephFS Provisioner | Push image
docker_image:
name: quay.io/kubespray/cephfs-provisioner:06fddbe2
push: yes
register: docker_image
retries: 10
until: docker_image is succeeded
when: check_image_result.rc != 0
View File
@@ -16,13 +16,13 @@
src: get_cinder_pvs.sh
dest: /tmp
mode: u+rwx
- name: Get PVs provisioned by in-tree cloud provider # noqa 301
- name: Get PVs provisioned by in-tree cloud provider
command: /tmp/get_cinder_pvs.sh
register: pvs
- name: Remove get_cinder_pvs.sh
file:
path: /tmp/get_cinder_pvs.sh
state: absent
- name: Rewrite the "pv.kubernetes.io/provisioned-by" annotation # noqa 301
- name: Rewrite the "pv.kubernetes.io/provisioned-by" annotation
command: "{{ bin_dir }}/kubectl annotate --overwrite pv {{ item }} pv.kubernetes.io/provisioned-by=cinder.csi.openstack.org"
loop: "{{ pvs.stdout_lines | list }}"
View File
@@ -25,7 +25,7 @@ bin_dir: /usr/local/bin
## Internal loadbalancers for apiservers
# loadbalancer_apiserver_localhost: true
# valid options are "nginx" or "haproxy"
# loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy"
# loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy"
## Local loadbalancer should use this port
## And must be set port 6443
@@ -35,6 +35,11 @@ loadbalancer_apiserver_port: 6443
loadbalancer_apiserver_healthcheck_port: 8081
### OTHER OPTIONAL VARIABLES
## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
## modules.
# kubelet_load_modules: false
## Upstream dns servers
# upstream_dns_servers:
View File
@@ -1,7 +1,6 @@
## When azure is used, you need to also set the following variables.
## see docs/azure.md for details on how to get these values
# azure_cloud:
# azure_tenant_id:
# azure_subscription_id:
# azure_aad_client_id:
View File
@@ -10,10 +10,6 @@ docker_container_storage_setup: false
## Otherwise docker-storage-setup will be executed incorrectly.
# docker_container_storage_setup_devs: /dev/vdb
## Uncomment this if you want to change the Docker Cgroup driver (native.cgroupdriver)
## Valid options are systemd or cgroupfs, default is systemd
# docker_cgroup_driver: systemd
## Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
docker_dns_servers_strict: false
View File
@@ -28,19 +28,6 @@
# external_openstack_lbaas_monitor_max_retries: "3"
# external_openstack_lbaas_manage_security_groups: false
# external_openstack_lbaas_internal_lb: false
# external_openstack_network_ipv6_disabled: false
# external_openstack_network_internal_networks:
# - ""
# external_openstack_network_public_networks:
# - ""
# external_openstack_metadata_search_order: "configDrive,metadataService"
## Application credentials to authenticate against Keystone API
## Those settings will take precedence over username and password that might be set your environment
## All of them are required
# external_openstack_application_credential_name:
# external_openstack_application_credential_id:
# external_openstack_application_credential_secret:
## The tag of the external OpenStack Cloud Controller image
# external_openstack_cloud_controller_image_tag: "latest"
View File
@@ -19,4 +19,4 @@
# etcd_peer_client_auth: true
## Settings for etcd deployment type
etcd_deployment_type: docker
etcd_deployment_type: docker
View File
@@ -26,7 +26,7 @@ local_path_provisioner_enabled: false
# local_path_provisioner_claim_root: /opt/local-path-provisioner/
# local_path_provisioner_debug: false
# local_path_provisioner_image_repo: "rancher/local-path-provisioner"
# local_path_provisioner_image_tag: "v0.0.14"
# local_path_provisioner_image_tag: "v0.0.2"
# local_path_provisioner_helper_image_repo: "busybox"
# local_path_provisioner_helper_image_tag: "latest"
@@ -103,11 +103,6 @@ ingress_publish_status_address: ""
# ingress_nginx_extra_args:
# - --default-ssl-certificate=default/foo-tls
# ambassador ingress controller deployment
ingress_ambassador_enabled: false
# ingress_ambassador_namespace: "ambassador"
# ingress_ambassador_version: "*"
# ALB ingress controller deployment
ingress_alb_enabled: false
# alb_ingress_aws_region: "us-east-1"
@@ -119,27 +114,3 @@ ingress_alb_enabled: false
# Cert manager deployment
cert_manager_enabled: false
# cert_manager_namespace: "cert-manager"
# MetalLB deployment
metallb_enabled: false
# metallb_ip_range:
# - "10.5.0.50-10.5.0.99"
# metallb_version: v0.9.3
# metallb_protocol: "layer2"
# metallb_port: "7472"
# metallb_limits_cpu: "100m"
# metallb_limits_mem: "100Mi"
# metallb_additional_address_pools:
# kube_service_pool:
# ip_range:
# - "10.5.1.50-10.5.1.99"
# protocol: "layer2"
# auto_assign: false
# metallb_protocol: "bgp"
# metallb_peers:
# - peer_address: 192.0.2.1
# peer_asn: 64512
# my_asn: 4200000000
# - peer_address: 192.0.2.2
# peer_asn: 64513
# my_asn: 4200000000
Some files were not shown because too many files have changed in this diff.