Compare commits

..

5 Commits

Author SHA1 Message Date
Etienne Champetier
abe9b40602 Ensure we always fixup kube-proxy kubeconfig (#5524) (#5559)
When running with serial != 100%, as upgrade_cluster.yml does, we need to apply this fixup each time.
The problem was introduced in 05dc2b3a09.

Signed-off-by: Etienne Champetier <champetier.etienne@gmail.com>
(cherry picked from commit 5e9479cded)
2020-03-19 06:33:23 -07:00
Florian Ruynat
b0ccda8a42 Upgrade to Kubernetes 1.15.11 (#5578) 2020-03-19 02:57:13 -07:00
Florent Monbillard
c8dad3f6c6 Backport remove dockerproject (#5682) (#5780)
* Remove dockerproject org (#5548)

* Change dockerproject.org to download.docker.com

dockerproject.org was deprecated in 2017 and has gone down.

* Restore yum repo for containerd

Change-Id: I883bb512a2164a85865b1bd4fb569af0358c8c2b

Co-authored-by: Craig Rodrigues <rodrigc@crodrigues.org>

* Remove the legacy docker repo in kubernetes/preinstall before any packages are installed (#5640)

* Remove dockerproject_.+_repo_.+ variables (#5662)

Change 38688a4486 replaced the values of the dockerproject_.+_repo_.+ docker
variables, but their new values were already defined in other variables. This
change removes the dockerproject_.+_repo_.+ variables in favor of the older
ones.

* Remove stale legacy yum docker repo /etc/yum.repos.d/docker.repo (#5569)

* Remove stale legacy yum docker repo /etc/yum.repos.d/docker.repo

* move task 'Remove legacy docker repo file' to pre-upgrade.yml

* Fix the upgrade procedure when the playbook includes the kubernetes/preinstall role but not the container-engine role (#5695)

In that case the play fails with: 'yum_repo_dir' is undefined

Co-authored-by: Matthew Mosesohn <matthew.mosesohn@gmail.com>
Co-authored-by: Craig Rodrigues <rodrigc@crodrigues.org>
Co-authored-by: Victor Morales <chipahuac@hotmail.com>

Co-authored-by: Kubernetes Prow Robot <k8s-ci-robot@users.noreply.github.com>
Co-authored-by: Matthew Mosesohn <matthew.mosesohn@gmail.com>
Co-authored-by: Craig Rodrigues <rodrigc@crodrigues.org>
Co-authored-by: Victor Morales <chipahuac@hotmail.com>
2020-03-17 07:38:22 -07:00
Etienne Champetier
5ec9ab7ec0 Upgrade to Kubernetes 1.15.6 (#5343)
Signed-off-by: Etienne Champetier <champetier.etienne@gmail.com>
2019-11-22 00:11:29 -08:00
Hugo Blom
73097aa39d Preparing a 2.11.1 release - kubernetes 1.15.5 (#5278)
* add checksums for 1.15.4 and 1.15.5 and set 1.15.5 to default

* Upgrade nodelocaldns to 1.15.5 (#5191)

* do not rebase from master for 2.11 release branch
2019-10-30 01:56:52 -07:00
271 changed files with 1569 additions and 4082 deletions

View File

@@ -5,8 +5,6 @@ skip_list:
# The following rules throw errors. # The following rules throw errors.
# These either still need to be corrected in the repository and the rules re-enabled or documented why they are skipped on purpose. # These either still need to be corrected in the repository and the rules re-enabled or documented why they are skipped on purpose.
- '301' - '301'
- '302'
- '303'
- '305' - '305'
- '306' - '306'
- '404' - '404'

View File

@@ -30,15 +30,14 @@ variables:
before_script: before_script:
- ./tests/scripts/rebase.sh - ./tests/scripts/rebase.sh
- update-alternatives --install /usr/bin/python python /usr/bin/python3 1 - /usr/bin/python -m pip install -r tests/requirements.txt
- python -m pip install -r tests/requirements.txt
- mkdir -p /.ssh - mkdir -p /.ssh
.job: &job .job: &job
tags: tags:
- packet - packet
variables: variables:
KUBESPRAY_VERSION: v2.11.0 KUBESPRAY_VERSION: v2.10.0
image: quay.io/kubespray/kubespray:$KUBESPRAY_VERSION image: quay.io/kubespray/kubespray:$KUBESPRAY_VERSION
.testcases: &testcases .testcases: &testcases
@@ -46,7 +45,6 @@ before_script:
services: services:
- docker:dind - docker:dind
before_script: before_script:
- update-alternatives --install /usr/bin/python python /usr/bin/python3 1
- ./tests/scripts/rebase.sh - ./tests/scripts/rebase.sh
- ./tests/scripts/testcases_prepare.sh - ./tests/scripts/testcases_prepare.sh
script: script:
@@ -68,5 +66,6 @@ ci-authorized:
include: include:
- .gitlab-ci/lint.yml - .gitlab-ci/lint.yml
- .gitlab-ci/shellcheck.yml - .gitlab-ci/shellcheck.yml
- .gitlab-ci/digital-ocean.yml
- .gitlab-ci/terraform.yml - .gitlab-ci/terraform.yml
- .gitlab-ci/packet.yml - .gitlab-ci/packet.yml

View File

@@ -0,0 +1,19 @@
---
.do_variables: &do_variables
PRIVATE_KEY: $DO_PRIVATE_KEY
CI_PLATFORM: "do"
SSH_USER: root
.do: &do
extends: .testcases
tags:
- do
do_ubuntu-canal-ha:
stage: deploy-part2
extends: .do
variables:
<<: *do_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]

View File

@@ -2,8 +2,6 @@
yamllint: yamllint:
extends: .job extends: .job
stage: unit-tests stage: unit-tests
variables:
LANG: C.UTF-8
script: script:
- yamllint --strict . - yamllint --strict .
except: ['triggers', 'master'] except: ['triggers', 'master']
@@ -44,20 +42,8 @@ syntax-check:
tox-inventory-builder: tox-inventory-builder:
stage: unit-tests stage: unit-tests
extends: .job extends: .job
before_script:
- ./tests/scripts/rebase.sh
- apt-get update && apt-get install -y python3-pip
- update-alternatives --install /usr/bin/python python /usr/bin/python3 10
- python -m pip install -r tests/requirements.txt
script: script:
- pip3 install tox - pip install tox
- cd contrib/inventory_builder && tox - cd contrib/inventory_builder && tox
when: manual
except: ['triggers', 'master'] except: ['triggers', 'master']
markdownlint:
stage: unit-tests
image: node
before_script:
- npm install -g markdownlint-cli
script:
- markdownlint README.md docs --ignore docs/_sidebar.md

View File

@@ -1,126 +1,122 @@
--- ---
.packet_variables: &packet_variables
CI_PLATFORM: "packet"
SSH_USER: "kubespray"
.packet: &packet .packet: &packet
extends: .testcases extends: .testcases
variables: variables:
CI_PLATFORM: "packet" <<: *packet_variables
SSH_USER: "kubespray"
tags: tags:
- packet - packet
only: [/^pr-.*$/] only: [/^pr-.*$/]
except: ['triggers'] except: ['triggers']
.test-upgrade: &test-upgrade
variables:
UPGRADE_TEST: "graceful"
packet_ubuntu18-calico-aio: packet_ubuntu18-calico-aio:
stage: deploy-part1 stage: deploy-part1
extends: .packet <<: *packet
when: on_success when: on_success
# ### PR JOBS PART2 # ### PR JOBS PART2
packet_centos7-flannel-addons: packet_centos7-flannel-addons:
extends: .packet
stage: deploy-part2 stage: deploy-part2
<<: *packet
when: on_success when: on_success
# ### MANUAL JOBS # ### MANUAL JOBS
packet_centos-weave-kubeadm-sep: packet_centos-weave-kubeadm-sep:
stage: deploy-part2 stage: deploy-part2
extends: .packet <<: *packet
when: on_success when: on_success
variables: only: ['triggers']
UPGRADE_TEST: basic except: []
packet_ubuntu-weave-sep: packet_ubuntu-weave-sep:
stage: deploy-part2 stage: deploy-part2
extends: .packet <<: *packet
when: manual when: manual
only: ['triggers']
except: []
# # More builds for PRs/merges (manual) and triggers (auto) # # More builds for PRs/merges (manual) and triggers (auto)
packet_ubuntu-canal-ha: packet_ubuntu-canal-ha:
stage: deploy-special stage: deploy-special
extends: .packet <<: *packet
when: manual when: manual
packet_ubuntu-canal-kubeadm: packet_ubuntu-canal-kubeadm:
stage: deploy-part2 stage: deploy-part2
extends: .packet <<: *packet
when: on_success when: on_success
packet_ubuntu-flannel-ha: packet_ubuntu-flannel-ha:
stage: deploy-part2 stage: deploy-part2
extends: .packet <<: *packet
when: manual when: manual
# Contiv does not work in k8s v1.16 packet_ubuntu-contiv-sep:
# packet_ubuntu-contiv-sep: stage: deploy-part2
# stage: deploy-part2 <<: *packet
# extends: .packet when: on_success
# when: on_success
packet_ubuntu18-cilium-sep: packet_ubuntu18-cilium-sep:
stage: deploy-special stage: deploy-special
extends: .packet <<: *packet
when: manual when: manual
packet_ubuntu18-flannel-containerd: packet_ubuntu18-flannel-containerd:
stage: deploy-part2 stage: deploy-part2
extends: .packet <<: *packet
when: manual when: manual
packet_debian9-macvlan-sep: packet_debian9-macvlan-sep:
stage: deploy-part2 stage: deploy-part2
extends: .packet <<: *packet
when: manual when: on_success
packet_debian9-calico-upgrade: packet_debian9-calico-upgrade:
stage: deploy-part2 stage: deploy-part2
extends: .packet <<: *packet
when: on_success
variables:
UPGRADE_TEST: graceful
packet_debian10-containerd:
stage: deploy-part2
extends: .packet
when: on_success when: on_success
packet_centos7-calico-ha: packet_centos7-calico-ha:
stage: deploy-part2 stage: deploy-part2
extends: .packet <<: *packet
when: manual when: manual
packet_centos7-kube-ovn: packet_centos7-kube-ovn:
stage: deploy-part2 stage: deploy-part2
extends: .packet <<: *packet
when: on_success when: on_success
packet_centos7-kube-router: packet_centos7-kube-router:
stage: deploy-part2 stage: deploy-part2
extends: .packet <<: *packet
when: manual when: on_success
packet_centos7-multus-calico: packet_centos7-multus-calico:
stage: deploy-part2 stage: deploy-part2
extends: .packet <<: *packet
when: manual when: manual
packet_opensuse-canal: packet_opensuse-canal:
stage: deploy-part2 stage: deploy-part2
extends: .packet <<: *packet
when: manual when: manual
packet_oracle-7-canal: packet_oracle-7-canal:
stage: deploy-part2 stage: deploy-part2
extends: .packet <<: *packet
when: manual when: manual
packet_ubuntu-kube-router-sep: packet_ubuntu-kube-router-sep:
stage: deploy-part2 stage: deploy-part2
extends: .packet <<: *packet
when: manual
packet_amazon-linux-2-aio:
stage: deploy-part2
extends: .packet
when: manual when: manual

View File

@@ -3,14 +3,14 @@
.terraform_install: .terraform_install:
extends: .job extends: .job
before_script: before_script:
- update-alternatives --install /usr/bin/python python /usr/bin/python3 1
- ./tests/scripts/rebase.sh - ./tests/scripts/rebase.sh
- ./tests/scripts/testcases_prepare.sh - ./tests/scripts/testcases_prepare.sh
- ./tests/scripts/terraform_install.sh - ./tests/scripts/terraform_install.sh
# Set Ansible config # Set Ansible config
- cp ansible.cfg ~/.ansible.cfg - cp ansible.cfg ~/.ansible.cfg
# Prepare inventory # Prepare inventory
- cp contrib/terraform/$PROVIDER/sample-inventory/cluster.tfvars . - if [ "$PROVIDER" == "openstack" ]; then VARIABLEFILE="cluster.tfvars"; else VARIABLEFILE="cluster.tf"; fi
- cp contrib/terraform/$PROVIDER/sample-inventory/$VARIABLEFILE .
- ln -s contrib/terraform/$PROVIDER/hosts - ln -s contrib/terraform/$PROVIDER/hosts
- terraform init contrib/terraform/$PROVIDER - terraform init contrib/terraform/$PROVIDER
# Copy SSH keypair # Copy SSH keypair
@@ -24,7 +24,8 @@
stage: unit-tests stage: unit-tests
only: ['master', /^pr-.*$/] only: ['master', /^pr-.*$/]
script: script:
- terraform validate -var-file=cluster.tfvars contrib/terraform/$PROVIDER - if [ "$PROVIDER" == "openstack" ]; then VARIABLEFILE="cluster.tfvars"; else VARIABLEFILE="cluster.tf"; fi
- terraform validate -var-file=$VARIABLEFILE contrib/terraform/$PROVIDER
- terraform fmt -check -diff contrib/terraform/$PROVIDER - terraform fmt -check -diff contrib/terraform/$PROVIDER
.terraform_apply: .terraform_apply:
@@ -47,51 +48,51 @@
tf-validate-openstack: tf-validate-openstack:
extends: .terraform_validate extends: .terraform_validate
variables: variables:
TF_VERSION: 0.12.12 TF_VERSION: 0.12.6
PROVIDER: openstack PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME CLUSTER: $CI_COMMIT_REF_NAME
tf-validate-packet: tf-validate-packet:
extends: .terraform_validate extends: .terraform_validate
variables: variables:
TF_VERSION: 0.12.12 TF_VERSION: 0.11.11
PROVIDER: packet PROVIDER: packet
CLUSTER: $CI_COMMIT_REF_NAME CLUSTER: $CI_COMMIT_REF_NAME
tf-validate-aws: tf-validate-aws:
extends: .terraform_validate extends: .terraform_validate
variables: variables:
TF_VERSION: 0.12.12 TF_VERSION: 0.11.11
PROVIDER: aws PROVIDER: aws
CLUSTER: $CI_COMMIT_REF_NAME CLUSTER: $CI_COMMIT_REF_NAME
# tf-packet-ubuntu16-default: tf-packet-ubuntu16-default:
# extends: .terraform_apply extends: .terraform_apply
# variables: variables:
# TF_VERSION: 0.12.12 TF_VERSION: 0.11.11
# PROVIDER: packet PROVIDER: packet
# CLUSTER: $CI_COMMIT_REF_NAME CLUSTER: $CI_COMMIT_REF_NAME
# TF_VAR_number_of_k8s_masters: "1" TF_VAR_number_of_k8s_masters: "1"
# TF_VAR_number_of_k8s_nodes: "1" TF_VAR_number_of_k8s_nodes: "1"
# TF_VAR_plan_k8s_masters: t1.small.x86 TF_VAR_plan_k8s_masters: t1.small.x86
# TF_VAR_plan_k8s_nodes: t1.small.x86 TF_VAR_plan_k8s_nodes: t1.small.x86
# TF_VAR_facility: ewr1 TF_VAR_facility: ewr1
# TF_VAR_public_key_path: "" TF_VAR_public_key_path: ""
# TF_VAR_operating_system: ubuntu_16_04 TF_VAR_operating_system: ubuntu_16_04
#
# tf-packet-ubuntu18-default: tf-packet-ubuntu18-default:
# extends: .terraform_apply extends: .terraform_apply
# variables: variables:
# TF_VERSION: 0.12.12 TF_VERSION: 0.11.11
# PROVIDER: packet PROVIDER: packet
# CLUSTER: $CI_COMMIT_REF_NAME CLUSTER: $CI_COMMIT_REF_NAME
# TF_VAR_number_of_k8s_masters: "1" TF_VAR_number_of_k8s_masters: "1"
# TF_VAR_number_of_k8s_nodes: "1" TF_VAR_number_of_k8s_nodes: "1"
# TF_VAR_plan_k8s_masters: t1.small.x86 TF_VAR_plan_k8s_masters: t1.small.x86
# TF_VAR_plan_k8s_nodes: t1.small.x86 TF_VAR_plan_k8s_nodes: t1.small.x86
# TF_VAR_facility: ams1 TF_VAR_facility: ams1
# TF_VAR_public_key_path: "" TF_VAR_public_key_path: ""
# TF_VAR_operating_system: ubuntu_18_04 TF_VAR_operating_system: ubuntu_18_04
.ovh_variables: &ovh_variables .ovh_variables: &ovh_variables
OS_AUTH_URL: https://auth.cloud.ovh.net/v3 OS_AUTH_URL: https://auth.cloud.ovh.net/v3
@@ -109,7 +110,7 @@ tf-ovh_ubuntu18-calico:
when: on_success when: on_success
variables: variables:
<<: *ovh_variables <<: *ovh_variables
TF_VERSION: 0.12.12 TF_VERSION: 0.12.6
PROVIDER: openstack PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME CLUSTER: $CI_COMMIT_REF_NAME
ANSIBLE_TIMEOUT: "60" ANSIBLE_TIMEOUT: "60"
@@ -137,7 +138,7 @@ tf-ovh_coreos-calico:
when: on_success when: on_success
variables: variables:
<<: *ovh_variables <<: *ovh_variables
TF_VERSION: 0.12.12 TF_VERSION: 0.12.6
PROVIDER: openstack PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME CLUSTER: $CI_COMMIT_REF_NAME
ANSIBLE_TIMEOUT: "60" ANSIBLE_TIMEOUT: "60"

View File

@@ -1,2 +0,0 @@
---
MD013: false

View File

@@ -13,6 +13,6 @@ RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - &&
stable" \ stable" \
&& apt update -y && apt-get install docker-ce -y && apt update -y && apt-get install docker-ce -y
COPY . . COPY . .
RUN /usr/bin/python3 -m pip install pip -U && /usr/bin/python3 -m pip install -r tests/requirements.txt && python3 -m pip install -r requirements.txt && update-alternatives --install /usr/bin/python python /usr/bin/python3 1 RUN /usr/bin/python3 -m pip install pip -U && /usr/bin/python3 -m pip install -r tests/requirements.txt && python3 -m pip install -r requirements.txt
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.14.4/bin/linux/amd64/kubectl \ RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.14.4/bin/linux/amd64/kubectl \
&& chmod a+x kubectl && cp kubectl /usr/local/bin/kubectl && chmod a+x kubectl && cp kubectl /usr/local/bin/kubectl

View File

@@ -4,12 +4,18 @@ aliases:
- mattymo - mattymo
- atoms - atoms
- chadswen - chadswen
- mirwan - rsmitty
- miouge1 - bogdando
- riverzhang - bradbeam
- verwilst
- woopstar - woopstar
- riverzhang
- holser
- smana
- verwilst
kubespray-reviewers: kubespray-reviewers:
- jjungnickel - jjungnickel
- archifleks - archifleks
- chapsuk
- mirwan
- miouge1
- holmsten - holmsten

252
README.md
View File

@@ -1,17 +1,19 @@
# Deploy a Production Ready Kubernetes Cluster
![Kubernetes Logo](https://raw.githubusercontent.com/kubernetes-sigs/kubespray/master/docs/img/kubernetes-logo.png) ![Kubernetes Logo](https://raw.githubusercontent.com/kubernetes-sigs/kubespray/master/docs/img/kubernetes-logo.png)
Deploy a Production Ready Kubernetes Cluster
============================================
If you have questions, check the [documentation](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**. If you have questions, check the [documentation](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
You can get your invite [here](http://slack.k8s.io/) You can get your invite [here](http://slack.k8s.io/)
- Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere, Packet (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal** - Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere, Packet (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal**
- **Highly available** cluster - **Highly available** cluster
- **Composable** (Choice of the network plugin for instance) - **Composable** (Choice of the network plugin for instance)
- Supports most popular **Linux distributions** - Supports most popular **Linux distributions**
- **Continuous integration tests** - **Continuous integration tests**
## Quick Start Quick Start
-----------
To deploy the cluster you can use : To deploy the cluster you can use :
@@ -19,35 +21,31 @@ To deploy the cluster you can use :
#### Usage #### Usage
```ShellSession # Install dependencies from ``requirements.txt``
# Install dependencies from ``requirements.txt`` sudo pip install -r requirements.txt
sudo pip install -r requirements.txt
# Copy ``inventory/sample`` as ``inventory/mycluster`` # Copy ``inventory/sample`` as ``inventory/mycluster``
cp -rfp inventory/sample inventory/mycluster cp -rfp inventory/sample inventory/mycluster
# Update Ansible inventory file with inventory builder # Update Ansible inventory file with inventory builder
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5) declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
CONFIG_FILE=inventory/mycluster/inventory.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]} CONFIG_FILE=inventory/mycluster/hosts.yml python3 contrib/inventory_builder/inventory.py ${IPS[@]}
# Review and change parameters under ``inventory/mycluster/group_vars`` # Review and change parameters under ``inventory/mycluster/group_vars``
cat inventory/mycluster/group_vars/all/all.yml cat inventory/mycluster/group_vars/all/all.yml
cat inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml cat inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml
# Deploy Kubespray with Ansible Playbook - run the playbook as root # Deploy Kubespray with Ansible Playbook - run the playbook as root
# The option `--become` is required, as for example writing SSL keys in /etc/, # The option `--become` is required, as for example writing SSL keys in /etc/,
# installing packages and interacting with various systemd daemons. # installing packages and interacting with various systemd daemons.
# Without --become the playbook will fail to run! # Without --become the playbook will fail to run!
ansible-playbook -i inventory/mycluster/inventory.ini --become --become-user=root cluster.yml ansible-playbook -i inventory/mycluster/hosts.yml --become --become-user=root cluster.yml
```
Note: When Ansible is already installed via system packages on the control machine, other python packages installed via `sudo pip install -r requirements.txt` will go to a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on Ubuntu). Note: When Ansible is already installed via system packages on the control machine, other python packages installed via `sudo pip install -r requirements.txt` will go to a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on Ubuntu).
As a consequence, `ansible-playbook` command will fail with: As a consequence, `ansible-playbook` command will fail with:
```
```raw
ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path. ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path.
``` ```
probably pointing on a task depending on a module present in requirements.txt (i.e. "unseal vault"). probably pointing on a task depending on a module present in requirements.txt (i.e. "unseal vault").
One way of solving this would be to uninstall the Ansible package and then, to install it via pip but it is not always possible. One way of solving this would be to uninstall the Ansible package and then, to install it via pip but it is not always possible.
@@ -58,151 +56,155 @@ A workaround consists of setting `ANSIBLE_LIBRARY` and `ANSIBLE_MODULE_UTILS` en
For Vagrant we need to install python dependencies for provisioning tasks. For Vagrant we need to install python dependencies for provisioning tasks.
Check if Python and pip are installed: Check if Python and pip are installed:
```ShellSession python -V && pip -V
python -V && pip -V
```
If this returns the version of the software, you're good to go. If not, download and install Python from here <https://www.python.org/downloads/source/> If this returns the version of the software, you're good to go. If not, download and install Python from here <https://www.python.org/downloads/source/>
Install the necessary requirements Install the necessary requirements
```ShellSession sudo pip install -r requirements.txt
sudo pip install -r requirements.txt vagrant up
vagrant up
```
## Documents Documents
---------
- [Requirements](#requirements) - [Requirements](#requirements)
- [Kubespray vs ...](docs/comparisons.md) - [Kubespray vs ...](docs/comparisons.md)
- [Getting started](docs/getting-started.md) - [Getting started](docs/getting-started.md)
- [Ansible inventory and tags](docs/ansible.md) - [Ansible inventory and tags](docs/ansible.md)
- [Integration with existing ansible repo](docs/integration.md) - [Integration with existing ansible repo](docs/integration.md)
- [Deployment data variables](docs/vars.md) - [Deployment data variables](docs/vars.md)
- [DNS stack](docs/dns-stack.md) - [DNS stack](docs/dns-stack.md)
- [HA mode](docs/ha-mode.md) - [HA mode](docs/ha-mode.md)
- [Network plugins](#network-plugins) - [Network plugins](#network-plugins)
- [Vagrant install](docs/vagrant.md) - [Vagrant install](docs/vagrant.md)
- [CoreOS bootstrap](docs/coreos.md) - [CoreOS bootstrap](docs/coreos.md)
- [Debian Jessie setup](docs/debian.md) - [Debian Jessie setup](docs/debian.md)
- [openSUSE setup](docs/opensuse.md) - [openSUSE setup](docs/opensuse.md)
- [Downloaded artifacts](docs/downloads.md) - [Downloaded artifacts](docs/downloads.md)
- [Cloud providers](docs/cloud.md) - [Cloud providers](docs/cloud.md)
- [OpenStack](docs/openstack.md) - [OpenStack](docs/openstack.md)
- [AWS](docs/aws.md) - [AWS](docs/aws.md)
- [Azure](docs/azure.md) - [Azure](docs/azure.md)
- [vSphere](docs/vsphere.md) - [vSphere](docs/vsphere.md)
- [Packet Host](docs/packet.md) - [Packet Host](docs/packet.md)
- [Large deployments](docs/large-deployments.md) - [Large deployments](docs/large-deployments.md)
- [Upgrades basics](docs/upgrades.md) - [Upgrades basics](docs/upgrades.md)
- [Roadmap](docs/roadmap.md) - [Roadmap](docs/roadmap.md)
## Supported Linux Distributions Supported Linux Distributions
-----------------------------
- **Container Linux by CoreOS** - **Container Linux by CoreOS**
- **Debian** Buster, Jessie, Stretch, Wheezy - **Debian** Buster, Jessie, Stretch, Wheezy
- **Ubuntu** 16.04, 18.04 - **Ubuntu** 16.04, 18.04
- **CentOS/RHEL** 7 - **CentOS/RHEL** 7
- **Fedora** 28 - **Fedora** 28
- **Fedora/CentOS** Atomic - **Fedora/CentOS** Atomic
- **openSUSE** Leap 42.3/Tumbleweed - **openSUSE** Leap 42.3/Tumbleweed
- **Oracle Linux** 7 - **Oracle Linux** 7
Note: Upstart/SysV init based OS types are not supported. Note: Upstart/SysV init based OS types are not supported.
## Supported Components Supported Components
--------------------
- Core - Core
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.16.7 - [kubernetes](https://github.com/kubernetes/kubernetes) v1.15.11
- [etcd](https://github.com/coreos/etcd) v3.3.10 - [etcd](https://github.com/coreos/etcd) v3.3.10
- [docker](https://www.docker.com/) v18.06 (see note) - [docker](https://www.docker.com/) v18.06 (see note)
- [cri-o](http://cri-o.io/) v1.14.0 (experimental: see [CRI-O Note](docs/cri-o.md). Only on centos based OS) - [cri-o](http://cri-o.io/) v1.11.5 (experimental: see [CRI-O Note](docs/cri-o.md). Only on centos based OS)
- Network Plugin - Network Plugin
- [cni-plugins](https://github.com/containernetworking/plugins) v0.8.1 - [cni-plugins](https://github.com/containernetworking/plugins) v0.8.1
- [calico](https://github.com/projectcalico/calico) v3.7.3 - [calico](https://github.com/projectcalico/calico) v3.7.3
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions) - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
- [cilium](https://github.com/cilium/cilium) v1.5.5 - [cilium](https://github.com/cilium/cilium) v1.5.5
- [contiv](https://github.com/contiv/install) v1.2.1 - [contiv](https://github.com/contiv/install) v1.2.1
- [flanneld](https://github.com/coreos/flannel) v0.11.0 - [flanneld](https://github.com/coreos/flannel) v0.11.0
- [kube-router](https://github.com/cloudnativelabs/kube-router) v0.2.5 - [kube-router](https://github.com/cloudnativelabs/kube-router) v0.2.5
- [multus](https://github.com/intel/multus-cni) v3.2.1 - [multus](https://github.com/intel/multus-cni) v3.2.1
- [weave](https://github.com/weaveworks/weave) v2.5.2 - [weave](https://github.com/weaveworks/weave) v2.5.2
- Application - Application
- [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11 - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
- [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11 - [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
- [cert-manager](https://github.com/jetstack/cert-manager) v0.11.0 - [cert-manager](https://github.com/jetstack/cert-manager) v0.5.2
- [coredns](https://github.com/coredns/coredns) v1.6.0 - [coredns](https://github.com/coredns/coredns) v1.6.0
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.26.1 - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.25.1
Note: The list of validated [docker versions](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.16.md) was updated to 1.13.1, 17.03, 17.06, 17.09, 18.06, 18.09. kubeadm now properly recognizes Docker 18.09.0 and newer, but still treats 18.06 as the default supported version. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin). Note: The list of validated [docker versions](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.13.md) was updated to 1.11.1, 1.12.1, 1.13.1, 17.03, 17.06, 17.09, 18.06. kubeadm now properly recognizes Docker 18.09.0 and newer, but still treats 18.06 as the default supported version. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
## Requirements Requirements
------------
- **Minimum required version of Kubernetes is v1.15** - **Minimum required version of Kubernetes is v1.14**
- **Ansible v2.7.8 and python-netaddr is installed on the machine that will run Ansible commands** - **Ansible v2.7.8 (or newer, but [not 2.8.x](https://github.com/kubernetes-sigs/kubespray/issues/4778)) and python-netaddr is installed on the machine
- **Jinja 2.9 (or newer) is required to run the Ansible Playbooks** that will run Ansible commands**
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/downloads.md#offline-environment)) - **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
- The target servers are configured to allow **IPv4 forwarding**. - The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/downloads.md#offline-environment))
- **Your ssh key must be copied** to all the servers part of your inventory. - The target servers are configured to allow **IPv4 forwarding**.
- The **firewalls are not managed**, you'll need to implement your own rules the way you used to. - **Your ssh key must be copied** to all the servers part of your inventory.
- The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
in order to avoid any issue during deployment you should disable your firewall. in order to avoid any issue during deployment you should disable your firewall.
- If kubespray is ran from non-root user account, correct privilege escalation method - If kubespray is ran from non-root user account, correct privilege escalation method
should be configured in the target servers. Then the `ansible_become` flag should be configured in the target servers. Then the `ansible_become` flag
or command parameters `--become or -b` should be specified. or command parameters `--become or -b` should be specified.
Hardware: Hardware:
These limits are safe guarded by Kubespray. Actual requirements for your workload can differ. For a sizing guide go to the [Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/#size-of-master-and-master-components) guide. These limits are safe guarded by Kubespray. Actual requirements for your workload can differ. For a sizing guide go to the [Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/#size-of-master-and-master-components) guide.
- Master - Master
- Memory: 1500 MB - Memory: 1500 MB
- Node - Node
- Memory: 1024 MB - Memory: 1024 MB
## Network Plugins Network Plugins
---------------
You can choose between 10 network plugins. (default: `calico`, except Vagrant uses `flannel`) You can choose between 10 network plugins. (default: `calico`, except Vagrant uses `flannel`)
- [flannel](docs/flannel.md): gre/vxlan (layer 2) networking. - [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.
- [calico](docs/calico.md): bgp (layer 3) networking. - [calico](docs/calico.md): bgp (layer 3) networking.
- [canal](https://github.com/projectcalico/canal): a composition of calico and flannel plugins. - [canal](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.
- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic. - [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.
- [contiv](docs/contiv.md): supports vlan, vxlan, bgp and Cisco SDN networking. This plugin is able to - [contiv](docs/contiv.md): supports vlan, vxlan, bgp and Cisco SDN networking. This plugin is able to
apply firewall policies, segregate containers in multiple network and bridging pods onto physical networks. apply firewall policies, segregate containers in multiple network and bridging pods onto physical networks.
- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster. - [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
(Please refer to `weave` [troubleshooting documentation](https://www.weave.works/docs/net/latest/troubleshooting/)). (Please refer to `weave` [troubleshooting documentation](https://www.weave.works/docs/net/latest/troubleshooting/)).
- [kube-ovn](docs/kube-ovn.md): Kube-OVN integrates the OVN-based Network Virtualization with Kubernetes. It offers an advanced Container Network Fabric for Enterprises. - [kube-ovn](docs/kube-ovn.md): Kube-OVN integrates the OVN-based Network Virtualization with Kubernetes. It offers an advanced Container Network Fabric for Enterprises.
- [kube-router](docs/kube-router.md): Kube-router is a L3 CNI for Kubernetes networking aiming to provide operational - [kube-router](docs/kube-router.md): Kube-router is a L3 CNI for Kubernetes networking aiming to provide operational
simplicity and high performance: it uses IPVS to provide Kube Services Proxy (if setup to replace kube-proxy), simplicity and high performance: it uses IPVS to provide Kube Services Proxy (if setup to replace kube-proxy),
iptables for network policies, and BGP for ods L3 networking (with optionally BGP peering with out-of-cluster BGP peers). iptables for network policies, and BGP for ods L3 networking (with optionally BGP peering with out-of-cluster BGP peers).
It can also optionally advertise routes to Kubernetes cluster Pods CIDRs, ClusterIPs, ExternalIPs and LoadBalancerIPs. It can also optionally advertise routes to Kubernetes cluster Pods CIDRs, ClusterIPs, ExternalIPs and LoadBalancerIPs.
- [macvlan](docs/macvlan.md): Macvlan is a Linux network driver. Pods have their own unique Mac and Ip address, connected directly the physical (layer 2) network. - [macvlan](docs/macvlan.md): Macvlan is a Linux network driver. Pods have their own unique Mac and Ip address, connected directly the physical (layer 2) network.
- [multus](docs/multus.md): Multus is a meta CNI plugin that provides multiple network interface support to pods. For each interface Multus delegates CNI calls to secondary CNI plugins such as Calico, macvlan, etc. - [multus](docs/multus.md): Multus is a meta CNI plugin that provides multiple network interface support to pods. For each interface Multus delegates CNI calls to secondary CNI plugins such as Calico, macvlan, etc.
The choice is defined with the variable `kube_network_plugin`. There is also an The choice is defined with the variable `kube_network_plugin`. There is also an
option to leverage built-in cloud provider networking instead. option to leverage built-in cloud provider networking instead.
See also [Network checker](docs/netcheck.md). See also [Network checker](docs/netcheck.md).
## Community docs and resources Community docs and resources
----------------------------
- [kubernetes.io/docs/setup/production-environment/tools/kubespray/](https://kubernetes.io/docs/setup/production-environment/tools/kubespray/) - [kubernetes.io/docs/getting-started-guides/kubespray/](https://kubernetes.io/docs/getting-started-guides/kubespray/)
- [kubespray, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr - [kubespray, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr
- [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty - [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty
- [Deploy a Kubernetes Cluster with Kubespray (video)](https://www.youtube.com/watch?v=N9q51JgbWu8) - [Deploy a Kubernetes Cluster with Kubespray (video)](https://www.youtube.com/watch?v=N9q51JgbWu8)
## Tools and projects on top of Kubespray Tools and projects on top of Kubespray
--------------------------------------
- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/v4/doc/integrations/ansible.rst) - [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/master/doc/integrations/ansible.rst)
- [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform) - [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform)
## CI Tests CI Tests
--------
[![Build graphs](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/badges/master/build.svg)](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/pipelines) [![Build graphs](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/badges/master/build.svg)](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/pipelines)

View File

@@ -3,19 +3,16 @@
The Kubespray Project is released on an as-needed basis. The process is as follows: The Kubespray Project is released on an as-needed basis. The process is as follows:
1. An issue is proposing a new release with a changelog since the last release 1. An issue is proposing a new release with a changelog since the last release
2. At least one of the [approvers](OWNERS_ALIASES) must approve this release 2. At least one of the [OWNERS](OWNERS) must LGTM this release
3. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes 3. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION`
4. An approver creates a release branch in the form `release-vX.Y` 4. The release issue is closed
5. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) docker image is built and tagged 5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
6. The `KUBESPRAY_VERSION` variable is updated in `.gitlab-ci.yml`
7. The release issue is closed
8. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
## Major/minor releases, merge freezes and milestones ## Major/minor releases, merge freezes and milestones
* Kubespray maintains one branch for major releases (vX.Y). Minor releases are available only as tags. * Kubespray does not maintain stable branches for releases. Releases are tags, not
branches, and there are no backports. Therefore, there is no need for merge
* Security patches and bugs might be backported. freezes as well.
* Fixes for major releases (vX.x.0) and minor releases (vX.Y.x) are delivered * Fixes for major releases (vX.x.0) and minor releases (vX.Y.x) are delivered
via maintenance releases (vX.Y.Z) and assigned to the corresponding open via maintenance releases (vX.Y.Z) and assigned to the corresponding open

2
Vagrantfile vendored
View File

@@ -206,7 +206,7 @@ Vagrant.configure("2") do |config|
ansible.inventory_path = $ansible_inventory_path ansible.inventory_path = $ansible_inventory_path
end end
ansible.become = true ansible.become = true
ansible.limit = "all,localhost" ansible.limit = "all"
ansible.host_key_checking = false ansible.host_key_checking = false
ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache", "-e ansible_become_pass=vagrant"] ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache", "-e ansible_become_pass=vagrant"]
ansible.host_vars = host_vars ansible.host_vars = host_vars

View File

@@ -16,6 +16,6 @@ library = ./library
callback_whitelist = profile_tasks callback_whitelist = profile_tasks
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
deprecation_warnings=False deprecation_warnings=False
inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds, .gpg inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds
[inventory] [inventory]
ignore_patterns = artifacts, credentials ignore_patterns = artifacts, credentials

View File

@@ -78,7 +78,6 @@
- { role: kubespray-defaults} - { role: kubespray-defaults}
- { role: kubernetes/kubeadm, tags: kubeadm} - { role: kubernetes/kubeadm, tags: kubeadm}
- { role: network_plugin, tags: network } - { role: network_plugin, tags: network }
- { role: kubernetes/node-label }
- hosts: calico-rr - hosts: calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"

View File

@@ -7,7 +7,7 @@ cluster_name: example
# node that can be used to access the masters and minions # node that can be used to access the masters and minions
use_bastion: false use_bastion: false
# Set this to a preferred name that will be used as the first part of the dns name for your bastotion host. For example: k8s-bastion.<azureregion>.cloudapp.azure.com. # Set this to a prefered name that will be used as the first part of the dns name for your bastotion host. For example: k8s-bastion.<azureregion>.cloudapp.azure.com.
# This is convenient when exceptions have to be configured on a firewall to allow ssh to the given bastion host. # This is convenient when exceptions have to be configured on a firewall to allow ssh to the given bastion host.
# bastion_domain_prefix: k8s-bastion # bastion_domain_prefix: k8s-bastion

View File

@@ -20,8 +20,6 @@
# Add range of hosts: inventory.py 10.10.1.3-10.10.1.5 # Add range of hosts: inventory.py 10.10.1.3-10.10.1.5
# Add hosts with different ip and access ip: # Add hosts with different ip and access ip:
# inventory.py 10.0.0.1,192.168.10.1 10.0.0.2,192.168.10.2 10.0.0.3,192.168.1.3 # inventory.py 10.0.0.1,192.168.10.1 10.0.0.2,192.168.10.2 10.0.0.3,192.168.1.3
# Add hosts with a specific hostname, ip, and optional access ip:
# inventory.py first,10.0.0.1,192.168.10.1 second,10.0.0.2 last,10.0.0.3
# Delete a host: inventory.py -10.10.1.3 # Delete a host: inventory.py -10.10.1.3
# Delete a host by id: inventory.py -node1 # Delete a host by id: inventory.py -node1
# #
@@ -46,8 +44,7 @@ import sys
ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster', ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster',
'calico-rr'] 'calico-rr']
PROTECTED_NAMES = ROLES PROTECTED_NAMES = ROLES
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames', AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'load']
'load']
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True, _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False} '0': False, 'no': False, 'false': False, 'off': False}
yaml = YAML() yaml = YAML()
@@ -82,7 +79,7 @@ class KubesprayInventory(object):
try: try:
self.hosts_file = open(config_file, 'r') self.hosts_file = open(config_file, 'r')
self.yaml_config = yaml.load(self.hosts_file) self.yaml_config = yaml.load(self.hosts_file)
except OSError: except FileNotFoundError:
pass pass
if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS: if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS:
@@ -197,21 +194,8 @@ class KubesprayInventory(object):
'ip': ip, 'ip': ip,
'access_ip': access_ip} 'access_ip': access_ip}
elif host[0].isalpha(): elif host[0].isalpha():
if ',' in host: raise Exception("Adding hosts by hostname is not supported.")
try:
hostname, ip, access_ip = host.split(',')
except Exception:
hostname, ip = host.split(',')
access_ip = ip
if self.exists_hostname(all_hosts, host):
self.debug("Skipping existing host {0}.".format(host))
continue
elif self.exists_ip(all_hosts, ip):
self.debug("Skipping existing host {0}.".format(ip))
continue
all_hosts[hostname] = {'ansible_host': access_ip,
'ip': ip,
'access_ip': access_ip}
return all_hosts return all_hosts
def range2ips(self, hosts): def range2ips(self, hosts):
@@ -222,10 +206,10 @@ class KubesprayInventory(object):
# Python 3.x # Python 3.x
start = int(ip_address(start_address)) start = int(ip_address(start_address))
end = int(ip_address(end_address)) end = int(ip_address(end_address))
except Exception: except:
# Python 2.7 # Python 2.7
start = int(ip_address(str(start_address))) start = int(ip_address(unicode(start_address)))
end = int(ip_address(str(end_address))) end = int(ip_address(unicode(end_address)))
return [ip_address(ip).exploded for ip in range(start, end + 1)] return [ip_address(ip).exploded for ip in range(start, end + 1)]
for host in hosts: for host in hosts:
@@ -364,8 +348,6 @@ class KubesprayInventory(object):
self.print_config() self.print_config()
elif command == 'print_ips': elif command == 'print_ips':
self.print_ips() self.print_ips()
elif command == 'print_hostnames':
self.print_hostnames()
elif command == 'load': elif command == 'load':
self.load_file(args) self.load_file(args)
else: else:
@@ -379,13 +361,11 @@ Available commands:
help - Display this message help - Display this message
print_cfg - Write inventory file to stdout print_cfg - Write inventory file to stdout
print_ips - Write a space-delimited list of IPs from "all" group print_ips - Write a space-delimited list of IPs from "all" group
print_hostnames - Write a space-delimited list of Hostnames from "all" group
Advanced usage: Advanced usage:
Add another host after initial creation: inventory.py 10.10.1.5 Add another host after initial creation: inventory.py 10.10.1.5
Add range of hosts: inventory.py 10.10.1.3-10.10.1.5 Add range of hosts: inventory.py 10.10.1.3-10.10.1.5
Add hosts with different ip and access ip: inventory.py 10.0.0.1,192.168.10.1 10.0.0.2,192.168.10.2 10.0.0.3,192.168.10.3 Add hosts with different ip and access ip: inventory.py 10.0.0.1,192.168.10.1 10.0.0.2,192.168.10.2 10.0.0.3,192.168.10.3
Add hosts with a specific hostname, ip, and optional access ip: first,10.0.0.1,192.168.10.1 second,10.0.0.2 last,10.0.0.3
Delete a host: inventory.py -10.10.1.3 Delete a host: inventory.py -10.10.1.3
Delete a host by id: inventory.py -node1 Delete a host by id: inventory.py -node1
@@ -401,9 +381,6 @@ MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
def print_config(self): def print_config(self):
yaml.dump(self.yaml_config, sys.stdout) yaml.dump(self.yaml_config, sys.stdout)
def print_hostnames(self):
print(' '.join(self.yaml_config['all']['hosts'].keys()))
def print_ips(self): def print_ips(self):
ips = [] ips = []
for host, opts in self.yaml_config['all']['hosts'].items(): for host, opts in self.yaml_config['all']['hosts'].items():
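
The `range2ips` hunk above differs only in its Python 2.7 fallback: on Python 2 the `ipaddress` backport expects unicode text (hence the `unicode()` wrapper on one side), while on Python 3 a plain `str` works. Below is a minimal standalone sketch of the same range expansion (`expand_range` is an illustrative name, not Kubespray code):

```python
# Illustration only (assumes Python 3): expand an IP range the way
# inventory.py's range2ips does, without the Python 2.7 fallback branch.
from ipaddress import ip_address


def expand_range(spec):
    """Expand '10.90.0.2-10.90.0.4' into a list of dotted-quad strings."""
    start_s, end_s = spec.split('-')
    start, end = int(ip_address(start_s)), int(ip_address(end_s))
    return [ip_address(i).exploded for i in range(start, end + 1)]


print(expand_range('10.90.0.2-10.90.0.4'))
# -> ['10.90.0.2', '10.90.0.3', '10.90.0.4']
```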

View File

@@ -12,7 +12,6 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import inventory
import mock import mock
import unittest import unittest
@@ -23,7 +22,7 @@ path = "./contrib/inventory_builder/"
if path not in sys.path: if path not in sys.path:
sys.path.append(path) sys.path.append(path)
import inventory # noqa import inventory
class TestInventory(unittest.TestCase): class TestInventory(unittest.TestCase):
@@ -44,8 +43,8 @@ class TestInventory(unittest.TestCase):
def test_get_ip_from_opts_invalid(self): def test_get_ip_from_opts_invalid(self):
optstring = "notanaddr=value something random!chars:D" optstring = "notanaddr=value something random!chars:D"
self.assertRaisesRegex(ValueError, "IP parameter not found", self.assertRaisesRegexp(ValueError, "IP parameter not found",
self.inv.get_ip_from_opts, optstring) self.inv.get_ip_from_opts, optstring)
def test_ensure_required_groups(self): def test_ensure_required_groups(self):
groups = ['group1', 'group2'] groups = ['group1', 'group2']
@@ -64,8 +63,8 @@ class TestInventory(unittest.TestCase):
def test_get_host_id_invalid(self): def test_get_host_id_invalid(self):
bad_hostnames = ['node', 'no99de', '01node', 'node.111111'] bad_hostnames = ['node', 'no99de', '01node', 'node.111111']
for hostname in bad_hostnames: for hostname in bad_hostnames:
self.assertRaisesRegex(ValueError, "Host name must end in an", self.assertRaisesRegexp(ValueError, "Host name must end in an",
self.inv.get_host_id, hostname) self.inv.get_host_id, hostname)
def test_build_hostnames_add_one(self): def test_build_hostnames_add_one(self):
changed_hosts = ['10.90.0.2'] changed_hosts = ['10.90.0.2']
@@ -193,8 +192,8 @@ class TestInventory(unittest.TestCase):
('node2', {'ansible_host': '10.90.0.3', ('node2', {'ansible_host': '10.90.0.3',
'ip': '10.90.0.3', 'ip': '10.90.0.3',
'access_ip': '10.90.0.3'})]) 'access_ip': '10.90.0.3'})])
self.assertRaisesRegex(ValueError, "Unable to find host", self.assertRaisesRegexp(ValueError, "Unable to find host",
self.inv.delete_host_by_ip, existing_hosts, ip) self.inv.delete_host_by_ip, existing_hosts, ip)
def test_purge_invalid_hosts(self): def test_purge_invalid_hosts(self):
proper_hostnames = ['node1', 'node2'] proper_hostnames = ['node1', 'node2']
@@ -310,8 +309,8 @@ class TestInventory(unittest.TestCase):
def test_range2ips_incorrect_range(self): def test_range2ips_incorrect_range(self):
host_range = ['10.90.0.4-a.9b.c.e'] host_range = ['10.90.0.4-a.9b.c.e']
self.assertRaisesRegex(Exception, "Range of ip_addresses isn't valid", self.assertRaisesRegexp(Exception, "Range of ip_addresses isn't valid",
self.inv.range2ips, host_range) self.inv.range2ips, host_range)
def test_build_hostnames_different_ips_add_one(self): def test_build_hostnames_different_ips_add_one(self):
changed_hosts = ['10.90.0.2,192.168.0.2'] changed_hosts = ['10.90.0.2,192.168.0.2']
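
The test changes above come down to the spelling of one unittest helper: `assertRaisesRegexp` is the only form available on Python 2.7, while Python 3 renamed it to `assertRaisesRegex` and keeps the old spelling only as a deprecated alias. A minimal self-contained example of the Python 3 form (hypothetical test case, not from Kubespray's suite):

```python
# Demonstrates unittest's assertRaisesRegex (the Python 3 name); on
# Python 2.7 the same check must be spelled assertRaisesRegexp.
import unittest


class RegexAssertionExample(unittest.TestCase):
    def test_invalid_int_literal(self):
        # Passes if int() raises ValueError with a message matching the regex.
        self.assertRaisesRegex(ValueError, "invalid literal", int, "ten")


if __name__ == "__main__":
    unittest.main()
```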

View File

@@ -1,7 +1,7 @@
[tox] [tox]
minversion = 1.6 minversion = 1.6
skipsdist = True skipsdist = True
envlist = pep8, py33 envlist = pep8, py27
[testenv] [testenv]
whitelist_externals = py.test whitelist_externals = py.test

View File

@@ -1,9 +1,4 @@
--- ---
- name: "Kubernetes Apps | Check cluster settings for MetalLB"
fail:
msg: "MetalLB require kube_proxy_strict_arp = true, see https://github.com/danderson/metallb/issues/153#issuecomment-518651132"
when:
- "kube_proxy_mode == 'ipvs' and not kube_proxy_strict_arp"
- name: "Kubernetes Apps | Lay Down MetalLB" - name: "Kubernetes Apps | Lay Down MetalLB"
become: true become: true
template: { src: "{{ item }}.j2", dest: "{{ kube_config_dir }}/{{ item }}" } template: { src: "{{ item }}.j2", dest: "{{ kube_config_dir }}/{{ item }}" }

View File

@@ -115,7 +115,7 @@ roleRef:
kind: Role kind: Role
name: config-watcher name: config-watcher
--- ---
apiVersion: apps/v1 apiVersion: apps/v1beta2
kind: DaemonSet kind: DaemonSet
metadata: metadata:
namespace: metallb-system namespace: metallb-system
@@ -169,7 +169,7 @@ spec:
- net_raw - net_raw
--- ---
apiVersion: apps/v1 apiVersion: apps/v1beta2
kind: Deployment kind: Deployment
metadata: metadata:
namespace: metallb-system namespace: metallb-system

View File

@@ -21,7 +21,7 @@ You can specify a `default_release` for apt on Debian/Ubuntu by overriding this
glusterfs_ppa_use: yes glusterfs_ppa_use: yes
glusterfs_ppa_version: "3.5" glusterfs_ppa_version: "3.5"
For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](https://docs.gluster.org/en/latest/Quick-Start-Guide/Quickstart/) for more info. For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](http://www.gluster.org/community/documentation/index.php/Getting_started_install) for more info.
## Dependencies ## Dependencies

View File

@@ -3,7 +3,7 @@
- name: Include OS-specific variables. - name: Include OS-specific variables.
include_vars: "{{ ansible_os_family }}.yml" include_vars: "{{ ansible_os_family }}.yml"
# Install xfs package # Instal xfs package
- name: install xfs Debian - name: install xfs Debian
apt: name=xfsprogs state=present apt: name=xfsprogs state=present
when: ansible_os_family == "Debian" when: ansible_os_family == "Debian"
@@ -36,7 +36,7 @@
- "{{ gluster_brick_dir }}" - "{{ gluster_brick_dir }}"
- "{{ gluster_mount_dir }}" - "{{ gluster_mount_dir }}"
- name: Configure Gluster volume with replicas - name: Configure Gluster volume.
gluster_volume: gluster_volume:
state: present state: present
name: "{{ gluster_brick_name }}" name: "{{ gluster_brick_name }}"
@@ -46,18 +46,6 @@
host: "{{ inventory_hostname }}" host: "{{ inventory_hostname }}"
force: yes force: yes
run_once: true run_once: true
when: groups['gfs-cluster']|length > 1
- name: Configure Gluster volume without replicas
gluster_volume:
state: present
name: "{{ gluster_brick_name }}"
brick: "{{ gluster_brick_dir }}"
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
host: "{{ inventory_hostname }}"
force: yes
run_once: true
when: groups['gfs-cluster']|length <= 1
- name: Mount glusterfs to retrieve disk size - name: Mount glusterfs to retrieve disk size
mount: mount:

View File

@@ -1,6 +1,6 @@
{ {
"kind": "DaemonSet", "kind": "DaemonSet",
"apiVersion": "apps/v1", "apiVersion": "extensions/v1beta1",
"metadata": { "metadata": {
"name": "glusterfs", "name": "glusterfs",
"labels": { "labels": {

View File

@@ -30,7 +30,7 @@
}, },
{ {
"kind": "Deployment", "kind": "Deployment",
"apiVersion": "apps/v1", "apiVersion": "extensions/v1beta1",
"metadata": { "metadata": {
"name": "deploy-heketi", "name": "deploy-heketi",
"labels": { "labels": {

View File

@@ -44,7 +44,7 @@
}, },
{ {
"kind": "Deployment", "kind": "Deployment",
"apiVersion": "apps/v1", "apiVersion": "extensions/v1beta1",
"metadata": { "metadata": {
"name": "heketi", "name": "heketi",
"labels": { "labels": {

View File

@@ -16,7 +16,7 @@
{ {
"addresses": [ "addresses": [
{ {
"ip": "{{ hostvars[node].ip }}" "ip": "{{ hostvars[node]['ansible_facts']['default_ipv4']['address'] }}"
} }
], ],
"ports": [ "ports": [

View File

@@ -12,7 +12,7 @@
"{{ node }}" "{{ node }}"
], ],
"storage": [ "storage": [
"{{ hostvars[node].ip }}" "{{ hostvars[node]['ansible_facts']['default_ipv4']['address'] }}"
] ]
}, },
"zone": 1 "zone": 1

View File

@@ -10,7 +10,7 @@ This project will create:
* AWS ELB in the Public Subnet for accessing the Kubernetes API from the internet * AWS ELB in the Public Subnet for accessing the Kubernetes API from the internet
**Requirements** **Requirements**
- Terraform 0.12.0 or newer - Terraform 0.8.7 or newer
**How to Use:** **How to Use:**

View File

@@ -1,5 +1,5 @@
terraform { terraform {
required_version = ">= 0.12.0" required_version = ">= 0.8.7"
} }
provider "aws" { provider "aws" {
@@ -16,22 +16,22 @@ data "aws_availability_zones" "available" {}
*/ */
module "aws-vpc" { module "aws-vpc" {
source = "./modules/vpc" source = "modules/vpc"
aws_cluster_name = "${var.aws_cluster_name}" aws_cluster_name = "${var.aws_cluster_name}"
aws_vpc_cidr_block = "${var.aws_vpc_cidr_block}" aws_vpc_cidr_block = "${var.aws_vpc_cidr_block}"
aws_avail_zones = "${slice(data.aws_availability_zones.available.names, 0, 2)}" aws_avail_zones = "${slice(data.aws_availability_zones.available.names,0,2)}"
aws_cidr_subnets_private = "${var.aws_cidr_subnets_private}" aws_cidr_subnets_private = "${var.aws_cidr_subnets_private}"
aws_cidr_subnets_public = "${var.aws_cidr_subnets_public}" aws_cidr_subnets_public = "${var.aws_cidr_subnets_public}"
default_tags = "${var.default_tags}" default_tags = "${var.default_tags}"
} }
module "aws-elb" { module "aws-elb" {
source = "./modules/elb" source = "modules/elb"
aws_cluster_name = "${var.aws_cluster_name}" aws_cluster_name = "${var.aws_cluster_name}"
aws_vpc_id = "${module.aws-vpc.aws_vpc_id}" aws_vpc_id = "${module.aws-vpc.aws_vpc_id}"
aws_avail_zones = "${slice(data.aws_availability_zones.available.names, 0, 2)}" aws_avail_zones = "${slice(data.aws_availability_zones.available.names,0,2)}"
aws_subnet_ids_public = "${module.aws-vpc.aws_subnet_ids_public}" aws_subnet_ids_public = "${module.aws-vpc.aws_subnet_ids_public}"
aws_elb_api_port = "${var.aws_elb_api_port}" aws_elb_api_port = "${var.aws_elb_api_port}"
k8s_secure_api_port = "${var.k8s_secure_api_port}" k8s_secure_api_port = "${var.k8s_secure_api_port}"
@@ -39,7 +39,7 @@ module "aws-elb" {
} }
module "aws-iam" { module "aws-iam" {
source = "./modules/iam" source = "modules/iam"
aws_cluster_name = "${var.aws_cluster_name}" aws_cluster_name = "${var.aws_cluster_name}"
} }
@@ -54,18 +54,18 @@ resource "aws_instance" "bastion-server" {
instance_type = "${var.aws_bastion_size}" instance_type = "${var.aws_bastion_size}"
count = "${length(var.aws_cidr_subnets_public)}" count = "${length(var.aws_cidr_subnets_public)}"
associate_public_ip_address = true associate_public_ip_address = true
availability_zone = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}" availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_public, count.index)}" subnet_id = "${element(module.aws-vpc.aws_subnet_ids_public,count.index)}"
vpc_security_group_ids = "${module.aws-vpc.aws_security_group}" vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
key_name = "${var.AWS_SSH_KEY_NAME}" key_name = "${var.AWS_SSH_KEY_NAME}"
tags = "${merge(var.default_tags, map( tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-bastion-${count.index}", "Name", "kubernetes-${var.aws_cluster_name}-bastion-${count.index}",
"Cluster", "${var.aws_cluster_name}", "Cluster", "${var.aws_cluster_name}",
"Role", "bastion-${var.aws_cluster_name}-${count.index}" "Role", "bastion-${var.aws_cluster_name}-${count.index}"
))}" ))}"
} }
/* /*
@@ -79,25 +79,25 @@ resource "aws_instance" "k8s-master" {
count = "${var.aws_kube_master_num}" count = "${var.aws_kube_master_num}"
availability_zone = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}" availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private, count.index)}" subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
vpc_security_group_ids = "${module.aws-vpc.aws_security_group}" vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
iam_instance_profile = "${module.aws-iam.kube-master-profile}" iam_instance_profile = "${module.aws-iam.kube-master-profile}"
key_name = "${var.AWS_SSH_KEY_NAME}" key_name = "${var.AWS_SSH_KEY_NAME}"
tags = "${merge(var.default_tags, map( tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-master${count.index}", "Name", "kubernetes-${var.aws_cluster_name}-master${count.index}",
"kubernetes.io/cluster/${var.aws_cluster_name}", "member", "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
"Role", "master" "Role", "master"
))}" ))}"
} }
resource "aws_elb_attachment" "attach_master_nodes" { resource "aws_elb_attachment" "attach_master_nodes" {
count = "${var.aws_kube_master_num}" count = "${var.aws_kube_master_num}"
elb = "${module.aws-elb.aws_elb_api_id}" elb = "${module.aws-elb.aws_elb_api_id}"
instance = "${element(aws_instance.k8s-master.*.id, count.index)}" instance = "${element(aws_instance.k8s-master.*.id,count.index)}"
} }
resource "aws_instance" "k8s-etcd" { resource "aws_instance" "k8s-etcd" {
@@ -106,18 +106,18 @@ resource "aws_instance" "k8s-etcd" {
count = "${var.aws_etcd_num}" count = "${var.aws_etcd_num}"
availability_zone = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}" availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private, count.index)}" subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
vpc_security_group_ids = "${module.aws-vpc.aws_security_group}" vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
key_name = "${var.AWS_SSH_KEY_NAME}" key_name = "${var.AWS_SSH_KEY_NAME}"
tags = "${merge(var.default_tags, map( tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-etcd${count.index}", "Name", "kubernetes-${var.aws_cluster_name}-etcd${count.index}",
"kubernetes.io/cluster/${var.aws_cluster_name}", "member", "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
"Role", "etcd" "Role", "etcd"
))}" ))}"
} }
resource "aws_instance" "k8s-worker" { resource "aws_instance" "k8s-worker" {
@@ -126,19 +126,19 @@ resource "aws_instance" "k8s-worker" {
count = "${var.aws_kube_worker_num}" count = "${var.aws_kube_worker_num}"
availability_zone = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}" availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private, count.index)}" subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
vpc_security_group_ids = "${module.aws-vpc.aws_security_group}" vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
iam_instance_profile = "${module.aws-iam.kube-worker-profile}" iam_instance_profile = "${module.aws-iam.kube-worker-profile}"
key_name = "${var.AWS_SSH_KEY_NAME}" key_name = "${var.AWS_SSH_KEY_NAME}"
tags = "${merge(var.default_tags, map( tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-worker${count.index}", "Name", "kubernetes-${var.aws_cluster_name}-worker${count.index}",
"kubernetes.io/cluster/${var.aws_cluster_name}", "member", "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
"Role", "worker" "Role", "worker"
))}" ))}"
} }
/* /*
@@ -148,14 +148,14 @@ resource "aws_instance" "k8s-worker" {
data "template_file" "inventory" { data "template_file" "inventory" {
template = "${file("${path.module}/templates/inventory.tpl")}" template = "${file("${path.module}/templates/inventory.tpl")}"
vars = { vars {
public_ip_address_bastion = "${join("\n", formatlist("bastion ansible_host=%s", aws_instance.bastion-server.*.public_ip))}" public_ip_address_bastion = "${join("\n",formatlist("bastion ansible_host=%s" , aws_instance.bastion-server.*.public_ip))}"
connection_strings_master = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}" connection_strings_master = "${join("\n",formatlist("%s ansible_host=%s",aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}"
connection_strings_node = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.tags.Name, aws_instance.k8s-worker.*.private_ip))}" connection_strings_node = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.tags.Name, aws_instance.k8s-worker.*.private_ip))}"
connection_strings_etcd = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.tags.Name, aws_instance.k8s-etcd.*.private_ip))}" connection_strings_etcd = "${join("\n",formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.tags.Name, aws_instance.k8s-etcd.*.private_ip))}"
list_master = "${join("\n", aws_instance.k8s-master.*.tags.Name)}" list_master = "${join("\n",aws_instance.k8s-master.*.tags.Name)}"
list_node = "${join("\n", aws_instance.k8s-worker.*.tags.Name)}" list_node = "${join("\n",aws_instance.k8s-worker.*.tags.Name)}"
list_etcd = "${join("\n", aws_instance.k8s-etcd.*.tags.Name)}" list_etcd = "${join("\n",aws_instance.k8s-etcd.*.tags.Name)}"
elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\"" elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
} }
} }
@@ -165,7 +165,7 @@ resource "null_resource" "inventories" {
command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}" command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}"
} }
triggers = { triggers {
template = "${data.template_file.inventory.rendered}" template = "${data.template_file.inventory.rendered}"
} }
} }
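The left-hand column of the hunks above is the Terraform 0.12 form of this configuration. A minimal sketch of the syntax changes it relies on, using illustrative names and values rather than the repository's exact ones:

```hcl
# Sketch only: names and values are illustrative, not the repository's exact ones.

variable "aws_cluster_name" {
  default = "devtest"
}

data "aws_availability_zones" "available" {}

module "aws-vpc" {
  # 0.12 requires an explicit "./" prefix for local module paths.
  source = "./modules/vpc"

  # Bare expressions may replace interpolation-only quoting like "${var.aws_cluster_name}".
  aws_cluster_name = var.aws_cluster_name
  aws_avail_zones  = slice(data.aws_availability_zones.available.names, 0, 2)
}

data "template_file" "inventory" {
  template = file("${path.module}/templates/inventory.tpl")

  # "vars" is an argument in 0.12 (note the "="), no longer a nested block.
  vars = {
    list_master = join("\n", ["master-0", "master-1"])
  }
}

resource "null_resource" "inventories" {
  # "triggers" likewise takes "=" instead of being written as a block.
  triggers = {
    template = data.template_file.inventory.rendered
  }
}
```

The quoted "${...}" form is still accepted by 0.12, which is why most of the surrounding lines keep it unchanged.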

View File

@@ -28,7 +28,7 @@ resource "aws_security_group_rule" "aws-allow-api-egress" {
# Create a new AWS ELB for K8S API # Create a new AWS ELB for K8S API
resource "aws_elb" "aws-elb-api" { resource "aws_elb" "aws-elb-api" {
name = "kubernetes-elb-${var.aws_cluster_name}" name = "kubernetes-elb-${var.aws_cluster_name}"
subnets = var.aws_subnet_ids_public subnets = ["${var.aws_subnet_ids_public}"]
security_groups = ["${aws_security_group.aws-elb.id}"] security_groups = ["${aws_security_group.aws-elb.id}"]
listener { listener {

View File

@@ -3,15 +3,15 @@ output "aws_vpc_id" {
} }
output "aws_subnet_ids_private" { output "aws_subnet_ids_private" {
value = aws_subnet.cluster-vpc-subnets-private.*.id value = ["${aws_subnet.cluster-vpc-subnets-private.*.id}"]
} }
output "aws_subnet_ids_public" { output "aws_subnet_ids_public" {
value = aws_subnet.cluster-vpc-subnets-public.*.id value = ["${aws_subnet.cluster-vpc-subnets-public.*.id}"]
} }
output "aws_security_group" { output "aws_security_group" {
value = aws_security_group.kubernetes.*.id value = ["${aws_security_group.kubernetes.*.id}"]
} }
output "default_tags" { output "default_tags" {

View File

@@ -2,9 +2,9 @@
aws_cluster_name = "devtest" aws_cluster_name = "devtest"
#VPC Vars #VPC Vars
aws_vpc_cidr_block = "10.250.192.0/18" aws_vpc_cidr_block = "10.250.192.0/18"
aws_cidr_subnets_private = ["10.250.192.0/20", "10.250.208.0/20"] aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"]
aws_cidr_subnets_public = ["10.250.224.0/20", "10.250.240.0/20"] aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"]
#Bastion Host #Bastion Host
aws_bastion_size = "t2.medium" aws_bastion_size = "t2.medium"
@@ -12,24 +12,24 @@ aws_bastion_size = "t2.medium"
#Kubernetes Cluster #Kubernetes Cluster
aws_kube_master_num = 3 aws_kube_master_num = 3
aws_kube_master_size = "t2.medium" aws_kube_master_size = "t2.medium"
aws_etcd_num = 3 aws_etcd_num = 3
aws_etcd_size = "t2.medium" aws_etcd_size = "t2.medium"
aws_kube_worker_num = 4 aws_kube_worker_num = 4
aws_kube_worker_size = "t2.medium" aws_kube_worker_size = "t2.medium"
#Settings AWS ELB #Settings AWS ELB
aws_elb_api_port = 6443 aws_elb_api_port = 6443
k8s_secure_api_port = 6443 k8s_secure_api_port = 6443
kube_insecure_apiserver_address = "0.0.0.0" kube_insecure_apiserver_address = "0.0.0.0"
default_tags = { default_tags = {
# Env = "devtest" # Env = "devtest"
# Product = "kubernetes" # Product = "kubernetes"
} }
inventory_file = "../../../inventory/hosts" inventory_file = "../../../inventory/hosts"

View File

@@ -224,9 +224,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|Variable | Description | |Variable | Description |
|---------|-------------| |---------|-------------|
|`cluster_name` | All OpenStack resources will use the Terraform variable `cluster_name` (default `example`) in their name to make it easier to track. For example, the first compute resource will be named `example-kubernetes-1`. | |`cluster_name` | All OpenStack resources will use the Terraform variable `cluster_name` (default `example`) in their name to make it easier to track. For example, the first compute resource will be named `example-kubernetes-1`. |
|`az_list` | List of Availability Zones available in your OpenStack cluster. |
|`network_name` | The name to be given to the internal network that will be generated | |`network_name` | The name to be given to the internal network that will be generated |
|`network_dns_domain` | (Optional) The dns_domain for the internal network that will be generated |
|`dns_nameservers`| An array of DNS name server names to be used by hosts in the internal subnet. | |`dns_nameservers`| An array of DNS name server names to be used by hosts in the internal subnet. |
|`floatingip_pool` | Name of the pool from which floating IPs will be allocated | |`floatingip_pool` | Name of the pool from which floating IPs will be allocated |
|`external_net` | UUID of the external network that will be routed to | |`external_net` | UUID of the external network that will be routed to |
@@ -248,12 +246,6 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|`k8s_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, empty by default | |`k8s_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, empty by default |
|`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default | |`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
|`wait_for_floatingip` | Let Terraform poll the instance until the floating IP has been associated, `false` by default. | |`wait_for_floatingip` | Let Terraform poll the instance until the floating IP has been associated, `false` by default. |
|`node_root_volume_size_in_gb` | Size of the root volume for nodes, 0 to use ephemeral storage |
|`master_root_volume_size_in_gb` | Size of the root volume for masters, 0 to use ephemeral storage |
|`gfs_root_volume_size_in_gb` | Size of the root volume for gluster, 0 to use ephemeral storage |
|`etcd_root_volume_size_in_gb` | Size of the root volume for etcd nodes, 0 to use ephemeral storage |
|`bastion_root_volume_size_in_gb` | Size of the root volume for bastions, 0 to use ephemeral storage |
|`use_server_group` | Create and use openstack nova servergroups, default: false |
#### Terraform state files #### Terraform state files
@@ -427,10 +419,7 @@ resolvconf_mode: host_resolvconf
``` ```
node_volume_attach_limit: 26 node_volume_attach_limit: 26
``` ```
- Disable access_ip; this will make all internal cluster traffic be sent over the local network when a floating IP is attached (by default this value is set to 1)
```
use_access_ip: 0
```
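The rows and snippet removed above document options that only exist on the newer branch. A hypothetical `inventory/$CLUSTER/cluster.tfvars` fragment exercising them could look like the following; every value is a placeholder, and the variable names follow the `variables.tf` shown later in this diff (note the README row says `use_server_group` while the variable is `use_server_groups`):

```hcl
# Hypothetical cluster.tfvars fragment; every value here is a placeholder.
az_list            = ["nova"]
network_dns_domain = "example.com."

# 0 keeps ephemeral root disks; a value > 0 boots the node from a volume of that size (GB).
master_root_volume_size_in_gb  = 50
node_root_volume_size_in_gb    = 50
etcd_root_volume_size_in_gb    = 0
gfs_root_volume_size_in_gb     = 0
bastion_root_volume_size_in_gb = 0

# Create anti-affinity server groups for masters, nodes and etcd.
use_server_groups = true

# Keep in-cluster traffic on the internal network even when floating IPs are attached.
use_access_ip = 0
```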
### Deploy Kubernetes ### Deploy Kubernetes

View File

@@ -5,13 +5,12 @@ provider "openstack" {
module "network" { module "network" {
source = "./modules/network" source = "./modules/network"
external_net = "${var.external_net}" external_net = "${var.external_net}"
network_name = "${var.network_name}" network_name = "${var.network_name}"
subnet_cidr = "${var.subnet_cidr}" subnet_cidr = "${var.subnet_cidr}"
cluster_name = "${var.cluster_name}" cluster_name = "${var.cluster_name}"
dns_nameservers = "${var.dns_nameservers}" dns_nameservers = "${var.dns_nameservers}"
network_dns_domain = "${var.network_dns_domain}" use_neutron = "${var.use_neutron}"
use_neutron = "${var.use_neutron}"
} }
module "ips" { module "ips" {
@@ -41,11 +40,6 @@ module "compute" {
number_of_bastions = "${var.number_of_bastions}" number_of_bastions = "${var.number_of_bastions}"
number_of_k8s_nodes_no_floating_ip = "${var.number_of_k8s_nodes_no_floating_ip}" number_of_k8s_nodes_no_floating_ip = "${var.number_of_k8s_nodes_no_floating_ip}"
number_of_gfs_nodes_no_floating_ip = "${var.number_of_gfs_nodes_no_floating_ip}" number_of_gfs_nodes_no_floating_ip = "${var.number_of_gfs_nodes_no_floating_ip}"
bastion_root_volume_size_in_gb = "${var.bastion_root_volume_size_in_gb}"
etcd_root_volume_size_in_gb = "${var.etcd_root_volume_size_in_gb}"
master_root_volume_size_in_gb = "${var.master_root_volume_size_in_gb}"
node_root_volume_size_in_gb = "${var.node_root_volume_size_in_gb}"
gfs_root_volume_size_in_gb = "${var.gfs_root_volume_size_in_gb}"
gfs_volume_size_in_gb = "${var.gfs_volume_size_in_gb}" gfs_volume_size_in_gb = "${var.gfs_volume_size_in_gb}"
public_key_path = "${var.public_key_path}" public_key_path = "${var.public_key_path}"
image = "${var.image}" image = "${var.image}"
@@ -70,8 +64,6 @@ module "compute" {
supplementary_node_groups = "${var.supplementary_node_groups}" supplementary_node_groups = "${var.supplementary_node_groups}"
worker_allowed_ports = "${var.worker_allowed_ports}" worker_allowed_ports = "${var.worker_allowed_ports}"
wait_for_floatingip = "${var.wait_for_floatingip}" wait_for_floatingip = "${var.wait_for_floatingip}"
use_access_ip = "${var.use_access_ip}"
use_server_groups = "${var.use_server_groups}"
network_id = "${module.network.router_id}" network_id = "${module.network.router_id}"
} }
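The hunk above removes the wiring that hands these options from the top-level kubespray.tf to the compute module. The underlying pattern is just declare-and-pass-through; a sketch with only two of the inputs and everything else elided:

```hcl
# Top-level variables.tf (sketch): declare the option once at the root...
variable "use_server_groups" {
  default = false
}

variable "use_access_ip" {
  default = 1
}

# ...then hand it straight through in kubespray.tf so the compute module can act on it.
module "compute" {
  source            = "./modules/compute"
  use_server_groups = var.use_server_groups
  use_access_ip     = var.use_access_ip
  # remaining inputs elided
}
```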

View File

@@ -1,11 +1,3 @@
data "openstack_images_image_v2" "vm_image" {
name = "${var.image}"
}
data "openstack_images_image_v2" "gfs_image" {
name = "${var.image_gfs == "" ? var.image : var.image_gfs}"
}
resource "openstack_compute_keypair_v2" "k8s" { resource "openstack_compute_keypair_v2" "k8s" {
name = "kubernetes-${var.cluster_name}" name = "kubernetes-${var.cluster_name}"
public_key = "${chomp(file(var.public_key_path))}" public_key = "${chomp(file(var.public_key_path))}"
@@ -95,27 +87,9 @@ resource "openstack_networking_secgroup_rule_v2" "worker" {
security_group_id = "${openstack_networking_secgroup_v2.worker.id}" security_group_id = "${openstack_networking_secgroup_v2.worker.id}"
} }
resource "openstack_compute_servergroup_v2" "k8s_master" {
count = "%{ if var.use_server_groups }1%{else}0%{endif}"
name = "k8s-master-srvgrp"
policies = ["anti-affinity"]
}
resource "openstack_compute_servergroup_v2" "k8s_node" {
count = "%{ if var.use_server_groups }1%{else}0%{endif}"
name = "k8s-node-srvgrp"
policies = ["anti-affinity"]
}
resource "openstack_compute_servergroup_v2" "k8s_etcd" {
count = "%{ if var.use_server_groups }1%{else}0%{endif}"
name = "k8s-etcd-srvgrp"
policies = ["anti-affinity"]
}
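The three resources removed just above create one anti-affinity server group per node role, but only when `use_server_groups` is enabled; instances then attach to the group through a dynamic `scheduler_hints` block further down in this file. A condensed sketch of the pattern, with illustrative names; the conditional count is written here as a ternary, which is equivalent to the `%{ if }` template directive used in the original:

```hcl
# Illustrative names throughout; only the master role is shown.
variable "use_server_groups" {
  type    = bool
  default = false
}

resource "openstack_compute_servergroup_v2" "k8s_master" {
  # A ternary here is equivalent to the "%{ if }" string directive in the original.
  count    = var.use_server_groups ? 1 : 0
  name     = "k8s-master-srvgrp"
  policies = ["anti-affinity"]
}

resource "openstack_compute_instance_v2" "k8s_master" {
  name       = "example-k8s-master-1"
  image_name = "example-image"
  flavor_id  = "example-flavor"

  # Emit scheduler_hints only when the server group was actually created.
  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
    content {
      group = scheduler_hints.value.id
    }
  }

  network {
    name = "example-network"
  }
}
```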
resource "openstack_compute_instance_v2" "bastion" { resource "openstack_compute_instance_v2" "bastion" {
name = "${var.cluster_name}-bastion-${count.index+1}" name = "${var.cluster_name}-bastion-${count.index+1}"
count = "${var.bastion_root_volume_size_in_gb == 0 ? var.number_of_bastions : 0}" count = "${var.number_of_bastions}"
image_name = "${var.image}" image_name = "${var.image}"
flavor_id = "${var.flavor_bastion}" flavor_id = "${var.flavor_bastion}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}" key_pair = "${openstack_compute_keypair_v2.k8s.name}"
@@ -132,43 +106,6 @@ resource "openstack_compute_instance_v2" "bastion" {
ssh_user = "${var.ssh_user}" ssh_user = "${var.ssh_user}"
kubespray_groups = "bastion" kubespray_groups = "bastion"
depends_on = "${var.network_id}" depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
}
provisioner "local-exec" {
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > group_vars/no-floating.yml"
}
}
resource "openstack_compute_instance_v2" "bastion_custom_volume_size" {
name = "${var.cluster_name}-bastion-${count.index+1}"
count = "${var.bastion_root_volume_size_in_gb > 0 ? var.number_of_bastions : 0}"
image_name = "${var.image}"
flavor_id = "${var.flavor_bastion}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
block_device {
uuid = "${data.openstack_images_image_v2.vm_image.id}"
source_type = "image"
volume_size = "${var.bastion_root_volume_size_in_gb}"
boot_index = 0
destination_type = "volume"
delete_on_termination = true
}
network {
name = "${var.network_name}"
}
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
"${element(openstack_networking_secgroup_v2.bastion.*.name, count.index)}",
]
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "bastion"
depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
} }
provisioner "local-exec" { provisioner "local-exec" {
@@ -178,7 +115,7 @@ resource "openstack_compute_instance_v2" "bastion_custom_volume_size" {
resource "openstack_compute_instance_v2" "k8s_master" { resource "openstack_compute_instance_v2" "k8s_master" {
name = "${var.cluster_name}-k8s-master-${count.index+1}" name = "${var.cluster_name}-k8s-master-${count.index+1}"
count = "${var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters : 0}" count = "${var.number_of_k8s_masters}"
availability_zone = "${element(var.az_list, count.index)}" availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}" image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}" flavor_id = "${var.flavor_k8s_master}"
@@ -192,18 +129,10 @@ resource "openstack_compute_instance_v2" "k8s_master" {
"${openstack_networking_secgroup_v2.k8s.name}", "${openstack_networking_secgroup_v2.k8s.name}",
] ]
dynamic "scheduler_hints" {
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
content {
group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
}
}
metadata = { metadata = {
ssh_user = "${var.ssh_user}" ssh_user = "${var.ssh_user}"
kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault" kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
depends_on = "${var.network_id}" depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
} }
provisioner "local-exec" { provisioner "local-exec" {
@@ -211,53 +140,9 @@ resource "openstack_compute_instance_v2" "k8s_master" {
} }
} }
resource "openstack_compute_instance_v2" "k8s_master_custom_volume_size" {
name = "${var.cluster_name}-k8s-master-${count.index+1}"
count = "${var.master_root_volume_size_in_gb > 0 ? var.number_of_k8s_masters : 0}"
availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
block_device {
uuid = "${data.openstack_images_image_v2.vm_image.id}"
source_type = "image"
volume_size = "${var.master_root_volume_size_in_gb}"
boot_index = 0
destination_type = "volume"
delete_on_termination = true
}
network {
name = "${var.network_name}"
}
security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
"${openstack_networking_secgroup_v2.k8s.name}",
]
dynamic "scheduler_hints" {
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
content {
group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
}
}
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
}
provisioner "local-exec" {
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
}
}
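Each `*_custom_volume_size` resource removed in this file is the boot-from-volume twin of an ephemeral-disk resource, and the two are made mutually exclusive through their count expressions. A reduced sketch of that pairing for the master role, with placeholder names, images and flavors:

```hcl
# Placeholder names, flavors and networks; only the master role is shown.
variable "master_root_volume_size_in_gb" {
  default = 0
}

variable "number_of_k8s_masters" {
  default = 1
}

data "openstack_images_image_v2" "vm_image" {
  name = "example-image"
}

# Ephemeral root disk: created only when no root volume size is requested.
resource "openstack_compute_instance_v2" "k8s_master" {
  count      = var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters : 0
  name       = "example-k8s-master-${count.index + 1}"
  image_name = "example-image"
  flavor_id  = "example-flavor"

  network {
    name = "example-network"
  }
}

# Boot-from-volume twin: created instead when a size greater than 0 is requested.
resource "openstack_compute_instance_v2" "k8s_master_custom_volume_size" {
  count     = var.master_root_volume_size_in_gb > 0 ? var.number_of_k8s_masters : 0
  name      = "example-k8s-master-${count.index + 1}"
  flavor_id = "example-flavor"

  block_device {
    uuid                  = data.openstack_images_image_v2.vm_image.id
    source_type           = "image"
    volume_size           = var.master_root_volume_size_in_gb
    boot_index            = 0
    destination_type      = "volume"
    delete_on_termination = true
  }

  network {
    name = "example-network"
  }
}
```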
resource "openstack_compute_instance_v2" "k8s_master_no_etcd" { resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
name = "${var.cluster_name}-k8s-master-ne-${count.index+1}" name = "${var.cluster_name}-k8s-master-ne-${count.index+1}"
count = "${var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_etcd : 0}" count = "${var.number_of_k8s_masters_no_etcd}"
availability_zone = "${element(var.az_list, count.index)}" availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}" image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}" flavor_id = "${var.flavor_k8s_master}"
@@ -270,63 +155,11 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}", security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
"${openstack_networking_secgroup_v2.k8s.name}", "${openstack_networking_secgroup_v2.k8s.name}",
] ]
dynamic "scheduler_hints" {
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
content {
group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
}
}
metadata = { metadata = {
ssh_user = "${var.ssh_user}" ssh_user = "${var.ssh_user}"
kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault" kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
depends_on = "${var.network_id}" depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
}
provisioner "local-exec" {
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
}
}
resource "openstack_compute_instance_v2" "k8s_master_no_etcd_custom_volume_size" {
name = "${var.cluster_name}-k8s-master-ne-${count.index+1}"
count = "${var.master_root_volume_size_in_gb > 0 ? var.number_of_k8s_masters_no_etcd : 0}"
availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
block_device {
uuid = "${data.openstack_images_image_v2.vm_image.id}"
source_type = "image"
volume_size = "${var.master_root_volume_size_in_gb}"
boot_index = 0
destination_type = "volume"
delete_on_termination = true
}
network {
name = "${var.network_name}"
}
security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
"${openstack_networking_secgroup_v2.k8s.name}",
]
dynamic "scheduler_hints" {
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
content {
group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
}
}
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
} }
provisioner "local-exec" { provisioner "local-exec" {
@@ -336,7 +169,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd_custom_volume_size"
resource "openstack_compute_instance_v2" "etcd" { resource "openstack_compute_instance_v2" "etcd" {
name = "${var.cluster_name}-etcd-${count.index+1}" name = "${var.cluster_name}-etcd-${count.index+1}"
count = "${var.etcd_root_volume_size_in_gb == 0 ? var.number_of_etcd : 0}" count = "${var.number_of_etcd}"
availability_zone = "${element(var.az_list, count.index)}" availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}" image_name = "${var.image}"
flavor_id = "${var.flavor_etcd}" flavor_id = "${var.flavor_etcd}"
@@ -348,62 +181,16 @@ resource "openstack_compute_instance_v2" "etcd" {
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"] security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]
dynamic "scheduler_hints" {
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : []
content {
group = "${openstack_compute_servergroup_v2.k8s_etcd[0].id}"
}
}
metadata = { metadata = {
ssh_user = "${var.ssh_user}" ssh_user = "${var.ssh_user}"
kubespray_groups = "etcd,vault,no-floating" kubespray_groups = "etcd,vault,no-floating"
depends_on = "${var.network_id}" depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
}
}
resource "openstack_compute_instance_v2" "etcd_custom_volume_size" {
name = "${var.cluster_name}-etcd-${count.index+1}"
count = "${var.etcd_root_volume_size_in_gb > 0 ? var.number_of_etcd : 0}"
availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_etcd}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
block_device {
uuid = "${data.openstack_images_image_v2.vm_image.id}"
source_type = "image"
volume_size = "${var.etcd_root_volume_size_in_gb}"
boot_index = 0
destination_type = "volume"
delete_on_termination = true
}
network {
name = "${var.network_name}"
}
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]
dynamic "scheduler_hints" {
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : []
content {
group = "${openstack_compute_servergroup_v2.k8s_etcd[0].id}"
}
}
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "etcd,vault,no-floating"
depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
} }
} }
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" { resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
name = "${var.cluster_name}-k8s-master-nf-${count.index+1}" name = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
count = "${var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_floating_ip : 0}" count = "${var.number_of_k8s_masters_no_floating_ip}"
availability_zone = "${element(var.az_list, count.index)}" availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}" image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}" flavor_id = "${var.flavor_k8s_master}"
@@ -416,65 +203,17 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}", security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
"${openstack_networking_secgroup_v2.k8s.name}", "${openstack_networking_secgroup_v2.k8s.name}",
] ]
dynamic "scheduler_hints" {
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
content {
group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
}
}
metadata = { metadata = {
ssh_user = "${var.ssh_user}" ssh_user = "${var.ssh_user}"
kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating" kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
depends_on = "${var.network_id}" depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
}
}
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_custom_volume_size" {
name = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
count = "${var.master_root_volume_size_in_gb > 0 ? var.number_of_k8s_masters_no_floating_ip : 0}"
availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
block_device {
uuid = "${data.openstack_images_image_v2.vm_image.id}"
source_type = "image"
volume_size = "${var.master_root_volume_size_in_gb}"
boot_index = 0
destination_type = "volume"
delete_on_termination = true
}
network {
name = "${var.network_name}"
}
security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
"${openstack_networking_secgroup_v2.k8s.name}",
]
dynamic "scheduler_hints" {
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
content {
group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
}
}
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
} }
} }
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" { resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
name = "${var.cluster_name}-k8s-master-ne-nf-${count.index+1}" name = "${var.cluster_name}-k8s-master-ne-nf-${count.index+1}"
count = "${var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_floating_ip_no_etcd : 0}" count = "${var.number_of_k8s_masters_no_floating_ip_no_etcd}"
availability_zone = "${element(var.az_list, count.index)}" availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}" image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}" flavor_id = "${var.flavor_k8s_master}"
@@ -487,65 +226,17 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}", security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
"${openstack_networking_secgroup_v2.k8s.name}", "${openstack_networking_secgroup_v2.k8s.name}",
] ]
dynamic "scheduler_hints" {
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
content {
group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
}
}
metadata = { metadata = {
ssh_user = "${var.ssh_user}" ssh_user = "${var.ssh_user}"
kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating" kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
depends_on = "${var.network_id}" depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
}
}
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd_custom_volume_size" {
name = "${var.cluster_name}-k8s-master-ne-nf-${count.index+1}"
count = "${var.master_root_volume_size_in_gb > 0 ? var.number_of_k8s_masters_no_floating_ip_no_etcd : 0}"
availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
block_device {
uuid = "${data.openstack_images_image_v2.vm_image.id}"
source_type = "image"
volume_size = "${var.master_root_volume_size_in_gb}"
boot_index = 0
destination_type = "volume"
delete_on_termination = true
}
network {
name = "${var.network_name}"
}
security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
"${openstack_networking_secgroup_v2.k8s.name}",
]
dynamic "scheduler_hints" {
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
content {
group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
}
}
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
} }
} }
resource "openstack_compute_instance_v2" "k8s_node" { resource "openstack_compute_instance_v2" "k8s_node" {
name = "${var.cluster_name}-k8s-node-${count.index+1}" name = "${var.cluster_name}-k8s-node-${count.index+1}"
count = "${var.node_root_volume_size_in_gb == 0 ? var.number_of_k8s_nodes : 0}" count = "${var.number_of_k8s_nodes}"
availability_zone = "${element(var.az_list, count.index)}" availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}" image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_node}" flavor_id = "${var.flavor_k8s_node}"
@@ -559,62 +250,10 @@ resource "openstack_compute_instance_v2" "k8s_node" {
"${openstack_networking_secgroup_v2.worker.name}", "${openstack_networking_secgroup_v2.worker.name}",
] ]
dynamic "scheduler_hints" {
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
content {
group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
}
}
metadata = { metadata = {
ssh_user = "${var.ssh_user}" ssh_user = "${var.ssh_user}"
kubespray_groups = "kube-node,k8s-cluster,${var.supplementary_node_groups}" kubespray_groups = "kube-node,k8s-cluster,${var.supplementary_node_groups}"
depends_on = "${var.network_id}" depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
}
provisioner "local-exec" {
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > group_vars/no-floating.yml"
}
}
resource "openstack_compute_instance_v2" "k8s_node_custom_volume_size" {
name = "${var.cluster_name}-k8s-node-${count.index+1}"
count = "${var.node_root_volume_size_in_gb > 0 ? var.number_of_k8s_nodes : 0}"
availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_node}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
block_device {
uuid = "${data.openstack_images_image_v2.vm_image.id}"
source_type = "image"
volume_size = "${var.node_root_volume_size_in_gb}"
boot_index = 0
destination_type = "volume"
delete_on_termination = true
}
network {
name = "${var.network_name}"
}
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
"${openstack_networking_secgroup_v2.worker.name}",
]
dynamic "scheduler_hints" {
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
content {
group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
}
}
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "kube-node,k8s-cluster,${var.supplementary_node_groups}"
depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
} }
provisioner "local-exec" { provisioner "local-exec" {
@@ -624,7 +263,7 @@ resource "openstack_compute_instance_v2" "k8s_node_custom_volume_size" {
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" { resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
name = "${var.cluster_name}-k8s-node-nf-${count.index+1}" name = "${var.cluster_name}-k8s-node-nf-${count.index+1}"
count = "${var.node_root_volume_size_in_gb == 0 ? var.number_of_k8s_nodes_no_floating_ip : 0}" count = "${var.number_of_k8s_nodes_no_floating_ip}"
availability_zone = "${element(var.az_list, count.index)}" availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}" image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_node}" flavor_id = "${var.flavor_k8s_node}"
@@ -637,133 +276,51 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}", security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
"${openstack_networking_secgroup_v2.worker.name}", "${openstack_networking_secgroup_v2.worker.name}",
] ]
dynamic "scheduler_hints" {
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
content {
group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
}
}
metadata = { metadata = {
ssh_user = "${var.ssh_user}" ssh_user = "${var.ssh_user}"
kubespray_groups = "kube-node,k8s-cluster,no-floating,${var.supplementary_node_groups}" kubespray_groups = "kube-node,k8s-cluster,no-floating,${var.supplementary_node_groups}"
depends_on = "${var.network_id}" depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
}
}
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip_custom_volume_size" {
name = "${var.cluster_name}-k8s-node-nf-${count.index+1}"
count = "${var.node_root_volume_size_in_gb > 0 ? var.number_of_k8s_nodes_no_floating_ip : 0}"
availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_node}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
block_device {
uuid = "${data.openstack_images_image_v2.vm_image.id}"
source_type = "image"
volume_size = "${var.node_root_volume_size_in_gb}"
boot_index = 0
destination_type = "volume"
delete_on_termination = true
}
network {
name = "${var.network_name}"
}
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
"${openstack_networking_secgroup_v2.worker.name}",
]
dynamic "scheduler_hints" {
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
content {
group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
}
}
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "kube-node,k8s-cluster,no-floating,${var.supplementary_node_groups}"
depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
} }
} }
resource "openstack_compute_floatingip_associate_v2" "bastion" { resource "openstack_compute_floatingip_associate_v2" "bastion" {
count = "${var.bastion_root_volume_size_in_gb == 0 ? var.number_of_bastions : 0}" count = "${var.number_of_bastions}"
floating_ip = "${var.bastion_fips[count.index]}" floating_ip = "${var.bastion_fips[count.index]}"
instance_id = "${element(openstack_compute_instance_v2.bastion.*.id, count.index)}" instance_id = "${element(openstack_compute_instance_v2.bastion.*.id, count.index)}"
wait_until_associated = "${var.wait_for_floatingip}" wait_until_associated = "${var.wait_for_floatingip}"
} }
resource "openstack_compute_floatingip_associate_v2" "bastion_custom_volume_size" {
count = "${var.bastion_root_volume_size_in_gb > 0 ? var.number_of_bastions : 0}"
floating_ip = "${var.bastion_fips[count.index]}"
instance_id = "${element(openstack_compute_instance_v2.bastion_custom_volume_size.*.id, count.index)}"
wait_until_associated = "${var.wait_for_floatingip}"
}
resource "openstack_compute_floatingip_associate_v2" "k8s_master" { resource "openstack_compute_floatingip_associate_v2" "k8s_master" {
count = "${var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters : 0}" count = "${var.number_of_k8s_masters}"
instance_id = "${element(openstack_compute_instance_v2.k8s_master.*.id, count.index)}" instance_id = "${element(openstack_compute_instance_v2.k8s_master.*.id, count.index)}"
floating_ip = "${var.k8s_master_fips[count.index]}" floating_ip = "${var.k8s_master_fips[count.index]}"
wait_until_associated = "${var.wait_for_floatingip}" wait_until_associated = "${var.wait_for_floatingip}"
} }
resource "openstack_compute_floatingip_associate_v2" "k8s_master_custom_volume_size" {
count = "${var.master_root_volume_size_in_gb > 0 ? var.number_of_k8s_masters : 0}"
instance_id = "${element(openstack_compute_instance_v2.k8s_master_custom_volume_size.*.id, count.index)}"
floating_ip = "${var.k8s_master_fips[count.index]}"
wait_until_associated = "${var.wait_for_floatingip}"
}
resource "openstack_compute_floatingip_associate_v2" "k8s_master_no_etcd" { resource "openstack_compute_floatingip_associate_v2" "k8s_master_no_etcd" {
count = "${var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_etcd : 0}" count = "${var.number_of_k8s_masters_no_etcd}"
instance_id = "${element(openstack_compute_instance_v2.k8s_master_no_etcd.*.id, count.index)}" instance_id = "${element(openstack_compute_instance_v2.k8s_master_no_etcd.*.id, count.index)}"
floating_ip = "${var.k8s_master_no_etcd_fips[count.index]}" floating_ip = "${var.k8s_master_no_etcd_fips[count.index]}"
} }
resource "openstack_compute_floatingip_associate_v2" "k8s_master_no_etcd_custom_volume_size" {
count = "${var.master_root_volume_size_in_gb > 0 ? var.number_of_k8s_masters_no_etcd : 0}"
instance_id = "${element(openstack_compute_instance_v2.k8s_master_no_etcd_custom_volume_size.*.id, count.index)}"
floating_ip = "${var.k8s_master_no_etcd_fips[count.index]}"
}
resource "openstack_compute_floatingip_associate_v2" "k8s_node" { resource "openstack_compute_floatingip_associate_v2" "k8s_node" {
count = "${var.node_root_volume_size_in_gb == 0 ? var.number_of_k8s_nodes : 0}" count = "${var.number_of_k8s_nodes}"
floating_ip = "${var.k8s_node_fips[count.index]}" floating_ip = "${var.k8s_node_fips[count.index]}"
instance_id = "${element(openstack_compute_instance_v2.k8s_node.*.id, count.index)}" instance_id = "${element(openstack_compute_instance_v2.k8s_node.*.id, count.index)}"
wait_until_associated = "${var.wait_for_floatingip}" wait_until_associated = "${var.wait_for_floatingip}"
} }
resource "openstack_compute_floatingip_associate_v2" "k8s_node_custom_volume_size" {
count = "${var.node_root_volume_size_in_gb > 0 ? var.number_of_k8s_nodes : 0}"
floating_ip = "${var.k8s_node_fips[count.index]}"
instance_id = "${element(openstack_compute_instance_v2.k8s_node_custom_volume_size.*.id, count.index)}"
wait_until_associated = "${var.wait_for_floatingip}"
}
resource "openstack_blockstorage_volume_v2" "glusterfs_volume" { resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
name = "${var.cluster_name}-glusterfs_volume-${count.index+1}" name = "${var.cluster_name}-glusterfs_volume-${count.index+1}"
count = "${var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0}" count = "${var.number_of_gfs_nodes_no_floating_ip}"
description = "Non-ephemeral volume for GlusterFS"
size = "${var.gfs_volume_size_in_gb}"
}
resource "openstack_blockstorage_volume_v2" "glusterfs_volume_custom_volume_size" {
name = "${var.cluster_name}-glusterfs_volume-${count.index+1}"
count = "${var.gfs_root_volume_size_in_gb > 0 ? var.number_of_gfs_nodes_no_floating_ip : 0}"
description = "Non-ephemeral volume for GlusterFS" description = "Non-ephemeral volume for GlusterFS"
size = "${var.gfs_volume_size_in_gb}" size = "${var.gfs_volume_size_in_gb}"
} }
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" { resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
name = "${var.cluster_name}-gfs-node-nf-${count.index+1}" name = "${var.cluster_name}-gfs-node-nf-${count.index+1}"
count = "${var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0}" count = "${var.number_of_gfs_nodes_no_floating_ip}"
availability_zone = "${element(var.az_list, count.index)}" availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image_gfs}" image_name = "${var.image_gfs}"
flavor_id = "${var.flavor_gfs_node}" flavor_id = "${var.flavor_gfs_node}"
@@ -774,68 +331,16 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
} }
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"] security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]
dynamic "scheduler_hints" {
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
content {
group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
}
}
metadata = { metadata = {
ssh_user = "${var.ssh_user_gfs}" ssh_user = "${var.ssh_user_gfs}"
kubespray_groups = "gfs-cluster,network-storage,no-floating" kubespray_groups = "gfs-cluster,network-storage,no-floating"
depends_on = "${var.network_id}" depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
}
}
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip_custom_volume_size" {
name = "${var.cluster_name}-gfs-node-nf-${count.index+1}"
count = "${var.gfs_root_volume_size_in_gb > 0 ? var.number_of_gfs_nodes_no_floating_ip : 0}"
availability_zone = "${element(var.az_list, count.index)}"
image_name = "${var.image}"
flavor_id = "${var.flavor_gfs_node}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
block_device {
uuid = "${data.openstack_images_image_v2.gfs_image.id}"
source_type = "image"
volume_size = "${var.gfs_root_volume_size_in_gb}"
boot_index = 0
destination_type = "volume"
delete_on_termination = true
}
network {
name = "${var.network_name}"
}
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]
dynamic "scheduler_hints" {
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
content {
group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
}
}
metadata = {
ssh_user = "${var.ssh_user_gfs}"
kubespray_groups = "gfs-cluster,network-storage,no-floating"
depends_on = "${var.network_id}"
use_access_ip = "${var.use_access_ip}"
} }
} }
resource "openstack_compute_volume_attach_v2" "glusterfs_volume" { resource "openstack_compute_volume_attach_v2" "glusterfs_volume" {
count = "${var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0}" count = "${var.number_of_gfs_nodes_no_floating_ip}"
instance_id = "${element(openstack_compute_instance_v2.glusterfs_node_no_floating_ip.*.id, count.index)}" instance_id = "${element(openstack_compute_instance_v2.glusterfs_node_no_floating_ip.*.id, count.index)}"
volume_id = "${element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index)}" volume_id = "${element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index)}"
} }
resource "openstack_compute_volume_attach_v2" "glusterfs_volume_custom_root_volume_size" {
count = "${var.gfs_root_volume_size_in_gb > 0 ? var.number_of_gfs_nodes_no_floating_ip : 0}"
instance_id = "${element(openstack_compute_instance_v2.glusterfs_node_no_floating_ip_custom_volume_size.*.id, count.index)}"
volume_id = "${element(openstack_blockstorage_volume_v2.glusterfs_volume_custom_volume_size.*.id, count.index)}"
}

View File

@@ -22,16 +22,6 @@ variable "number_of_bastions" {}
variable "number_of_gfs_nodes_no_floating_ip" {} variable "number_of_gfs_nodes_no_floating_ip" {}
variable "bastion_root_volume_size_in_gb" {}
variable "etcd_root_volume_size_in_gb" {}
variable "master_root_volume_size_in_gb" {}
variable "node_root_volume_size_in_gb" {}
variable "gfs_root_volume_size_in_gb" {}
variable "gfs_volume_size_in_gb" {} variable "gfs_volume_size_in_gb" {}
variable "public_key_path" {} variable "public_key_path" {}
@@ -105,9 +95,3 @@ variable "supplementary_node_groups" {
variable "worker_allowed_ports" { variable "worker_allowed_ports" {
type = "list" type = "list"
} }
variable "use_access_ip" {}
variable "use_server_groups" {
type = bool
}

View File

@@ -14,4 +14,4 @@ variable "network_name" {}
variable "router_id" { variable "router_id" {
default = "" default = ""
} }

View File

@@ -8,7 +8,6 @@ resource "openstack_networking_router_v2" "k8s" {
resource "openstack_networking_network_v2" "k8s" { resource "openstack_networking_network_v2" "k8s" {
name = "${var.network_name}" name = "${var.network_name}"
count = "${var.use_neutron}" count = "${var.use_neutron}"
dns_domain = var.network_dns_domain != null ? "${var.network_dns_domain}" : null
admin_state_up = "true" admin_state_up = "true"
} }
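The `dns_domain` line removed above hangs an optional attribute off a variable that defaults to `null`. A small sketch of the idea; since assigning `null` in 0.12 is treated as leaving the argument unset, the conditional mirrors the original and assigning the variable directly would behave the same:

```hcl
# Sketch with illustrative values.
variable "network_dns_domain" {
  type    = string
  default = null
}

variable "use_neutron" {
  default = 1
}

resource "openstack_networking_network_v2" "k8s" {
  name  = "example-network"
  count = var.use_neutron

  # null leaves the attribute unset, so nothing changes for users who never set the variable.
  dns_domain     = var.network_dns_domain != null ? var.network_dns_domain : null
  admin_state_up = "true"
}
```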

View File

@@ -2,8 +2,6 @@ variable "external_net" {}
variable "network_name" {} variable "network_name" {}
variable "network_dns_domain" {}
variable "cluster_name" {} variable "cluster_name" {}
variable "dns_nameservers" { variable "dns_nameservers" {

View File

@@ -1,9 +1,6 @@
# your Kubernetes cluster name here # your Kubernetes cluster name here
cluster_name = "i-didnt-read-the-docs" cluster_name = "i-didnt-read-the-docs"
# list of availability zones available in your OpenStack cluster
#az_list = ["nova"]
# SSH key to use for access to nodes # SSH key to use for access to nodes
public_key_path = "~/.ssh/id_rsa.pub" public_key_path = "~/.ssh/id_rsa.pub"

View File

@@ -44,26 +44,6 @@ variable "number_of_gfs_nodes_no_floating_ip" {
default = 0 default = 0
} }
variable "bastion_root_volume_size_in_gb" {
default = 0
}
variable "etcd_root_volume_size_in_gb" {
default = 0
}
variable "master_root_volume_size_in_gb" {
default = 0
}
variable "node_root_volume_size_in_gb" {
default = 0
}
variable "gfs_root_volume_size_in_gb" {
default = 0
}
variable "gfs_volume_size_in_gb" { variable "gfs_volume_size_in_gb" {
default = 75 default = 75
} }
@@ -75,12 +55,12 @@ variable "public_key_path" {
variable "image" { variable "image" {
description = "the image to use" description = "the image to use"
default = "" default = "ubuntu-14.04"
} }
variable "image_gfs" { variable "image_gfs" {
description = "Glance image to use for GlusterFS" description = "Glance image to use for GlusterFS"
default = "" default = "ubuntu-16.04"
} }
variable "ssh_user" { variable "ssh_user" {
@@ -123,12 +103,6 @@ variable "network_name" {
default = "internal" default = "internal"
} }
variable "network_dns_domain" {
description = "dns_domain for the internal network"
type = "string"
default = null
}
variable "use_neutron" { variable "use_neutron" {
description = "Use neutron" description = "Use neutron"
default = 1 default = 1
@@ -206,11 +180,3 @@ variable "worker_allowed_ports" {
}, },
] ]
} }
variable "use_access_ip" {
default = 1
}
variable "use_server_groups" {
default = false
}
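On the newer branch the image defaults also become empty strings, and the compute module (see the data sources removed near the top of modules/compute/main.tf earlier in this diff) treats the empty string as "unset", falling back to the main image for GlusterFS nodes. A sketch of that convention:

```hcl
# Illustrative sketch of the fallback convention.
variable "image" {
  description = "the image to use"
  default     = ""
}

variable "image_gfs" {
  description = "Glance image to use for GlusterFS"
  default     = ""
}

# An empty image_gfs means "use the same image as everything else".
data "openstack_images_image_v2" "gfs_image" {
  name = var.image_gfs == "" ? var.image : var.image_gfs
}
```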

View File

@@ -38,7 +38,7 @@ now six total etcd replicas.
## SSH Key Setup ## SSH Key Setup
An SSH keypair is required so Ansible can access the newly provisioned nodes (bare metal Packet hosts). By default, the public SSH key defined in cluster.tfvars (~/.ssh/id_rsa.pub) will be installed in authorized_keys on the newly provisioned nodes. Terraform will upload this public key and then distribute it to all the nodes. If you have already set this public key in Packet (i.e. via the portal), then set the public keyfile name in cluster.tfvars to blank to prevent the duplicate key from being uploaded, which would cause an error. An SSH keypair is required so Ansible can access the newly provisioned nodes (bare metal Packet hosts). By default, the public SSH key defined in cluster.tf (~/.ssh/id_rsa.pub) will be installed in authorized_keys on the newly provisioned nodes. Terraform will upload this public key and then distribute it to all the nodes. If you have already set this public key in Packet (i.e. via the portal), then set the public keyfile name in cluster.tf to blank to prevent the duplicate key from being uploaded, which would cause an error.
If you don't already have a keypair generated (~/.ssh/id_rsa and ~/.ssh/id_rsa.pub), then a new keypair can be generated with the command: If you don't already have a keypair generated (~/.ssh/id_rsa and ~/.ssh/id_rsa.pub), then a new keypair can be generated with the command:
@@ -72,7 +72,7 @@ If someone gets this key, they can startup/shutdown hosts in your project!
For more information on how to generate an API key or find your project ID, please see: For more information on how to generate an API key or find your project ID, please see:
https://support.packet.com/kb/articles/api-integrations https://support.packet.com/kb/articles/api-integrations
The Packet Project ID associated with the key will be set later in cluster.tfvars. The Packet Project ID associated with the key will be set later in cluster.tf.
For more information about the API, please see: For more information about the API, please see:
https://www.packet.com/developers/api/ https://www.packet.com/developers/api/
@@ -88,7 +88,7 @@ Note that to deploy several clusters within the same project you need to use [te
The construction of the cluster is driven by values found in The construction of the cluster is driven by values found in
[variables.tf](variables.tf). [variables.tf](variables.tf).
For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`. For your cluster, edit `inventory/$CLUSTER/cluster.tf`.
The `cluster_name` is used to set a tag on each server deployed as part of this cluster. The `cluster_name` is used to set a tag on each server deployed as part of this cluster.
This helps when identifying which hosts are associated with each cluster. This helps when identifying which hosts are associated with each cluster.
@@ -138,7 +138,7 @@ This should finish fairly quickly telling you Terraform has successfully initial
You can apply the Terraform configuration to your cluster with the following command You can apply the Terraform configuration to your cluster with the following command
issued from your cluster's inventory directory (`inventory/$CLUSTER`): issued from your cluster's inventory directory (`inventory/$CLUSTER`):
```ShellSession ```ShellSession
$ terraform apply -var-file=cluster.tfvars ../../contrib/terraform/packet $ terraform apply -var-file=cluster.tf ../../contrib/terraform/packet
$ export ANSIBLE_HOST_KEY_CHECKING=False $ export ANSIBLE_HOST_KEY_CHECKING=False
$ ansible-playbook -i hosts ../../cluster.yml $ ansible-playbook -i hosts ../../cluster.yml
``` ```
@@ -147,7 +147,7 @@ $ ansible-playbook -i hosts ../../cluster.yml
You can destroy your new cluster with the following command issued from the cluster's inventory directory: You can destroy your new cluster with the following command issued from the cluster's inventory directory:
```ShellSession ```ShellSession
$ terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/packet $ terraform destroy -var-file=cluster.tf ../../contrib/terraform/packet
``` ```
If you've started the Ansible run, it may also be a good idea to do some manual cleanup: If you've started the Ansible run, it may also be a good idea to do some manual cleanup:

View File

@@ -4,60 +4,59 @@ provider "packet" {
} }
resource "packet_ssh_key" "k8s" { resource "packet_ssh_key" "k8s" {
count = var.public_key_path != "" ? 1 : 0 count = "${var.public_key_path != "" ? 1 : 0}"
name = "kubernetes-${var.cluster_name}" name = "kubernetes-${var.cluster_name}"
public_key = chomp(file(var.public_key_path)) public_key = "${chomp(file(var.public_key_path))}"
} }
resource "packet_device" "k8s_master" { resource "packet_device" "k8s_master" {
depends_on = [packet_ssh_key.k8s] depends_on = ["packet_ssh_key.k8s"]
count = var.number_of_k8s_masters count = "${var.number_of_k8s_masters}"
hostname = "${var.cluster_name}-k8s-master-${count.index + 1}" hostname = "${var.cluster_name}-k8s-master-${count.index+1}"
plan = var.plan_k8s_masters plan = "${var.plan_k8s_masters}"
facilities = [var.facility] facilities = ["${var.facility}"]
operating_system = var.operating_system operating_system = "${var.operating_system}"
billing_cycle = var.billing_cycle billing_cycle = "${var.billing_cycle}"
project_id = var.packet_project_id project_id = "${var.packet_project_id}"
tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-master", "etcd", "kube-node"] tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-master", "etcd", "kube-node"]
} }
resource "packet_device" "k8s_master_no_etcd" { resource "packet_device" "k8s_master_no_etcd" {
depends_on = [packet_ssh_key.k8s] depends_on = ["packet_ssh_key.k8s"]
count = var.number_of_k8s_masters_no_etcd count = "${var.number_of_k8s_masters_no_etcd}"
hostname = "${var.cluster_name}-k8s-master-${count.index + 1}" hostname = "${var.cluster_name}-k8s-master-${count.index+1}"
plan = var.plan_k8s_masters_no_etcd plan = "${var.plan_k8s_masters_no_etcd}"
facilities = [var.facility] facilities = ["${var.facility}"]
operating_system = var.operating_system operating_system = "${var.operating_system}"
billing_cycle = var.billing_cycle billing_cycle = "${var.billing_cycle}"
project_id = var.packet_project_id project_id = "${var.packet_project_id}"
tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-master"] tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-master"]
} }
resource "packet_device" "k8s_etcd" { resource "packet_device" "k8s_etcd" {
depends_on = [packet_ssh_key.k8s] depends_on = ["packet_ssh_key.k8s"]
count = var.number_of_etcd count = "${var.number_of_etcd}"
hostname = "${var.cluster_name}-etcd-${count.index + 1}" hostname = "${var.cluster_name}-etcd-${count.index+1}"
plan = var.plan_etcd plan = "${var.plan_etcd}"
facilities = [var.facility] facilities = ["${var.facility}"]
operating_system = var.operating_system operating_system = "${var.operating_system}"
billing_cycle = var.billing_cycle billing_cycle = "${var.billing_cycle}"
project_id = var.packet_project_id project_id = "${var.packet_project_id}"
tags = ["cluster-${var.cluster_name}", "etcd"] tags = ["cluster-${var.cluster_name}", "etcd"]
} }
resource "packet_device" "k8s_node" { resource "packet_device" "k8s_node" {
depends_on = [packet_ssh_key.k8s] depends_on = ["packet_ssh_key.k8s"]
count = var.number_of_k8s_nodes count = "${var.number_of_k8s_nodes}"
hostname = "${var.cluster_name}-k8s-node-${count.index + 1}" hostname = "${var.cluster_name}-k8s-node-${count.index+1}"
plan = var.plan_k8s_nodes plan = "${var.plan_k8s_nodes}"
facilities = [var.facility] facilities = ["${var.facility}"]
operating_system = var.operating_system operating_system = "${var.operating_system}"
billing_cycle = var.billing_cycle billing_cycle = "${var.billing_cycle}"
project_id = var.packet_project_id project_id = "${var.packet_project_id}"
tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-node"] tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-node"]
} }


@@ -1,16 +1,15 @@
output "k8s_masters" { output "k8s_masters" {
value = packet_device.k8s_master.*.access_public_ipv4 value = "${packet_device.k8s_master.*.access_public_ipv4}"
} }
output "k8s_masters_no_etc" { output "k8s_masters_no_etc" {
value = packet_device.k8s_master_no_etcd.*.access_public_ipv4 value = "${packet_device.k8s_master_no_etcd.*.access_public_ipv4}"
} }
output "k8s_etcds" { output "k8s_etcds" {
value = packet_device.k8s_etcd.*.access_public_ipv4 value = "${packet_device.k8s_etcd.*.access_public_ipv4}"
} }
output "k8s_nodes" { output "k8s_nodes" {
value = packet_device.k8s_node.*.access_public_ipv4 value = "${packet_device.k8s_node.*.access_public_ipv4}"
} }


@@ -54,4 +54,3 @@ variable "number_of_etcd" {
variable "number_of_k8s_nodes" { variable "number_of_k8s_nodes" {
default = 0 default = 0
} }


@@ -1,4 +0,0 @@
terraform {
required_version = ">= 0.12"
}


@@ -73,7 +73,7 @@ def iterresources(filenames):
# In version 4 the structure changes so we need to iterate # In version 4 the structure changes so we need to iterate
# each instance inside the resource branch. # each instance inside the resource branch.
for resource in state['resources']: for resource in state['resources']:
name = resource['provider'].split('.')[-1] name = resource['module'].split('.')[-1]
for instance in resource['instances']: for instance in resource['instances']:
key = "{}.{}".format(resource['type'], resource['name']) key = "{}.{}".format(resource['type'], resource['name'])
if 'index_key' in instance: if 'index_key' in instance:
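For readers not familiar with Terraform state formats, the hunk above is the part of `terraform.py` that flattens a version-4 state into one entry per resource instance. A minimal, self-contained sketch of that iteration follows; the `state` dictionary is an invented example, not taken from this repository.

```python
# Minimal sketch of walking a Terraform v4 state the way the dynamic
# inventory script does: one (key, attributes) pair per resource instance.
# The `state` dict is a made-up, trimmed-down example.
state = {
    "version": 4,
    "resources": [
        {
            "type": "packet_device",
            "name": "k8s_node",
            "provider": "provider.packet",
            "instances": [
                {"index_key": 0, "attributes": {"hostname": "example-node-1"}},
                {"index_key": 1, "attributes": {"hostname": "example-node-2"}},
            ],
        },
    ],
}

for resource in state["resources"]:
    for instance in resource["instances"]:
        key = "{}.{}".format(resource["type"], resource["name"])
        if "index_key" in instance:
            key = "{}.{}".format(key, instance["index_key"])
        print(key, instance["attributes"])
```

Whether the group name is derived from `provider` or `module` is exactly what the changed line in this hunk toggles between the two sides of the comparison.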
@@ -182,9 +182,6 @@ def parse_list(source, prefix, sep='.'):
def parse_bool(string_form): def parse_bool(string_form):
if type(string_form) is bool:
return string_form
token = string_form.lower()[0] token = string_form.lower()[0]
if token == 't': if token == 't':
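The hunk only shows the first lines of `parse_bool`; below is a hedged, self-contained reconstruction of what such a helper typically does. The `False` branch and the error handling are assumptions based on the visible `if token == 't':` check, not a verbatim copy of the script.

```python
def parse_bool(string_form):
    # One side of the diff short-circuits when the state already holds a
    # real boolean (Terraform 0.12 states can contain native bools).
    if type(string_form) is bool:
        return string_form
    # Otherwise decide based on the first character of the lowered string.
    token = string_form.lower()[0]
    if token == 't':
        return True
    if token == 'f':
        return False
    # Assumed error handling; the original's behaviour for other values
    # is not visible in this hunk.
    raise ValueError('could not convert %r to a bool' % string_form)


print(parse_bool("true"), parse_bool("False"), parse_bool(True))
```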
@@ -213,7 +210,7 @@ def packet_device(resource, tfvars=None):
'state': raw_attrs['state'], 'state': raw_attrs['state'],
# ansible # ansible
'ansible_ssh_host': raw_attrs['network.0.address'], 'ansible_ssh_host': raw_attrs['network.0.address'],
'ansible_ssh_user': 'root', # Use root by default in packet 'ansible_ssh_user': 'root', # it's always "root" on Packet
# generic # generic
'ipv4_address': raw_attrs['network.0.address'], 'ipv4_address': raw_attrs['network.0.address'],
'public_ipv4': raw_attrs['network.0.address'], 'public_ipv4': raw_attrs['network.0.address'],
@@ -223,10 +220,6 @@ def packet_device(resource, tfvars=None):
'provider': 'packet', 'provider': 'packet',
} }
if raw_attrs['operating_system'] == 'coreos_stable':
# For CoreOS set the ssh_user to core
attrs.update({'ansible_ssh_user': 'core'})
# add groups based on attrs # add groups based on attrs
groups.append('packet_operating_system=' + attrs['operating_system']) groups.append('packet_operating_system=' + attrs['operating_system'])
groups.append('packet_locked=%s' % attrs['locked']) groups.append('packet_locked=%s' % attrs['locked'])
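To make the data flow in `packet_device` more concrete, here is a small stand-alone illustration of how raw Terraform attributes become Ansible host vars plus group names. The attribute values are invented, and only fields visible in the hunk above are reproduced.

```python
# Invented raw attributes, shaped like a packet_device entry in a
# Terraform state; values are placeholders for illustration only.
raw_attrs = {
    "hostname": "example-k8s-node-1",
    "operating_system": "ubuntu_16_04",
    "state": "active",
    "locked": False,
    "network.0.address": "192.0.2.10",
}

# Ansible-facing host variables derived from the raw attributes.
attrs = {
    "state": raw_attrs["state"],
    "ansible_ssh_host": raw_attrs["network.0.address"],
    "ansible_ssh_user": "root",  # default user on Packet images
    "ipv4_address": raw_attrs["network.0.address"],
    "public_ipv4": raw_attrs["network.0.address"],
    "operating_system": raw_attrs["operating_system"],
    "locked": raw_attrs["locked"],
    "provider": "packet",
}

# CoreOS images log in as "core" instead of root (one side of the diff).
if raw_attrs["operating_system"] == "coreos_stable":
    attrs["ansible_ssh_user"] = "core"

# Group names derived from the attributes, as in the last lines above.
groups = [
    "packet_operating_system=" + attrs["operating_system"],
    "packet_locked=%s" % attrs["locked"],
]

print(attrs)
print(groups)
```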
@@ -346,20 +339,14 @@ def iter_host_ips(hosts, ips):
'''Update hosts that have an entry in the floating IP list''' '''Update hosts that have an entry in the floating IP list'''
for host in hosts: for host in hosts:
host_id = host[1]['id'] host_id = host[1]['id']
if host_id in ips: if host_id in ips:
ip = ips[host_id] ip = ips[host_id]
host[1].update({ host[1].update({
'access_ip_v4': ip, 'access_ip_v4': ip,
'access_ip': ip, 'access_ip': ip,
'public_ipv4': ip, 'public_ipv4': ip,
'ansible_ssh_host': ip, 'ansible_ssh_host': ip,
}) })
if 'use_access_ip' in host[1]['metadata'] and host[1]['metadata']['use_access_ip'] == "0":
host[1].pop('access_ip')
yield host yield host
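A self-contained sketch of `iter_host_ips` with tiny invented inputs makes the behaviour easier to see; the optional `use_access_ip` opt-out mirrors the extra lines present on one side of the diff above.

```python
def iter_host_ips(hosts, ips):
    '''Update hosts that have an entry in the floating IP list.'''
    for host in hosts:
        host_id = host[1]['id']
        if host_id in ips:
            ip = ips[host_id]
            host[1].update({
                'access_ip_v4': ip,
                'access_ip': ip,
                'public_ipv4': ip,
                'ansible_ssh_host': ip,
            })
            # Optional behaviour from one side of the diff: a host can opt
            # out of using the floating IP as its access_ip via metadata.
            if host[1].get('metadata', {}).get('use_access_ip') == "0":
                host[1].pop('access_ip')
        yield host


# Hypothetical sample data: one host with a floating IP, one without.
hosts = [
    ("node1", {"id": "abc", "metadata": {}}),
    ("node2", {"id": "def", "metadata": {}}),
]
ips = {"abc": "203.0.113.10"}
for name, hostvars in iter_host_ips(hosts, ips):
    print(name, hostvars)
```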


@@ -13,7 +13,7 @@
/usr/local/share/ca-certificates/vault-ca.crt /usr/local/share/ca-certificates/vault-ca.crt
{%- elif ansible_os_family == "RedHat" -%} {%- elif ansible_os_family == "RedHat" -%}
/etc/pki/ca-trust/source/anchors/vault-ca.crt /etc/pki/ca-trust/source/anchors/vault-ca.crt
{%- elif ansible_os_family in ["Coreos", "Container Linux by CoreOS"] -%} {%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%}
/etc/ssl/certs/vault-ca.pem /etc/ssl/certs/vault-ca.pem
{%- endif %} {%- endif %}
@@ -25,7 +25,7 @@
- name: bootstrap/ca_trust | update ca-certificates (Debian/Ubuntu/CoreOS) - name: bootstrap/ca_trust | update ca-certificates (Debian/Ubuntu/CoreOS)
command: update-ca-certificates command: update-ca-certificates
when: vault_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Coreos", "Container Linux by CoreOS"] when: vault_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"]
- name: bootstrap/ca_trust | update ca-certificates (RedHat) - name: bootstrap/ca_trust | update ca-certificates (RedHat)
command: update-ca-trust extract command: update-ca-trust extract
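Reduced to plain logic, the template and tasks above pick a CA bundle path and a trust-store refresh command per OS family. A rough Python rendering of that decision table (paths copied from the template, command names from the tasks; the dictionary itself is only an illustration):

```python
# Where the Vault CA certificate is dropped and which command refreshes
# the system trust store, keyed by Ansible OS family (per the tasks above).
VAULT_CA_TRUST = {
    "Debian": ("/usr/local/share/ca-certificates/vault-ca.crt",
               "update-ca-certificates"),
    "RedHat": ("/etc/pki/ca-trust/source/anchors/vault-ca.crt",
               "update-ca-trust extract"),
    "Container Linux by CoreOS": ("/etc/ssl/certs/vault-ca.pem",
                                  "update-ca-certificates"),
}

for family, (path, command) in VAULT_CA_TRUST.items():
    print(f"{family}: copy CA to {path}, then run '{command}'")
```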


@@ -21,7 +21,7 @@
- name: bootstrap/sync_secrets | Print out warning message if secrets are not available and vault is initialized - name: bootstrap/sync_secrets | Print out warning message if secrets are not available and vault is initialized
pause: pause:
prompt: > prompt: >
Vault orchestration may not be able to proceed. The Vault cluster is initialized, but Vault orchestration may not be able to proceed. The Vault cluster is initialzed, but
'root_token' or 'unseal_keys' were not found in {{ vault_secrets_dir }}. These are 'root_token' or 'unseal_keys' were not found in {{ vault_secrets_dir }}. These are
needed for many vault orchestration steps. needed for many vault orchestration steps.
when: vault_cluster_is_initialized and not vault_secrets_available when: vault_cluster_is_initialized and not vault_secrets_available


@@ -3,6 +3,7 @@
* [Getting started](/docs/getting-started.md) * [Getting started](/docs/getting-started.md)
* [Ansible](docs/ansible.md) * [Ansible](docs/ansible.md)
* [Variables](/docs/vars.md) * [Variables](/docs/vars.md)
* [Ansible](/docs/ansible.md)
* Operations * Operations
* [Integration](docs/integration.md) * [Integration](docs/integration.md)
* [Upgrades](/docs/upgrades.md) * [Upgrades](/docs/upgrades.md)


@@ -1,7 +1,9 @@
# Ansible variables Ansible variables
===============
## Inventory
Inventory
-------------
The inventory is composed of 3 groups: The inventory is composed of 3 groups:
* **kube-node** : list of kubernetes nodes where the pods will run. * **kube-node** : list of kubernetes nodes where the pods will run.
@@ -12,7 +14,7 @@ Note: do not modify the children of _k8s-cluster_, like putting
the _etcd_ group into the _k8s-cluster_, unless you are certain the _etcd_ group into the _k8s-cluster_, unless you are certain
to do that and you have it fully contained in the latter: to do that and you have it fully contained in the latter:
```ShellSession ```
k8s-cluster ⊂ etcd => kube-node ∩ etcd = etcd k8s-cluster ⊂ etcd => kube-node ∩ etcd = etcd
``` ```
@@ -30,7 +32,7 @@ There are also two special groups:
Below is a complete inventory example: Below is a complete inventory example:
```ini ```
## Configure 'ip' variable to bind kubernetes services on a ## Configure 'ip' variable to bind kubernetes services on a
## different ip than the default iface ## different ip than the default iface
node1 ansible_host=95.54.0.12 ip=10.3.0.1 node1 ansible_host=95.54.0.12 ip=10.3.0.1
@@ -61,7 +63,8 @@ kube-node
kube-master kube-master
``` ```
## Group vars and overriding variables precedence Group vars and overriding variables precedence
----------------------------------------------
The group variables to control main deployment options are located in the directory ``inventory/sample/group_vars``. The group variables to control main deployment options are located in the directory ``inventory/sample/group_vars``.
Optional variables are located in the `inventory/sample/group_vars/all.yml`. Optional variables are located in the `inventory/sample/group_vars/all.yml`.
@@ -70,7 +73,7 @@ Mandatory variables that are common for at least one role (or a node group) can
There are also role vars for docker, kubernetes preinstall and master roles. There are also role vars for docker, kubernetes preinstall and master roles.
According to the [ansible docs](http://docs.ansible.com/ansible/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable), According to the [ansible docs](http://docs.ansible.com/ansible/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable),
those cannot be overridden from the group vars. In order to override, one should use those cannot be overridden from the group vars. In order to override, one should use
the `-e` runtime flags (most simple way) or other layers described in the docs. the `-e ` runtime flags (most simple way) or other layers described in the docs.
Kubespray uses only a few layers to override things (or expect them to Kubespray uses only a few layers to override things (or expect them to
be overridden for roles): be overridden for roles):
@@ -94,8 +97,8 @@ block vars (only for tasks in block) | Kubespray overrides for internal roles' l
task vars (only for the task) | Unused for roles, but only for helper scripts task vars (only for the task) | Unused for roles, but only for helper scripts
**extra vars** (always win precedence) | override with ``ansible-playbook -e @foo.yml`` **extra vars** (always win precedence) | override with ``ansible-playbook -e @foo.yml``
## Ansible tags Ansible tags
------------
The following tags are defined in playbooks: The following tags are defined in playbooks:
| Tag name | Used for | Tag name | Used for
@@ -142,25 +145,21 @@ Note: Use the ``bash scripts/gen_tags.sh`` command to generate a list of all
tags found in the codebase. New tags will be listed with the empty "Used for" tags found in the codebase. New tags will be listed with the empty "Used for"
field. field.
## Example commands Example commands
----------------
Example command to filter and apply only DNS configuration tasks and skip Example command to filter and apply only DNS configuration tasks and skip
everything else related to host OS configuration and downloading images of containers: everything else related to host OS configuration and downloading images of containers:
```ShellSession ```
ansible-playbook -i inventory/sample/hosts.ini cluster.yml --tags preinstall,facts --skip-tags=download,bootstrap-os ansible-playbook -i inventory/sample/hosts.ini cluster.yml --tags preinstall,facts --skip-tags=download,bootstrap-os
``` ```
And this play only removes the K8s cluster DNS resolver IP from hosts' /etc/resolv.conf files: And this play only removes the K8s cluster DNS resolver IP from hosts' /etc/resolv.conf files:
```
```ShellSession
ansible-playbook -i inventory/sample/hosts.ini -e dns_mode='none' cluster.yml --tags resolvconf ansible-playbook -i inventory/sample/hosts.ini -e dns_mode='none' cluster.yml --tags resolvconf
``` ```
And this prepares all container images locally (at the ansible runner node) without installing And this prepares all container images locally (at the ansible runner node) without installing
or upgrading related stuff or trying to upload container to K8s cluster nodes: or upgrading related stuff or trying to upload container to K8s cluster nodes:
```
```ShellSession
ansible-playbook -i inventory/sample/hosts.ini cluster.yml \ ansible-playbook -i inventory/sample/hosts.ini cluster.yml \
-e download_run_once=true -e download_localhost=true \ -e download_run_once=true -e download_localhost=true \
--tags download --skip-tags upload,upgrade --tags download --skip-tags upload,upgrade
@@ -168,14 +167,14 @@ ansible-playbook -i inventory/sample/hosts.ini cluster.yml \
Note: use `--tags` and `--skip-tags` wise and only if you're 100% sure what you're doing. Note: use `--tags` and `--skip-tags` wise and only if you're 100% sure what you're doing.
## Bastion host Bastion host
--------------
If you prefer to not make your nodes publicly accessible (nodes with private IPs only), If you prefer to not make your nodes publicly accessible (nodes with private IPs only),
you can use a so called *bastion* host to connect to your nodes. To specify and use a bastion, you can use a so called *bastion* host to connect to your nodes. To specify and use a bastion,
simply add a line to your inventory, where you have to replace x.x.x.x with the public IP of the simply add a line to your inventory, where you have to replace x.x.x.x with the public IP of the
bastion host. bastion host.
```ShellSession ```
[bastion] [bastion]
bastion ansible_host=x.x.x.x bastion ansible_host=x.x.x.x
``` ```


@@ -1,7 +1,6 @@
# Architecture compatibility ## Architecture compatibility
The following table shows the impact of the CPU architecture on compatible features: The following table shows the impact of the CPU architecture on compatible features:
- amd64: Cluster using only x86/amd64 CPUs - amd64: Cluster using only x86/amd64 CPUs
- arm64: Cluster using only arm64 CPUs - arm64: Cluster using only arm64 CPUs
- amd64 + arm64: Cluster with a mix of x86/amd64 and arm64 CPUs - amd64 + arm64: Cluster with a mix of x86/amd64 and arm64 CPUs


@@ -1,22 +1,23 @@
# Atomic host bootstrap Atomic host bootstrap
=====================
Atomic host testing has been done with the network plugin flannel. Change the inventory var `kube_network_plugin: flannel`. Atomic host testing has been done with the network plugin flannel. Change the inventory var `kube_network_plugin: flannel`.
Note: Flannel is the only plugin that has currently been tested with atomic Note: Flannel is the only plugin that has currently been tested with atomic
## Vagrant ### Vagrant
* For bootstrapping with Vagrant, use box centos/atomic-host or fedora/atomic-host * For bootstrapping with Vagrant, use box centos/atomic-host or fedora/atomic-host
* Update VagrantFile variable `local_release_dir` to `/var/vagrant/temp`. * Update VagrantFile variable `local_release_dir` to `/var/vagrant/temp`.
* Update `vm_memory = 2048` and `vm_cpus = 2` * Update `vm_memory = 2048` and `vm_cpus = 2`
* Networking on vagrant hosts has to be brought up manually once they are booted. * Networking on vagrant hosts has to be brought up manually once they are booted.
```ShellSession ```
vagrant ssh vagrant ssh
sudo /sbin/ifup enp0s8 sudo /sbin/ifup enp0s8
``` ```
* For users of vagrant-libvirt download centos/atomic-host qcow2 format from <https://wiki.centos.org/SpecialInterestGroup/Atomic/Download/> * For users of vagrant-libvirt download centos/atomic-host qcow2 format from https://wiki.centos.org/SpecialInterestGroup/Atomic/Download/
* For users of vagrant-libvirt download fedora/atomic-host qcow2 format from <https://dl.fedoraproject.org/pub/alt/atomic/stable/> * For users of vagrant-libvirt download fedora/atomic-host qcow2 format from https://getfedora.org/en/atomic/download/
Then you can proceed to [cluster deployment](#run-deployment) Then you can proceed to [cluster deployment](#run-deployment)


@@ -1,10 +1,11 @@
# AWS AWS
===============
To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`. Refer to the [Kubespray Configuration](#kubespray-configuration) for customizing the provider. To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`. Refer to the [Kubespray Configuration](#kubespray-configuration) for customizing the provider.
Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role. Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.
You would also need to tag the resources in your VPC accordingly for the aws provider to utilize them. Tag the subnets, route tables and all instances that kubernetes will be run on with key `kubernetes.io/cluster/$cluster_name` (`$cluster_name` must be a unique identifier for the cluster). Tag the subnets that must be targeted by external ELBs with the key `kubernetes.io/role/elb` and internal ELBs with the key `kubernetes.io/role/internal-elb`. You would also need to tag the resources in your VPC accordingly for the aws provider to utilize them. Tag the subnets, route tables and all instances that kubernetes will be run on with key `kubernetes.io/cluster/$cluster_name` (`$cluster_name` must be a unique identifier for the cluster). Tag the subnets that must be targetted by external ELBs with the key `kubernetes.io/role/elb` and internal ELBs with the key `kubernetes.io/role/internal-elb`.
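If you script that tagging rather than clicking through the console, it boils down to attaching the right key/value pairs to each resource ID. A hedged boto3 sketch follows; the cluster name, region, resource IDs and tag values are placeholders (the documentation above only specifies the tag keys), and doing it this way is just one option, not something the playbook does for you.

```python
import boto3

# Placeholder values: replace with your own cluster name, region and the
# IDs of the subnets, route tables and instances Kubernetes will use.
CLUSTER_NAME = "my-cluster"
RESOURCE_IDS = ["subnet-0123456789abcdef0", "rtb-0123456789abcdef0"]

ec2 = boto3.client("ec2", region_name="us-east-2")

# Resources used by the cluster need the cluster ownership tag. The value
# is not prescribed above; "owned" is a common choice.
ec2.create_tags(
    Resources=RESOURCE_IDS,
    Tags=[{"Key": f"kubernetes.io/cluster/{CLUSTER_NAME}", "Value": "owned"}],
)

# Subnets that should receive external ELBs additionally get the elb role
# tag (use kubernetes.io/role/internal-elb for internal ELBs).
ec2.create_tags(
    Resources=["subnet-0123456789abcdef0"],
    Tags=[{"Key": "kubernetes.io/role/elb", "Value": "1"}],
)

print("tagged", RESOURCE_IDS)
```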
Make sure your VPC has both DNS Hostnames support and Private DNS enabled. Make sure your VPC has both DNS Hostnames support and Private DNS enabled.
@@ -12,13 +13,11 @@ The next step is to make sure the hostnames in your `inventory` file are identic
You can now create your cluster! You can now create your cluster!
## Dynamic Inventory ### Dynamic Inventory ###
There is also a dynamic inventory script for AWS that can be used if desired. However, be aware that it makes some certain assumptions about how you'll create your inventory. It also does not handle all use cases and groups that we may use as part of more advanced deployments. Additions welcome. There is also a dynamic inventory script for AWS that can be used if desired. However, be aware that it makes some certain assumptions about how you'll create your inventory. It also does not handle all use cases and groups that we may use as part of more advanced deployments. Additions welcome.
This will produce an inventory that is passed into Ansible that looks like the following: This will produce an inventory that is passed into Ansible that looks like the following:
```
```json
{ {
"_meta": { "_meta": {
"hostvars": { "hostvars": {
@@ -49,18 +48,15 @@ This will produce an inventory that is passed into Ansible that looks like the f
``` ```
Guide: Guide:
- Create instances in AWS as needed. - Create instances in AWS as needed.
- Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube-master`, `etcd`, or `kube-node`. You can also share roles like `kube-master, etcd` - Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube-master`, `etcd`, or `kube-node`. You can also share roles like `kube-master, etcd`
- Copy the `kubespray-aws-inventory.py` script from `kubespray/contrib/aws_inventory` to the `kubespray/inventory` directory. - Copy the `kubespray-aws-inventory.py` script from `kubespray/contrib/aws_inventory` to the `kubespray/inventory` directory.
- Set the following AWS credentials and info as environment variables in your terminal: - Set the following AWS credentials and info as environment variables in your terminal:
```
```ShellSession
export AWS_ACCESS_KEY_ID="xxxxx" export AWS_ACCESS_KEY_ID="xxxxx"
export AWS_SECRET_ACCESS_KEY="yyyyy" export AWS_SECRET_ACCESS_KEY="yyyyy"
export REGION="us-east-2" export REGION="us-east-2"
``` ```
- We will now create our cluster. There will be either one or two small changes. The first is that we will specify `-i inventory/kubespray-aws-inventory.py` as our inventory script. The other is conditional. If your AWS instances are public facing, you can set the `VPC_VISIBILITY` variable to `public` and that will result in public IP and DNS names being passed into the inventory. This causes your cluster.yml command to look like `VPC_VISIBILITY="public" ansible-playbook ... cluster.yml` - We will now create our cluster. There will be either one or two small changes. The first is that we will specify `-i inventory/kubespray-aws-inventory.py` as our inventory script. The other is conditional. If your AWS instances are public facing, you can set the `VPC_VISIBILITY` variable to `public` and that will result in public IP and DNS names being passed into the inventory. This causes your cluster.yml command to look like `VPC_VISIBILITY="public" ansible-playbook ... cluster.yml`
## Kubespray configuration ## Kubespray configuration
@@ -79,3 +75,4 @@ aws_kubernetes_cluster_id|string|KubernetesClusterID is the cluster id we'll use
aws_disable_security_group_ingress|bool|The aws provider creates an inbound rule per load balancer on the node security group. However, this can run into the AWS security group rule limit of 50 if many LoadBalancers are created. This flag disables the automatic ingress creation. It requires that the user has setup a rule that allows inbound traffic on kubelet ports from the local VPC subnet (so load balancers can access it). E.g. 10.82.0.0/16 30000-32000. aws_disable_security_group_ingress|bool|The aws provider creates an inbound rule per load balancer on the node security group. However, this can run into the AWS security group rule limit of 50 if many LoadBalancers are created. This flag disables the automatic ingress creation. It requires that the user has setup a rule that allows inbound traffic on kubelet ports from the local VPC subnet (so load balancers can access it). E.g. 10.82.0.0/16 30000-32000.
aws_elb_security_group|string|Only in Kubelet version >= 1.7 : AWS has a hard limit of 500 security groups. For large clusters creating a security group for each ELB can cause the max number of security groups to be reached. If this is set instead of creating a new Security group for each ELB this security group will be used instead. aws_elb_security_group|string|Only in Kubelet version >= 1.7 : AWS has a hard limit of 500 security groups. For large clusters creating a security group for each ELB can cause the max number of security groups to be reached. If this is set instead of creating a new Security group for each ELB this security group will be used instead.
aws_disable_strict_zone_check|bool|During the instantiation of an new AWS cloud provider, the detected region is validated against a known set of regions. In a non-standard, AWS like environment (e.g. Eucalyptus), this check may be undesirable. Setting this to true will disable the check and provide a warning that the check was skipped. Please note that this is an experimental feature and work-in-progress for the moment. aws_disable_strict_zone_check|bool|During the instantiation of an new AWS cloud provider, the detected region is validated against a known set of regions. In a non-standard, AWS like environment (e.g. Eucalyptus), this check may be undesirable. Setting this to true will disable the check and provide a warning that the check was skipped. Please note that this is an experimental feature and work-in-progress for the moment.


@@ -1,4 +1,5 @@
# Azure Azure
===============
To deploy Kubernetes on [Azure](https://azure.microsoft.com) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'azure'`. To deploy Kubernetes on [Azure](https://azure.microsoft.com) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'azure'`.
@@ -6,46 +7,41 @@ All your instances are required to run in a resource group and a routing table h
Not all features are supported yet though, for a list of the current status have a look [here](https://github.com/colemickens/azure-kubernetes-status) Not all features are supported yet though, for a list of the current status have a look [here](https://github.com/colemickens/azure-kubernetes-status)
## Parameters ### Parameters
Before creating the instances you must first set the `azure_` variables in the `group_vars/all.yml` file. Before creating the instances you must first set the `azure_` variables in the `group_vars/all.yml` file.
All of the values can be retrieved using the azure cli tool which can be downloaded here: <https://docs.microsoft.com/en-gb/azure/xplat-cli-install> All of the values can be retrieved using the azure cli tool which can be downloaded here: https://docs.microsoft.com/en-gb/azure/xplat-cli-install
After installation you have to run `azure login` to get access to your account. After installation you have to run `azure login` to get access to your account.
### azure\_tenant\_id + azure\_subscription\_id
#### azure\_tenant\_id + azure\_subscription\_id
run `azure account show` to retrieve your subscription id and tenant id: run `azure account show` to retrieve your subscription id and tenant id:
`azure_tenant_id` -> Tenant ID field `azure_tenant_id` -> Tenant ID field
`azure_subscription_id` -> ID field `azure_subscription_id` -> ID field
### azure\_location
#### azure\_location
The region your instances are located, can be something like `westeurope` or `westcentralus`. A full list of region names can be retrieved via `azure location list` The region your instances are located, can be something like `westeurope` or `westcentralus`. A full list of region names can be retrieved via `azure location list`
### azure\_resource\_group
#### azure\_resource\_group
The name of the resource group your instances are in, can be retrieved via `azure group list` The name of the resource group your instances are in, can be retrieved via `azure group list`
### azure\_vnet\_name #### azure\_vnet\_name
The name of the virtual network your instances are in, can be retrieved via `azure network vnet list` The name of the virtual network your instances are in, can be retrieved via `azure network vnet list`
### azure\_subnet\_name #### azure\_subnet\_name
The name of the subnet your instances are in, can be retrieved via `azure network vnet subnet list --resource-group RESOURCE_GROUP --vnet-name VNET_NAME` The name of the subnet your instances are in, can be retrieved via `azure network vnet subnet list --resource-group RESOURCE_GROUP --vnet-name VNET_NAME`
### azure\_security\_group\_name #### azure\_security\_group\_name
The name of the network security group your instances are in, can be retrieved via `azure network nsg list` The name of the network security group your instances are in, can be retrieved via `azure network nsg list`
### azure\_aad\_client\_id + azure\_aad\_client\_secret #### azure\_aad\_client\_id + azure\_aad\_client\_secret
These will have to be generated first: These will have to be generated first:
- Create an Azure AD Application with: - Create an Azure AD Application with:
`azure ad app create --display-name kubernetes --identifier-uris http://kubernetes --homepage http://example.com --password CLIENT_SECRET` `azure ad app create --display-name kubernetes --identifier-uris http://kubernetes --homepage http://example.com --password CLIENT_SECRET`
display name, identifier-uri, homepage and the password can be chosen display name, identifier-uri, homepage and the password can be choosen
Note the AppId in the output. Note the AppId in the output.
- Create Service principal for the application with: - Create Service principal for the application with:
`azure ad sp create --id AppId` `azure ad sp create --id AppId`
@@ -53,31 +49,27 @@ This is the AppId from the last command
- Create the role assignment with: - Create the role assignment with:
`azure role assignment create --role "Owner" --assignee http://kubernetes --subscription SUBSCRIPTION_ID` `azure role assignment create --role "Owner" --assignee http://kubernetes --subscription SUBSCRIPTION_ID`
azure\_aad\_client\_id must be set to the AppId, azure\_aad\_client\_secret is your chosen secret. azure\_aad\_client\_id must be set to the AppId, azure\_aad\_client\_secret is your choosen secret.
### azure\_loadbalancer\_sku
#### azure\_loadbalancer\_sku
Sku of Load Balancer and Public IP. Candidate values are: basic and standard. Sku of Load Balancer and Public IP. Candidate values are: basic and standard.
### azure\_exclude\_master\_from\_standard\_lb #### azure\_exclude\_master\_from\_standard\_lb
azure\_exclude\_master\_from\_standard\_lb excludes master nodes from `standard` load balancer. azure\_exclude\_master\_from\_standard\_lb excludes master nodes from `standard` load balancer.
### azure\_disable\_outbound\_snat #### azure\_disable\_outbound\_snat
azure\_disable\_outbound\_snat disables the outbound SNAT for public load balancer rules. It should only be set when azure\_exclude\_master\_from\_standard\_lb is `standard`. azure\_disable\_outbound\_snat disables the outbound SNAT for public load balancer rules. It should only be set when azure\_exclude\_master\_from\_standard\_lb is `standard`.
### azure\_primary\_availability\_set\_name #### azure\_primary\_availability\_set\_name
(Optional) The name of the availability set that should be used as the load balancer backend .If this is set, the Azure
(Optional) The name of the availability set that should be used as the load balancer backend .If this is set, the Azure cloudprovider will only add nodes from that availability set to the load balancer backend pool. If this is not set, and
cloudprovider will only add nodes from that availability set to the load balancer backend pool. If this is not set, and multiple agent pools (availability sets) are used, then the cloudprovider will try to add all nodes to a single backend
multiple agent pools (availability sets) are used, then the cloudprovider will try to add all nodes to a single backend
pool which is forbidden. In other words, if you use multiple agent pools (availability sets), you MUST set this field. pool which is forbidden. In other words, if you use multiple agent pools (availability sets), you MUST set this field.
### azure\_use\_instance\_metadata #### azure\_use\_instance\_metadata
Use instance metadata service where possible Use instance metadata service where possible
## Provisioning Azure with Resource Group Templates ## Provisioning Azure with Resource Group Templates
You'll find Resource Group Templates and scripts to provision the required infrastructure to Azure in [*contrib/azurerm*](../contrib/azurerm/README.md) You'll find Resource Group Templates and scripts to provision the required infrastructure to Azure in [*contrib/azurerm*](../contrib/azurerm/README.md)


@@ -1,83 +1,82 @@
# Calico Calico
===========
N.B. **Version 2.6.5 upgrade to 3.1.1 is upgrading etcd store to etcdv3** ---
**N.B. Version 2.6.5 upgrade to 3.1.1 is upgrading etcd store to etcdv3**
If you create automated backups of etcdv2 please switch for creating etcdv3 backups, as kubernetes and calico now uses etcdv3 If you create automated backups of etcdv2 please switch for creating etcdv3 backups, as kubernetes and calico now uses etcdv3
After migration you can check `/tmp/calico_upgrade/` directory for converted items to etcdv3. After migration you can check `/tmp/calico_upgrade/` directory for converted items to etcdv3.
**PLEASE TEST upgrade before upgrading production cluster.** **PLEASE TEST upgrade before upgrading production cluster.**
---
Check if the calico-node container is running Check if the calico-node container is running
```ShellSession ```
docker ps | grep calico docker ps | grep calico
``` ```
The **calicoctl** command allows to check the status of the network workloads. The **calicoctl** command allows to check the status of the network workloads.
* Check the status of Calico nodes * Check the status of Calico nodes
```ShellSession ```
calicoctl node status calicoctl node status
``` ```
or for versions prior to *v1.0.0*: or for versions prior to *v1.0.0*:
```ShellSession ```
calicoctl status calicoctl status
``` ```
* Show the configured network subnet for containers * Show the configured network subnet for containers
```ShellSession ```
calicoctl get ippool -o wide calicoctl get ippool -o wide
``` ```
or for versions prior to *v1.0.0*: or for versions prior to *v1.0.0*:
```ShellSession ```
calicoctl pool show calicoctl pool show
``` ```
* Show the workloads (ip addresses of containers and their located) * Show the workloads (ip addresses of containers and their located)
```ShellSession ```
calicoctl get workloadEndpoint -o wide calicoctl get workloadEndpoint -o wide
``` ```
and and
```ShellSession ```
calicoctl get hostEndpoint -o wide calicoctl get hostEndpoint -o wide
``` ```
or for versions prior *v1.0.0*: or for versions prior *v1.0.0*:
```ShellSession ```
calicoctl endpoint show --detail calicoctl endpoint show --detail
``` ```
## Configuration ##### Optional : Define network backend
### Optional : Define network backend
In some cases you may want to define Calico network backend. Allowed values are 'bird', 'gobgp' or 'none'. Bird is a default value. In some cases you may want to define Calico network backend. Allowed values are 'bird', 'gobgp' or 'none'. Bird is a default value.
To re-define you need to edit the inventory and add a group variable `calico_network_backend` To re-define you need to edit the inventory and add a group variable `calico_network_backend`
```yml ```
calico_network_backend: none calico_network_backend: none
``` ```
### Optional : Define the default pool CIDR ##### Optional : Define the default pool CIDR
By default, `kube_pods_subnet` is used as the IP range CIDR for the default IP Pool. By default, `kube_pods_subnet` is used as the IP range CIDR for the default IP Pool.
In some cases you may want to add several pools and not have them considered by Kubernetes as external (which means that they must be within or equal to the range defined in `kube_pods_subnet`), it starts with the default IP Pool of which IP range CIDR can by defined in group_vars (k8s-cluster/k8s-net-calico.yml): In some cases you may want to add several pools and not have them considered by Kubernetes as external (which means that they must be within or equal to the range defined in `kube_pods_subnet`), it starts with the default IP Pool of which IP range CIDR can by defined in group_vars (k8s-cluster/k8s-net-calico.yml):
```ShellSession ```
calico_pool_cidr: 10.233.64.0/20 calico_pool_cidr: 10.233.64.0/20
``` ```
### Optional : BGP Peering with border routers ##### Optional : BGP Peering with border routers
In some cases you may want to route the pods subnet and so NAT is not needed on the nodes. In some cases you may want to route the pods subnet and so NAT is not needed on the nodes.
For instance if you have a cluster spread on different locations and you want your pods to talk each other no matter where they are located. For instance if you have a cluster spread on different locations and you want your pods to talk each other no matter where they are located.
@@ -85,11 +84,11 @@ The following variables need to be set:
`peer_with_router` to enable the peering with the datacenter's border router (default value: false). `peer_with_router` to enable the peering with the datacenter's border router (default value: false).
you'll need to edit the inventory and add a hostvar `local_as` by node. you'll need to edit the inventory and add a hostvar `local_as` by node.
```ShellSession ```
node1 ansible_ssh_host=95.54.0.12 local_as=xxxxxx node1 ansible_ssh_host=95.54.0.12 local_as=xxxxxx
``` ```
### Optional : Defining BGP peers ##### Optional : Defining BGP peers
Peers can be defined using the `peers` variable (see docs/calico_peer_example examples). Peers can be defined using the `peers` variable (see docs/calico_peer_example examples).
In order to define global peers, the `peers` variable can be defined in group_vars with the "scope" attribute of each global peer set to "global". In order to define global peers, the `peers` variable can be defined in group_vars with the "scope" attribute of each global peer set to "global".
@@ -98,17 +97,16 @@ NB: Ansible's `hash_behaviour` is by default set to "replace", thus defining bot
Since calico 3.4, Calico supports advertising Kubernetes service cluster IPs over BGP, just as it advertises pod IPs. Since calico 3.4, Calico supports advertising Kubernetes service cluster IPs over BGP, just as it advertises pod IPs.
This can be enabled by setting the following variable as follow in group_vars (k8s-cluster/k8s-net-calico.yml) This can be enabled by setting the following variable as follow in group_vars (k8s-cluster/k8s-net-calico.yml)
```
```yml
calico_advertise_cluster_ips: true calico_advertise_cluster_ips: true
``` ```
### Optional : Define global AS number ##### Optional : Define global AS number
Optional parameter `global_as_num` defines Calico global AS number (`/calico/bgp/v1/global/as_num` etcd key). Optional parameter `global_as_num` defines Calico global AS number (`/calico/bgp/v1/global/as_num` etcd key).
It defaults to "64512". It defaults to "64512".
### Optional : BGP Peering with route reflectors ##### Optional : BGP Peering with route reflectors
At large scale you may want to disable full node-to-node mesh in order to At large scale you may want to disable full node-to-node mesh in order to
optimize your BGP topology and improve `calico-node` containers' start times. optimize your BGP topology and improve `calico-node` containers' start times.
@@ -116,8 +114,8 @@ optimize your BGP topology and improve `calico-node` containers' start times.
To do so you can deploy BGP route reflectors and peer `calico-node` with them as To do so you can deploy BGP route reflectors and peer `calico-node` with them as
recommended here: recommended here:
* <https://hub.docker.com/r/calico/routereflector/> * https://hub.docker.com/r/calico/routereflector/
* <https://docs.projectcalico.org/v3.1/reference/private-cloud/l3-interconnect-fabric> * https://docs.projectcalico.org/v3.1/reference/private-cloud/l3-interconnect-fabric
You need to edit your inventory and add: You need to edit your inventory and add:
@@ -129,7 +127,7 @@ You need to edit your inventory and add:
Here's an example of Kubespray inventory with standalone route reflectors: Here's an example of Kubespray inventory with standalone route reflectors:
```ini ```
[all] [all]
rr0 ansible_ssh_host=10.210.1.10 ip=10.210.1.10 rr0 ansible_ssh_host=10.210.1.10 ip=10.210.1.10
rr1 ansible_ssh_host=10.210.1.11 ip=10.210.1.11 rr1 ansible_ssh_host=10.210.1.11 ip=10.210.1.11
@@ -179,35 +177,35 @@ The inventory above will deploy the following topology assuming that calico's
![Image](figures/kubespray-calico-rr.png?raw=true) ![Image](figures/kubespray-calico-rr.png?raw=true)
### Optional : Define default endpoint to host action ##### Optional : Define default endpoint to host action
By default Calico blocks traffic from endpoints to the host itself by using an iptables DROP action. When using it in kubernetes the action has to be changed to RETURN (default in kubespray) or ACCEPT (see https://github.com/projectcalico/felix/issues/660 and https://github.com/projectcalico/calicoctl/issues/1389). Otherwise all network packets from pods (with hostNetwork=False) to services endpoints (with hostNetwork=True) within the same node are dropped.
By default Calico blocks traffic from endpoints to the host itself by using an iptables DROP action. When using it in kubernetes the action has to be changed to RETURN (default in kubespray) or ACCEPT (see <https://github.com/projectcalico/felix/issues/660> and <https://github.com/projectcalico/calicoctl/issues/1389).> Otherwise all network packets from pods (with hostNetwork=False) to services endpoints (with hostNetwork=True) within the same node are dropped.
To re-define default action please set the following variable in your inventory: To re-define default action please set the following variable in your inventory:
```
```yml
calico_endpoint_to_host_action: "ACCEPT" calico_endpoint_to_host_action: "ACCEPT"
``` ```
## Optional : Define address on which Felix will respond to health requests ##### Optional : Define address on which Felix will respond to health requests
Since Calico 3.2.0, HealthCheck default behavior changed from listening on all interfaces to just listening on localhost. Since Calico 3.2.0, HealthCheck default behavior changed from listening on all interfaces to just listening on localhost.
To re-define health host please set the following variable in your inventory: To re-define health host please set the following variable in your inventory:
```
```yml
calico_healthhost: "0.0.0.0" calico_healthhost: "0.0.0.0"
``` ```
## Cloud providers configuration Cloud providers configuration
=============================
Please refer to the official documentation, for example [GCE configuration](http://docs.projectcalico.org/v1.5/getting-started/docker/installation/gce) requires a security rule for calico ip-ip tunnels. Note, calico is always configured with ``ipip: true`` if the cloud provider was defined. Please refer to the official documentation, for example [GCE configuration](http://docs.projectcalico.org/v1.5/getting-started/docker/installation/gce) requires a security rule for calico ip-ip tunnels. Note, calico is always configured with ``ipip: true`` if the cloud provider was defined.
### Optional : Ignore kernel's RPF check setting ##### Optional : Ignore kernel's RPF check setting
By default the felix agent(calico-node) will abort if the Kernel RPF setting is not 'strict'. If you want Calico to ignore the Kernel setting: By default the felix agent(calico-node) will abort if the Kernel RPF setting is not 'strict'. If you want Calico to ignore the Kernel setting:
```yml ```
calico_node_ignorelooserpf: true calico_node_ignorelooserpf: true
``` ```
@@ -215,7 +213,7 @@ Note that in OpenStack you must allow `ipip` traffic in your security groups,
otherwise you will experience timeouts. otherwise you will experience timeouts.
To do this you must add a rule which allows it, for example: To do this you must add a rule which allows it, for example:
```ShellSession ```
neutron security-group-rule-create --protocol 4 --direction egress k8s-a0tp4t neutron security-group-rule-create --protocol 4 --direction egress k8s-a0tp4t
neutron security-group-rule-create --protocol 4 --direction igress k8s-a0tp4t neutron security-group-rule-create --protocol 4 --direction igress k8s-a0tp4t
``` ```


@@ -1,102 +0,0 @@
# Cinder CSI Driver
Cinder CSI driver allows you to provision volumes over an OpenStack deployment. The Kubernetes historic in-tree cloud provider is deprecated and will be removed in future versions.
To enable Cinder CSI driver, uncomment the `cinder_csi_enabled` option in `group_vars/all/openstack.yml` and set it to `true`.
To set the number of replicas for the Cinder CSI controller, you can change `cinder_csi_controller_replicas` option in `group_vars/all/openstack.yml`.
You need to source the OpenStack credentials you use to deploy your machines that will host Kubernetes: `source path/to/your/openstack-rc` or `. path/to/your/openstack-rc`.
Make sure the hostnames in your `inventory` file are identical to your instance names in OpenStack. Otherwise [cinder](https://docs.openstack.org/cinder/latest/) won't work as expected.
If you want to deploy the cinder provisioner used with Cinder CSI Driver, you should set `persistent_volumes_enabled` in `group_vars/k8s-cluster/k8s-cluster.yml` to `true`.
You can now run the kubespray playbook (cluster.yml) to deploy Kubernetes over OpenStack with Cinder CSI Driver enabled.
## Usage example
To check that the Cinder CSI Driver works properly, first verify that the cinder-csi pods are running:
```ShellSession
$ kubectl -n kube-system get pods | grep cinder
csi-cinder-controllerplugin-7f8bf99785-cpb5v   5/5     Running   0          100m
csi-cinder-nodeplugin-rm5x2                    2/2     Running   0          100m
```
Check the associated storage class (if you enabled persistent_volumes):
```ShellSession
$ kubectl get storageclass
NAME PROVISIONER AGE
cinder-csi cinder.csi.openstack.org 100m
```
You can run a PVC and an Nginx Pod using this file `nginx.yaml`:
```yml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: csi-pvc-cinderplugin
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: cinder-csi
---
apiVersion: v1
kind: Pod
metadata:
name: nginx
spec:
containers:
- image: nginx
imagePullPolicy: IfNotPresent
name: nginx
ports:
- containerPort: 80
protocol: TCP
volumeMounts:
- mountPath: /var/lib/www/html
name: csi-data-cinderplugin
volumes:
- name: csi-data-cinderplugin
persistentVolumeClaim:
claimName: csi-pvc-cinderplugin
readOnly: false
```
Apply this configuration to your cluster: ```kubectl apply -f nginx.yaml```
You should see the PVC provisioned and bound:
```ShellSession
$ kubectl get pvc
NAME                   STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
csi-pvc-cinderplugin   Bound    pvc-f21ad0a1-5b7b-405e-a462-48da5cb76beb   1Gi        RWO            cinder-csi     8s
```
And the volume mounted to the Nginx Pod (wait until the Pod is Running):
```ShellSession
kubectl exec -it nginx -- df -h | grep /var/lib/www/html
/dev/vdb 976M 2.6M 958M 1% /var/lib/www/html
```
## Compatibility with in-tree cloud provider
It is not necessary to enable OpenStack as a cloud provider for Cinder CSI Driver to work.
Though, you can run both the in-tree openstack cloud provider and the Cinder CSI Driver at the same time. The storage class provisioners associated to each one of them are differently named.
## Cinder v2 support
For the moment, only Cinder v3 is supported by the CSI Driver.
## More info
For further information about the Cinder CSI Driver, you can refer to this page: [Cloud Provider OpenStack](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/using-cinder-csi-plugin.md).


@@ -1,13 +1,13 @@
# Cloud providers Cloud providers
==============
## Provisioning #### Provisioning
You can deploy instances in your cloud environment in several different ways. Examples include Terraform, Ansible (ec2 and gce modules), and manual creation. You can deploy instances in your cloud environment in several different ways. Examples include Terraform, Ansible (ec2 and gce modules), and manual creation.
## Deploy kubernetes #### Deploy kubernetes
With ansible-playbook command With ansible-playbook command
```
```ShellSession
ansible-playbook -u smana -e ansible_ssh_user=admin -e cloud_provider=[aws|gce] -b --become-user=root -i inventory/single.cfg cluster.yml ansible-playbook -u smana -e ansible_ssh_user=admin -e cloud_provider=[aws|gce] -b --become-user=root -i inventory/single.cfg cluster.yml
``` ```


@@ -7,4 +7,4 @@ This network plugin only unpacks CNI plugins version `cni_version` into `/opt/cn
It's intended usage is for custom CNI configuration, e.g. manual routing tables + bridge + loopback CNI plugin outside kubespray scope. Furthermore, it's used for non-kubespray supported CNI plugins which you can install afterward. It's intended usage is for custom CNI configuration, e.g. manual routing tables + bridge + loopback CNI plugin outside kubespray scope. Furthermore, it's used for non-kubespray supported CNI plugins which you can install afterward.
You are required to fill `/etc/cni/net.d` with valid CNI configuration after using kubespray. You are required to fill `/etc/cni/net.d` with valid CNI configuration after using kubespray.


@@ -1,6 +1,5 @@
# Comparison: Kubespray vs [Kops](https://github.com/kubernetes/kops)
---------------
## Kubespray vs [Kops](https://github.com/kubernetes/kops)
Kubespray runs on bare metal and most clouds, using Ansible as its substrate for Kubespray runs on bare metal and most clouds, using Ansible as its substrate for
provisioning and orchestration. Kops performs the provisioning and orchestration provisioning and orchestration. Kops performs the provisioning and orchestration
@@ -11,7 +10,8 @@ however, is more tightly integrated with the unique features of the clouds it
supports so it could be a better choice if you know that you will only be using supports so it could be a better choice if you know that you will only be using
one platform for the foreseeable future. one platform for the foreseeable future.
## Kubespray vs [Kubeadm](https://github.com/kubernetes/kubeadm) Kubespray vs [Kubeadm](https://github.com/kubernetes/kubeadm)
------------------
Kubeadm provides domain Knowledge of Kubernetes clusters' life cycle Kubeadm provides domain Knowledge of Kubernetes clusters' life cycle
management, including self-hosted layouts, dynamic discovery services and so management, including self-hosted layouts, dynamic discovery services and so
@@ -19,9 +19,9 @@ on. Had it belonged to the new [operators world](https://coreos.com/blog/introdu
it may have been named a "Kubernetes cluster operator". Kubespray however, it may have been named a "Kubernetes cluster operator". Kubespray however,
does generic configuration management tasks from the "OS operators" ansible does generic configuration management tasks from the "OS operators" ansible
world, plus some initial K8s clustering (with networking plugins included) and world, plus some initial K8s clustering (with networking plugins included) and
control plane bootstrapping. control plane bootstrapping.
Kubespray supports `kubeadm` for cluster creation since v2.3 Kubespray supports `kubeadm` for cluster creation since v2.3
(and deprecated non-kubeadm deployment starting from v2.8) (and deprecated non-kubeadm deployment starting from v2.8)
in order to consume life cycle management domain knowledge from it in order to consume life cycle management domain knowledge from it
and offload generic OS configuration things from it, which hopefully benefits both sides. and offload generic OS configuration things from it, which hopefully benefits both sides.


@@ -1,4 +1,5 @@
# Contiv Contiv
======
Here is the [Contiv documentation](http://contiv.github.io/documents/). Here is the [Contiv documentation](http://contiv.github.io/documents/).
@@ -9,6 +10,7 @@ There are two ways to manage Contiv:
* a web UI managed by the api proxy service * a web UI managed by the api proxy service
* a CLI named `netctl` * a CLI named `netctl`
### Interfaces ### Interfaces
#### The Web Interface #### The Web Interface
@@ -25,6 +27,7 @@ contiv_generate_certificate: true
The default credentials to log in are: admin/admin. The default credentials to log in are: admin/admin.
#### The Command Line Interface #### The Command Line Interface
The second way to modify the Contiv configuration is to use the CLI. To do this, you have to connect to the server and export an environment variable to tell netctl how to connect to the cluster: The second way to modify the Contiv configuration is to use the CLI. To do this, you have to connect to the server and export an environment variable to tell netctl how to connect to the cluster:
@@ -41,6 +44,7 @@ contiv_netmaster_port: 9999
The CLI doesn't use the authentication process needed by the web interface. The CLI doesn't use the authentication process needed by the web interface.
### Network configuration ### Network configuration
The default configuration uses VXLAN to create an overlay. Two networks are created by default: The default configuration uses VXLAN to create an overlay. Two networks are created by default:


@@ -6,7 +6,6 @@ Example with Ansible:
Before running the cluster playbook you must satisfy the following requirements: Before running the cluster playbook you must satisfy the following requirements:
General CoreOS Pre-Installation Notes: General CoreOS Pre-Installation Notes:
- Ensure that the bin_dir is set to `/opt/bin` - Ensure that the bin_dir is set to `/opt/bin`
- ansible_python_interpreter should be `/opt/bin/python`. This will be laid down by the bootstrap task. - ansible_python_interpreter should be `/opt/bin/python`. This will be laid down by the bootstrap task.
- The default resolvconf_mode setting of `docker_dns` **does not** work for CoreOS. This is because we do not edit the systemd service file for docker on CoreOS nodes. Instead, just use the `host_resolvconf` mode. It should work out of the box. - The default resolvconf_mode setting of `docker_dns` **does not** work for CoreOS. This is because we do not edit the systemd service file for docker on CoreOS nodes. Instead, just use the `host_resolvconf` mode. It should work out of the box.


@@ -1,4 +1,5 @@
# CRI-O CRI-O
===============
[CRI-O] is a lightweight container runtime for Kubernetes. [CRI-O] is a lightweight container runtime for Kubernetes.
Kubespray supports basic functionality for using CRI-O as the default container runtime in a cluster. Kubespray supports basic functionality for using CRI-O as the default container runtime in a cluster.
@@ -9,14 +10,14 @@ Kubespray supports basic functionality for using CRI-O as the default container
_To use CRI-O instead of Docker, set the following variables:_ _To use CRI-O instead of Docker, set the following variables:_
## all.yml #### all.yml
```yaml ```yaml
download_container: false download_container: false
skip_downloads: false skip_downloads: false
``` ```
## k8s-cluster.yml #### k8s-cluster.yml
```yaml ```yaml
etcd_deployment_type: host etcd_deployment_type: host


@@ -1,14 +1,15 @@
# Debian Jessie Debian Jessie
===============
Debian Jessie installation Notes: Debian Jessie installation Notes:
- Add - Add
```GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"``` ```GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"```
to /etc/default/grub. Then update with to /etc/default/grub. Then update with
```ShellSession ```
sudo update-grub sudo update-grub
sudo update-grub2 sudo update-grub2
sudo reboot sudo reboot
@@ -22,7 +23,7 @@ Debian Jessie installation Notes:
- Add the Ansible repository and install Ansible to get a proper version - Add the Ansible repository and install Ansible to get a proper version
```ShellSession ```
sudo add-apt-repository ppa:ansible/ansible sudo add-apt-repository ppa:ansible/ansible
sudo apt-get update sudo apt-get update
sudo apt-get install ansible sudo apt-get install ansible
@@ -33,4 +34,5 @@ Debian Jessie installation Notes:
```sudo apt-get install python-jinja2=2.8-1~bpo8+1 python-netaddr``` ```sudo apt-get install python-jinja2=2.8-1~bpo8+1 python-netaddr```
Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment) Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment)


@@ -1,4 +1,5 @@
# K8s DNS stack by Kubespray K8s DNS stack by Kubespray
======================
For K8s cluster nodes, Kubespray configures a [Kubernetes DNS](http://kubernetes.io/docs/admin/dns/) For K8s cluster nodes, Kubespray configures a [Kubernetes DNS](http://kubernetes.io/docs/admin/dns/)
[cluster add-on](http://releases.k8s.io/master/cluster/addons/README.md) [cluster add-on](http://releases.k8s.io/master/cluster/addons/README.md)
@@ -8,89 +9,78 @@ to serve as an authoritative DNS server for a given ``dns_domain`` and its
Other nodes in the inventory, like external storage nodes or a separate etcd cluster Other nodes in the inventory, like external storage nodes or a separate etcd cluster
node group, considered non-cluster and left up to the user to configure DNS resolve. node group, considered non-cluster and left up to the user to configure DNS resolve.
## DNS variables
DNS variables
=============
There are several global variables which can be used to modify DNS settings: There are several global variables which can be used to modify DNS settings:
### ndots #### ndots
ndots value to be used in ``/etc/resolv.conf`` ndots value to be used in ``/etc/resolv.conf``
It is important to note that multiple search domains combined with high ``ndots`` It is important to note that multiple search domains combined with high ``ndots``
values lead to poor performance of DNS stack, so please choose it wisely. values lead to poor performance of DNS stack, so please choose it wisely.
### searchdomains #### searchdomains
Custom search domains to be added in addition to the cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``). Custom search domains to be added in addition to the cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``).
Most Linux systems limit the total number of search domains to 6 and the total length of all search domains Most Linux systems limit the total number of search domains to 6 and the total length of all search domains
to 256 characters. Depending on the length of ``dns_domain``, you're limited to less than the total limit. to 256 characters. Depending on the length of ``dns_domain``, you're limitted to less then the total limit.
Please note that ``resolvconf_mode: docker_dns`` will automatically add your system's search domains as
additional search domains. Please take this into account for the limits.
### nameservers #### nameservers
This variable is only used by ``resolvconf_mode: host_resolvconf``. These nameservers are added to the hosts This variable is only used by ``resolvconf_mode: host_resolvconf``. These nameservers are added to the hosts
``/etc/resolv.conf`` *after* ``upstream_dns_servers`` and thus serve as backup nameservers. If this variable ``/etc/resolv.conf`` *after* ``upstream_dns_servers`` and thus serve as backup nameservers. If this variable
is not set, a default resolver is chosen (depending on cloud provider or 8.8.8.8 when no cloud provider is specified). is not set, a default resolver is chosen (depending on cloud provider or 8.8.8.8 when no cloud provider is specified).
### upstream_dns_servers #### upstream_dns_servers
DNS servers to be added *after* the cluster DNS. Used by all ``resolvconf_mode`` modes. These serve as backup DNS servers to be added *after* the cluster DNS. Used by all ``resolvconf_mode`` modes. These serve as backup
DNS servers in early cluster deployment when no cluster DNS is available yet. DNS servers in early cluster deployment when no cluster DNS is available yet.
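A minimal sketch that combines the variables above (every address and domain below is a placeholder, not a default):

```yaml
# group_vars/k8s-cluster/k8s-cluster.yml -- illustrative values only
ndots: 2
searchdomains:
  - corp.example.com        # placeholder extra search domain
upstream_dns_servers:
  - 10.0.0.10               # placeholder internal resolver, queried after cluster DNS
nameservers:
  - 8.8.8.8                 # backup resolver, only used with resolvconf_mode: host_resolvconf
```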
## DNS modes supported by Kubespray DNS modes supported by Kubespray
============================
You can modify how Kubespray sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``. You can modify how Kubespray sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``.
### dns_mode ## dns_mode
``dns_mode`` configures how Kubespray will setup cluster DNS. There are four modes available: ``dns_mode`` configures how Kubespray will setup cluster DNS. There are four modes available:
#### dns_mode: coredns (default) #### coredns (default)
This installs CoreDNS as the default cluster DNS for all queries. This installs CoreDNS as the default cluster DNS for all queries.
#### dns_mode: coredns_dual #### coredns_dual
This installs CoreDNS as the default cluster DNS for all queries, plus a secondary CoreDNS stack. This installs CoreDNS as the default cluster DNS for all queries, plus a secondary CoreDNS stack.
#### dns_mode: manual #### manual
This does not install coredns, but allows you to specify This does not install coredns, but allows you to specify
`manual_dns_server`, which will be configured on nodes for handling Pod DNS. `manual_dns_server`, which will be configured on nodes for handling Pod DNS.
Use this method if you plan to install your own DNS server in the cluster after Use this method if you plan to install your own DNS server in the cluster after
initial deployment. initial deployment.
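For example (a sketch; the address is a placeholder for wherever you will run your own DNS server):

```yaml
dns_mode: manual
manual_dns_server: 10.233.0.3   # placeholder; point this at your own in-cluster DNS service
```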
#### dns_mode: none #### none
This does not install any DNS solution at all. This basically disables cluster DNS completely and
leaves you with a non-functional cluster.
## resolvconf_mode ## resolvconf_mode
``resolvconf_mode`` configures how Kubespray will setup DNS for ``hostNetwork: true`` PODs and non-k8s containers. ``resolvconf_mode`` configures how Kubespray will setup DNS for ``hostNetwork: true`` PODs and non-k8s containers.
There are three modes available: There are three modes available:
### resolvconf_mode: docker_dns (default) #### docker_dns (default)
This sets up the docker daemon with additional --dns/--dns-search/--dns-opt flags. This sets up the docker daemon with additional --dns/--dns-search/--dns-opt flags.
The following nameservers are added to the docker daemon (in the same order as listed here): The following nameservers are added to the docker daemon (in the same order as listed here):
* cluster nameserver (depends on dns_mode) * cluster nameserver (depends on dns_mode)
* content of optional upstream_dns_servers variable * content of optional upstream_dns_servers variable
* host system nameservers (read from hosts /etc/resolv.conf) * host system nameservers (read from hosts /etc/resolv.conf)
The following search domains are added to the docker daemon (in the same order as listed here): The following search domains are added to the docker daemon (in the same order as listed here):
* cluster domains (``default.svc.{{ dns_domain }}``, ``svc.{{ dns_domain }}``) * cluster domains (``default.svc.{{ dns_domain }}``, ``svc.{{ dns_domain }}``)
* content of optional searchdomains variable * content of optional searchdomains variable
* host system search domains (read from hosts /etc/resolv.conf) * host system search domains (read from hosts /etc/resolv.conf)
The following dns options are added to the docker daemon The following dns options are added to the docker daemon
* ndots:{{ ndots }} * ndots:{{ ndots }}
* timeout:2 * timeout:2
* attempts:2 * attempts:2
@@ -106,9 +96,8 @@ DNS queries to the cluster DNS will timeout after a few seconds, resulting in th
used as a backup nameserver. After cluster DNS is running, all queries will be answered by the cluster DNS used as a backup nameserver. After cluster DNS is running, all queries will be answered by the cluster DNS
servers, which in turn will forward queries to the system nameserver if required. servers, which in turn will forward queries to the system nameserver if required.
#### resolvconf_mode: host_resolvconf #### host_resolvconf
This activates the classic Kubespray behaviour that modifies the hosts ``/etc/resolv.conf`` file and dhclient
This activates the classic Kubespray behavior that modifies the hosts ``/etc/resolv.conf`` file and dhclient
configuration to point to the cluster dns server (either coredns or coredns_dual, depending on dns_mode). configuration to point to the cluster dns server (either coredns or coredns_dual, depending on dns_mode).
As cluster DNS is not available on early deployment stage, this mode is split into 2 stages. In the first As cluster DNS is not available on early deployment stage, this mode is split into 2 stages. In the first
@@ -119,21 +108,21 @@ the other nameservers as backups.
Also note, existing records will be purged from the `/etc/resolv.conf`, Also note, existing records will be purged from the `/etc/resolv.conf`,
including resolvconf's base/head/cloud-init config files and those that come from dhclient. including resolvconf's base/head/cloud-init config files and those that come from dhclient.
#### resolvconf_mode: none #### none
Does nothing regarding ``/etc/resolv.conf``. This leaves you with a cluster that works as expected in most cases. Does nothing regarding ``/etc/resolv.conf``. This leaves you with a cluster that works as expected in most cases.
The only exception is that ``hostNetwork: true`` PODs and non-k8s managed containers will not be able to resolve The only exception is that ``hostNetwork: true`` PODs and non-k8s managed containers will not be able to resolve
cluster service names. cluster service names.
## Nodelocal DNS cache ## Nodelocal DNS cache
Setting ``enable_nodelocaldns`` to ``true`` will make pods reach out to the dns (core-dns) caching agent running on the same node, thereby avoiding iptables DNAT rules and connection tracking. The local caching agent will query core-dns (depending on what main DNS plugin is configured in your cluster) for cache misses of cluster hostnames(cluster.local suffix by default). Setting ``enable_nodelocaldns`` to ``true`` will make pods reach out to the dns (core-dns) caching agent running on the same node, thereby avoiding iptables DNAT rules and connection tracking. The local caching agent will query core-dns (depending on what main DNS plugin is configured in your cluster) for cache misses of cluster hostnames(cluster.local suffix by default).
More information on the rationale behind this implementation can be found [here](https://github.com/kubernetes/enhancements/blob/master/keps/sig-network/0030-nodelocal-dns-cache.md). More information on the rationale behind this implementation can be found [here](https://github.com/kubernetes/enhancements/blob/master/keps/sig-network/0030-nodelocal-dns-cache.md).
**As per the 2.10 release, Nodelocal DNS cache is enabled by default.** **As per the 2.10 release, Nodelocal DNS cache is enabled by default.**
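To opt out of the cache, or to see which knob controls it, a sketch of the relevant override (the listen address in the comment is an assumption about the usual link-local default, not taken from this page):

```yaml
enable_nodelocaldns: true   # set to false to disable the node-local cache
# nodelocaldns_ip: 169.254.25.10   # assumption: link-local address the caching agent listens on
```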
## Limitations
Limitations
-----------
* Kubespray does not yet provide a way to configure the Kubedns addon to forward requests that SkyDNS cannot
answer with authority to arbitrary recursive resolvers. This task is left

View File

@@ -1,4 +1,5 @@
# Downloading binaries and containers Downloading binaries and containers
===================================
Kubespray supports several download/upload modes. The default is: Kubespray supports several download/upload modes. The default is:
@@ -29,13 +30,11 @@ Container images may be defined by its repo and tag, for example:
Note, the SHA256 digest and the image tag must be both specified and correspond Note, the SHA256 digest and the image tag must be both specified and correspond
to each other. The given example above is represented by the following vars: to each other. The given example above is represented by the following vars:
```yaml ```yaml
dnsmasq_digest_checksum: 7c883354f6ea9876d176fe1d30132515478b2859d6fc0cbf9223ffdc09168193 dnsmasq_digest_checksum: 7c883354f6ea9876d176fe1d30132515478b2859d6fc0cbf9223ffdc09168193
dnsmasq_image_repo: andyshinn/dnsmasq dnsmasq_image_repo: andyshinn/dnsmasq
dnsmasq_image_tag: '2.72' dnsmasq_image_tag: '2.72'
``` ```
The full list of available vars may be found in the download role's Ansible defaults. Those also allow specifying custom URLs and local repositories for binaries and container
images. See also the DNS stack docs for the related intranet configuration,
so the hosts can resolve those URLs and repos.
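As an illustration of such an override, reusing the dnsmasq variables shown above with a placeholder intranet registry:

```yaml
# sketch: pull the same image from a local mirror instead of Docker Hub
dnsmasq_image_repo: registry.intranet.example.com/andyshinn/dnsmasq
dnsmasq_image_tag: '2.72'
dnsmasq_digest_checksum: 7c883354f6ea9876d176fe1d30132515478b2859d6fc0cbf9223ffdc09168193
```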

View File

@@ -1,8 +1,9 @@
# Flannel Flannel
==============
* A Flannel configuration file should have been created on each node:
```ShellSession ```
cat /run/flannel/subnet.env cat /run/flannel/subnet.env
FLANNEL_NETWORK=10.233.0.0/18 FLANNEL_NETWORK=10.233.0.0/18
FLANNEL_SUBNET=10.233.16.1/24 FLANNEL_SUBNET=10.233.16.1/24
@@ -12,7 +13,7 @@ FLANNEL_IPMASQ=false
* Check if the network interface has been created * Check if the network interface has been created
```ShellSession ```
ip a show dev flannel.1 ip a show dev flannel.1
4: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default 4: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default
link/ether e2:f3:a7:0f:bf:cb brd ff:ff:ff:ff:ff:ff link/ether e2:f3:a7:0f:bf:cb brd ff:ff:ff:ff:ff:ff
@@ -24,7 +25,7 @@ ip a show dev flannel.1
* Try to run a container and check its ip address * Try to run a container and check its ip address
```ShellSession ```
kubectl run test --image=busybox --command -- tail -f /dev/null kubectl run test --image=busybox --command -- tail -f /dev/null
replicationcontroller "test" created replicationcontroller "test" created
@@ -32,7 +33,7 @@ kubectl describe po test-34ozs | grep ^IP
IP: 10.233.16.2 IP: 10.233.16.2
``` ```
```ShellSession ```
kubectl exec test-34ozs -- ip a show dev eth0 kubectl exec test-34ozs -- ip a show dev eth0
8: eth0@if9: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1450 qdisc noqueue 8: eth0@if9: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1450 qdisc noqueue
link/ether 02:42:0a:e9:2b:03 brd ff:ff:ff:ff:ff:ff link/ether 02:42:0a:e9:2b:03 brd ff:ff:ff:ff:ff:ff

View File

@@ -1,6 +1,8 @@
# Getting started Getting started
===============
## Building your own inventory Building your own inventory
---------------------------
Ansible inventory can be stored in 3 formats: YAML, JSON, or INI-like. There is Ansible inventory can be stored in 3 formats: YAML, JSON, or INI-like. There is
an example inventory located an example inventory located
@@ -16,41 +18,38 @@ certain threshold. Run `python3 contrib/inventory_builder/inventory.py help` hel
Example inventory generator usage: Example inventory generator usage:
```ShellSession cp -r inventory/sample inventory/mycluster
cp -r inventory/sample inventory/mycluster declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5) CONFIG_FILE=inventory/mycluster/hosts.yml python3 contrib/inventory_builder/inventory.py ${IPS[@]}
CONFIG_FILE=inventory/mycluster/hosts.yml python3 contrib/inventory_builder/inventory.py ${IPS[@]}
```
Then use `inventory/mycluster/hosts.yml` as inventory file. Then use `inventory/mycluster/hosts.yml` as inventory file.
## Starting custom deployment Starting custom deployment
--------------------------
Once you have an inventory, you may want to customize deployment data vars Once you have an inventory, you may want to customize deployment data vars
and start the deployment: and start the deployment:
**IMPORTANT**: Edit my\_inventory/groups\_vars/\*.yaml to override data vars: **IMPORTANT**: Edit my\_inventory/groups\_vars/\*.yaml to override data vars:
```ShellSession ansible-playbook -i inventory/mycluster/hosts.yml cluster.yml -b -v \
ansible-playbook -i inventory/mycluster/hosts.yml cluster.yml -b -v \ --private-key=~/.ssh/private_key
--private-key=~/.ssh/private_key
```
See more details in the [ansible guide](ansible.md). See more details in the [ansible guide](ansible.md).
### Adding nodes Adding nodes
------------
You may want to add worker, master or etcd nodes to your existing cluster. This can be done by re-running the `cluster.yml` playbook, or you can target the bare minimum needed to get kubelet installed on the worker and talking to your masters. This is especially helpful when doing something like autoscaling your clusters. You may want to add worker, master or etcd nodes to your existing cluster. This can be done by re-running the `cluster.yml` playbook, or you can target the bare minimum needed to get kubelet installed on the worker and talking to your masters. This is especially helpful when doing something like autoscaling your clusters.
- Add the new worker node to your inventory in the appropriate group (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)). - Add the new worker node to your inventory in the appropriate group (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
- Run the ansible-playbook command, substituting `cluster.yml` for `scale.yml`: - Run the ansible-playbook command, substituting `cluster.yml` for `scale.yml`:
```ShellSession ansible-playbook -i inventory/mycluster/hosts.yml scale.yml -b -v \
ansible-playbook -i inventory/mycluster/hosts.yml scale.yml -b -v \ --private-key=~/.ssh/private_key
--private-key=~/.ssh/private_key
```
### Remove nodes Remove nodes
------------
You may want to remove **master**, **worker**, or **etcd** nodes from your You may want to remove **master**, **worker**, or **etcd** nodes from your
existing cluster. This can be done by re-running the `remove-node.yml` existing cluster. This can be done by re-running the `remove-node.yml`
@@ -62,11 +61,10 @@ when doing something like autoscaling your clusters. Of course, if a node
is not working, you can remove the node and install it again. is not working, you can remove the node and install it again.
Use `--extra-vars "node=<nodename>,<nodename2>"` to select the node(s) you want to delete. Use `--extra-vars "node=<nodename>,<nodename2>"` to select the node(s) you want to delete.
```
```ShellSession
ansible-playbook -i inventory/mycluster/hosts.yml remove-node.yml -b -v \ ansible-playbook -i inventory/mycluster/hosts.yml remove-node.yml -b -v \
--private-key=~/.ssh/private_key \ --private-key=~/.ssh/private_key \
--extra-vars "node=nodename,nodename2" --extra-vars "node=nodename,nodename2"
``` ```
If a node is completely unreachable by ssh, add `--extra-vars reset_nodes=no` If a node is completely unreachable by ssh, add `--extra-vars reset_nodes=no`
@@ -74,7 +72,8 @@ to skip the node reset step. If one node is unavailable, but others you wish
to remove are able to connect via SSH, you could set reset_nodes=no as a host to remove are able to connect via SSH, you could set reset_nodes=no as a host
var in inventory. var in inventory.
## Connecting to Kubernetes Connecting to Kubernetes
------------------------
By default, Kubespray configures kube-master hosts with insecure access to By default, Kubespray configures kube-master hosts with insecure access to
kube-apiserver via port 8080. A kubeconfig file is not necessary in this case, kube-apiserver via port 8080. A kubeconfig file is not necessary in this case,
@@ -96,14 +95,15 @@ file yourself.
For more information on kubeconfig and accessing a Kubernetes cluster, refer to For more information on kubeconfig and accessing a Kubernetes cluster, refer to
the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/). the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/).
## Accessing Kubernetes Dashboard Accessing Kubernetes Dashboard
------------------------------
As of kubernetes-dashboard v1.7.x: As of kubernetes-dashboard v1.7.x:
- New login options that use apiserver auth proxying of token/basic/kubeconfig by default - New login options that use apiserver auth proxying of token/basic/kubeconfig by default
- Requires RBAC in authorization\_modes - Requires RBAC in authorization\_modes
- Only serves over https - Only serves over https
- No longer available at <https://first_master:6443/ui> until apiserver is updated with the https proxy URL - No longer available at <https://first_master:6443/ui> until apiserver is updated with the https proxy URL
If the variable `dashboard_enabled` is set (default is true), then you can access the Kubernetes Dashboard at the following URL, You will be prompted for credentials: If the variable `dashboard_enabled` is set (default is true), then you can access the Kubernetes Dashboard at the following URL, You will be prompted for credentials:
<https://first_master:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login> <https://first_master:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login>
@@ -113,20 +113,19 @@ Or you can run 'kubectl proxy' from your local machine to access dashboard in yo
It is recommended to access dashboard from behind a gateway (like Ingress Controller) that enforces an authentication token. Details and other access options here: <https://github.com/kubernetes/dashboard/wiki/Accessing-Dashboard---1.7.X-and-above> It is recommended to access dashboard from behind a gateway (like Ingress Controller) that enforces an authentication token. Details and other access options here: <https://github.com/kubernetes/dashboard/wiki/Accessing-Dashboard---1.7.X-and-above>
## Accessing Kubernetes API Accessing Kubernetes API
------------------------
The main client of Kubernetes is `kubectl`. It is installed on each kube-master The main client of Kubernetes is `kubectl`. It is installed on each kube-master
host and can optionally be configured on your ansible host by setting host and can optionally be configured on your ansible host by setting
`kubectl_localhost: true` and `kubeconfig_localhost: true` in the configuration: `kubectl_localhost: true` and `kubeconfig_localhost: true` in the configuration:
- If `kubectl_localhost` enabled, `kubectl` will download onto `/usr/local/bin/` and setup with bash completion. A helper script `inventory/mycluster/artifacts/kubectl.sh` also created for setup with below `admin.conf`. - If `kubectl_localhost` enabled, `kubectl` will download onto `/usr/local/bin/` and setup with bash completion. A helper script `inventory/mycluster/artifacts/kubectl.sh` also created for setup with below `admin.conf`.
- If `kubeconfig_localhost` enabled `admin.conf` will appear in the `inventory/mycluster/artifacts/` directory after deployment. - If `kubeconfig_localhost` enabled `admin.conf` will appear in the `inventory/mycluster/artifacts/` directory after deployment.
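A minimal sketch of those two settings:

```yaml
# group_vars/k8s-cluster/k8s-cluster.yml -- sketch
kubeconfig_localhost: true   # fetch admin.conf into inventory/mycluster/artifacts/
kubectl_localhost: true      # download kubectl onto the Ansible host
```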
You can see a list of nodes by running the following commands: You can see a list of nodes by running the following commands:
```ShellSession cd inventory/mycluster/artifacts
cd inventory/mycluster/artifacts ./kubectl.sh get nodes
./kubectl.sh get nodes
```
If desired, copy admin.conf to ~/.kube/config. If desired, copy admin.conf to ~/.kube/config.

View File

@@ -1,18 +1,19 @@
# HA endpoints for K8s HA endpoints for K8s
====================
The following components require a highly available endpoints: The following components require a highly available endpoints:
* etcd cluster, * etcd cluster,
* kube-apiserver service instances. * kube-apiserver service instances.
The latter relies on a third-party reverse proxy, like Nginx or HAProxy, to
achieve the same goal.
## Etcd Etcd
----
The etcd clients (kube-api-masters) are configured with the list of all etcd peers. If the etcd-cluster has multiple instances, it's configured in HA already. The etcd clients (kube-api-masters) are configured with the list of all etcd peers. If the etcd-cluster has multiple instances, it's configured in HA already.
## Kube-apiserver Kube-apiserver
--------------
K8s components require a loadbalancer to access the apiservers via a reverse K8s components require a loadbalancer to access the apiservers via a reverse
proxy. Kubespray includes support for an nginx-based proxy that resides on each proxy. Kubespray includes support for an nginx-based proxy that resides on each
@@ -49,8 +50,7 @@ provides access for external clients, while the internal LB accepts client
connections only to the localhost. connections only to the localhost.
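The node-local proxy is toggled with `loadbalancer_apiserver_localhost` (referenced again further down this page); a minimal sketch:

```yaml
# sketch: keep the localhost-bound apiserver proxy on every node
loadbalancer_apiserver_localhost: true
```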
Given a frontend `VIP` address and `IP1, IP2` addresses of backends, here is Given a frontend `VIP` address and `IP1, IP2` addresses of backends, here is
an example configuration for a HAProxy service acting as an external LB: an example configuration for a HAProxy service acting as an external LB:
```
```raw
listen kubernetes-apiserver-https listen kubernetes-apiserver-https
bind <VIP>:8383 bind <VIP>:8383
option ssl-hello-chk option ssl-hello-chk
@@ -66,8 +66,7 @@ listen kubernetes-apiserver-https
And the corresponding example global vars for such a "cluster-aware" And the corresponding example global vars for such a "cluster-aware"
external LB with the cluster API access modes configured in Kubespray: external LB with the cluster API access modes configured in Kubespray:
```
```yml
apiserver_loadbalancer_domain_name: "my-apiserver-lb.example.com" apiserver_loadbalancer_domain_name: "my-apiserver-lb.example.com"
loadbalancer_apiserver: loadbalancer_apiserver:
address: <VIP> address: <VIP>
@@ -102,15 +101,14 @@ exclusive to `loadbalancer_apiserver_localhost`.
Access API endpoints are evaluated automatically, as the following: Access API endpoints are evaluated automatically, as the following:
| Endpoint type                | kube-master      | non-master              | external              |
|------------------------------|------------------|-------------------------|-----------------------|
| Local LB (default)           | `https://bip:sp` | `https://lc:nsp`        | `https://m[0].aip:sp` |
| Local LB + Unmanaged here LB | `https://bip:sp` | `https://lc:nsp`        | `https://ext`         |
| External LB, no internal     | `https://bip:sp` | `<https://lb:lp>`       | `https://lb:lp`       |
| No ext/int LB                | `https://bip:sp` | `<https://m[0].aip:sp>` | `https://m[0].aip:sp` |
Where: Where:
* `m[0]` - the first node in the `kube-master` group; * `m[0]` - the first node in the `kube-master` group;
* `lb` - LB FQDN, `apiserver_loadbalancer_domain_name`; * `lb` - LB FQDN, `apiserver_loadbalancer_domain_name`;
* `ext` - Externally load balanced VIP:port and FQDN, not managed by Kubespray; * `ext` - Externally load balanced VIP:port and FQDN, not managed by Kubespray;
@@ -134,19 +132,16 @@ Kubespray, the masters' APIs are accessed via the insecure endpoint, which
consists of the local `kube_apiserver_insecure_bind_address` and consists of the local `kube_apiserver_insecure_bind_address` and
`kube_apiserver_insecure_port`. `kube_apiserver_insecure_port`.
## Optional configurations Optional configurations
------------------------
### ETCD with a LB ### ETCD with a LB
In order to use an external loadbalancing (L4/TCP or L7 w/ SSL Passthrough VIP), the following variables need to be overridden in group_vars In order to use an external loadbalancing (L4/TCP or L7 w/ SSL Passthrough VIP), the following variables need to be overridden in group_vars
* `etcd_access_addresses` * `etcd_access_addresses`
* `etcd_client_url` * `etcd_client_url`
* `etcd_cert_alt_names` * `etcd_cert_alt_names`
* `etcd_cert_alt_ips` * `etcd_cert_alt_ips`
#### Example of a VIP w/ FQDN #### Example of a VIP w/ FQDN
```yaml ```yaml
etcd_access_addresses: https://etcd.example.com:2379 etcd_access_addresses: https://etcd.example.com:2379
etcd_client_url: https://etcd.example.com:2379 etcd_client_url: https://etcd.example.com:2379

View File

@@ -3,13 +3,12 @@
1. Fork [kubespray repo](https://github.com/kubernetes-sigs/kubespray) to your personal/organisation account on github. 1. Fork [kubespray repo](https://github.com/kubernetes-sigs/kubespray) to your personal/organisation account on github.
Note: Note:
* All forked public repos at github will be also public, so **never commit sensitive data to your public forks**. * All forked public repos at github will be also public, so **never commit sensitive data to your public forks**.
* List of all forked repos could be retrieved from github page of original project. * List of all forked repos could be retrieved from github page of original project.
2. Add **forked repo** as submodule to desired folder in your existent ansible repo(for example 3d/kubespray): 2. Add **forked repo** as submodule to desired folder in your existent ansible repo(for example 3d/kubespray):
```git submodule add https://github.com/YOUR_GITHUB/kubespray.git kubespray``` ```git submodule add https://github.com/YOUR_GITHUB/kubespray.git kubespray```
Git will create _.gitmodules_ file in your existent ansible repo: Git will create _.gitmodules_ file in your existent ansible repo:
```
```ini
[submodule "3d/kubespray"] [submodule "3d/kubespray"]
path = 3d/kubespray path = 3d/kubespray
url = https://github.com/YOUR_GITHUB/kubespray.git url = https://github.com/YOUR_GITHUB/kubespray.git
@@ -22,8 +21,7 @@
```git remote add upstream https://github.com/kubernetes-sigs/kubespray.git``` ```git remote add upstream https://github.com/kubernetes-sigs/kubespray.git```
5. Sync your master branch with upstream: 5. Sync your master branch with upstream:
```
```ShellSession
git checkout master git checkout master
git fetch upstream git fetch upstream
git merge upstream/master git merge upstream/master
@@ -35,21 +33,19 @@
***Never*** use master branch of your repository for your commits. ***Never*** use master branch of your repository for your commits.
7. Modify the path to library and roles in your ansible.cfg file (role naming should be unique; you may have to rename your existing roles if they have the same names as in the kubespray project):
```
```ini
... ...
library = 3d/kubespray/library/ library = 3d/kubespray/library/
roles_path = 3d/kubespray/roles/ roles_path = 3d/kubespray/roles/
... ...
``` ```
8. Copy and modify configs from kubespray `group_vars` folder to corresponding `group_vars` folder in your existent project. 8. Copy and modify configs from kubespray `group_vars` folder to corresponging `group_vars` folder in your existent project.
You could rename *all.yml* config to something else, i.e. *kubespray.yml* and create corresponding group in your inventory file, which will include all hosts groups related to kubernetes setup. You could rename *all.yml* config to something else, i.e. *kubespray.yml* and create corresponding group in your inventory file, which will include all hosts groups related to kubernetes setup.
9. Modify your ansible inventory file by adding mapping of your existent groups (if any) to kubespray naming. 9. Modify your ansible inventory file by adding mapping of your existent groups (if any) to kubespray naming.
For example: For example:
```
```ini
... ...
#Kargo groups: #Kargo groups:
[kube-node:children] [kube-node:children]
@@ -69,62 +65,54 @@ You could rename *all.yml* config to something else, i.e. *kubespray.yml* and cr
[kubespray:children] [kubespray:children]
kubernetes kubernetes
``` ```
* Last entry here needed to apply kubespray.yml config file, renamed from all.yml of kubespray project. * Last entry here needed to apply kubespray.yml config file, renamed from all.yml of kubespray project.
10. Now you can include kubespray tasks in you existent playbooks by including cluster.yml file: 10. Now you can include kubespray tasks in you existent playbooks by including cluster.yml file:
```
```yml
- name: Include kubespray tasks - name: Include kubespray tasks
include: 3d/kubespray/cluster.yml include: 3d/kubespray/cluster.yml
``` ```
Or you could copy separate tasks from cluster.yml into your ansible repository.
11. Commit changes to your ansible repo. Keep in mind, that submodule folder is just a link to the git commit hash of your forked repo. 11. Commit changes to your ansible repo. Keep in mind, that submodule folder is just a link to the git commit hash of your forked repo.
When you update your "work" branch you need to commit changes to ansible repo as well. When you update your "work" branch you need to commit changes to ansible repo as well.
Other members of your team should use ```git submodule sync```, ```git submodule update --init``` to get actual code from submodule. Other members of your team should use ```git submodule sync```, ```git submodule update --init``` to get actual code from submodule.
## Contributing # Contributing
If you made useful changes or fixed a bug in existent kubespray repo, use this flow for PRs to original kubespray repo. If you made useful changes or fixed a bug in existent kubespray repo, use this flow for PRs to original kubespray repo.
1. Sign the [CNCF CLA](https://git.k8s.io/community/CLA.md). 0. Sign the [CNCF CLA](https://git.k8s.io/community/CLA.md).
2. Change working directory to git submodule directory (3d/kubespray). 1. Change working directory to git submodule directory (3d/kubespray).
3. Setup desired user.name and user.email for submodule. 2. Setup desired user.name and user.email for submodule.
If kubespray is only one submodule in your repo you could use something like: If kubespray is only one submodule in your repo you could use something like:
```git submodule foreach --recursive 'git config user.name "First Last" && git config user.email "your-email-addres@used.for.cncf"'``` ```git submodule foreach --recursive 'git config user.name "First Last" && git config user.email "your-email-addres@used.for.cncf"'```
4. Sync with upstream master: 3. Sync with upstream master:
```
```ShellSession
git fetch upstream git fetch upstream
git merge upstream/master git merge upstream/master
git push origin master git push origin master
``` ```
4. Create new branch for the specific fixes that you want to contribute:
5. Create new branch for the specific fixes that you want to contribute:
```git checkout -b fixes-name-date-index``` ```git checkout -b fixes-name-date-index```
Branch name should be self explaining to you, adding date and/or index will help you to track/delete your old PRs. Branch name should be self explaining to you, adding date and/or index will help you to track/delete your old PRs.
6. Find git hash of your commit in "work" repo and apply it to newly created "fix" repo: 5. Find git hash of your commit in "work" repo and apply it to newly created "fix" repo:
```
```ShellSession
git cherry-pick <COMMIT_HASH> git cherry-pick <COMMIT_HASH>
``` ```
6. If your have several temporary-stage commits - squash them using [```git rebase -i```](http://eli.thegreenplace.net/2014/02/19/squashing-github-pull-requests-into-a-single-commit)
7. If your have several temporary-stage commits - squash them using [```git rebase -i```](http://eli.thegreenplace.net/2014/02/19/squashing-github-pull-requests-into-a-single-commit)
Also you could use interactive rebase (```git rebase -i HEAD~10```) to delete commits which you don't want to contribute into original repo. Also you could use interactive rebase (```git rebase -i HEAD~10```) to delete commits which you don't want to contribute into original repo.
8. When your changes is in place, you need to check upstream repo one more time because it could be changed during your work. 7. When your changes is in place, you need to check upstream repo one more time because it could be changed during your work.
Check that you're on correct branch: Check that you're on correct branch:
```git status``` ```git status```
And pull changes from upstream (if any): And pull changes from upstream (if any):
```git pull --rebase upstream master``` ```git pull --rebase upstream master```
9. Now push your changes to your **fork** repo with ```git push```. If your branch doesn't exists on github, git will propose you to use something like ```git push --set-upstream origin fixes-name-date-index```. 8. Now push your changes to your **fork** repo with ```git push```. If your branch doesn't exists on github, git will propose you to use something like ```git push --set-upstream origin fixes-name-date-index```.
10. Open you forked repo in browser, on the main page you will see proposition to create pull request for your newly created branch. Check proposed diff of your PR. If something is wrong you could safely delete "fix" branch on github using ```git push origin --delete fixes-name-date-index```, ```git branch -D fixes-name-date-index``` and start whole process from the beginning. 9. Open you forked repo in browser, on the main page you will see proposition to create pull request for your newly created branch. Check proposed diff of your PR. If something is wrong you could safely delete "fix" branch on github using ```git push origin --delete fixes-name-date-index```, ```git branch -D fixes-name-date-index``` and start whole process from the beginning.
If everything is fine - add description about your changes (what they do and why they're needed) and confirm pull request creation. If everything is fine - add description about your changes (what they do and why they're needed) and confirm pull request creation.

View File

@@ -1,5 +1,5 @@
# Kube-OVN Kube-OVN
===========
Kube-OVN integrates the OVN-based Network Virtualization with Kubernetes. It offers an advanced Container Network Fabric for Enterprises. Kube-OVN integrates the OVN-based Network Virtualization with Kubernetes. It offers an advanced Container Network Fabric for Enterprises.
For more information please check [Kube-OVN documentation](https://github.com/alauda/kube-ovn) For more information please check [Kube-OVN documentation](https://github.com/alauda/kube-ovn)
@@ -7,8 +7,7 @@ For more information please check [Kube-OVN documentation](https://github.com/al
## How to use it ## How to use it
Enable kube-ovn in `group_vars/k8s-cluster/k8s-cluster.yml` Enable kube-ovn in `group_vars/k8s-cluster/k8s-cluster.yml`
```
```yml
... ...
kube_network_plugin: kube-ovn kube_network_plugin: kube-ovn
... ...
@@ -20,7 +19,7 @@ Kube-OVN run ovn and controller in `kube-ovn` namespace
* Check the status of kube-ovn pods * Check the status of kube-ovn pods
```ShellSession ```
# From the CLI # From the CLI
kubectl get pod -n kube-ovn kubectl get pod -n kube-ovn
@@ -38,7 +37,7 @@ ovs-ovn-r5frh 1/1 Running 0 4d16h
* Check the default and node subnet * Check the default and node subnet
```ShellSession ```
# From the CLI # From the CLI
kubectl get subnet kubectl get subnet
@@ -46,4 +45,4 @@ kubectl get subnet
NAME PROTOCOL CIDR PRIVATE NAT NAME PROTOCOL CIDR PRIVATE NAT
join IPv4 100.64.0.0/16 false false join IPv4 100.64.0.0/16 false false
ovn-default IPv4 10.16.0.0/16 false true ovn-default IPv4 10.16.0.0/16 false true
``` ```

View File

@@ -1,4 +1,5 @@
# Kube-router Kube-router
===========
Kube-router is a L3 CNI provider, as such it will setup IPv4 routing between Kube-router is a L3 CNI provider, as such it will setup IPv4 routing between
nodes to provide Pods' networks reachability. nodes to provide Pods' networks reachability.
@@ -11,7 +12,7 @@ Kube-router runs its pods as a `DaemonSet` in the `kube-system` namespace:
* Check the status of kube-router pods * Check the status of kube-router pods
```ShellSession ```
# From the CLI # From the CLI
kubectl get pod --namespace=kube-system -l k8s-app=kube-router -owide kubectl get pod --namespace=kube-system -l k8s-app=kube-router -owide
@@ -28,7 +29,7 @@ kube-router-x2xs7 1/1 Running 0 2d 192.168.186.10 my
* Peek at kube-router container logs: * Peek at kube-router container logs:
```ShellSession ```
# From the CLI # From the CLI
kubectl logs --namespace=kube-system -l k8s-app=kube-router | grep Peer.Up kubectl logs --namespace=kube-system -l k8s-app=kube-router | grep Peer.Up
@@ -55,24 +56,24 @@ You need to `kubectl exec -it ...` into a kube-router container to use these, se
## Kube-router configuration ## Kube-router configuration
You can change the default configuration by overriding `kube_router_...` variables You can change the default configuration by overriding `kube_router_...` variables
(as found at `roles/network_plugin/kube-router/defaults/main.yml`), (as found at `roles/network_plugin/kube-router/defaults/main.yml`),
these are named to follow `kube-router` command-line options as per these are named to follow `kube-router` command-line options as per
<https://www.kube-router.io/docs/user-guide/#try-kube-router-with-cluster-installers>. <https://www.kube-router.io/docs/user-guide/#try-kube-router-with-cluster-installers>.
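For instance, a sketch of a few such overrides; treat the exact names and defaults as assumptions to be checked against the defaults file mentioned above:

```yaml
# e.g. group_vars/k8s-cluster/k8s-net-kube-router.yml -- sketch
kube_router_run_router: true          # run the BGP routing function
kube_router_run_firewall: true        # enforce NetworkPolicy
kube_router_run_service_proxy: false  # keep kube-proxy rather than kube-router's IPVS service proxy
```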
## Advanced BGP Capabilities ## Advanced BGP Capabilities
https://github.com/cloudnativelabs/kube-router#advanced-bgp-capabilities
<https://github.com/cloudnativelabs/kube-router#advanced-bgp-capabilities>
If you have other networking devices or SDN systems that talk BGP, kube-router will fit in perfectly. If you have other networking devices or SDN systems that talk BGP, kube-router will fit in perfectly.
From a simple full node-to-node mesh to per-node peering configurations, most routing needs can be attained. From a simple full node-to-node mesh to per-node peering configurations, most routing needs can be attained.
The configuration is Kubernetes native (annotations) just like the rest of kube-router. The configuration is Kubernetes native (annotations) just like the rest of kube-router.
For more details please refer to the <https://github.com/cloudnativelabs/kube-router/blob/master/docs/bgp.md.> For more details please refer to the https://github.com/cloudnativelabs/kube-router/blob/master/docs/bgp.md.
Next options will set up annotations for kube-router, using `kubectl annotate` command. Next options will set up annotations for kube-router, using `kubectl annotate` command.
```yml ```
kube_router_annotations_master: [] kube_router_annotations_master: []
kube_router_annotations_node: [] kube_router_annotations_node: []
kube_router_annotations_all: [] kube_router_annotations_all: []

View File

@@ -26,7 +26,7 @@ By default the normal behavior looks like:
> etcd in 6-7 seconds or even longer when etcd cannot commit data to quorum > etcd in 6-7 seconds or even longer when etcd cannot commit data to quorum
> nodes. > nodes.
## Failure # Failure
Kubelet will try to make `nodeStatusUpdateRetry` post attempts. Currently Kubelet will try to make `nodeStatusUpdateRetry` post attempts. Currently
`nodeStatusUpdateRetry` is constantly set to 5 in `nodeStatusUpdateRetry` is constantly set to 5 in
@@ -50,7 +50,7 @@ Kube proxy has a watcher over API. Once pods are evicted, Kube proxy will
notice and will update iptables of the node. It will remove endpoints from notice and will update iptables of the node. It will remove endpoints from
services so pods from failed node won't be accessible anymore. services so pods from failed node won't be accessible anymore.
## Recommendations for different cases # Recommendations for different cases
## Fast Update and Fast Reaction ## Fast Update and Fast Reaction

View File

@@ -15,7 +15,7 @@ For a large scaled deployments, consider the following configuration changes:
load on a delegate (the first K8s master node) then retrying failed load on a delegate (the first K8s master node) then retrying failed
push or download operations. push or download operations.
* Tune parameters for DNS related applications * Tune parameters for DNS related applications
Those are ``dns_replicas``, ``dns_cpu_limit``, Those are ``dns_replicas``, ``dns_cpu_limit``,
``dns_cpu_requests``, ``dns_memory_limit``, ``dns_memory_requests``. ``dns_cpu_requests``, ``dns_memory_limit``, ``dns_memory_requests``.
Please note that limits must always be greater than or equal to requests. Please note that limits must always be greater than or equal to requests.
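An illustrative override (the values are placeholders, not recommendations; note that each limit stays at or above its request):

```yaml
dns_replicas: 2
dns_cpu_requests: 100m
dns_cpu_limit: 300m
dns_memory_requests: 70Mi
dns_memory_limit: 170Mi
```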

View File

@@ -1,18 +1,20 @@
# Macvlan Macvlan
===============
How to use it :
-------------
## How to use it
* Enable macvlan in `group_vars/k8s-cluster/k8s-cluster.yml` * Enable macvlan in `group_vars/k8s-cluster/k8s-cluster.yml`
```
```yml
... ...
kube_network_plugin: macvlan kube_network_plugin: macvlan
... ...
``` ```
* Adjust the `macvlan_interface` in `group_vars/k8s-cluster/k8s-net-macvlan.yml` or by host in the `host.yml` file:
```yml * Adjust the `macvlan_interface` in `group_vars/k8s-cluster/k8s-net-macvlan.yml` or by host in the `host.yml` file:
```
all: all:
hosts: hosts:
node1: node1:
@@ -22,20 +24,25 @@ all:
macvlan_interface: ens5 macvlan_interface: ens5
``` ```
## Issue encountered
* Service DNS
Issue encountered :
-------------
- Service DNS
reply from unexpected source: reply from unexpected source:
add `kube_proxy_masquerade_all: true` in `group_vars/all/all.yml` add `kube_proxy_masquerade_all: true` in `group_vars/all/all.yml`
* Disable nodelocaldns
- Disable nodelocaldns
The nodelocal DNS IP is not reachable.
Disable it in `sample/group_vars/k8s-cluster/k8s-cluster.yml` Disable it in `sample/group_vars/k8s-cluster/k8s-cluster.yml`
```
```yml
enable_nodelocaldns: false enable_nodelocaldns: false
``` ```

View File

@@ -1,4 +1,5 @@
# Multus Multus
===========
Multus is a meta CNI plugin that provides multiple network interface support to Multus is a meta CNI plugin that provides multiple network interface support to
pods. For each interface, Multus delegates CNI calls to secondary CNI plugins pods. For each interface, Multus delegates CNI calls to secondary CNI plugins
@@ -9,19 +10,17 @@ See [multus documentation](https://github.com/intel/multus-cni).
## Multus installation ## Multus installation
Since Multus itself does not implement networking, it requires a master plugin, which is specified through the variable `kube_network_plugin`. To enable Multus an additional variable `kube_network_plugin_multus` must be set to `true`. For example, Since Multus itself does not implement networking, it requires a master plugin, which is specified through the variable `kube_network_plugin`. To enable Multus an additional variable `kube_network_plugin_multus` must be set to `true`. For example,
```
```yml
kube_network_plugin: calico kube_network_plugin: calico
kube_network_plugin_multus: true kube_network_plugin_multus: true
``` ```
will install Multus and Calico and configure Multus to use Calico as the primary network plugin. will install Multus and Calico and configure Multus to use Calico as the primary network plugin.
## Using Multus ## Using Multus
Once Multus is installed, you can create CNI configurations (as a CRD objects) for additional networks, in this case a macvlan CNI configuration is defined. You may replace the config field with any valid CNI configuration where the CNI binary is available on the nodes. Once Multus is installed, you can create CNI configurations (as a CRD objects) for additional networks, in this case a macvlan CNI configuration is defined. You may replace the config field with any valid CNI configuration where the CNI binary is available on the nodes.
```ShellSession ```
cat <<EOF | kubectl create -f - cat <<EOF | kubectl create -f -
apiVersion: "k8s.cni.cncf.io/v1" apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition kind: NetworkAttachmentDefinition
@@ -49,7 +48,7 @@ EOF
You may then create a pod with and additional interface that connects to this network using annotations. The annotation correlates to the name in the NetworkAttachmentDefinition above. You may then create a pod with and additional interface that connects to this network using annotations. The annotation correlates to the name in the NetworkAttachmentDefinition above.
```ShellSession ```
cat <<EOF | kubectl create -f - cat <<EOF | kubectl create -f -
apiVersion: v1 apiVersion: v1
kind: Pod kind: Pod
@@ -67,8 +66,8 @@ EOF
You may now inspect the pod and see that there is an additional interface configured: You may now inspect the pod and see that there is an additional interface configured:
```ShellSession ```
kubectl exec -it samplepod -- ip a $ kubectl exec -it samplepod -- ip a
``` ```
For more details on how to use Multus, please visit <https://github.com/intel/multus-cni> For more details on how to use Multus, please visit https://github.com/intel/multus-cni

View File

@@ -1,4 +1,5 @@
# Network Checker Application Network Checker Application
===========================
With the ``deploy_netchecker`` var enabled (defaults to false), Kubespray deploys a
Network Checker Application from the third-party `l23network/k8s-netchecker` docker
@@ -13,17 +14,14 @@ logs.
To get the most recent and cluster-wide network connectivity report, run from To get the most recent and cluster-wide network connectivity report, run from
any of the cluster nodes: any of the cluster nodes:
```
```ShellSession
curl http://localhost:31081/api/v1/connectivity_check curl http://localhost:31081/api/v1/connectivity_check
``` ```
Note that Kubespray does not invoke the check but only deploys the application, if Note that Kubespray does not invoke the check but only deploys the application, if
requested. requested.
There are related application specific variables: There are related application specific variables:
```
```yml
netchecker_port: 31081 netchecker_port: 31081
agent_report_interval: 15 agent_report_interval: 15
netcheck_namespace: default netcheck_namespace: default
@@ -35,7 +33,7 @@ combination of the ``netcheck_namespace.dns_domain`` vars, for example the
to the non default namespace, make sure as well to adjust the ``searchdomains`` var to the non default namespace, make sure as well to adjust the ``searchdomains`` var
so the resulting search domain records to contain that namespace, like: so the resulting search domain records to contain that namespace, like:
```yml ```
search: foospace.cluster.local default.cluster.local ... search: foospace.cluster.local default.cluster.local ...
nameserver: ... nameserver: ...
``` ```

View File

@@ -5,9 +5,6 @@ To deploy kubespray on [OpenStack](https://www.openstack.org/) uncomment the `cl
After that make sure to source in your OpenStack credentials like you would do when using `nova-client` or `neutron-client` by using `source path/to/your/openstack-rc` or `. path/to/your/openstack-rc`. After that make sure to source in your OpenStack credentials like you would do when using `nova-client` or `neutron-client` by using `source path/to/your/openstack-rc` or `. path/to/your/openstack-rc`.
For those who prefer to pass the OpenStack CA certificate as a string, one can
base64 encode the cacert file and store it in the variable `openstack_cacert`.
The next step is to make sure the hostnames in your `inventory` file are identical to your instance names in OpenStack. The next step is to make sure the hostnames in your `inventory` file are identical to your instance names in OpenStack.
Otherwise [cinder](https://wiki.openstack.org/wiki/Cinder) won't work as expected. Otherwise [cinder](https://wiki.openstack.org/wiki/Cinder) won't work as expected.
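For reference, a minimal sketch of the provider toggle in `group_vars/all/all.yml`:

```yaml
cloud_provider: openstack
```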

View File

@@ -1,10 +1,11 @@
# openSUSE Leap 15.0 and Tumbleweed openSUSE Leap 15.0 and Tumbleweed
===============
openSUSE Leap installation Notes: openSUSE Leap installation Notes:
- Install Ansible - Install Ansible
```ShellSession ```
sudo zypper ref sudo zypper ref
sudo zypper -n install ansible sudo zypper -n install ansible
@@ -14,4 +15,5 @@ openSUSE Leap installation Notes:
```sudo zypper -n install python-Jinja2 python-netaddr``` ```sudo zypper -n install python-Jinja2 python-netaddr```
Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment) Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment)

View File

@@ -1,4 +1,5 @@
# Packet Packet
===============
Kubespray provides support for bare metal deployments using the [Packet bare metal cloud](http://www.packet.com). Kubespray provides support for bare metal deployments using the [Packet bare metal cloud](http://www.packet.com).
Deploying upon bare metal allows Kubernetes to run at locations where an existing public or private cloud might not exist such Deploying upon bare metal allows Kubernetes to run at locations where an existing public or private cloud might not exist such
@@ -36,11 +37,10 @@ Terraform is required to deploy the bare metal infrastructure. The steps below a
[More terraform installation options are available.](https://learn.hashicorp.com/terraform/getting-started/install.html) [More terraform installation options are available.](https://learn.hashicorp.com/terraform/getting-started/install.html)
Grab the latest version of Terraform and install it. Grab the latest version of Terraform and install it.
```bash ```bash
echo "https://releases.hashicorp.com/terraform/$(curl -s https://checkpoint-api.hashicorp.com/v1/check/terraform | jq -r -M '.current_version')/terraform_$(curl -s https://checkpoint-api.hashicorp.com/v1/check/terraform | jq -r -M '.current_version')_darwin_amd64.zip" echo "https://releases.hashicorp.com/terraform/$(curl -s https://checkpoint-api.hashicorp.com/v1/check/terraform | jq -r -M '.current_version')/terraform_$(curl -s https://checkpoint-api.hashicorp.com/v1/check/terraform | jq -r -M '.current_version')_darwin_amd64.zip"
sudo yum install unzip sudo yum install unzip
sudo unzip terraform_0.12.12_linux_amd64.zip -d /usr/local/bin/ sudo unzip terraform_0.11.11_linux_amd64.zip -d /usr/local/bin/
``` ```
## Download Kubespray ## Download Kubespray
@@ -55,7 +55,7 @@ sudo pip install -r requirements.txt
## Cluster Definition ## Cluster Definition
In this example, a new cluster called "alpha" will be created. In this example, a new cluster called "alpha" will be created.
```bash ```bash
cp -LRp contrib/terraform/packet/sample-inventory inventory/alpha cp -LRp contrib/terraform/packet/sample-inventory inventory/alpha
@@ -67,9 +67,8 @@ Details about the cluster, such as the name, as well as the authentication token
for Packet need to be defined. To find these values see [Packet API Integration](https://support.packet.com/kb/articles/api-integrations) for Packet need to be defined. To find these values see [Packet API Integration](https://support.packet.com/kb/articles/api-integrations)
```bash ```bash
vi cluster.tfvars vi cluster.tf
``` ```
* cluster_name = alpha * cluster_name = alpha
* packet_project_id = ABCDEFGHIJKLMNOPQRSTUVWXYZ123456 * packet_project_id = ABCDEFGHIJKLMNOPQRSTUVWXYZ123456
* public_key_path = 12345678-90AB-CDEF-GHIJ-KLMNOPQRSTUV * public_key_path = 12345678-90AB-CDEF-GHIJ-KLMNOPQRSTUV
@@ -85,7 +84,7 @@ terraform init ../../contrib/terraform/packet/
Run Terraform to deploy the hardware. Run Terraform to deploy the hardware.
```bash ```bash
terraform apply -var-file=cluster.tfvars ../../contrib/terraform/packet terraform apply -var-file=cluster.tf ../../contrib/terraform/packet
``` ```
## Run Kubespray Playbooks ## Run Kubespray Playbooks
@@ -95,3 +94,4 @@ With the bare metal infrastructure deployed, Kubespray can now install Kubernete
```bash ```bash
ansible-playbook --become -i inventory/alpha/hosts cluster.yml ansible-playbook --become -i inventory/alpha/hosts cluster.yml
``` ```

View File

@@ -13,4 +13,4 @@ If you set http and https proxy, all nodes and loadbalancer will be excluded fro
## Set additional addresses to default no_proxy (all cluster nodes and loadbalancer) ## Set additional addresses to default no_proxy (all cluster nodes and loadbalancer)
`additional_no_proxy: "additional_host,"`
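Putting it together, a sketch with placeholder values (the proxy URL and extra no_proxy entries are examples only):

```yaml
# group_vars/all/all.yml -- illustrative only
http_proxy: "http://proxy.example.com:3128"
https_proxy: "http://proxy.example.com:3128"
additional_no_proxy: "registry.example.com,.internal.example.com"
```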

View File

@@ -1,5 +1,6 @@
# Recovering the control plane Recovering the control plane
============================
To recover from broken nodes in the control plane use the "recover\-control\-plane.yml" playbook. To recover from broken nodes in the control plane use the "recover\-control\-plane.yml" playbook.

View File

@@ -1,15 +1,14 @@
# Kubespray's roadmap Kubespray's roadmap
=================
## Self deployment (pull-mode) [#320](https://github.com/kubespray/kubespray/issues/320)
### Self deployment (pull-mode) [#320](https://github.com/kubespray/kubespray/issues/320)
- the playbook would install and configure docker and the etcd cluster - the playbook would install and configure docker and the etcd cluster
- the following data would be inserted into etcd: certs,tokens,users,inventory,group_vars. - the following data would be inserted into etcd: certs,tokens,users,inventory,group_vars.
- a "kubespray" container would be deployed (kubespray-cli, ansible-playbook) - a "kubespray" container would be deployed (kubespray-cli, ansible-playbook)
- to be discussed, a way to provide the inventory - to be discussed, a way to provide the inventory
- **self deployment** of the node from inside a container [#321](https://github.com/kubespray/kubespray/issues/321) - **self deployment** of the node from inside a container [#321](https://github.com/kubespray/kubespray/issues/321)
## Provisioning and cloud providers ### Provisioning and cloud providers
- [ ] Terraform to provision instances on: - [ ] Terraform to provision instances on:
- [ ] GCE - [ ] GCE
- [x] AWS (contrib/terraform/aws) - [x] AWS (contrib/terraform/aws)
@@ -20,40 +19,36 @@
- [ ] On AWS autoscaling, multi AZ - [ ] On AWS autoscaling, multi AZ
- [ ] On Azure autoscaling, create loadbalancer [#297](https://github.com/kubespray/kubespray/issues/297) - [ ] On Azure autoscaling, create loadbalancer [#297](https://github.com/kubespray/kubespray/issues/297)
- [ ] On GCE be able to create a loadbalancer automatically (IAM ?) [#280](https://github.com/kubespray/kubespray/issues/280) - [ ] On GCE be able to create a loadbalancer automatically (IAM ?) [#280](https://github.com/kubespray/kubespray/issues/280)
- [x] **TLS bootstrap** support for kubelet (covered by kubeadm, but not in standard deployment) [#234](https://github.com/kubespray/kubespray/issues/234) - [x] **TLS boostrap** support for kubelet (covered by kubeadm, but not in standard deployment) [#234](https://github.com/kubespray/kubespray/issues/234)
(related issues: <https://github.com/kubernetes/kubernetes/pull/20439> <https://github.com/kubernetes/kubernetes/issues/18112>) (related issues: https://github.com/kubernetes/kubernetes/pull/20439 <br>
https://github.com/kubernetes/kubernetes/issues/18112)
## Tests
### Tests
- [x] Run kubernetes e2e tests - [x] Run kubernetes e2e tests
- [ ] Test idempotency on single OS but for all network plugins/container engines - [ ] Test idempotency on single OS but for all network plugins/container engines
- [ ] single test on AWS per day - [ ] single test on AWS per day
- [ ] test scale up cluster: +1 etcd, +1 master, +1 node - [ ] test scale up cluster: +1 etcd, +1 master, +1 node
- [x] Reorganize CI test vars into group var files - [x] Reorganize CI test vars into group var files
## Lifecycle ### Lifecycle
- [ ] Upgrade granularity: select components to upgrade and skip others - [ ] Upgrade granularity: select components to upgrade and skip others
## Networking ### Networking
- [ ] Opencontrail - [ ] Opencontrail
- [ ] Consolidate roles/network_plugin and roles/kubernetes-apps/network_plugin - [ ] Consolidate roles/network_plugin and roles/kubernetes-apps/network_plugin
## Kubespray API ### Kubespray API
- Perform all actions through an **API** - Perform all actions through an **API**
- Store inventories / configurations of multiple clusters - Store inventories / configurations of multiple clusters
- Make sure that state of cluster is completely saved in no more than one config file beyond hosts inventory - Make sure that state of cluster is completely saved in no more than one config file beyond hosts inventory
## Addons (helm or native ansible) ### Addons (helm or native ansible)
- [x] Helm - [x] Helm
- [x] Ingress-nginx - [x] Ingress-nginx
- [x] kubernetes-dashboard - [x] kubernetes-dashboard
## Others
### Others
- Organize and update documentation (split in categories) - Organize and update documentation (split in categories)
- Refactor downloads so it all runs in the beginning of deployment - Refactor downloads so it all runs in the beginning of deployment
- Make bootstrapping OS more consistent - Make bootstrapping OS more consistent

@@ -1,7 +1,9 @@
# Node Layouts Node Layouts
------------
There are four node layout types: `default`, `separate`, `ha`, and `scale`. There are four node layout types: `default`, `separate`, `ha`, and `scale`.
`default` is a non-HA two nodes setup with one separate `kube-node` `default` is a non-HA two nodes setup with one separate `kube-node`
and the `etcd` group merged with the `kube-master`. and the `etcd` group merged with the `kube-master`.
@@ -18,7 +20,8 @@ never actually deployed, but certificates are generated for them.
Note, the canal network plugin deploys flannel as well as the calico policy controller. Note, the canal network plugin deploys flannel as well as the calico policy controller.
## GCE instances GCE instances
-------------
| Stage| Network plugin| OS type| GCE region| Nodes layout | Stage| Network plugin| OS type| GCE region| Nodes layout
|--------------------|--------------------|--------------------|--------------------|--------------------| |--------------------|--------------------|--------------------|--------------------|--------------------|

@@ -1,9 +1,12 @@
# Upgrading Kubernetes in Kubespray Upgrading Kubernetes in Kubespray
=============================
#### Description
Kubespray handles upgrades the same way it handles initial deployment. That is to Kubespray handles upgrades the same way it handles initial deployment. That is to
say that each component is laid down in a fixed order. say that each component is laid down in a fixed order. You should be able to
upgrade from Kubespray tag 2.0 up to the current master without difficulty. You can
You can also individually control versions of components by explicitly defining their also individually control versions of components by explicitly defining their
versions. Here are all version vars for each component: versions. Here are all version vars for each component:
* docker_version * docker_version
@@ -15,26 +18,22 @@ versions. Here are all version vars for each component:
* flannel_version * flannel_version
* kubedns_version * kubedns_version
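As a sketch only (the version numbers below are illustrative, not recommendations), such pins would live in your inventory group vars:
```yml
# inventory/mycluster/group_vars/k8s-cluster.yml -- illustrative pins only
kube_version: v1.15.11
docker_version: "18.09"
flannel_version: "v0.11.0"
```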
:warning: [Attempting to upgrade from an older release straight to the latest release is unsupported and likely to break something](https://github.com/kubernetes-sigs/kubespray/issues/3849#issuecomment-451386515) :warning: #### Unsafe upgrade example
See [Multiple Upgrades](#multiple-upgrades) for how to upgrade from older releases to the latest release
## Unsafe upgrade example
If you wanted to upgrade just kube_version from v1.4.3 to v1.4.6, you could If you wanted to upgrade just kube_version from v1.4.3 to v1.4.6, you could
deploy the following way: deploy the following way:
```ShellSession ```
ansible-playbook cluster.yml -i inventory/sample/hosts.ini -e kube_version=v1.4.3 ansible-playbook cluster.yml -i inventory/sample/hosts.ini -e kube_version=v1.4.3
``` ```
And then repeat with v1.4.6 as kube_version: And then repeat with v1.4.6 as kube_version:
```ShellSession ```
ansible-playbook cluster.yml -i inventory/sample/hosts.ini -e kube_version=v1.4.6 ansible-playbook cluster.yml -i inventory/sample/hosts.ini -e kube_version=v1.4.6
``` ```
## Graceful upgrade #### Graceful upgrade
Kubespray also supports cordon, drain and uncordoning of nodes when performing Kubespray also supports cordon, drain and uncordoning of nodes when performing
a cluster upgrade. There is a separate playbook used for this purpose. It is a cluster upgrade. There is a separate playbook used for this purpose. It is
@@ -42,234 +41,21 @@ important to note that upgrade-cluster.yml can only be used for upgrading an
existing cluster. That means there must be at least 1 kube-master already existing cluster. That means there must be at least 1 kube-master already
deployed. deployed.
```ShellSession ```
git fetch origin
git checkout origin/master
ansible-playbook upgrade-cluster.yml -b -i inventory/sample/hosts.ini -e kube_version=v1.6.0 ansible-playbook upgrade-cluster.yml -b -i inventory/sample/hosts.ini -e kube_version=v1.6.0
``` ```
After a successful upgrade, the Server Version should be updated: After a successul upgrade, the Server Version should be updated:
```ShellSession ```
$ kubectl version $ kubectl version
Client Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.0", GitCommit:"fff5156092b56e6bd60fff75aad4dc9de6b6ef37", GitTreeState:"clean", BuildDate:"2017-03-28T19:15:41Z", GoVersion:"go1.8", Compiler:"gc", Platform:"darwin/amd64"} Client Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.0", GitCommit:"fff5156092b56e6bd60fff75aad4dc9de6b6ef37", GitTreeState:"clean", BuildDate:"2017-03-28T19:15:41Z", GoVersion:"go1.8", Compiler:"gc", Platform:"darwin/amd64"}
Server Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.0+coreos.0", GitCommit:"8031716957d697332f9234ddf85febb07ac6c3e3", GitTreeState:"clean", BuildDate:"2017-03-29T04:33:09Z", GoVersion:"go1.7.5", Compiler:"gc", Platform:"linux/amd64"} Server Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.0+coreos.0", GitCommit:"8031716957d697332f9234ddf85febb07ac6c3e3", GitTreeState:"clean", BuildDate:"2017-03-29T04:33:09Z", GoVersion:"go1.7.5", Compiler:"gc", Platform:"linux/amd64"}
``` ```
## Multiple upgrades #### Upgrade order
:warning: [Do not skip releases when upgrading--upgrade by one tag at a time.](https://github.com/kubernetes-sigs/kubespray/issues/3849#issuecomment-451386515) :warning:
For instance, if you're on v2.6.0, then check out v2.7.0, run the upgrade, check out the next tag, and run the next upgrade, etc.
Assuming you don't explicitly define a kubernetes version in your k8s-cluster.yml, you simply check out the next tag and run the upgrade-cluster.yml playbook
* If you do define kubernetes version in your inventory (e.g. group_vars/k8s-cluster.yml) then either make sure to update it before running upgrade-cluster, or specify the new version you're upgrading to: `ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml -e kube_version=v1.11.3`
Otherwise, the upgrade will leave your cluster at the same k8s version defined in your inventory vars.
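In other words (a minimal sketch reusing the version from the example above), the pin in your group vars has to move forward together with the tag you check out:
```yml
# inventory/mycluster/group_vars/k8s-cluster.yml
# Either bump this before running upgrade-cluster.yml,
# or override it on the command line with -e kube_version=...
kube_version: v1.11.3
```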
The below example shows taking a cluster that was set up for v2.6.0 up to v2.10.0
```ShellSession
$ kubectl get node
NAME STATUS ROLES AGE VERSION
apollo Ready master,node 1h v1.10.4
boomer Ready master,node 42m v1.10.4
caprica Ready master,node 42m v1.10.4
$ git describe --tags
v2.6.0
$ git tag
...
v2.6.0
v2.7.0
v2.8.0
v2.8.1
v2.8.2
...
$ git checkout v2.7.0
Previous HEAD position was 8b3ce6e4 bump upgrade tests to v2.5.0 commit (#3087)
HEAD is now at 05dabb7e Fix Bionic networking restart error #3430 (#3431)
# NOTE: May need to sudo pip3 install -r requirements.txt when upgrading.
ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml
...
$ kubectl get node
NAME STATUS ROLES AGE VERSION
apollo Ready master,node 1h v1.11.3
boomer Ready master,node 1h v1.11.3
caprica Ready master,node 1h v1.11.3
$ git checkout v2.8.0
Previous HEAD position was 05dabb7e Fix Bionic networking restart error #3430 (#3431)
HEAD is now at 9051aa52 Fix ubuntu-contiv test failed (#3808)
```
:info: NOTE: Review changes between the sample inventory and your inventory when upgrading versions. :info:
Some deprecations between versions mean you can't just upgrade straight from 2.7.0 to 2.8.0 if you started with the sample inventory.
In this case, I set "kubeadm_enabled" to false, knowing that it is deprecated and removed by 2.9.0, to delay converting the cluster to kubeadm as long as I could.
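For reference, the setting mentioned above is just an inventory variable (a sketch; it was deprecated in v2.8 and removed in v2.9):
```yml
# inventory/mycluster/group_vars/k8s-cluster.yml
# Set to false only to postpone converting the cluster to kubeadm
# while stepping through the intermediate releases.
kubeadm_enabled: false
```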
```ShellSession
$ ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml
...
"msg": "DEPRECATION: non-kubeadm deployment is deprecated from v2.9. Will be removed in next release."
...
Are you sure you want to deploy cluster using the deprecated non-kubeadm mode. (output is hidden):
yes
...
$ kubectl get node
NAME STATUS ROLES AGE VERSION
apollo Ready master,node 114m v1.12.3
boomer Ready master,node 114m v1.12.3
caprica Ready master,node 114m v1.12.3
$ git checkout v2.8.1
Previous HEAD position was 9051aa52 Fix ubuntu-contiv test failed (#3808)
HEAD is now at 2ac1c756 More Feature/2.8 backports for 2.8.1 (#3911)
$ ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml
...
"msg": "DEPRECATION: non-kubeadm deployment is deprecated from v2.9. Will be removed in next release."
...
Are you sure you want to deploy cluster using the deprecated non-kubeadm mode. (output is hidden):
yes
...
$ kubectl get node
NAME STATUS ROLES AGE VERSION
apollo Ready master,node 2h36m v1.12.4
boomer Ready master,node 2h36m v1.12.4
caprica Ready master,node 2h36m v1.12.4
$ git checkout v2.8.2
Previous HEAD position was 2ac1c756 More Feature/2.8 backports for 2.8.1 (#3911)
HEAD is now at 4167807f Upgrade to 1.12.5 (#4066)
$ ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml
...
"msg": "DEPRECATION: non-kubeadm deployment is deprecated from v2.9. Will be removed in next release."
...
Are you sure you want to deploy cluster using the deprecated non-kubeadm mode. (output is hidden):
yes
...
$ kubectl get node
NAME STATUS ROLES AGE VERSION
apollo Ready master,node 3h3m v1.12.5
boomer Ready master,node 3h3m v1.12.5
caprica Ready master,node 3h3m v1.12.5
$ git checkout v2.8.3
Previous HEAD position was 4167807f Upgrade to 1.12.5 (#4066)
HEAD is now at ea41fc5e backport cve-2019-5736 to release-2.8 (#4234)
$ ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml
...
"msg": "DEPRECATION: non-kubeadm deployment is deprecated from v2.9. Will be removed in next release."
...
Are you sure you want to deploy cluster using the deprecated non-kubeadm mode. (output is hidden):
yes
...
$ kubectl get node
NAME STATUS ROLES AGE VERSION
apollo Ready master,node 5h18m v1.12.5
boomer Ready master,node 5h18m v1.12.5
caprica Ready master,node 5h18m v1.12.5
$ git checkout v2.8.4
Previous HEAD position was ea41fc5e backport cve-2019-5736 to release-2.8 (#4234)
HEAD is now at 3901480b go to k8s 1.12.7 (#4400)
$ ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml
...
"msg": "DEPRECATION: non-kubeadm deployment is deprecated from v2.9. Will be removed in next release."
...
Are you sure you want to deploy cluster using the deprecated non-kubeadm mode. (output is hidden):
yes
...
$ kubectl get node
NAME STATUS ROLES AGE VERSION
apollo Ready master,node 5h37m v1.12.7
boomer Ready master,node 5h37m v1.12.7
caprica Ready master,node 5h37m v1.12.7
$ git checkout v2.8.5
Previous HEAD position was 3901480b go to k8s 1.12.7 (#4400)
HEAD is now at 6f97687d Release 2.8 robust san handling (#4478)
$ ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml
...
"msg": "DEPRECATION: non-kubeadm deployment is deprecated from v2.9. Will be removed in next release."
...
Are you sure you want to deploy cluster using the deprecated non-kubeadm mode. (output is hidden):
yes
...
$ kubectl get node
NAME STATUS ROLES AGE VERSION
apollo Ready master,node 5h45m v1.12.7
boomer Ready master,node 5h45m v1.12.7
caprica Ready master,node 5h45m v1.12.7
$ git checkout v2.9.0
Previous HEAD position was 6f97687d Release 2.8 robust san handling (#4478)
HEAD is now at a4e65c7c Upgrade to Ansible >2.7.0 (#4471)
```
:warning: IMPORTANT: Some of the variable formats changed in the k8s-cluster.yml between 2.8.5 and 2.9.0 :warning:
If you do not keep your inventory copy up to date, **your upgrade will fail** and your first master will be left non-functional until fixed and re-run.
It is at this point the cluster was upgraded from non-kubeadm to kubeadm as per the deprecation warning.
```ShellSession
ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml
...
$ kubectl get node
NAME STATUS ROLES AGE VERSION
apollo Ready master,node 6h54m v1.13.5
boomer Ready master,node 6h55m v1.13.5
caprica Ready master,node 6h54m v1.13.5
# Watch out: 2.10.0 is hiding between 2.1.2 and 2.2.0
$ git tag
...
v2.1.0
v2.1.1
v2.1.2
v2.10.0
v2.2.0
...
$ git checkout v2.10.0
Previous HEAD position was a4e65c7c Upgrade to Ansible >2.7.0 (#4471)
HEAD is now at dcd9c950 Add etcd role dependency on kube user to avoid etcd role failure when running scale.yml with a fresh node. (#3240) (#4479)
ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml
...
$ kubectl get node
NAME STATUS ROLES AGE VERSION
apollo Ready master,node 7h40m v1.14.1
boomer Ready master,node 7h40m v1.14.1
caprica Ready master,node 7h40m v1.14.1
```
## Upgrade order
As mentioned above, components are upgraded in the order in which they were As mentioned above, components are upgraded in the order in which they were
installed in the Ansible playbook. The order of component installation is as installed in the Ansible playbook. The order of component installation is as
@@ -282,7 +68,7 @@ follows:
* kube-apiserver, kube-scheduler, and kube-controller-manager * kube-apiserver, kube-scheduler, and kube-controller-manager
* Add-ons (such as KubeDNS) * Add-ons (such as KubeDNS)
## Upgrade considerations #### Upgrade considerations
Kubespray supports rotating certificates used for etcd and Kubernetes Kubespray supports rotating certificates used for etcd and Kubernetes
components, but some manual steps may be required. If you have a pod that components, but some manual steps may be required. If you have a pod that
@@ -308,48 +94,48 @@ hosts.
Upgrade docker: Upgrade docker:
```ShellSession ```
ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=docker ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=docker
``` ```
Upgrade etcd: Upgrade etcd:
```ShellSession ```
ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=etcd ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=etcd
``` ```
Upgrade vault: Upgrade vault:
```ShellSession ```
ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=vault ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=vault
``` ```
Upgrade kubelet: Upgrade kubelet:
```ShellSession ```
ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=node --skip-tags=k8s-gen-certs,k8s-gen-tokens ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=node --skip-tags=k8s-gen-certs,k8s-gen-tokens
``` ```
Upgrade Kubernetes master components: Upgrade Kubernetes master components:
```ShellSession ```
ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=master ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=master
``` ```
Upgrade network plugins: Upgrade network plugins:
```ShellSession ```
ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=network ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=network
``` ```
Upgrade all add-ons: Upgrade all add-ons:
```ShellSession ```
ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=apps ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=apps
``` ```
Upgrade just helm (assuming `helm_enabled` is true): Upgrade just helm (assuming `helm_enabled` is true):
```ShellSession ```
ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=helm ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=helm
``` ```

@@ -1,36 +1,39 @@
# Vagrant Introduction
============
Assuming you have Vagrant 2.0+ installed with virtualbox, libvirt/qemu or vmware (vmware is untested), you should be able to launch a 3 node Kubernetes cluster by simply running `vagrant up`. This will spin up 3 VMs and install Kubernetes on them. Once provisioning is complete you can connect to any of them by running `vagrant ssh k8s-[1..3]`. Assuming you have Vagrant 2.0+ installed with virtualbox, libvirt/qemu or vmware (vmware is untested), you should be able to launch a 3 node Kubernetes cluster by simply running `vagrant up`. This will spin up 3 VMs and install Kubernetes on them. Once provisioning is complete you can connect to any of them by running `vagrant ssh k8s-[1..3]`.
To give an estimate of the expected duration of a provisioning run: On a dual core i5-6300u laptop with an SSD, provisioning takes around 13 to 15 minutes, once the container images and other files are cached. Note that libvirt/qemu is recommended over virtualbox as it is quite a bit faster, especially during boot-up time. To give an estimate of the expected duration of a provisioning run: On a dual core i5-6300u laptop with an SSD, provisioning takes around 13 to 15 minutes, once the container images and other files are cached. Note that libvirt/qemu is recommended over virtualbox as it is quite a bit faster, especcially during boot-up time.
For proper performance a minimum of 12GB RAM is recommended. It is possible to run a 3 node cluster on a laptop with 8GB of RAM using the default Vagrantfile, provided you have 8GB zram swap configured and not much more than a browser and a mail client running. If you decide to run on such a machine, then also make sure that any tmpfs devices, that are mounted, are mostly empty and disable any swapfiles mounted on HDD/SSD or you will be in for some serious swap-madness. Things can get a bit sluggish during provisioning, but when that's done, the system will actually be able to perform quite well. For proper performance a mimimum of 12GB RAM is recommended. It is possible to run a 3 node cluster on a laptop with 8GB of RAM using the default Vagrantfile, provided you have 8GB zram swap configured and not much more than a browser and a mail client running. If you decide to run on such a machine, then also make sure that any tnpfs devices, that are mounted, are mostly empty and disable any swapfiles mounted on HDD/SSD or you will be in for some serious swap-madness. Things can get a bit sluggish during provisioning, but when that's done, the system will actually be able to perform quite well.
## Customize Vagrant Customize Vagrant
=================
You can override the default settings in the `Vagrantfile` either by directly modifying the `Vagrantfile` or through an override file. In the same directory as the `Vagrantfile`, create a folder called `vagrant` and create `config.rb` file in it. An example of how to configure this file is given below. You can override the default settings in the `Vagrantfile` either by directly modifying the `Vagrantfile` or through an override file. In the same directory as the `Vagrantfile`, create a folder called `vagrant` and create `config.rb` file in it. An example of how to configure this file is given below.
## Use alternative OS for Vagrant Use alternative OS for Vagrant
==============================
By default, Vagrant uses Ubuntu 18.04 box to provision a local cluster. You may use an alternative supported operating system for your local cluster. By default, Vagrant uses Ubuntu 18.04 box to provision a local cluster. You may use an alternative supported operating system for your local cluster.
Customize `$os` variable in `Vagrantfile` or as override, e.g.,: Customize `$os` variable in `Vagrantfile` or as override, e.g.,:
```ShellSession echo '$os = "coreos-stable"' >> vagrant/config.rb
echo '$os = "coreos-stable"' >> vagrant/config.rb
```
The supported operating systems for vagrant are defined in the `SUPPORTED_OS` constant in the `Vagrantfile`. The supported operating systems for vagrant are defined in the `SUPPORTED_OS` constant in the `Vagrantfile`.
## File and image caching File and image caching
======================
Kubespray can take quite a while to start on a laptop. To improve provisioning speed, the variable 'download_run_once' is set. This will make kubespray download all files and containers just once and then redistribute them to the other nodes and, as a bonus, also cache all downloads locally and re-use them on the next provisioning run. For more information on download settings see [download documentation](downloads.md). Kubespray can take quite a while to start on a laptop. To improve provisioning speed, the variable 'download_run_once' is set. This will make kubespray download all files and containers just once and then redistribute them to the other nodes and, as a bonus, also cache all downloads locally and re-use them on the next provisioning run. For more information on download settings see [download documentation](docs/downloads.md).
## Example use of Vagrant Example use of Vagrant
======================
The following is an example of setting up and running kubespray using `vagrant`. For repeated runs, you could save the script to a file in the root of the kubespray repository and run it by executing `source <name_of_the_file>`. The following is an example of setting up and running kubespray using `vagrant`. For repeated runs, you could save the script to a file in the root of the kubespray repository and run it by executing `source <name_of_the_file>`.
```ShellSession ```
# use virtualenv to install all python requirements # use virtualenv to install all python requirements
VENVDIR=venv VENVDIR=venv
virtualenv --python=/usr/bin/python3.7 $VENVDIR virtualenv --python=/usr/bin/python3.7 $VENVDIR
@@ -73,38 +76,28 @@ sudo ln -s $INV/artifacts/kubectl /usr/local/bin/kubectl
#or #or
export PATH=$PATH:$INV/artifacts export PATH=$PATH:$INV/artifacts
``` ```
If a vagrant run failed and you've made some changes to fix the issue causing the failure, here is how you would re-run ansible: If a vagrant run failed and you've made some changes to fix the issue causing the failure, here is how you would re-run ansible:
```
```ShellSession
ansible-playbook -vvv -i .vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory cluster.yml ansible-playbook -vvv -i .vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory cluster.yml
``` ```
If all went well, you can check whether everything is working as expected: If all went well, you can check whether everything is working as expected:
```
```ShellSession
kubectl get nodes kubectl get nodes
``` ```
The output should look like this: The output should look like this:
```
```ShellSession
$ kubectl get nodes $ kubectl get nodes
NAME STATUS ROLES AGE VERSION NAME STATUS ROLES AGE VERSION
kub-1 Ready master 32m v1.14.1 kub-1 Ready master 32m v1.14.1
kub-2 Ready master 31m v1.14.1 kub-2 Ready master 31m v1.14.1
kub-3 Ready <none> 31m v1.14.1 kub-3 Ready <none> 31m v1.14.1
``` ```
Another nice test is the following: Another nice test is the following:
```
```ShellSession
kubectl get po --all-namespaces -o wide kubectl get po --all-namespaces -o wide
``` ```
Which should yield something like the following: Which should yield something like the following:
```
```ShellSession
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kube-system coredns-97c4b444f-9wm86 1/1 Running 0 31m 10.233.66.2 kub-3 <none> <none> kube-system coredns-97c4b444f-9wm86 1/1 Running 0 31m 10.233.66.2 kub-3 <none> <none>
kube-system coredns-97c4b444f-g7hqx 0/1 Pending 0 30m <none> <none> <none> <none> kube-system coredns-97c4b444f-g7hqx 0/1 Pending 0 30m <none> <none> <none> <none>
@@ -127,12 +120,10 @@ kube-system nodelocaldns-2x7vh 1/1 Running 0
kube-system nodelocaldns-fpvnz 1/1 Running 0 31m 10.0.20.103 kub-3 <none> <none> kube-system nodelocaldns-fpvnz 1/1 Running 0 31m 10.0.20.103 kub-3 <none> <none>
kube-system nodelocaldns-h2f42 1/1 Running 0 31m 10.0.20.101 kub-1 <none> <none> kube-system nodelocaldns-h2f42 1/1 Running 0 31m 10.0.20.101 kub-1 <none> <none>
``` ```
Create clusteradmin rbac and get the login token for the dashboard: Create clusteradmin rbac and get the login token for the dashboard:
```
```ShellSession
kubectl create -f contrib/misc/clusteradmin-rbac.yml kubectl create -f contrib/misc/clusteradmin-rbac.yml
kubectl -n kube-system describe secret kubernetes-dashboard-token | grep 'token:' | grep -o '[^ ]\+$' kubectl -n kube-system describe secret kubernetes-dashboard-token | grep 'token:' | grep -o '[^ ]\+$'
``` ```
Copy it to the clipboard and now log in to the [dashboard](https://10.0.20.101:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login). Copy it to the clipboard and now log in to the [dashboard](https://10.0.20.101:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login).

@@ -1,6 +1,7 @@
# Configurable Parameters in Kubespray Configurable Parameters in Kubespray
================================
## Generic Ansible variables #### Generic Ansible variables
You can view facts gathered by Ansible automatically You can view facts gathered by Ansible automatically
[here](http://docs.ansible.com/ansible/playbooks_variables.html#information-discovered-from-systems-facts). [here](http://docs.ansible.com/ansible/playbooks_variables.html#information-discovered-from-systems-facts).
@@ -11,7 +12,7 @@ Some variables of note include:
* *ansible_default_ipv4.address*: IP address Ansible automatically chooses. * *ansible_default_ipv4.address*: IP address Ansible automatically chooses.
Generated based on the output from the command ``ip -4 route get 8.8.8.8`` Generated based on the output from the command ``ip -4 route get 8.8.8.8``
## Common vars that are used in Kubespray #### Common vars that are used in Kubespray
* *calico_version* - Specify version of Calico to use * *calico_version* - Specify version of Calico to use
* *calico_cni_version* - Specify version of Calico CNI plugin to use * *calico_cni_version* - Specify version of Calico CNI plugin to use
@@ -27,7 +28,7 @@ Some variables of note include:
* *nameservers* - Array of nameservers to use for DNS lookup * *nameservers* - Array of nameservers to use for DNS lookup
* *preinstall_selinux_state* - Set selinux state, permitted values are permissive and disabled. * *preinstall_selinux_state* - Set selinux state, permitted values are permissive and disabled.
## Addressing variables #### Addressing variables
* *ip* - IP to use for binding services (host var) * *ip* - IP to use for binding services (host var)
* *access_ip* - IP for other hosts to use to connect to. Often required when * *access_ip* - IP for other hosts to use to connect to. Often required when
@@ -44,7 +45,7 @@ Some variables of note include:
`loadbalancer_apiserver`. See more details in the `loadbalancer_apiserver`. See more details in the
[HA guide](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/ha-mode.md). [HA guide](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/ha-mode.md).
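A sketch of how these addressing host vars might look (a YAML inventory is used here purely for illustration; the host name and addresses are placeholders):
```yml
# inventory/mycluster/hosts.yml -- placeholder addresses
all:
  hosts:
    node1:
      ansible_host: 203.0.113.10
      ip: 10.3.0.1             # IP used for binding services on this host
      access_ip: 203.0.113.10  # IP other hosts use to connect to this one
```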
## Cluster variables #### Cluster variables
Kubernetes needs some parameters in order to get deployed. These are the Kubernetes needs some parameters in order to get deployed. These are the
following default cluster parameters: following default cluster parameters:
@@ -85,7 +86,7 @@ Note, if cloud providers have any use of the ``10.233.0.0/16``, like instances'
private addresses, make sure to pick other values for ``kube_service_addresses`` private addresses, make sure to pick other values for ``kube_service_addresses``
and ``kube_pods_subnet``, for example from the ``172.18.0.0/16``. and ``kube_pods_subnet``, for example from the ``172.18.0.0/16``.
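For example, the stock defaults (shown only to make the relationship concrete) keep both ranges inside ``10.233.0.0/16`` and non-overlapping:
```yml
# group_vars/k8s-cluster.yml -- default values, shown for illustration
kube_service_addresses: 10.233.0.0/18
kube_pods_subnet: 10.233.64.0/18
```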
## DNS variables #### DNS variables
By default, hosts are set up with 8.8.8.8 as an upstream DNS server and all By default, hosts are set up with 8.8.8.8 as an upstream DNS server and all
other settings from your existing /etc/resolv.conf are lost. Set the following other settings from your existing /etc/resolv.conf are lost. Set the following
@@ -99,7 +100,7 @@ variables to match your requirements.
For more information, see [DNS For more information, see [DNS
Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.md). Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.md).
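A hedged sketch of the kind of DNS overrides meant here (the variable names follow Kubespray's DNS configuration, but the resolver addresses and search domain are placeholders):
```yml
# group_vars/all/all.yml -- placeholder values
upstream_dns_servers:
  - 8.8.8.8
  - 1.1.1.1
searchdomains:
  - cluster.example.com
```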
## Other service variables #### Other service variables
* *docker_options* - Commonly used to set * *docker_options* - Commonly used to set
``--insecure-registry=myregistry.mydomain:5000`` ``--insecure-registry=myregistry.mydomain:5000``
@@ -124,24 +125,20 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m
* *node_labels* - Labels applied to nodes via kubelet --node-labels parameter. * *node_labels* - Labels applied to nodes via kubelet --node-labels parameter.
For example, labels can be set in the inventory as variables or more widely in group_vars. For example, labels can be set in the inventory as variables or more widely in group_vars.
*node_labels* can be defined either as a dict or a comma-separated labels string: *node_labels* can be defined either as a dict or a comma-separated labels string:
```
```yml
node_labels: node_labels:
label1_name: label1_value label1_name: label1_value
label2_name: label2_value label2_name: label2_value
node_labels: "label1_name=label1_value,label2_name=label2_value" node_labels: "label1_name=label1_value,label2_name=label2_value"
``` ```
* *node_taints* - Taints applied to nodes via kubelet --register-with-taints parameter. * *node_taints* - Taints applied to nodes via kubelet --register-with-taints parameter.
For example, taints can be set in the inventory as variables or more widely in group_vars. For example, taints can be set in the inventory as variables or more widely in group_vars.
*node_taints* has to be defined as a list of strings in format `key=value:effect`, e.g.: *node_taints* has to be defined as a list of strings in format `key=value:effect`, e.g.:
```
```yml
node_taints: node_taints:
- "node.example.com/external=true:NoSchedule" - "node.example.com/external=true:NoSchedule"
``` ```
* *podsecuritypolicy_enabled* - When set to `true`, enables the PodSecurityPolicy admission controller and defines two policies `privileged` (applying to all resources in `kube-system` namespace and kubelet) and `restricted` (applying to all other namespaces). * *podsecuritypolicy_enabled* - When set to `true`, enables the PodSecurityPolicy admission controller and defines two policies `privileged` (applying to all resources in `kube-system` namespace and kubelet) and `restricted` (applying to all other namespaces).
Addons deployed in kube-system namespaces are handled. Addons deployed in kube-system namespaces are handled.
* *kubernetes_audit* - When set to `true`, enables Auditing. * *kubernetes_audit* - When set to `true`, enables Auditing.
@@ -154,34 +151,22 @@ node_taints:
By default, the `audit_policy_file` contains [default rules](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/kubernetes/master/templates/apiserver-audit-policy.yaml.j2) that can be overridden with the `audit_policy_custom_rules` variable. By default, the `audit_policy_file` contains [default rules](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/kubernetes/master/templates/apiserver-audit-policy.yaml.j2) that can be overridden with the `audit_policy_custom_rules` variable.
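Both switches described above are plain booleans in the inventory; a minimal sketch:
```yml
# group_vars/k8s-cluster.yml
podsecuritypolicy_enabled: true  # adds the 'privileged' and 'restricted' policies
kubernetes_audit: true           # enables API server auditing with the default policy file
```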
### Custom flags for Kube Components ##### Custom flags for Kube Components
For all kube components, custom flags can be passed in. This allows for edge cases where users need changes to the default deployment that may not be applicable to all deployments. This can be done by providing a list of flags. The `kubelet_node_custom_flags` apply kubelet settings only to nodes and not masters. Example: For all kube components, custom flags can be passed in. This allows for edge cases where users need changes to the default deployment that may not be applicable to all deployments. This can be done by providing a list of flags. The `kubelet_node_custom_flags` apply kubelet settings only to nodes and not masters. Example:
```
```yml
kubelet_custom_flags: kubelet_custom_flags:
- "--eviction-hard=memory.available<100Mi" - "--eviction-hard=memory.available<100Mi"
- "--eviction-soft-grace-period=memory.available=30s" - "--eviction-soft-grace-period=memory.available=30s"
- "--eviction-soft=memory.available<300Mi" - "--eviction-soft=memory.available<300Mi"
``` ```
The possible vars are: The possible vars are:
* *apiserver_custom_flags*
* *controller_mgr_custom_flags*
* *scheduler_custom_flags*
* *kubelet_custom_flags* * *kubelet_custom_flags*
* *kubelet_node_custom_flags* * *kubelet_node_custom_flags*
Extra flags for the API server, controller, and scheduler components can be specified using these variables, #### User accounts
in the form of dicts of key-value pairs of configuration parameters that will be inserted into the kubeadm YAML config file:
* *kube_kubeadm_apiserver_extra_args*
* *kube_kubeadm_controller_extra_args*
* *kube_kubeadm_scheduler_extra_args*
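A sketch of the dict form described above (the specific flags and values are illustrative assumptions, not recommended settings):
```yml
# group_vars/k8s-cluster.yml -- illustrative flags only
kube_kubeadm_apiserver_extra_args:
  request-timeout: "2m0s"
kube_kubeadm_controller_extra_args:
  node-monitor-grace-period: "40s"
kube_kubeadm_scheduler_extra_args:
  v: "2"
```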
## App variables
* *helm_version* - Defaults to v2.x, set to a v3 version (e.g. `v3.0.1` ) to install Helm 3.x (no more Tiller!). When changing this to 3 in an existing cluster, Tiller will be left alone and has to be removed manually.
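For instance (a sketch; assumes Helm is already enabled in your inventory):
```yml
# inventory group_vars (e.g. k8s-cluster/addons.yml)
helm_enabled: true
helm_version: v3.0.1
```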
## User accounts
By default, a user with admin rights is created, named `kube`. By default, a user with admin rights is created, named `kube`.
The password can be viewed after deployment by looking at the file The password can be viewed after deployment by looking at the file

@@ -1,7 +1,6 @@
# vSphere cloud provider # vSphere cloud provider
Kubespray can be deployed with vSphere as Cloud provider. This feature supports Kubespray can be deployed with vSphere as Cloud provider. This feature supports
- Volumes - Volumes
- Persistent Volumes - Persistent Volumes
- Storage Classes and provisioning of volumes. - Storage Classes and provisioning of volumes.
@@ -12,16 +11,15 @@ Kubespray can be deployed with vSphere as Cloud provider. This feature supports
You first need to configure your vSphere environment by following the [official documentation](https://kubernetes.io/docs/getting-started-guides/vsphere/#vsphere-cloud-provider). You first need to configure your vSphere environment by following the [official documentation](https://kubernetes.io/docs/getting-started-guides/vsphere/#vsphere-cloud-provider).
After this step you should have: After this step you should have:
- UUID activated for each VM where Kubernetes will be deployed - UUID activated for each VM where Kubernetes will be deployed
- A vSphere account with required privileges - A vSphere account with required privileges
If you intend to leverage the [zone and region node labeling](https://kubernetes.io/docs/reference/kubernetes-api/labels-annotations-taints/#failure-domain-beta-kubernetes-io-region), create a tag category for both the zone and region in vCenter. The tags can then be applied at the host, cluster, datacenter, or folder level, and the cloud provider will walk the hierarchy to extract and apply the labels to the Kubernetes nodes. If you intend to leverage the [zone and region node labeling](https://kubernetes.io/docs/reference/kubernetes-api/labels-annotations-taints/#failure-domain-beta-kubernetes-io-region), create a tag category for both the zone and region in vCenter. The tags can then be applied at the host, cluster, datacenter, or folder level, and the cloud provider will walk the hierarchy to extract and apply the labels to the Kubernetes nodes.
## Kubespray configuration ## Kubespray configuration
First you must define the cloud provider in `inventory/sample/group_vars/all.yml` and set it to `vsphere`. First you must define the cloud provider in `inventory/sample/group_vars/all.yml` and set it to `vsphere`.
```yml ```yml
cloud_provider: vsphere cloud_provider: vsphere
``` ```
@@ -63,8 +61,7 @@ vsphere_resource_pool: "K8s-Pool"
## Deployment ## Deployment
Once the configuration is set, you can execute the playbook again to apply the new configuration Once the configuration is set, you can execute the playbook again to apply the new configuration
```
```ShellSession
cd kubespray cd kubespray
ansible-playbook -i inventory/sample/hosts.ini -b -v cluster.yml ansible-playbook -i inventory/sample/hosts.ini -b -v cluster.yml
``` ```

@@ -1,4 +1,5 @@
# Weave Weave
=======
Weave 2.0.1 is supported by kubespray Weave 2.0.1 is supported by kubespray
@@ -10,7 +11,7 @@ Weave encryption is supported for all communication
* To use Weave encryption, specify a strong password (if no password, no encryption) * To use Weave encryption, specify a strong password (if no password, no encryption)
```ShellSession ```
# In file ./inventory/sample/group_vars/k8s-cluster.yml # In file ./inventory/sample/group_vars/k8s-cluster.yml
weave_password: EnterPasswordHere weave_password: EnterPasswordHere
``` ```
@@ -21,19 +22,18 @@ Weave is deployed by kubespray using a daemonSet
* Check the status of Weave containers * Check the status of Weave containers
```ShellSession ```
# From client # From client
kubectl -n kube-system get pods | grep weave kubectl -n kube-system get pods | grep weave
# output # output
weave-net-50wd2 2/2 Running 0 2m weave-net-50wd2 2/2 Running 0 2m
weave-net-js9rb 2/2 Running 0 2m weave-net-js9rb 2/2 Running 0 2m
``` ```
There must be as many pods as nodes (here the cluster has 2 nodes, so there are 2 weave pods). There must be as many pods as nodes (here the cluster has 2 nodes, so there are 2 weave pods).
* Check status of weave (connection,encryption ...) for each node * Check status of weave (connection,encryption ...) for each node
```ShellSession ```
# On nodes # On nodes
curl http://127.0.0.1:6784/status curl http://127.0.0.1:6784/status
# output on node1 # output on node1
@@ -57,14 +57,14 @@ Version: 2.0.1 (up to date; next check at 2017/08/01 13:51:34)
* Check parameters of weave for each node * Check parameters of weave for each node
```ShellSession ```
# On nodes # On nodes
ps -aux | grep weaver ps -aux | grep weaver
# output on node1 (here its use seed mode) # output on node1 (here its use seed mode)
root 8559 0.2 3.0 365280 62700 ? Sl 08:25 0:00 /home/weave/weaver --name=fa:16:3e:b3:d6:b2 --port=6783 --datapath=datapath --host-root=/host --http-addr=127.0.0.1:6784 --status-addr=0.0.0.0:6782 --docker-api= --no-dns --db-prefix=/weavedb/weave-net --ipalloc-range=10.233.64.0/18 --nickname=node1 --ipalloc-init seed=fa:16:3e:b3:d6:b2,fa:16:3e:f0:50:53 --conn-limit=30 --expect-npc 192.168.208.28 192.168.208.19 root 8559 0.2 3.0 365280 62700 ? Sl 08:25 0:00 /home/weave/weaver --name=fa:16:3e:b3:d6:b2 --port=6783 --datapath=datapath --host-root=/host --http-addr=127.0.0.1:6784 --status-addr=0.0.0.0:6782 --docker-api= --no-dns --db-prefix=/weavedb/weave-net --ipalloc-range=10.233.64.0/18 --nickname=node1 --ipalloc-init seed=fa:16:3e:b3:d6:b2,fa:16:3e:f0:50:53 --conn-limit=30 --expect-npc 192.168.208.28 192.168.208.19
``` ```
## Consensus mode (default mode) ### Consensus mode (default mode)
This mode is best to use on static size cluster This mode is best to use on static size cluster
@@ -72,18 +72,18 @@ This mode is best to use on static size cluster
This mode is best to use on a dynamically sized cluster This mode is best to use on a dynamically sized cluster
The seed mode also allows multi-clouds and hybrid on-premise/cloud clusters deployment. The seed mode also allows multi-clouds and hybrid on-premise/cloud clusters deployement.
* Switch from consensus mode to seed mode * Switch from consensus mode to seed mode
```ShellSession ```
# In file ./inventory/sample/group_vars/k8s-cluster.yml # In file ./inventory/sample/group_vars/k8s-cluster.yml
weave_mode_seed: true weave_mode_seed: true
``` ```
These two variables are only used when `weave_mode_seed` is set to `true` (**/!\ do not manually change these values**) These two variables are only used when `weave_mode_seed` is set to `true` (**/!\ do not manually change these values**)
```ShellSession ```
# In file ./inventory/sample/group_vars/k8s-cluster.yml # In file ./inventory/sample/group_vars/k8s-cluster.yml
weave_seed: uninitialized weave_seed: uninitialized
weave_peers: uninitialized weave_peers: uninitialized

View File

@@ -42,7 +42,7 @@ docker_rpm_keepcache: 0
# - https://registry.docker-cn.com # - https://registry.docker-cn.com
# - https://mirror.aliyuncs.com # - https://mirror.aliyuncs.com
## If non-empty will override default system MountFlags value. ## If non-empty will override default system MounFlags value.
## This option takes a mount propagation flag: shared, slave ## This option takes a mount propagation flag: shared, slave
## or private, which control whether mounts in the file system ## or private, which control whether mounts in the file system
## namespace set up for docker will receive or propagate mounts ## namespace set up for docker will receive or propagate mounts
@@ -52,3 +52,19 @@ docker_rpm_keepcache: 0
## A string of extra options to pass to the docker daemon. ## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear. ## This string should be exactly as you wish it to appear.
docker_options: >- docker_options: >-
{%- if docker_insecure_registries is defined %}
{{ docker_insecure_registries | map('regex_replace', '^(.*)$', '--insecure-registry=\1' ) | list | join(' ') }}
{%- endif %}
{% if docker_registry_mirrors is defined %}
{{ docker_registry_mirrors | map('regex_replace', '^(.*)$', '--registry-mirror=\1' ) | list | join(' ') }}
{%- endif %}
{%- if docker_version != "latest" and docker_version is version('17.05', '<') %}
--graph={{ docker_daemon_graph }} {% if ansible_os_family not in ["openSUSE Leap", "openSUSE Tumbleweed", "Suse"] %}{{ docker_log_opts }}{% endif %}
{%- else %}
--data-root={{ docker_daemon_graph }} {% if ansible_os_family not in ["openSUSE Leap", "openSUSE Tumbleweed", "Suse"] %}{{ docker_log_opts }}{% endif %}
{%- endif %}
{%- if ansible_architecture == "aarch64" and ansible_os_family == "RedHat" %}
--add-runtime docker-runc=/usr/libexec/docker/docker-runc-current
--default-runtime=docker-runc --exec-opt native.cgroupdriver=systemd
--userland-proxy-path=/usr/libexec/docker/docker-proxy-current --signature-verification=false
{%- endif -%}
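For context, a sketch of how the two optional lists referenced by this template might be supplied (the registry addresses reuse examples that already appear in these defaults):
```yml
# group_vars/all/docker.yml -- illustrative registries
docker_insecure_registries:
  - myregistry.mydomain:5000
docker_registry_mirrors:
  - https://registry.docker-cn.com
  - https://mirror.aliyuncs.com
```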

@@ -8,7 +8,7 @@
# oci_vnc_id: # oci_vnc_id:
# oci_subnet1_id: # oci_subnet1_id:
# oci_subnet2_id: # oci_subnet2_id:
## Override these default/optional behaviors if you wish ## Overide these default/optional behaviors if you wish
# oci_security_list_management: All # oci_security_list_management: All
## If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples. ## If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples.
# oci_security_lists: # oci_security_lists:

@@ -14,8 +14,3 @@
# openstack_lbaas_monitor_delay: "1m" # openstack_lbaas_monitor_delay: "1m"
# openstack_lbaas_monitor_timeout: "30s" # openstack_lbaas_monitor_timeout: "30s"
# openstack_lbaas_monitor_max_retries: "3" # openstack_lbaas_monitor_max_retries: "3"
## To use Cinder CSI plugin to provision volumes set this value to true
## Make sure to source in the openstack credentials
# cinder_csi_enabled: true
# cinder_csi_controller_replicas: 1

@@ -98,8 +98,6 @@ ingress_publish_status_address: ""
# 9000: "default/example-go:8080" # 9000: "default/example-go:8080"
# ingress_nginx_configmap_udp_services: # ingress_nginx_configmap_udp_services:
# 53: "kube-system/coredns:53" # 53: "kube-system/coredns:53"
# ingress_nginx_extra_args:
# - --default-ssl-certificate=default/foo-tls
# Cert manager deployment # Cert manager deployment
cert_manager_enabled: false cert_manager_enabled: false

@@ -20,10 +20,10 @@ kube_users_dir: "{{ kube_config_dir }}/users"
kube_api_anonymous_auth: true kube_api_anonymous_auth: true
## Change this to use another Kubernetes version, e.g. a current beta release ## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.16.7 kube_version: v1.15.11
# kubernetes image repo define # kubernetes image repo define
kube_image_repo: "{{ gcr_image_repo }}/google-containers" kube_image_repo: "gcr.io/google-containers"
# Where the binaries will be downloaded. # Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G) # Note: ensure that you've enough disk space (about 1G)
@@ -101,10 +101,6 @@ kube_apiserver_insecure_port: 0 # (disabled)
# Can be ipvs, iptables # Can be ipvs, iptables
kube_proxy_mode: ipvs kube_proxy_mode: ipvs
# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface
# must be set to true for MetalLB to work
kube_proxy_strict_arp: false
# A string slice of values which specify the addresses to use for NodePorts. # A string slice of values which specify the addresses to use for NodePorts.
# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). # Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32).
# The default empty string slice ([]) means to use all local addresses. # The default empty string slice ([]) means to use all local addresses.
@@ -225,7 +221,7 @@ persistent_volumes_enabled: false
# nvidia_accelerator_enabled: true # nvidia_accelerator_enabled: true
## Nvidia GPU driver install. Install will by done by a (init) pod running as a daemonset. ## Nvidia GPU driver install. Install will by done by a (init) pod running as a daemonset.
## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2' ## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2'
## Array with nvidia_gpu_nodes, leave empty or comment if you don't want to install drivers. ## Array with nvida_gpu_nodes, leave empty or comment if you dont't want to install drivers.
## Labels and taints won't be set to nodes if they are not in the array. ## Labels and taints won't be set to nodes if they are not in the array.
# nvidia_gpu_nodes: # nvidia_gpu_nodes:
# - kube-gpu-001 # - kube-gpu-001

@@ -11,9 +11,6 @@
# add default ippool name # add default ippool name
# calico_pool_name: "default-pool" # calico_pool_name: "default-pool"
# add default ippool blockSize (defaults kube_network_node_prefix)
# calico_pool_blocksize: 24
# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise) # add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise)
# calico_pool_cidr: 1.2.3.4/5 # calico_pool_cidr: 1.2.3.4/5
@@ -34,8 +31,5 @@
# Use typha (only with kdd) # Use typha (only with kdd)
# typha_enabled: false # typha_enabled: false
# Generate TLS certs for secure typha<->calico-node communication
# typha_secure: false
# Number of typha replicas # Number of typha replicas
# typha_replicas: 1 # typha_replicas: 1

@@ -1,7 +1,7 @@
# see roles/network_plugin/canal/defaults/main.yml # see roles/network_plugin/canal/defaults/main.yml
# The interface used by canal for host <-> host communication. # The interface used by canal for host <-> host communication.
# If left blank, then the interface is chosen using the node's # If left blank, then the interface is chosing using the node's
# default route. # default route.
# canal_iface: "" # canal_iface: ""

@@ -34,14 +34,14 @@
- { role: remove-node/pre-remove, tags: pre-remove } - { role: remove-node/pre-remove, tags: pre-remove }
- hosts: "{{ node | default('kube-node') }}" - hosts: "{{ node | default('kube-node') }}"
gather_facts: yes gather_facts: no
roles: roles:
- { role: kubespray-defaults } - { role: kubespray-defaults }
- { role: reset, tags: reset, when: reset_nodes|default(True) } - { role: reset, tags: reset, when: reset_nodes|default(True) }
# Currently cannot remove first master or etcd # Currently cannot remove first master or etcd
- hosts: "{{ node | default('kube-master[1:]:etcd[:1]') }}" - hosts: "{{ node | default('kube-master[1:]:etcd[:1]') }}"
gather_facts: yes gather_facts: no
roles: roles:
- { role: kubespray-defaults } - { role: kubespray-defaults }
- { role: remove-node/post-remove, tags: post-remove } - { role: remove-node/post-remove, tags: post-remove }

@@ -1,7 +1,6 @@
--- ---
- hosts: localhost - hosts: localhost
become: no become: no
gather_facts: False
tasks: tasks:
- name: "Check ansible version >=2.7.8" - name: "Check ansible version >=2.7.8"
assert: assert:

Some files were not shown because too many files have changed in this diff.